http-proxy.py
import socket
import re
import threading
import time

pl = 80  # printing length for debugging
cache_map = {}


def validate(buffer):
    temp = buffer.split(b'\r\n')
    request_line = temp[0].decode()
    request_headers = []
    for item in temp[1:]:
        if len(item) != 0:
            request_headers.append(item.decode())

    ### CHECKING request line
    try:
        method, path, version = request_line.split()
    except ValueError:
        return None, None, None, "400 Bad Request"
    if method != "GET" or not re.match(r'http/', version.lower()):
        return None, None, None, "501 Not Implemented"

    port = 80  # set default port
    ### if a full path is provided, split it into a relative URL + Host header
    if path[0] != "/":
        x = re.match(r'(https?:\/\/)?(.+?)(:[0-9]*)?(\/.*)', path)
        # group 2: host name, group 3: port number, group 4: relative path
        if x.group(2):
            host_name = x.group(2)
            host_header = host_name
        if x.group(3):
            port = x.group(3)[1:]
            host_header = host_name + x.group(3)
        relative_url = x.group(4)
        ### if a Host header was provided, remove it
        for item in request_headers:
            if item.lower().find("host:") != -1:
                request_headers.remove(item)
                break
        ### add the host header
        host_header = "Host: " + host_header
        request_headers.insert(0, host_header)
        path = relative_url
    ### if a relative URL is provided, then a Host header must exist
    else:
        host_header_flag = False
        for item in request_headers:
            if item.lower().find("host:") != -1:
                host_name = item[6:]
                ### try to extract the port number if provided
                try:
                    x = re.match(r'(.*:)([0-9]*)?', host_name)
                    host_name = x.group(1)[0:-1]
                    port = x.group(2)
                except AttributeError:  # no port in the Host header
                    pass
                host_header_flag = True
                break
        if not host_header_flag:
            return None, None, None, "400 Bad Request, Host header not provided"

    ### check that headers are properly formatted
    for item in request_headers:
        if not re.match(r'.*\: .*', item):
            return None, None, None, "400 Bad Request, Header Not properly formatted"

    space = b' '
    crlf = b'\r\n'
    packet = method.encode() + space + path.encode() + \
        space + version.encode() + crlf
    for item in request_headers:
        packet = packet + item.encode() + crlf
    packet = packet + crlf
    return packet, host_name, int(port), "0"


def error_response(error_code, client_socket, client_address):
    html = f"""<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html><head>
<title>{error_code}</title>
</head><body>
<h1>PROXY ERROR</h1>
<p>{error_code}.<br />
</p>
</body></html>"""
    size = str(len(html.encode()))
    crlf = b'\r\n'
    error_msg = b'HTTP/1.0 ' + error_code.encode() + crlf + b'Content-Type: text/html; charset=UTF-8' + \
        crlf + b'Content-Length: ' + size.encode() + crlf + crlf
    error_msg = error_msg + html.encode() + b'\n'
    print("PROXY RESPONDED WITH AN ERROR TO ", client_address, " : ", error_code, "\n")
    client_socket.sendall(error_msg)


def my_recv(s):
    s.settimeout(0.5)
    timeout = 3
    response = b''
    begin = time.time()
    while True:
        # if we already have a response, stop after `timeout` seconds of silence
        if len(response) != 0 and time.time() - begin > timeout:
            break
        # if there is no response yet, wait twice the timeout before giving up
        elif time.time() - begin > timeout * 2:
            return 0
        try:
            temp = s.recv(10000)
            if len(temp) != 0:
                response = response + temp
                begin = time.time()
        except socket.timeout:
            pass
    return response


def ok_response(packet, host, port, client_socket, client_address):
    print("SENDING REQUEST OF ", client_address, " : ", packet[0:pl], "\n")
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((host, port))
    except OSError:
        print("ERROR CONNECTING TO HOST, PORT SPECIFIED")
        return 0  # treat a failed connection like an empty response so it is not cached
    s.sendall(packet)
    response = my_recv(s)
    s.close()
    if response == 0:
        print("ERROR NO RESPONSE FROM REMOTE SERVER")
        return 0
    print("RESPONSE BACK TO ", client_address, " : ", response[0:pl], "\n")
    client_socket.sendall(response)
    return response


### if a Date header is present, don't store the response in the cache
def use_cache(request, client_socket):
    if request in cache_map.keys():
        response = cache_map[request]
        print("\n[From Cache] RESPONSE BACK TO ", client_socket.getpeername(), " : ", response[0:pl], "\n")
        client_socket.sendall(response)
        return 1
    else:
        return 0


def store_cache(request, response):
    cache_map[request] = response


def main(client_socket, client_address):
    buffer = b''
    while True:
        data = client_socket.recv(50 * 1024)
        if not data:  # client closed the connection before a full request arrived
            client_socket.close()
            break
        buffer = buffer + data
        if buffer.find(b'\r\n\r\n') > 0:
            print("RECEIVED FROM ", client_address, " : ", buffer[0:pl])
            packet, host, port, error = validate(buffer)
            if error != "0":
                error_response(error, client_socket, client_address)
            elif use_cache(packet, client_socket) == 0:
                response = ok_response(packet, host, port, client_socket, client_address)
                if response != 0:  # a response exists
                    store_cache(packet, response)
            client_socket.close()
            break


def acceptor():
    while True:
        client_socket, client_address = server_socket.accept()
        x = threading.Thread(target=main, args=(client_socket, client_address))
        x.start()


if __name__ == "__main__":
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind(("127.0.0.21", 2121))
    server_socket.listen(100)
    print("\nWaiting for clients...\n")
    acceptor()
threading_event.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:Lyon
import threading
import time
import random


def conn_mysql():
    count = 1
    while not event.is_set():
        if count > 3:
            raise TimeoutError('Connection timeout...')
        print('%s %sth attempt to connect' % (threading.current_thread().getName(), count))
        event.wait(0.5)
        count += 1
    print('%s connect successfully' % threading.current_thread().getName())


def check_mysql():
    print('%s is checking mysql' % threading.current_thread().getName())
    time.sleep(random.randint(2, 4))
    event.set()


if __name__ == '__main__':
    event = threading.Event()
    conn1 = threading.Thread(target=conn_mysql)
    conn2 = threading.Thread(target=conn_mysql)
    check = threading.Thread(target=check_mysql)
    conn1.start()
    conn2.start()
    check.start()
launcher.py
#!/usr/bin/env python3
# Launcher for depthai_demo.py which provides updating capabilities

# Standard imports
import os, sys, subprocess, time, threading, argparse, datetime
import re
from pathlib import Path
# Import splash screen
from splash_screen import SplashScreen
# Import version parser
from packaging import version
# PyQt5
from PyQt5 import QtCore, QtGui, QtWidgets

# Constants
SCRIPT_DIRECTORY = Path(os.path.abspath(os.path.dirname(__file__)))
DEPTHAI_DEMO_SCRIPT = 'depthai_demo.py'
DEPTHAI_INSTALL_REQUIREMENTS_SCRIPT = 'install_requirements.py'
DEFAULT_GIT_PATH = 'git'
DEPTHAI_REPOSITORY_NAME = 'depthai'
DEPTHAI_REMOTE_REPOSITORY_URL = 'https://github.com/luxonis/depthai.git'
LOG_FILE_PATH = Path(SCRIPT_DIRECTORY/'log.dat')

# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--repo', help='Path to DepthAI Git repository', default=SCRIPT_DIRECTORY/'..')
parser.add_argument('-g', '--git', help='Path to Git executable. Default: %(default)s', default=DEFAULT_GIT_PATH)
parser.add_argument('--disable-git', help='Disable git requirement and updating capability', default=False, action='store_true')
args = parser.parse_args()

pathToDepthaiRepository = args.repo
gitExecutable = args.git
if args.disable_git:
    gitExecutable = ''

# Create a logger
class Logger(object):
    def __init__(self):
        self.terminal = sys.stdout
        self.log = open(LOG_FILE_PATH, 'a')
    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)
    def flush(self):
        self.terminal.flush()
        self.log.flush()

# Write both stdout and stderr to log files
# Note - doesn't work for subprocesses.
# Do proper fd dup for that case
logger = Logger()
sys.stdout = logger
sys.stderr = logger

print(f'========= Starting: Launcher ({datetime.datetime.now()}) =========')

qApp = QtWidgets.QApplication(['DepthAI Launcher'])
# Set style
#print(PyQt5.QtWidgets.QStyleFactory.keys())
#qApp.setStyle('Fusion')
# Set default Window icon
qApp.setWindowIcon(QtGui.QIcon(str(SCRIPT_DIRECTORY/'splash2.png')))
# Create splash screen
splashScreen = SplashScreen(str(SCRIPT_DIRECTORY/'splash2.png'))

def closeSplash():
    splashScreen.hide()

class Worker(QtCore.QThread):
    signalUpdateQuestion = QtCore.pyqtSignal(str, str)
    sigInfo = QtCore.pyqtSignal(str, str)
    sigCritical = QtCore.pyqtSignal(str, str)
    sigWarning = QtCore.pyqtSignal(str, str)
    # Should update if a new version is available?
    shouldUpdate = True

    @QtCore.pyqtSlot(str, str)
    def updateQuestion(self, title, message):
        ret = QtWidgets.QMessageBox.question(splashScreen, title, message, QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.Yes)
        if ret == QtWidgets.QMessageBox.Yes:
            self.shouldUpdate = True
            return True
        else:
            self.shouldUpdate = False
            return False

    @QtCore.pyqtSlot(str, str)
    def showInformation(self, title, message):
        QtWidgets.QMessageBox.information(splashScreen, title, message)

    @QtCore.pyqtSlot(str, str)
    def showWarning(self, title, message):
        QtWidgets.QMessageBox.warning(splashScreen, title, message)

    @QtCore.pyqtSlot(str, str)
    def showCritical(self, title, message):
        QtWidgets.QMessageBox.critical(splashScreen, title, message)

    def __init__(self, parent=None):
        QtCore.QThread.__init__(self, parent)
        self.signalUpdateQuestion[str, str].connect(self.updateQuestion, QtCore.Qt.BlockingQueuedConnection)
        self.sigInfo[str, str].connect(self.showInformation, QtCore.Qt.BlockingQueuedConnection)
        self.sigCritical[str, str].connect(self.showCritical, QtCore.Qt.BlockingQueuedConnection)
        self.sigWarning[str, str].connect(self.showWarning, QtCore.Qt.BlockingQueuedConnection)

    def __del__(self):
        self.exiting = True
        try:
            self.wait()
        except Exception:
            pass

    def run(self):
        try:
            # New version available?
            newVersionAvailable = False
            # Current version name
            currentVersion = 'Unknown'
            newVersion = 'Unknown'
            newVersionTag = 'vUnknown'
            lastCall = ''
            try:
                # Check if 'disable git' option was specified
                if gitExecutable != '':
                    # Check if repository exists
                    if os.path.isdir(pathToDepthaiRepository) and subprocess.run([gitExecutable, 'status'], cwd=pathToDepthaiRepository).returncode == 0:
                        pass
                    else:
                        # DepthAI repo not available, clone first
                        splashScreen.updateSplashMessage('Cloning DepthAI Repository ...')
                        splashScreen.enableHeartbeat(True)
                        # Repository doesn't exist, clone first
                        subprocess.check_call([gitExecutable, 'clone', DEPTHAI_REMOTE_REPOSITORY_URL, DEPTHAI_REPOSITORY_NAME], cwd=SCRIPT_DIRECTORY)

                    # Fetch changes
                    # Save error of a possible no-internet-connection scenario
                    lastCall = subprocess.run([gitExecutable, 'fetch'], cwd=pathToDepthaiRepository, stderr=subprocess.PIPE)
                    lastCall.check_returncode()

                    # Get all available versions
                    availableDepthAIVersions = []
                    proc = subprocess.Popen([gitExecutable, 'tag', '-l'], cwd=pathToDepthaiRepository, stdout=subprocess.PIPE)
                    while True:
                        line = proc.stdout.readline()
                        if not line:
                            break
                        # Check that the tag refers to DepthAI demo and not SDK
                        tag = line.rstrip().decode()
                        # Check that tag is actually a version
                        if type(version.parse(tag)) is version.Version:
                            availableDepthAIVersions.append(tag)
                    print(f'Available DepthAI versions: {availableDepthAIVersions}')

                    # If any available versions
                    if len(availableDepthAIVersions) == 0:
                        raise RuntimeError('No available depthai versions found')

                    # Assuming versions are available
                    # Get latest version
                    newVersionTag = availableDepthAIVersions[0]
                    newVersion = str(version.parse(newVersionTag))
                    for ver in availableDepthAIVersions:
                        if version.parse(ver) > version.parse(newVersionTag):
                            newVersionTag = ver
                            newVersion = str(version.parse(ver))

                    # Check current tag
                    ret = subprocess.run([gitExecutable, 'describe', '--tags'], cwd=pathToDepthaiRepository, stdout=subprocess.PIPE, check=True)
                    tag = ret.stdout.decode()
                    # See if it's a DepthAI version tag (if not, then suggest to update)
                    if len(tag.split('-')) == 1:
                        currentVersion = 'Unknown'
                        if type(version.parse(tag)) is version.Version:
                            print(f'Current tag: {tag}, ver: {str(version.parse(tag))}')
                            currentVersion = str(version.parse(tag))
                            # Check if latest version is newer than current
                            if version.parse(newVersionTag) > version.parse(tag):
                                newVersionAvailable = True
                            else:
                                newVersionAvailable = False
                        else:
                            newVersionAvailable = True
                    else:
                        newVersionAvailable = True

                    # If a new version is available, ask to update
                    if newVersionAvailable == True:
                        # Ask user whether to update
                        # Update by default
                        title = 'Update Available'
                        message = f'Version {newVersion} is available.\nCurrent version is {currentVersion}\nUpdate?'
                        print(f'Message Box ({title}): {message}')
                        self.signalUpdateQuestion.emit(title, message)
                        print(f'Should update? {self.shouldUpdate}')
                        didUpdate = False
                        if self.shouldUpdate == True:
                            # DepthAI repo not available, clone first
                            splashScreen.updateSplashMessage('Updating DepthAI Repository ...')
                            splashScreen.enableHeartbeat(True)
                            lastCall = subprocess.run([gitExecutable, 'status', '--porcelain'], cwd=pathToDepthaiRepository, stdout=subprocess.PIPE)
                            filesToRemove = lastCall.stdout.decode()
                            lastCall = subprocess.run([gitExecutable, 'checkout', '--recurse-submodules', newVersionTag], cwd=pathToDepthaiRepository, stderr=subprocess.PIPE)
                            if lastCall.returncode != 0 or filesToRemove != "":
                                # Uncommitted changes - redo with a prompt to force
                                # Or unclean working directory
                                errMessage = lastCall.stderr.decode()
                                title = 'Force Update'
                                message = f'DepthAI Repository has changes. Do you want to override the changes?'
                                if lastCall.returncode != 0:
                                    message = f'{message}\n{errMessage}'
                                if filesToRemove != "":
                                    message = f'{message}\nWould also remove:\n{filesToRemove}'
                                print(f'Message Box ({title}): {message}')
                                self.signalUpdateQuestion.emit(title, message)
                                if self.shouldUpdate == True:
                                    lastCall = subprocess.run([gitExecutable, 'checkout', '--recurse-submodules', '-f', newVersionTag], cwd=pathToDepthaiRepository, stderr=subprocess.PIPE)
                                    checkoutSuccess = lastCall.returncode == 0
                                    lastCall = subprocess.run([gitExecutable, 'clean', '-fd'], cwd=pathToDepthaiRepository, stderr=subprocess.PIPE)
                                    cleanSuccess = lastCall.returncode == 0
                                    if checkoutSuccess == False or cleanSuccess == False:
                                        # Stop animation
                                        splashScreen.updateSplashMessage('')
                                        splashScreen.enableHeartbeat(False)
                                        # Couldn't update. Issue a warning
                                        errMessage = lastCall.stderr.decode()
                                        title = 'Update Aborted'
                                        message = f'DepthAI Repository could not be updated.\n{errMessage}'
                                        print(f'Message Box ({title}): {message}')
                                        self.sigWarning.emit(title, message)
                                    else:
                                        didUpdate = True
                            else:
                                didUpdate = True

                        if didUpdate:
                            currentArgs = []
                            if len(sys.argv) >= 1:
                                currentArgs = sys.argv[1:]
                            arguments = [sys.executable, f'{pathToDepthaiRepository}/launcher/launcher.py'] + currentArgs
                            # Run updated launcher
                            print('Updated, running new launcher - command: ' + str(arguments))
                            subprocess.Popen(arguments, cwd=pathToDepthaiRepository)
                            # Exit current launcher
                            raise Exception('Shutting down and starting updated launcher')

            except subprocess.CalledProcessError as ex:
                errMessage = lastCall.stderr.decode()
                title = 'Git Error'
                message = f'Git produced the following error: {ex}\nOutput: {errMessage}'
                print(f'Message Box ({title}): {message}')
                #self.sigInfo.emit(title, message)
                #raise Exception('Git Error')
            except FileNotFoundError as ex:
                # Stop animation
                splashScreen.updateSplashMessage('')
                splashScreen.enableHeartbeat(False)
                title = 'No Git Available'
                message = 'Git cannot be found in the path. Make sure Git is installed and added to the path, then try again'
                print(f'Message Box ({title}): {message}')
                # TODO(themarpe) - could be made optional, if the following raise and message
                self.sigCritical.emit(title, message)
                raise Exception('No Git Found')
            except RuntimeError as ex:
                # Stop animation
                splashScreen.updateSplashMessage('')
                splashScreen.enableHeartbeat(False)
                title = 'No DepthAI Versions Found'
                message = "Couldn't find any available DepthAI versions. Continuing with existing version. Please report to developers."
                print(f'Message Box ({title}): {message}')
                # TODO(themarpe) - could be made optional, if the following raise and message
                self.sigWarning.emit(title, message)

            try:
                # Set to quit splash screen a little after subprocess is run
                skipSplashQuitFirstTime = False
                def removeSplash():
                    time.sleep(2.5)
                    if not skipSplashQuitFirstTime:
                        closeSplash()
                quitThread = threading.Thread(target=removeSplash)
                quitThread.start()

                # All ready, run the depthai_demo.py as a separate process
                ret = subprocess.run([sys.executable, f'{pathToDepthaiRepository}/{DEPTHAI_DEMO_SCRIPT}'], cwd=pathToDepthaiRepository, stderr=subprocess.PIPE)
                # Print out stderr first
                sys.stderr.write(ret.stderr.decode())
                print(f'DepthAI Demo ret code: {ret.returncode}')
                # Install dependencies if demo signaled missing dependencies
                if ret.returncode == 42:
                    skipSplashQuitFirstTime = True
                    print(f'Dependency issue raised. Retrying by installing requirements and restarting demo.')
                    # Present message of installing dependencies
                    splashScreen.updateSplashMessage('Installing DepthAI Requirements ...')
                    splashScreen.enableHeartbeat(True)
                    # Install requirements for depthai_demo.py
                    MAX_RETRY_COUNT = 3
                    installReqCall = None
                    for retry in range(0, MAX_RETRY_COUNT):
                        installReqCall = subprocess.run([sys.executable, f'{pathToDepthaiRepository}/{DEPTHAI_INSTALL_REQUIREMENTS_SCRIPT}'], cwd=pathToDepthaiRepository, stderr=subprocess.PIPE)
                        if installReqCall.returncode == 0:
                            break
                    if installReqCall.returncode != 0:
                        # Some error happened. Notify user
                        title = 'Error Installing DepthAI Requirements'
                        message = f"Couldn't install DepthAI requirements. Check internet connection and try again. Log available at: {LOG_FILE_PATH}"
                        print(f'Message Box ({title}): {message}')
                        print(f'Install dependencies call failed with return code: {installReqCall.returncode}, message: {installReqCall.stderr.decode()}')
                        self.sigCritical.emit(title, message)
                        raise Exception(title)

                    # Remove message and animation
                    splashScreen.updateSplashMessage('')
                    splashScreen.enableHeartbeat(False)
                    quitThread.join()
                    skipSplashQuitFirstTime = False
                    quitThread = threading.Thread(target=removeSplash)
                    quitThread.start()
                    # All ready, run the depthai_demo.py as a separate process
                    subprocess.run([sys.executable, f'{pathToDepthaiRepository}/{DEPTHAI_DEMO_SCRIPT}'], cwd=pathToDepthaiRepository)
            except Exception:
                pass
            finally:
                quitThread.join()

        except Exception as ex:
            # Catch-all for any kind of error
            print(f'Unknown error occurred ({ex}), exiting...')
        finally:
            # At the end, quit anyway
            closeSplash()
            splashScreen.close()
            qApp.exit()

qApp.worker = Worker()
qApp.worker.start()
sys.exit(qApp.exec())
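# --- Standalone sketch (not part of launcher.py) ---
# The update check above hinges on two details: filtering git tags down to
# those that parse as PEP 440 versions, and ordering them with
# packaging.version. A self-contained illustration of that rule; 'sdk-1.0'
# and 'nightly' are hypothetical non-version tags.
from packaging import version

def is_version(tag):
    try:
        return type(version.parse(tag)) is version.Version
    except version.InvalidVersion:  # packaging >= 22 raises instead of returning LegacyVersion
        return False

tags = ['v2.0.0', 'v2.1.0', 'sdk-1.0', 'v2.0.1', 'nightly']
candidates = [t for t in tags if is_version(t)]
print(max(candidates, key=version.parse))  # v2.1.0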
test_connection.py
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

try:
    import unittest2 as unittest
except ImportError:
    import unittest  # noqa

from functools import partial
import logging
from six.moves import range
import sys
import threading
from threading import Thread, Event
import time
from unittest import SkipTest

from cassandra import ConsistencyLevel, OperationTimedOut
from cassandra.cluster import NoHostAvailable, ConnectionShutdown, Cluster
import cassandra.io.asyncorereactor
from cassandra.io.asyncorereactor import AsyncoreConnection
from cassandra.protocol import QueryMessage
from cassandra.connection import Connection
from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, HostStateListener
from cassandra.pool import HostConnectionPool

from tests import is_monkey_patched
from tests.integration import use_singledc, PROTOCOL_VERSION, get_node, CASSANDRA_IP, local, \
    requiresmallclockgranularity, greaterthancass20

try:
    from cassandra.io.libevreactor import LibevConnection
    import cassandra.io.libevreactor
except ImportError:
    LibevConnection = None

log = logging.getLogger(__name__)


def setup_module():
    use_singledc()


class ConnectionTimeoutTest(unittest.TestCase):

    def setUp(self):
        self.defaultInFlight = Connection.max_in_flight
        Connection.max_in_flight = 2
        self.cluster = Cluster(
            protocol_version=PROTOCOL_VERSION,
            load_balancing_policy=HostFilterPolicy(
                RoundRobinPolicy(), predicate=lambda host: host.address == CASSANDRA_IP
            )
        )
        self.session = self.cluster.connect()

    def tearDown(self):
        Connection.max_in_flight = self.defaultInFlight
        self.cluster.shutdown()

    def test_in_flight_timeout(self):
        """
        Test to ensure that connection id fetching will block when max_id is
        reached. In previous versions of the driver this test would cause a
        NoHostAvailable exception to be thrown when max_id was restricted.

        @since 3.3
        @jira_ticket PYTHON-514
        @expected_result When many requests are run on a single node,
        connection acquisition should block until a connection is available
        or the request times out.

        @test_category connection timeout
        """
        futures = []
        query = '''SELECT * FROM system.local'''
        for i in range(100):
            futures.append(self.session.execute_async(query))
        for future in futures:
            future.result()


class TestHostListener(HostStateListener):
    host_down = None

    def on_down(self, host):
        self.host_down = True

    def on_up(self, host):
        self.host_down = False


class HeartbeatTest(unittest.TestCase):
    """
    Test to validate that a failing heartbeat check marks a host as down
    (this is the behaviour after PYTHON-734).

    @since 3.3
    @jira_ticket PYTHON-286
    @expected_result host should be marked down when heartbeat fails.

    @test_category connection heartbeat
    """

    def setUp(self):
        self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=1)
        self.session = self.cluster.connect(wait_for_all_pools=True)

    def tearDown(self):
        self.cluster.shutdown()

    @local
    @greaterthancass20
    def test_heart_beat_timeout(self):
        # Setup a host listener to ensure the nodes don't go down
        test_listener = TestHostListener()
        host = "127.0.0.1"
        node = get_node(1)
        initial_connections = self.fetch_connections(host, self.cluster)
        self.assertNotEqual(len(initial_connections), 0)
        self.cluster.register_listener(test_listener)
        # Pause the node
        try:
            node.pause()
            # Wait for connections associated with this host to go away
            self.wait_for_no_connections(host, self.cluster)
            # Wait two seconds for the driver to be notified
            time.sleep(2)
            self.assertTrue(test_listener.host_down)
        finally:
            # Resume paused node
            node.resume()
        # Run a query to ensure connections are re-established
        current_host = ""
        count = 0
        while current_host != host and count < 100:
            rs = self.session.execute_async("SELECT * FROM system.local", trace=False)
            rs.result()
            current_host = str(rs._current_host)
            count += 1
            time.sleep(.1)
        self.assertLess(count, 100, "Never connected to the first node")
        new_connections = self.wait_for_connections(host, self.cluster)
        self.assertFalse(test_listener.host_down)
        # Make sure underlying new connections don't match previous ones
        for connection in initial_connections:
            self.assertFalse(connection in new_connections)

    def fetch_connections(self, host, cluster):
        # Given a cluster object and host, grab all connections associated with that host
        connections = []
        holders = cluster.get_connection_holders()
        for conn in holders:
            if host == str(getattr(conn, 'host', '')):
                if isinstance(conn, HostConnectionPool):
                    if conn._connections is not None and len(conn._connections) > 0:
                        connections.append(conn._connections)
                else:
                    if conn._connection is not None:
                        connections.append(conn._connection)
        return connections

    def wait_for_connections(self, host, cluster):
        retry = 0
        while retry < 300:
            retry += 1
            connections = self.fetch_connections(host, cluster)
            if len(connections) != 0:
                return connections
            time.sleep(.1)
        self.fail("No new connections found")

    def wait_for_no_connections(self, host, cluster):
        retry = 0
        while retry < 100:
            retry += 1
            connections = self.fetch_connections(host, cluster)
            if len(connections) == 0:
                return
            time.sleep(.5)
        self.fail("Connections never cleared")


class ConnectionTests(object):

    klass = None

    def setUp(self):
        self.klass.initialize_reactor()

    def get_connection(self, timeout=5):
        """
        Helper method to solve automated testing issues within Jenkins.
        Officially patched under the 2.0 branch through
        17998ef72a2fe2e67d27dd602b6ced33a58ad8ef, but left as-is for the 1.0
        branch due to possible regressions for fixing an automated testing
        edge-case.
        """
        conn = None
        e = None
        for i in range(5):
            try:
                contact_point = CASSANDRA_IP
                conn = self.klass.factory(host=contact_point, timeout=timeout, protocol_version=PROTOCOL_VERSION)
                break
            except (OperationTimedOut, NoHostAvailable, ConnectionShutdown) as ex:
                e = ex  # keep a reference; the name bound by `except` is cleared on exit in Python 3
                continue
        if conn:
            return conn
        else:
            raise e

    def test_single_connection(self):
        """
        Test a single connection with sequential requests.
        """
        conn = self.get_connection()
        query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"
        event = Event()

        def cb(count, *args, **kwargs):
            count += 1
            if count >= 10:
                conn.close()
                event.set()
            else:
                conn.send_msg(
                    QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
                    request_id=0,
                    cb=partial(cb, count))

        conn.send_msg(
            QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
            request_id=0,
            cb=partial(cb, 0))
        event.wait()

    def test_single_connection_pipelined_requests(self):
        """
        Test a single connection with pipelined requests.
        """
        conn = self.get_connection()
        query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"
        responses = [False] * 100
        event = Event()

        def cb(response_list, request_num, *args, **kwargs):
            response_list[request_num] = True
            if all(response_list):
                conn.close()
                event.set()

        for i in range(100):
            conn.send_msg(
                QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
                request_id=i,
                cb=partial(cb, responses, i))
        event.wait()

    def test_multiple_connections(self):
        """
        Test multiple connections with pipelined requests.
        """
        conns = [self.get_connection() for i in range(5)]
        events = [Event() for i in range(5)]
        query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"

        def cb(event, conn, count, *args, **kwargs):
            count += 1
            if count >= 10:
                conn.close()
                event.set()
            else:
                conn.send_msg(
                    QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
                    request_id=count,
                    cb=partial(cb, event, conn, count))

        for event, conn in zip(events, conns):
            conn.send_msg(
                QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
                request_id=0,
                cb=partial(cb, event, conn, 0))
        for event in events:
            event.wait()

    def test_multiple_threads_shared_connection(self):
        """
        Test sharing a single connection across multiple threads,
        which will result in pipelined requests.
        """
        num_requests_per_conn = 25
        num_threads = 5
        event = Event()

        conn = self.get_connection()
        query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"

        def cb(all_responses, thread_responses, request_num, *args, **kwargs):
            thread_responses[request_num] = True
            if all(map(all, all_responses)):
                conn.close()
                event.set()

        def send_msgs(all_responses, thread_responses):
            for i in range(num_requests_per_conn):
                qmsg = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)
                with conn.lock:
                    request_id = conn.get_request_id()
                conn.send_msg(qmsg, request_id, cb=partial(cb, all_responses, thread_responses, i))

        all_responses = []
        threads = []
        for i in range(num_threads):
            thread_responses = [False] * num_requests_per_conn
            all_responses.append(thread_responses)
            t = Thread(target=send_msgs, args=(all_responses, thread_responses))
            threads.append(t)

        for t in threads:
            t.start()
        for t in threads:
            t.join()

        event.wait()

    def test_multiple_threads_multiple_connections(self):
        """
        Test several threads, each with their own Connection and pipelined requests.
        """
        num_requests_per_conn = 25
        num_conns = 5
        events = [Event() for i in range(5)]

        query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"

        def cb(conn, event, thread_responses, request_num, *args, **kwargs):
            thread_responses[request_num] = True
            if all(thread_responses):
                conn.close()
                event.set()

        def send_msgs(conn, event):
            thread_responses = [False] * num_requests_per_conn
            for i in range(num_requests_per_conn):
                qmsg = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)
                with conn.lock:
                    request_id = conn.get_request_id()
                conn.send_msg(qmsg, request_id, cb=partial(cb, conn, event, thread_responses, i))
            event.wait()

        threads = []
        for i in range(num_conns):
            conn = self.get_connection()
            t = Thread(target=send_msgs, args=(conn, events[i]))
            threads.append(t)

        for t in threads:
            t.start()
        for t in threads:
            t.join()

    @requiresmallclockgranularity
    def test_connect_timeout(self):
        # Underlying socket implementations don't always throw a socket timeout even with min float
        # This can be timing sensitive, added retry to ensure failure occurs if it can
        max_retry_count = 10
        exception_thrown = False
        for i in range(max_retry_count):
            start = time.time()
            try:
                conn = self.get_connection(timeout=sys.float_info.min)
                conn.close()
            except Exception as e:
                end = time.time()
                self.assertAlmostEqual(start, end, 1)
                exception_thrown = True
                break
        self.assertTrue(exception_thrown)

    def test_subclasses_share_loop(self):
        if self.klass not in (AsyncoreConnection, LibevConnection):
            raise SkipTest

        class C1(self.klass):
            pass

        class C2(self.klass):
            pass

        clusterC1 = Cluster(connection_class=C1)
        clusterC1.connect(wait_for_all_pools=True)

        clusterC2 = Cluster(connection_class=C2)
        clusterC2.connect(wait_for_all_pools=True)
        self.addCleanup(clusterC1.shutdown)
        self.addCleanup(clusterC2.shutdown)
        self.assertEqual(len(get_eventloop_threads(self.event_loop_name)), 1)


def get_eventloop_threads(name):
    all_threads = list(threading.enumerate())
    log.debug('all threads: {}'.format(all_threads))
    log.debug('all names: {}'.format([thread.name for thread in all_threads]))
    event_loops_threads = [thread for thread in all_threads if name == thread.name]
    return event_loops_threads


class AsyncoreConnectionTests(ConnectionTests, unittest.TestCase):

    klass = AsyncoreConnection
    event_loop_name = "asyncore_cassandra_driver_event_loop"

    def setUp(self):
        if is_monkey_patched():
            raise unittest.SkipTest("Can't test asyncore with monkey patching")
        ConnectionTests.setUp(self)

    def clean_global_loop(self):
        cassandra.io.asyncorereactor._global_loop._cleanup()
        cassandra.io.asyncorereactor._global_loop = None


class LibevConnectionTests(ConnectionTests, unittest.TestCase):

    klass = LibevConnection
    event_loop_name = "event_loop"

    def setUp(self):
        if is_monkey_patched():
            raise unittest.SkipTest("Can't test libev with monkey patching")
        if LibevConnection is None:
            raise unittest.SkipTest('libev does not appear to be installed properly')
        ConnectionTests.setUp(self)

    def clean_global_loop(self):
        cassandra.io.libevreactor._global_loop._cleanup()
        cassandra.io.libevreactor._global_loop = None
manager.py
# Date: 05/10/2018
# Author: Pure-L0G1C
# Description: Manages bots

from .bot import Bot
from .list import List
from .spyder import Spyder
from threading import Thread
from .const import MAX_REQUESTS


class Manager(object):

    def __init__(self, threads, url):
        self.threads = threads
        self.spyder = Spyder()
        self.isAlive = True
        self.bots = List()
        self.url = url

    def bot_size_manager(self):
        while self.isAlive:
            while all([self.isAlive, self.bots.lsize < self.threads]):
                try:
                    if self.spyder.proxies.qsize:
                        proxy = self.spyder.proxies.get()
                        proxy_addr = {
                            'https': 'https://{}:{}'.format(proxy['ip'], proxy['port'])
                        }
                        browser = self.spyder.browser(proxy_addr)
                        bot = Bot(browser, self.url)
                        self.bots.add(bot)
                except KeyboardInterrupt:
                    self.isAlive = False

    def bot_requests_manager(self):
        while self.isAlive:
            while all([self.isAlive, self.bots.lsize]):
                try:
                    expired = []  # expired bots
                    for _ in range(self.bots.lsize):
                        bot = self.bots.get_item(_)
                        if bot.requests >= MAX_REQUESTS:
                            expired.append(_)
                    for _ in expired:
                        self.bots.remove(_)
                except KeyboardInterrupt:
                    self.isAlive = False

    def start(self):
        bot_size = Thread(target=self.bot_size_manager)
        spyder = Thread(target=self.spyder.proxy_manager)
        bot_requests = Thread(target=self.bot_requests_manager)

        bot_requests.daemon = True
        bot_size.daemon = True
        spyder.daemon = True

        spyder.start()
        bot_size.start()
        bot_requests.start()

    def stop(self):
        self.isAlive = False
        self.spyder.isAlive = False
test_nmc.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright (c) 2019 tecnovert
# Distributed under the MIT software license, see the accompanying
# file LICENSE.txt or http://www.opensource.org/licenses/mit-license.php.

"""
basicswap]$ python tests/test_nmc.py
"""

import os
import sys
import unittest
import json
import logging
import shutil
import subprocess
import time
import signal
import threading
from urllib.request import urlopen
from basicswap.basicswap import (
    BasicSwap,
    Coins,
    SwapTypes,
    BidStates,
    TxStates,
    ABS_LOCK_BLOCKS,
    ABS_LOCK_TIME,
)
from basicswap.util import (
    COIN,
    toWIF,
    callrpc_cli,
)
from basicswap.key import (
    ECKey,
)
from basicswap.http_server import (
    HttpThread,
)
import basicswap.config as cfg

logger = logging.getLogger()
logger.level = logging.DEBUG
if not len(logger.handlers):
    logger.addHandler(logging.StreamHandler(sys.stdout))

NUM_NODES = 3
BASE_PORT = 14792
BASE_RPC_PORT = 19792
BASE_ZMQ_PORT = 20792
PREFIX_SECRET_KEY_REGTEST = 0x2e
TEST_HTML_PORT = 1800
NMC_NODE = 3
BTC_NODE = 4

stop_test = False


def prepareOtherDir(datadir, nodeId, conf_file='namecoin.conf'):
    node_dir = os.path.join(datadir, str(nodeId))
    if not os.path.exists(node_dir):
        os.makedirs(node_dir)
    filePath = os.path.join(node_dir, conf_file)

    with open(filePath, 'w+') as fp:
        fp.write('regtest=1\n')
        fp.write('[regtest]\n')
        fp.write('port=' + str(BASE_PORT + nodeId) + '\n')
        fp.write('rpcport=' + str(BASE_RPC_PORT + nodeId) + '\n')
        fp.write('daemon=0\n')
        fp.write('printtoconsole=0\n')
        fp.write('server=1\n')
        fp.write('discover=0\n')
        fp.write('listenonion=0\n')
        fp.write('bind=127.0.0.1\n')
        fp.write('findpeers=0\n')
        fp.write('debug=1\n')
        fp.write('debugexclude=libevent\n')
        fp.write('acceptnonstdtxn=0\n')


def prepareDir(datadir, nodeId, network_key, network_pubkey):
    node_dir = os.path.join(datadir, str(nodeId))
    if not os.path.exists(node_dir):
        os.makedirs(node_dir)
    filePath = os.path.join(node_dir, 'particl.conf')

    with open(filePath, 'w+') as fp:
        fp.write('regtest=1\n')
        fp.write('[regtest]\n')
        fp.write('port=' + str(BASE_PORT + nodeId) + '\n')
        fp.write('rpcport=' + str(BASE_RPC_PORT + nodeId) + '\n')
        fp.write('daemon=0\n')
        fp.write('printtoconsole=0\n')
        fp.write('server=1\n')
        fp.write('discover=0\n')
        fp.write('listenonion=0\n')
        fp.write('bind=127.0.0.1\n')
        fp.write('findpeers=0\n')
        fp.write('debug=1\n')
        fp.write('debugexclude=libevent\n')
        fp.write('zmqpubsmsg=tcp://127.0.0.1:' + str(BASE_ZMQ_PORT + nodeId) + '\n')
        fp.write('acceptnonstdtxn=0\n')
        fp.write('minstakeinterval=5\n')
        for i in range(0, NUM_NODES):
            if nodeId == i:
                continue
            fp.write('addnode=127.0.0.1:%d\n' % (BASE_PORT + i))

        if nodeId < 2:
            fp.write('spentindex=1\n')
            fp.write('txindex=1\n')

    basicswap_dir = os.path.join(datadir, str(nodeId), 'basicswap')
    if not os.path.exists(basicswap_dir):
        os.makedirs(basicswap_dir)

    nmcdatadir = os.path.join(datadir, str(NMC_NODE))
    btcdatadir = os.path.join(datadir, str(BTC_NODE))

    settings_path = os.path.join(basicswap_dir, 'basicswap.json')
    settings = {
        'zmqhost': 'tcp://127.0.0.1',
        'zmqport': BASE_ZMQ_PORT + nodeId,
        'htmlhost': 'localhost',
        'htmlport': 12700 + nodeId,
        'network_key': network_key,
        'network_pubkey': network_pubkey,
        'chainclients': {
            'particl': {
                'connection_type': 'rpc',
                'manage_daemon': False,
                'rpcport': BASE_RPC_PORT + nodeId,
                'datadir': node_dir,
                'bindir': cfg.PARTICL_BINDIR,
                'blocks_confirmed': 2,  # Faster testing
            },
            'namecoin': {
                'connection_type': 'rpc',
                'manage_daemon': False,
                'rpcport': BASE_RPC_PORT + NMC_NODE,
                'datadir': nmcdatadir,
                'bindir': cfg.NAMECOIN_BINDIR,
                'use_csv': False,  #
                'use_segwit': True,
            },
            'bitcoin': {
                'connection_type': 'rpc',
                'manage_daemon': False,
                'rpcport': BASE_RPC_PORT + BTC_NODE,
                'datadir': btcdatadir,
                'bindir': cfg.BITCOIN_BINDIR,
                'use_segwit': True,
            }
        },
        'check_progress_seconds': 2,
        'check_watched_seconds': 4,
        'check_expired_seconds': 60
    }
    with open(settings_path, 'w') as fp:
        json.dump(settings, fp, indent=4)


def startDaemon(nodeId, bin_dir=cfg.PARTICL_BINDIR, daemon_bin=cfg.PARTICLD):
    node_dir = os.path.join(cfg.DATADIRS, str(nodeId))
    daemon_bin = os.path.join(bin_dir, daemon_bin)
    args = [daemon_bin, '-datadir=' + node_dir]
    logging.info('Starting node ' + str(nodeId) + ' ' + daemon_bin + ' ' + '-datadir=' + node_dir)
    return subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)


def partRpc(cmd, node_id=0):
    return callrpc_cli(cfg.PARTICL_BINDIR, os.path.join(cfg.DATADIRS, str(node_id)), 'regtest', cmd, cfg.PARTICL_CLI)


def btcRpc(cmd):
    return callrpc_cli(cfg.BITCOIN_BINDIR, os.path.join(cfg.DATADIRS, str(BTC_NODE)), 'regtest', cmd, cfg.BITCOIN_CLI)


def nmcRpc(cmd):
    return callrpc_cli(cfg.NAMECOIN_BINDIR, os.path.join(cfg.DATADIRS, str(NMC_NODE)), 'regtest', cmd, cfg.NAMECOIN_CLI)


def signal_handler(sig, frame):
    global stop_test
    print('signal {} detected.'.format(sig))
    stop_test = True


def run_loop(self):
    while not stop_test:
        time.sleep(1)
        for c in self.swap_clients:
            c.update()
        nmcRpc('generatetoaddress 1 {}'.format(self.nmc_addr))
        btcRpc('generatetoaddress 1 {}'.format(self.btc_addr))


def waitForRPC(rpc_func, wallet=None):
    for i in range(5):
        try:
            rpc_func('getwalletinfo')
            return
        except Exception as ex:
            logging.warning('Can\'t connect to daemon RPC: %s. Trying again in %d second/s.', str(ex), (1 + i))
            time.sleep(1 + i)
    raise ValueError('waitForRPC failed')


class Test(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        super(Test, cls).setUpClass()

        eckey = ECKey()
        eckey.generate()
        cls.network_key = toWIF(PREFIX_SECRET_KEY_REGTEST, eckey.get_bytes())
        cls.network_pubkey = eckey.get_pubkey().get_bytes().hex()

        if os.path.isdir(cfg.DATADIRS):
            logging.info('Removing ' + cfg.DATADIRS)
            shutil.rmtree(cfg.DATADIRS)

        for i in range(NUM_NODES):
            prepareDir(cfg.DATADIRS, i, cls.network_key, cls.network_pubkey)

        prepareOtherDir(cfg.DATADIRS, NMC_NODE)
        prepareOtherDir(cfg.DATADIRS, BTC_NODE, 'bitcoin.conf')

        cls.daemons = []
        cls.swap_clients = []

        cls.daemons.append(startDaemon(BTC_NODE, cfg.BITCOIN_BINDIR, cfg.BITCOIND))
        logging.info('Started %s %d', cfg.BITCOIND, cls.daemons[-1].pid)
        cls.daemons.append(startDaemon(NMC_NODE, cfg.NAMECOIN_BINDIR, cfg.NAMECOIND))
        logging.info('Started %s %d', cfg.NAMECOIND, cls.daemons[-1].pid)
        for i in range(NUM_NODES):
            cls.daemons.append(startDaemon(i))
            logging.info('Started %s %d', cfg.PARTICLD, cls.daemons[-1].pid)
        time.sleep(1)

        for i in range(NUM_NODES):
            basicswap_dir = os.path.join(os.path.join(cfg.DATADIRS, str(i)), 'basicswap')
            settings_path = os.path.join(basicswap_dir, 'basicswap.json')
            with open(settings_path) as fs:
                settings = json.load(fs)
            fp = open(os.path.join(basicswap_dir, 'basicswap.log'), 'w')
            cls.swap_clients.append(BasicSwap(fp, basicswap_dir, settings, 'regtest', log_name='BasicSwap{}'.format(i)))
            cls.swap_clients[-1].setDaemonPID(Coins.BTC, cls.daemons[0].pid)
            cls.swap_clients[-1].setDaemonPID(Coins.NMC, cls.daemons[1].pid)
            cls.swap_clients[-1].setDaemonPID(Coins.PART, cls.daemons[2 + i].pid)
            cls.swap_clients[-1].start()

        cls.swap_clients[0].callrpc('extkeyimportmaster', ['abandon baby cabbage dad eager fabric gadget habit ice kangaroo lab absorb'])
        cls.swap_clients[1].callrpc('extkeyimportmaster', ['pact mammal barrel matrix local final lecture chunk wasp survey bid various book strong spread fall ozone daring like topple door fatigue limb olympic', '', 'true'])
        cls.swap_clients[1].callrpc('getnewextaddress', ['lblExtTest'])
        cls.swap_clients[1].callrpc('rescanblockchain')

        waitForRPC(nmcRpc)
        num_blocks = 500
        logging.info('Mining %d namecoin blocks', num_blocks)
        cls.nmc_addr = nmcRpc('getnewaddress mining_addr legacy')
        nmcRpc('generatetoaddress {} {}'.format(num_blocks, cls.nmc_addr))

        ro = nmcRpc('getblockchaininfo')
        try:
            assert(ro['bip9_softforks']['csv']['status'] == 'active')
        except Exception:
            logging.info('nmc: csv is not active')
        try:
            assert(ro['bip9_softforks']['segwit']['status'] == 'active')
        except Exception:
            logging.info('nmc: segwit is not active')

        waitForRPC(btcRpc)
        cls.btc_addr = btcRpc('getnewaddress mining_addr bech32')
        logging.info('Mining %d bitcoin blocks to %s', num_blocks, cls.btc_addr)
        btcRpc('generatetoaddress {} {}'.format(num_blocks, cls.btc_addr))

        ro = btcRpc('getblockchaininfo')
        assert(ro['bip9_softforks']['csv']['status'] == 'active')
        assert(ro['bip9_softforks']['segwit']['status'] == 'active')

        ro = nmcRpc('getwalletinfo')
        print('nmcRpc', ro)

        cls.http_threads = []
        host = '0.0.0.0'  # All interfaces (docker)
        for i in range(3):
            t = HttpThread(cls.swap_clients[i].fp, host, TEST_HTML_PORT + i, False, cls.swap_clients[i])
            cls.http_threads.append(t)
            t.start()

        signal.signal(signal.SIGINT, signal_handler)
        cls.update_thread = threading.Thread(target=run_loop, args=(cls,))
        cls.update_thread.start()

    @classmethod
    def tearDownClass(cls):
        global stop_test
        logging.info('Finalising')
        stop_test = True
        cls.update_thread.join()
        for t in cls.http_threads:
            t.stop()
            t.join()
        for c in cls.swap_clients:
            c.fp.close()

        for d in cls.daemons:
            logging.info('Terminating %d', d.pid)
            d.terminate()
            d.wait(timeout=10)
            if d.stdout:
                d.stdout.close()
            if d.stderr:
                d.stderr.close()
            if d.stdin:
                d.stdin.close()

        super(Test, cls).tearDownClass()

    def wait_for_offer(self, swap_client, offer_id):
        logging.info('wait_for_offer %s', offer_id.hex())
        for i in range(20):
            time.sleep(1)
            offers = swap_client.listOffers()
            for offer in offers:
                if offer.offer_id == offer_id:
                    return
        raise ValueError('wait_for_offer timed out.')

    def wait_for_bid(self, swap_client, bid_id):
        logging.info('wait_for_bid %s', bid_id.hex())
        for i in range(20):
            time.sleep(1)
            bids = swap_client.listBids()
            for bid in bids:
                if bid[1] == bid_id and int(bid[5]) == 1:
                    return
        raise ValueError('wait_for_bid timed out.')

    def wait_for_in_progress(self, swap_client, bid_id, sent=False):
        logging.info('wait_for_in_progress %s', bid_id.hex())
        for i in range(20):
            time.sleep(1)
            swaps = swap_client.listSwapsInProgress()
            for b in swaps:
                if b[0] == bid_id:
                    return
        raise ValueError('wait_for_in_progress timed out.')

    def wait_for_bid_state(self, swap_client, bid_id, state, sent=False, seconds_for=30):
        logging.info('wait_for_bid_state %s %s', bid_id.hex(), str(state))
        for i in range(seconds_for):
            time.sleep(1)
            bid = swap_client.getBid(bid_id)
            if bid.state >= state:
                return
        raise ValueError('wait_for_bid_state timed out.')

    def wait_for_bid_tx_state(self, swap_client, bid_id, initiate_state, participate_state, seconds_for=30):
        logging.info('wait_for_bid_tx_state %s %s %s', bid_id.hex(), str(initiate_state), str(participate_state))
        for i in range(seconds_for):
            time.sleep(1)
            bid = swap_client.getBid(bid_id)
            if (initiate_state is None or bid.getITxState() == initiate_state) \
               and (participate_state is None or bid.getPTxState() == participate_state):
                return
        raise ValueError('wait_for_bid_tx_state timed out.')

    def test_02_part_ltc(self):
        logging.info('---------- Test PART to NMC')
        swap_clients = self.swap_clients

        offer_id = swap_clients[0].postOffer(Coins.PART, Coins.NMC, 100 * COIN, 0.1 * COIN, 100 * COIN, SwapTypes.SELLER_FIRST, ABS_LOCK_TIME)

        self.wait_for_offer(swap_clients[1], offer_id)
        offers = swap_clients[1].listOffers()
        assert(len(offers) == 1)
        for offer in offers:
            if offer.offer_id == offer_id:
                bid_id = swap_clients[1].postBid(offer_id, offer.amount_from)

        self.wait_for_bid(swap_clients[0], bid_id)
        swap_clients[0].acceptBid(bid_id)

        self.wait_for_in_progress(swap_clients[1], bid_id, sent=True)

        self.wait_for_bid_state(swap_clients[0], bid_id, BidStates.SWAP_COMPLETED, seconds_for=60)
        self.wait_for_bid_state(swap_clients[1], bid_id, BidStates.SWAP_COMPLETED, sent=True, seconds_for=60)

        js_0 = json.loads(urlopen('http://localhost:1800/json').read())
        js_1 = json.loads(urlopen('http://localhost:1801/json').read())
        assert(js_0['num_swapping'] == 0 and js_0['num_watched_outputs'] == 0)
        assert(js_1['num_swapping'] == 0 and js_1['num_watched_outputs'] == 0)

    def test_03_nmc_part(self):
        logging.info('---------- Test NMC to PART')
        swap_clients = self.swap_clients

        offer_id = swap_clients[1].postOffer(Coins.NMC, Coins.PART, 10 * COIN, 9.0 * COIN, 10 * COIN, SwapTypes.SELLER_FIRST, ABS_LOCK_TIME)

        self.wait_for_offer(swap_clients[0], offer_id)
        offers = swap_clients[0].listOffers()
        for offer in offers:
            if offer.offer_id == offer_id:
                bid_id = swap_clients[0].postBid(offer_id, offer.amount_from)

        self.wait_for_bid(swap_clients[1], bid_id)
        swap_clients[1].acceptBid(bid_id)

        self.wait_for_in_progress(swap_clients[0], bid_id, sent=True)

        self.wait_for_bid_state(swap_clients[0], bid_id, BidStates.SWAP_COMPLETED, sent=True, seconds_for=60)
        self.wait_for_bid_state(swap_clients[1], bid_id, BidStates.SWAP_COMPLETED, seconds_for=60)

        js_0 = json.loads(urlopen('http://localhost:1800/json').read())
        js_1 = json.loads(urlopen('http://localhost:1801/json').read())
        assert(js_0['num_swapping'] == 0 and js_0['num_watched_outputs'] == 0)
        assert(js_1['num_swapping'] == 0 and js_1['num_watched_outputs'] == 0)

    def test_04_nmc_btc(self):
        logging.info('---------- Test NMC to BTC')
        swap_clients = self.swap_clients

        offer_id = swap_clients[0].postOffer(Coins.NMC, Coins.BTC, 10 * COIN, 0.1 * COIN, 10 * COIN, SwapTypes.SELLER_FIRST, ABS_LOCK_TIME)

        self.wait_for_offer(swap_clients[1], offer_id)
        offers = swap_clients[1].listOffers()
        for offer in offers:
            if offer.offer_id == offer_id:
                bid_id = swap_clients[1].postBid(offer_id, offer.amount_from)

        self.wait_for_bid(swap_clients[0], bid_id)
        swap_clients[0].acceptBid(bid_id)

        self.wait_for_in_progress(swap_clients[1], bid_id, sent=True)

        self.wait_for_bid_state(swap_clients[0], bid_id, BidStates.SWAP_COMPLETED, seconds_for=60)
        self.wait_for_bid_state(swap_clients[1], bid_id, BidStates.SWAP_COMPLETED, sent=True, seconds_for=60)

        js_0bid = json.loads(urlopen('http://localhost:1800/json/bids/{}'.format(bid_id.hex())).read())

        js_0 = json.loads(urlopen('http://localhost:1800/json').read())
        js_1 = json.loads(urlopen('http://localhost:1801/json').read())
        assert(js_0['num_swapping'] == 0 and js_0['num_watched_outputs'] == 0)
        assert(js_1['num_swapping'] == 0 and js_1['num_watched_outputs'] == 0)

    def test_05_refund(self):
        # Seller submits initiate txn, buyer doesn't respond
        logging.info('---------- Test refund, NMC to BTC')
        swap_clients = self.swap_clients

        offer_id = swap_clients[0].postOffer(Coins.NMC, Coins.BTC, 10 * COIN, 0.1 * COIN, 10 * COIN, SwapTypes.SELLER_FIRST, ABS_LOCK_BLOCKS, 10)

        self.wait_for_offer(swap_clients[1], offer_id)
        offers = swap_clients[1].listOffers()
        for offer in offers:
            if offer.offer_id == offer_id:
                bid_id = swap_clients[1].postBid(offer_id, offer.amount_from)

        self.wait_for_bid(swap_clients[0], bid_id)
        swap_clients[1].abandonBid(bid_id)
        swap_clients[0].acceptBid(bid_id)

        self.wait_for_bid_state(swap_clients[0], bid_id, BidStates.SWAP_COMPLETED, seconds_for=60)
        self.wait_for_bid_state(swap_clients[1], bid_id, BidStates.BID_ABANDONED, sent=True, seconds_for=60)

        js_0 = json.loads(urlopen('http://localhost:1800/json').read())
        js_1 = json.loads(urlopen('http://localhost:1801/json').read())
        assert(js_0['num_swapping'] == 0 and js_0['num_watched_outputs'] == 0)
        assert(js_1['num_swapping'] == 0 and js_1['num_watched_outputs'] == 0)

    def test_06_self_bid(self):
        logging.info('---------- Test same client, BTC to NMC')
        swap_clients = self.swap_clients

        js_0_before = json.loads(urlopen('http://localhost:1800/json').read())

        offer_id = swap_clients[0].postOffer(Coins.NMC, Coins.BTC, 10 * COIN, 10 * COIN, 10 * COIN, SwapTypes.SELLER_FIRST, ABS_LOCK_TIME)

        self.wait_for_offer(swap_clients[0], offer_id)
        offers = swap_clients[0].listOffers()
        for offer in offers:
            if offer.offer_id == offer_id:
                bid_id = swap_clients[0].postBid(offer_id, offer.amount_from)

        self.wait_for_bid(swap_clients[0], bid_id)
        swap_clients[0].acceptBid(bid_id)

        self.wait_for_bid_tx_state(swap_clients[0], bid_id, TxStates.TX_REDEEMED, TxStates.TX_REDEEMED, seconds_for=60)
        self.wait_for_bid_state(swap_clients[0], bid_id, BidStates.SWAP_COMPLETED, seconds_for=60)

        js_0 = json.loads(urlopen('http://localhost:1800/json').read())
        assert(js_0['num_swapping'] == 0 and js_0['num_watched_outputs'] == 0)
        assert(js_0['num_recv_bids'] == js_0_before['num_recv_bids'] + 1 and js_0['num_sent_bids'] == js_0_before['num_sent_bids'] + 1)

    def test_07_error(self):
        logging.info('---------- Test error, BTC to NMC, set fee above bid value')
        swap_clients = self.swap_clients

        js_0_before = json.loads(urlopen('http://localhost:1800/json').read())

        offer_id = swap_clients[0].postOffer(Coins.NMC, Coins.BTC, 0.001 * COIN, 1.0 * COIN, 0.001 * COIN, SwapTypes.SELLER_FIRST, ABS_LOCK_TIME)

        self.wait_for_offer(swap_clients[0], offer_id)
        offers = swap_clients[0].listOffers()
        for offer in offers:
            if offer.offer_id == offer_id:
                bid_id = swap_clients[0].postBid(offer_id, offer.amount_from)

        self.wait_for_bid(swap_clients[0], bid_id)
        swap_clients[0].acceptBid(bid_id)
        swap_clients[0].coin_clients[Coins.BTC]['override_feerate'] = 10.0
        swap_clients[0].coin_clients[Coins.NMC]['override_feerate'] = 10.0

        self.wait_for_bid_state(swap_clients[0], bid_id, BidStates.BID_ERROR, seconds_for=60)

    def pass_99_delay(self):
        global stop_test
        logging.info('Delay')
        for i in range(60 * 5):
            if stop_test:
                break
            time.sleep(1)
            print('delay', i)
        stop_test = True


if __name__ == '__main__':
    unittest.main()
supervised_popen.py
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following
#    disclaimer in the documentation and/or other materials provided
#    with the distribution.
#  * Neither the name of Fraunhofer nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from python_qt_binding.QtCore import QObject, Signal
import subprocess
import threading

from .detailed_msg_box import MessageBox


class SupervisedPopen(QObject):
    '''
    The class overrides subprocess.Popen and waits in a thread for its finish.
    If error output is detected, it will be shown in a message dialog.
    '''
    error = Signal(str, str, str)
    '''@ivar: the signal is emitted if error output was detected (id, description, message)'''

    finished = Signal(str)
    '''@ivar: the signal is emitted on exit (id)'''

    def __init__(self, args, bufsize=0, executable=None, stdin=None, stdout=None,
                 stderr=subprocess.PIPE, preexec_fn=None, close_fds=False, shell=False,
                 cwd=None, env=None, universal_newlines=False, startupinfo=None,
                 creationflags=0, object_id='', description=''):
        '''
        For arguments see https://docs.python.org/2/library/subprocess.html
        Additional arguments:
        :param object_id: the identification string of this object and title of the error message dialog
        :type object_id: str
        :param description: the description string used as additional information
                            in the dialog if an error occurred
        :type description: str
        '''
        try:
            QObject.__init__(self)
            self._args = args
            self._object_id = object_id
            self._description = description
            self.error.connect(self.on_error)
            # wait for process to avoid 'defunct' processes
            self.popen = subprocess.Popen(args=args, bufsize=bufsize, executable=executable,
                                          stdin=stdin, stdout=stdout, stderr=stderr,
                                          preexec_fn=preexec_fn, close_fds=close_fds,
                                          shell=shell, cwd=cwd, env=env,
                                          universal_newlines=universal_newlines,
                                          startupinfo=startupinfo, creationflags=creationflags)
            thread = threading.Thread(target=self._supervise)
            thread.daemon = True
            thread.start()
        except Exception:
            raise

#     def __del__(self):
#         print "Deleted:", self._description

    @property
    def stdout(self):
        return self.popen.stdout

    @property
    def stderr(self):
        return self.popen.stderr

    @property
    def stdin(self):
        return self.popen.stdin

    def _supervise(self):
        '''
        Wait for process to avoid 'defunct' processes
        '''
        self.popen.wait()
        result_err = ''
        if self.stderr is not None:
            result_err = self.stderr.read()
        if result_err:
            self.error.emit(self._object_id, self._description, result_err)
        self.finished.emit(self._object_id)

    def on_error(self, object_id, descr, msg):
        MessageBox.warning(None, object_id, '%s\n\n%s' % (descr, msg), ' '.join(self._args))
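# --- Usage sketch (assumptions noted below) ---
# Hypothetical use of SupervisedPopen, assuming the containing package is
# importable, that python_qt_binding exposes QtWidgets, and that a display is
# available so the queued error/finished signals can be delivered; object_id
# becomes the title of any error dialog.
import sys
from python_qt_binding.QtWidgets import QApplication

app = QApplication(sys.argv)
proc = SupervisedPopen(['ls', '-l'], object_id='ls', description='list the current directory')
proc.finished.connect(lambda object_id: app.quit())
sys.exit(app.exec_())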
client.py
""" Web socket client mixins. | Copyright 2017-2020, Voxel51, Inc. | `voxel51.com <https://voxel51.com/>`_ | """ from collections import defaultdict import logging import signal import threading from retrying import retry import socketio import fiftyone.constants as foc logging.getLogger("socketio").setLevel(logging.ERROR) logging.getLogger("engineio").setLevel(logging.ERROR) # We only want one session to print notifications per namespace and per process _printer = defaultdict(lambda: None) class BaseClient(socketio.ClientNamespace): """SocketIO Client. It is possible to add any arbitrary ``on_my_event()`` method to a socketio ClientNamespace, but using a single generic ``on_update()`` is sufficient. Organizing message categories can instead be done by subclassing :class:`HasClient`. Attributes: data: the current data data_cls: the data cls to load updated data as Args: namespace: client namespace data_cls: data class type (must be ``eta.core.serial.Serializable``) """ def __init__(self, namespace, data_cls): self.data_cls = data_cls self.data = data_cls() self.connected = False self.updated = False self.namespace = namespace super().__init__(namespace) # disable socketio's interrupt handler because it closes the connection # on ctrl-c in interactive sessions signal.signal(signal.SIGINT, signal.default_int_handler) def __del__(self): _printer[self.namespace] = None def on_connect(self): """Receives the "connect" event.""" self.connected = True def on_disconnect(self): """Receives the "disconnect" event.""" self.connected = False def on_update(self, data): """Receives an update. Args: data: the new data """ self.updated = True self.data = self.data_cls.from_dict(data) def on_notification(self, data): """Receives a server error. Args: data: the error message """ if _printer[self.namespace] is None: _printer[self.namespace] = self if _printer[self.namespace] != self: return print(data["kind"]) print() print(data["message"]) print() for value in data["session_items"]: print(value) def update(self, data): """Sends an update. Args: data: the new data """ self.data = data self.emit("update", {"data": data.serialize(), "include_self": False}) @retry(wait_fixed=500, stop_max_delay=5000) def _connect(sio, addr): sio.connect(addr) class HasClient(object): """Mixin that supports maintaining a shared state of data using web sockets. Subclasses must set the ``_HC_NAMESPACE`` and ``_HC_ATTR_NAME`` class attributes. Attributes: _HC_NAMESPACE: The socketio namespace to use _HC_ATTR_NAME: The attribute name to use for that shared data. 
The data must be a subclass of ``eta.core.serial.Serializable`` """ _HC_NAMESPACE = None _HC_ATTR_NAME = None _HC_ATTR_TYPE = None def __init__(self, port): self._hc_sio = socketio.Client() # the following is a monkey patch to set threads to daemon mode self._hc_sio.eio.start_background_task = _start_background_task self._hc_client = BaseClient( "/" + self._HC_NAMESPACE, self._HC_ATTR_TYPE ) self._hc_sio.register_namespace(self._hc_client) _connect(self._hc_sio, foc.SERVER_ADDR % port) def __getattr__(self, name): """Gets the data via the attribute defined by ``_HC_ATTR_NAME``.""" if name == self._HC_ATTR_NAME: return self._hc_client.data return None def __setattr__(self, name, value): """Sets the data to the attribute defined by ``_HC_ATTR_NAME``.""" if name == self._HC_ATTR_NAME: if self._HC_ATTR_TYPE is not None and not isinstance( value, self._HC_ATTR_TYPE ): raise ValueError( "Client expected type %s, but got type %s" % (self._HC_ATTR_TYPE, type(value)) ) self._hc_client.update(value) else: super().__setattr__(name, value) def _start_background_task(target, *args, **kwargs): """We are monkey patching here to start threads in ``daemon`` mode. Original docs below: The patch allows for clean exits out of python. Start a background task. This is a utility function that applications can use to start a background task. Args: target: the target function to execute *args: arguments to pass to the function **kwargs: keyword arguments to pass to the function Returns: an object compatible with the ``Thread`` class in the Python standard library. The ``start()`` method on this object is called by this function before returning it """ th = threading.Thread(target=target, args=args, kwargs=kwargs, daemon=True) th.start() return th
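# --- Subclass sketch (not part of client.py) ---
# A hypothetical HasClient subclass illustrating the contract described in
# the docstring. The real shared-data type must be an
# eta.core.serial.Serializable; StateDescription is a plain stand-in that
# provides the two methods BaseClient actually calls (serialize and
# from_dict) so the sketch stays self-contained.
class StateDescription(object):
    def __init__(self, datasets=None):
        self.datasets = datasets or []

    def serialize(self):
        return {"datasets": self.datasets}

    @classmethod
    def from_dict(cls, d):
        return cls(datasets=d.get("datasets"))


class StateClient(HasClient):
    _HC_NAMESPACE = "state"
    _HC_ATTR_NAME = "state"
    _HC_ATTR_TYPE = StateDescription

# client = StateClient(5151)          # connects to foc.SERVER_ADDR % 5151
# client.state = StateDescription()   # pushed to the server via "update"
# latest = client.state               # reflects the shared state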
Hiwin_RT605_ArmCommand_Socket_20190627180655.py
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum

Socket = 0
data = '0'  # initial value of the transmitted data
Arm_feedback = 1  # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False

##------------class pos-------
class point():
    def __init__(self, x, y, z, pitch, roll, yaw):
        self.x = x
        self.y = y
        self.z = z
        self.pitch = pitch
        self.roll = roll
        self.yaw = yaw

pos = point(0.0, 36.8, 11.35, -90.0, 0.0, 0.0)

##------------class socket_cmd---------
class socket_data():
    def __init__(self, grip, setvel, ra, delay, setboth, action, Speedmode):
        self.grip = grip
        self.setvel = setvel
        self.ra = ra
        self.delay = delay
        self.setboth = setboth
        self.action = action
        self.Speedmode = Speedmode

socket_cmd = socket_data(0, 0.0, 0, 0, 0, 0, 0)

##-----------switch define------------##
class switch(object):
    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        return  # a bare return ends the generator; raising StopIteration here breaks on Python 3.7+ (PEP 479)

    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            return True
        elif self.value in args:  # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False

##-----------client feedback arm state----------
class StateFeedback():
    def __init__(self, ArmState, SentFlag):
        self.ArmState = ArmState
        self.SentFlag = SentFlag

state_feedback = StateFeedback(0, 0)

def point_data(x, y, z, pitch, roll, yaw):  ## receive pose data sent from the strategy client
    pos.x = '%s' % x
    pos.y = '%s' % y
    pos.z = '%s' % z
    pos.pitch = '%s' % pitch
    pos.roll = '%s' % roll
    pos.yaw = '%s' % yaw

##----------Arm Mode-------------###
def Arm_Mode(action, grip, ra, setvel, setboth):  ## receive arm-mode data sent from the strategy client
    global arm_mode_flag
    socket_cmd.action = int('%s' % action)
    socket_cmd.grip = int('%s' % grip)
    socket_cmd.ra = int('%s' % ra)
    socket_cmd.setvel = int('%s' % setvel)
    socket_cmd.setboth = int('%s' % setboth)
    arm_mode_flag = True
    Socket_command()

##-------Arm Speed Mode------------###
def Speed_Mode(speedmode):  ## receive arm speed-mode data sent from the strategy client
    global speed_mode_flag
    socket_cmd.Speedmode = speedmode

def socket_talker():  ## create the server node
    pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
    rospy.init_node(NAME)
    rate = rospy.Rate(10)  # 10hz
    print("Ready to connect")
    while not rospy.is_shutdown():
        # hello_str = "hello world %s" % rospy.get_time()
        state = Int32MultiArray()
        state.data = [state_feedback.ArmState, state_feedback.SentFlag]
        # rospy.loginfo(state)
        pub.publish(state)
        rate.sleep()

##----------socket packet transmission--------------##
##---------------send arm commands over the socket-----------------
def Socket_command():
    global Socket, arm_mode_flag, data
    if arm_mode_flag == True:
        arm_mode_flag = False
        for case in switch(socket_cmd.action):
            #-------PtP Mode--------
            if case(Taskcmd.Action_Type.PtoP):
                for case in switch(socket_cmd.setboth):
                    if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                        data = TCP.SetPtoP(socket_cmd.grip, Taskcmd.RA.ABS, Taskcmd.Ctrl_Mode.CTRL_POS,
                                           pos.x, pos.y, pos.z, pos.pitch, pos.roll, pos.yaw, socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                        data = TCP.SetPtoP(socket_cmd.grip, Taskcmd.RA.ABS, Taskcmd.Ctrl_Mode.CTRL_EULER,
                                           pos.x, pos.y, pos.z, pos.pitch, pos.roll, pos.yaw, socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                        data = TCP.SetPtoP(socket_cmd.grip, Taskcmd.RA.ABS, Taskcmd.Ctrl_Mode.CTRL_BOTH,
                                           pos.x, pos.y, pos.z, pos.pitch, pos.roll, pos.yaw, socket_cmd.setvel)
                        break
                break
            #-------Line Mode--------
            if case(Taskcmd.Action_Type.Line):
                for case in switch(socket_cmd.setboth):
                    if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                        data = TCP.SetLine(socket_cmd.grip, Taskcmd.RA.ABS, Taskcmd.Ctrl_Mode.CTRL_POS,
                                           pos.x, pos.y, pos.z, pos.pitch, pos.roll, pos.yaw, socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                        data = TCP.SetLine(socket_cmd.grip, Taskcmd.RA.ABS, Taskcmd.Ctrl_Mode.CTRL_EULER,
                                           pos.x, pos.y, pos.z, pos.pitch, pos.roll, pos.yaw, socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                        data = TCP.SetLine(socket_cmd.grip, Taskcmd.RA.ABS, Taskcmd.Ctrl_Mode.CTRL_BOTH,
                                           pos.x, pos.y, pos.z, pos.pitch, pos.roll, pos.yaw, socket_cmd.setvel)
                        break
                break
            #-------set arm speed--------
            if case(Taskcmd.Action_Type.SetVel):
                data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
                break
            #-------set arm delay time--------
            if case(Taskcmd.Action_Type.Delay):
                data = TCP.SetDelay(socket_cmd.grip, 0)
                break
            #-------set arm fast & safe mode--------
            if case(Taskcmd.Action_Type.Mode):
                data = TCP.Set_SpeedMode(socket_cmd.grip, socket_cmd.Speedmode)
                break
        socket_cmd.action = 5  ## switch back to the initial mode state
        # print(data)
        Socket.send(data.encode('utf-8'))  # send the command string over the socket as UTF-8

##-----------socket client--------
def socket_client():
    global Socket
    try:
        Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        Socket.connect(('192.168.0.1', 8080))  # iclab 5 & iclab hiwin
        # s.connect(('192.168.1.102', 8080))  # iclab computerx
    except socket.error as msg:
        print(msg)
        sys.exit(1)
    print('Connection has been successful')
    print(Socket.recv(1024))
    while 1:
        feedback_str = Socket.recv(1024)  # the arm side reports its state
        if str(feedback_str[2]) == '48':  # '0': arm is Ready to receive the next motion command
            state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':  # '1': arm is busy and cannot execute the next motion command
            state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':  # '6': strategy finished
            state_feedback.ArmState = 6
            print("shutdown")
        # check the sent flag
        if str(feedback_str[4]) == '48':  # returned 0: false
            state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':  # returned 1: true
            state_feedback.SentFlag = 1
        ##---------------send arm commands over the socket end-----------------
        if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
            break
    rospy.on_shutdown(myhook)
    Socket.close()
##-----------socket client end--------
##-------------socket packet transmission end--------------##

## multithreading
# def thread_test():
#     socket_client()
## multithreading end

def myhook():
    print("shutdown time!")

if __name__ == '__main__':
    socket_cmd.action = 5  ## initial mode state
    t = threading.Thread(target=socket_client)
    t.start()  # start the socket client thread
    try:
        socket_talker()
    except rospy.ROSInterruptException:
        pass
    t.join()
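# Editor's addition: a minimal, self-contained example of the switch helper
# defined above (the state codes are arbitrary and for illustration only).
def describe_state(code):
    for case in switch(code):
        if case(0):
            return "ready"
        if case(1):
            return "busy"
        if case(6):
            return "strategy finished"
        if case():  # default suite: matches when no earlier case did
            return "unknown"

# describe_state(1) -> "busy"; omitting the return/break in a suite falls through.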
runBioWordVec.py
from helper import *
import multiprocessing
import sys
import time
from gensim.models.wrappers import FastText

numKeywords = int(sys.argv[1])
vectorSize = int(sys.argv[2])
maxCandidateArticles = int(sys.argv[3])
reducedSet = str(sys.argv[4])

print(f"REDUCE SET VALUE == {reducedSet}")

printTimestamp("Getting candidate articles")
candidate_articles = getCandidateArticles(maxCandidateArticles, reducedSet == 'true')

printTimestamp("Loading BioWordVec")
model = load_embedding("/Data/concept_model.bin")

start = time.time()
processes = []
for query in range(6, 7):
    for keywordExtractor in ["TopicRank", "TfIdf", "KPMiner", "YAKE", "TextRank", "SingleRank",
                             "TopicalPageRank", "PositionRank", "MultipartiteRank"]:
        mp = multiprocessing.Process(target=findSimilarity,
                                     args=(keywordExtractor, "BioWordVec", model, candidate_articles,
                                           query, numKeywords, vectorSize))
        mp.start()
        processes.append(mp)

# Join after all extractors have been launched, so they still run in parallel
# but the elapsed time covers the actual work rather than just process dispatch.
for proc in processes:
    proc.join()

end = time.time()
print('{:.4f} s'.format(end - start))
optimal_args_hashbits.py
#! /usr/bin/env python # This file is part of khmer, https://github.com/dib-lab/khmer/, and is # Copyright (C) 2015, Michigan State University. # Copyright (C) 2015, The Regents of the University of California. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # * Neither the name of the Michigan State University nor the names # of its contributors may be used to endorse or promote products # derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Contact: khmer-project@idyll.org # pylint: disable=invalid-name,missing-docstring """ Estimate optimal arguments using nodegraph counting. % python sandbox/optimal_args_nodegraph.py <data1> [ <data2> <...> ] Use '-h' for parameter help. 
""" from __future__ import print_function import sys import math import threading import khmer from khmer.khmer_args import (report_on_config, info, add_threading_args, build_nodegraph_args) from khmer.kfile import check_input_files, check_space from khmer.kfile import check_space from khmer.khmer_args import graphsize_args_report def get_parser(): parser = build_nodegraph_args(descr="Load sequences into the compressible " "graph format plus optional tagset.") add_threading_args(parser) parser.add_argument('input_filenames', metavar='input_sequence_filename', nargs='+', help='input FAST[AQ] sequence filename') return parser def main(): info('optimal_args_nodegraph.py', ['graph', 'SeqAn']) args = get_parser().parse_args() report_on_config(args, graphtype='nodegraph') filenames = args.input_filenames base = filenames[0] for _ in args.input_filenames: check_input_files(_, False) check_space(args.input_filenames, False) print('Counting kmers from sequences in %s' % repr(filenames), file=sys.stderr) htable = khmer.new_nodegraph(args.ksize, args.max_tablesize, args.n_tables) target_method = htable.consume_fasta_with_reads_parser for _, filename in enumerate(filenames): rparser = khmer.ReadParser(filename) threads = [] print('consuming input', filename, file=sys.stderr) for num in xrange(args.threads): cur_thread = threading.Thread( target=target_method, args=(rparser,)) threads.append(cur_thread) cur_thread.start() for thread in threads: thread.join() unique_kmers = htable.n_unique_kmers() print('Total number of unique k-mers: {0}'.format(unique_kmers), file=sys.stderr) info_optimal = open(base + '.optimal_args', 'w') fp_rate = khmer.calc_expected_collisions(htable) print('fp rate estimated to be %1.3f' % fp_rate, file=sys.stderr) if fp_rate > 0.15: # 0.18 is ACTUAL MAX. Do not change. print("**", file=sys.stderr) print("** ERROR: the graph structure is too small for this data set." "Increase table size/# tables.", file=sys.stderr) print("**", file=sys.stderr) if not False: sys.exit(1) to_print = graphsize_args_report(unique_kmers, fp_rate) print(to_print, file=info_optimal) print('optimal arguments were written to', base + '.optimal_args', file=sys.stderr) if __name__ == '__main__': main() # vim: set filetype=python tabstop=4 softtabstop=4 shiftwidth=4 expandtab: # vim: set textwidth=79:
state_signals_responder.py
import sys
import state_signals
from multiprocessing import Process
import argparse
import subprocess


class StateSignalsResponder:
    """This class runs a state-signals responder."""

    def __init__(self, redis_host: str, run_workload_method: str, timeout: int):
        self.__redis_host = redis_host
        self.__run_workload_method = run_workload_method
        self.__timeout = timeout

    def _listener(self):
        responder = state_signals.SignalResponder(
            redis_host=self.__redis_host,
            responder_name="benchmark-runner",
            conn_timeout=self.__timeout
        )
        for signal in responder.listen():
            if signal.tag == "bad":
                ras = 0
            elif signal.event == 'benchmark-start':
                output = subprocess.check_output(
                    self.__run_workload_method,
                    stderr=subprocess.STDOUT,
                    shell=True,
                    timeout=self.__timeout,
                    universal_newlines=False)
                print(output.decode("utf-8"))
                if output:
                    ras = 1
                else:
                    ras = 0
            elif signal.event == 'shutdown':
                sys.exit()
            else:
                ras = 1
            # responder.respond(signal.publisher_id, signal.event, ras)
            responder.srespond(signal, ras=ras)


def main():
    # Instantiate the parser
    parser = argparse.ArgumentParser(description='Signal State Responder')
    # Required positional arguments
    parser.add_argument('redis_host', type=str, help='Redis host')
    parser.add_argument('workload_method', type=str, help='workload method')
    parser.add_argument('timeout', type=int, help='timeout')
    args = parser.parse_args()
    run = StateSignalsResponder(redis_host=args.redis_host,
                                run_workload_method=args.workload_method,
                                timeout=args.timeout)
    init = Process(target=run._listener)
    init.start()
    init.join()


# Guarding the entry point keeps the module import-safe and is required for
# multiprocessing start methods that re-import this file (e.g. spawn).
if __name__ == '__main__':
    main()

# python3.9 /state_signals_responder.py redis-deployment.redis-db.svc.cluster.local /vdbench/vdbench_runner.sh 3600
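# Editor's sketch of the publisher counterpart, assuming the SignalExporter API
# described in the state-signals README (initialize / publish_signal / shutdown);
# treat the exact names and signatures as assumptions, not a verified contract.
def publish_benchmark_start(redis_host: str):
    exporter = state_signals.SignalExporter('benchmark', redis_host=redis_host)
    exporter.initialize(legal_events=['benchmark-start', 'shutdown'])
    exporter.publish_signal('benchmark-start', timeout=3600)  # responders reply with ras
    exporter.publish_signal('shutdown')
    exporter.shutdown()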
main.py
import requests
import redis
import brotli
import queue
import threading
import os
from multiprocessing.dummy import Pool

base = 'http://23.95.221.108'
cores = 8
pool = Pool(cores)
limit = 1290
redis_queue = queue.LifoQueue()
client = redis.StrictRedis()
cache = client.hgetall("ebooks")  # redis returns keys and values as bytes
dir_name = './txt/'


def get(path):
    url = base + path
    cached = cache.get(path.encode())  # hash keys are bytes; look up by path
    if cached is not None:
        html = brotli.decompress(cached).decode()
    else:
        html = requests.get(url).text
        # cache under the same key we look up by later: the path, not the full URL
        redis_queue.put((path, brotli.compress(html.encode(), brotli.MODE_TEXT)))
    return html


def page(page_id):
    path = page_path(page_id)
    return get(path)


def page_path(page_id):
    return '/page/' + str(page_id)


def get_page(page_id):
    file = dir_name + str(page_id) + '.txt'
    if not os.path.exists(file):
        print("Writing %d" % page_id)
        html = page(page_id)
        fd = open(file, "w+")
        fd.write(html)
        fd.flush()
        fd.close()


def redis_set():
    # Queue.not_empty is a Condition object and always truthy; the loop relies
    # on the blocking get() below, so make the intent explicit with `while True`.
    while True:
        key, val = redis_queue.get()
        client.hset("ebooks", key, val)
        print(f'{key} -> {len(val)}')


def main():
    print("Got %d items" % len(cache))
    if not os.path.exists(dir_name):
        print("Creating dir %s" % dir_name)
        os.makedirs(dir_name)
    threading.Thread(target=redis_set, daemon=True).start()
    pool.map_async(get_page, range(1, limit + 1)).wait()


main()
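# Editor's sketch: the writer thread above relies on the blocking get(); a
# sentinel-based variant makes shutdown explicit (illustrative only):
def redis_set_with_sentinel():
    while True:
        item = redis_queue.get()
        if item is None:  # sentinel pushed by the producer side when done
            break
        key, val = item
        client.hset("ebooks", key, val)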
system_profiler.py
# Copyright 2021 MosaicML. All Rights Reserved.

"""Profiler to record system level metrics."""

from __future__ import annotations

import threading
import time
from typing import TYPE_CHECKING, Dict, cast

import psutil

from composer.callbacks import memory_monitor
from composer.core.callback import Callback

if TYPE_CHECKING:
    from composer.core.logging.logger import Logger
    from composer.core.state import State
    from composer.profiler import Profiler

__all__ = ["SystemProfiler"]


class SystemProfiler(Callback):
    """The SystemProfiler records system level metrics.

    Implemented as a :class:`.Callback`, the profiler forks a thread during :attr:`.Event.INIT` which polls
    and records system state.

    When used with the Composer :class:`.Trainer`\\, the system profiler is enabled if profiling is enabled.

    .. note::

        The Composer :class:`.Trainer` creates an instance of :class:`.SystemProfiler` when profiling is
        enabled. The user should not create and directly register an instance of :class:`.SystemProfiler`
        when using the Composer :class:`.Trainer`\\.

    Args:
        profile_cpu (bool): Whether to record cpu statistics (Default: ``True``)
        profile_memory (bool): Whether to record memory statistics (Default: ``False``)
        profile_disk (bool): Whether to record disk I/O statistics (Default: ``False``)
        profile_net (bool): Whether to record network I/O statistics (Default: ``False``)
        stats_thread_interval_seconds (float): Interval to record system-level stats, in seconds.
            (Default: every ``0.5`` seconds)
    """

    def __init__(self,
                 profile_cpu: bool = True,
                 profile_memory: bool = False,
                 profile_disk: bool = False,
                 profile_net: bool = False,
                 stats_thread_interval_seconds: float = 0.5) -> None:
        self.profile_cpu = profile_cpu
        self.profile_disk = profile_disk
        self.profile_memory = profile_memory
        self.profile_net = profile_net
        self.stats_thread_interval_seconds = stats_thread_interval_seconds

    def init(self, state: State, logger: Logger):
        del logger  # unused
        assert state.profiler is not None, "The trainer should have set the profiler in state"

        # Start the stats thread
        threading.Thread(target=self._stats_thread, daemon=True, args=[state.profiler]).start()

    def _stats_thread(self, profiler: Profiler):
        """Gathers requested system metrics at :attr:`SystemProfiler.stats_thread_interval_seconds` interval."""
        psutil.disk_io_counters.cache_clear()
        psutil.net_io_counters.cache_clear()
        if self.profile_cpu:
            psutil.cpu_percent()  # spin it once to clear the default 0.0 value on the first call

        while True:
            if self.profile_cpu:
                cpu_percent = psutil.cpu_percent()
                profiler.marker(name="cpu", categories=["cpu"]).counter({"cpu_percent": cpu_percent})

            if self.profile_memory:
                cuda_memory_stats = memory_monitor._get_memory_report()
                for name, val in cuda_memory_stats.items():
                    profiler.marker(f"memory/cuda/{name}", categories=["memory"]).counter({name: val})
                swap_memory = psutil.swap_memory()
                # psutil reports bytes; divide by 2**30 (not 2**9) for gigabytes
                profiler.marker("memory/swap", categories=["memory"]).counter({
                    "used_gb": swap_memory.used / 2**30,
                    "free_gb": swap_memory.free / 2**30
                })
                virtual_memory = psutil.virtual_memory()
                profiler.marker("memory/virtual", categories=["memory"]).counter({
                    "used_gb": virtual_memory.used / 2**30,
                    "available_gb": virtual_memory.available / 2**30
                })

            if self.profile_disk:
                disk_io_counters = cast(Dict[str, psutil._common.sdiskio], psutil.disk_io_counters(perdisk=True))
                for disk_name, disk_stats in disk_io_counters.items():
                    for field_name in ("read_count", "write_count", "read_bytes", "write_bytes", "read_time",
                                       "write_time", "busy_time"):
                        # key the counter by the actual field name, not the literal string "field_name"
                        profiler.marker(f"disk/{disk_name}/{field_name}",
                                        categories=["disk"]).counter({field_name: getattr(disk_stats, field_name)})

            if self.profile_net:
                net_io_counters = cast(Dict[str, psutil._common.snetio], psutil.net_io_counters(pernic=True))
                for nic, nic_stats in net_io_counters.items():
                    # psutil reports bytes; divide by 2**10 (not 2**3) for kilobytes
                    profiler.marker(f"network/{nic}/kb_sent",
                                    categories=["net"]).counter({"kb_sent": nic_stats.bytes_sent / 2**10})
                    profiler.marker(f"network/{nic}/kb_recv",
                                    categories=["net"]).counter({"kb_recv": nic_stats.bytes_recv / 2**10})

            time.sleep(self.stats_thread_interval_seconds)
processing.py
import logging from multiprocessing import Process, Queue from pathlib import Path from typing import Iterator, Sequence from configargparse import Namespace from gray.formatters import FORMATTERS, BaseFormatter, CompositeFormatter log = logging.getLogger(__name__) class FormattingError(Exception): exit_code = 1 def is_venv(path: Path): return all(( (path / "bin" / "python").exists(), (path / "pyvenv.cfg").is_file(), )) def gen_filepaths( paths: Sequence[Path], process_venv: bool = True, ) -> Iterator[Path]: for path in paths: if path.is_file() and (path.suffix == ".py"): yield path elif path.is_dir(): if is_venv(path) and not process_venv: log.warning( "%s looks like virtualenv directory. Skipping... ", path, ) log.warning("Use --do-not-detect-venv flag to turn this off") continue yield from path.glob("**/*.py") else: log.debug("Skipping %r", path) def fade_file(file_path: Path, formatter: BaseFormatter): log.debug("Going to process file %s", file_path) formatter.process(file_path) log.info("\"%s\" file was processed", file_path) def worker(tasks: Queue, result: Queue, formatter: BaseFormatter): fname = tasks.get() while fname is not None: err = None try: fade_file(fname, formatter) except Exception as e: log.exception("Failed to reformat file \"%s\"", fname) err = e result.put_nowait((fname, err)) fname = tasks.get() def process(arguments: Namespace): tasks = Queue() results = Queue() formatter = CompositeFormatter( *[FORMATTERS[k](arguments) for k in arguments.formatters], ) processes = [] for _ in range(arguments.pool_size): prc = Process(target=worker, args=(tasks, results, formatter)) processes.append(prc) prc.start() tasks_map = set() for fname in gen_filepaths(arguments.paths, arguments.do_not_detect_venv): tasks_map.add(fname) tasks.put_nowait(fname) failed = False wrong_files = [] try: while tasks_map: fname, exc = results.get() if exc: failed = True wrong_files.append(fname) tasks_map.remove(fname) if failed: for fname in wrong_files: log.error("Failed when processing \"%s\"", fname) raise FormattingError( "Formatting failed please check previous errors", wrong_files, ) finally: for _ in range(arguments.pool_size): tasks.put(None) for prc in processes: prc.join()
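# Editor's sketch: a minimal invocation of process(), assuming the attribute
# names consumed above (formatters, pool_size, paths, do_not_detect_venv).
# "isort" is used here as an example FORMATTERS key; real runs build the
# Namespace from gray's configargparse setup, which also carries the
# per-formatter options each formatter's constructor expects.
def example_run():
    args = Namespace(
        formatters=["isort"],
        pool_size=4,
        paths=[Path(".")],
        do_not_detect_venv=False,
    )
    process(args)  # raises FormattingError if any file fails to format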
test_runner.py
from threading import Thread import datetime import logging import time import pytest from freezegun import freeze_time from logrotor.runner import Runner UDP_PORT = 1024 @pytest.fixture def runner(tmpdir): config = { 'flush_every_seconds': 5, 'rotate_every_seconds': 3600, 'storage': { 'path': str(tmpdir), 'size': 5, }, 'endpoints': [{ 'type': 'UdpEndpoint', 'port': UDP_PORT, }], } return Runner(config) @pytest.fixture def runner_thread(runner): return Thread(target=runner.run) def test_runner(send_udp, tmpdir, runner, runner_thread): logging.basicConfig(level=logging.DEBUG) runner_thread.start() time.sleep(1) send_udp('Message'.encode()) time.sleep(1) runner.stop() runner_thread.join(timeout=1) assert tmpdir.join('data', '0').read() == '127.0.0.1 Message\n' def test_runner_rotates_at_given_interval(send_udp, tmpdir, runner, runner_thread): with freeze_time('2017-07-28') as frozen_time: runner_thread.start() time.sleep(1) send_udp('Alice'.encode()) time.sleep(1) frozen_time.tick(delta=datetime.timedelta(seconds=3600)) time.sleep(1) send_udp('Bob'.encode()) time.sleep(1) runner.stop() runner_thread.join(timeout=1) assert tmpdir.join('data', '0').read() == '127.0.0.1 Alice\n' assert tmpdir.join('data', '1').read() == '127.0.0.1 Bob\n' def test_runner_does_not_schedule_rotation_in_0_seconds(tmpdir, runner, runner_thread): with freeze_time('2017-07-28 01:59:59,999') as frozen_time: runner_thread.start() time.sleep(1) runner.stop() runner_thread.join(timeout=1) assert tmpdir.join('current').readlink() == 'data/0' def test_runner_flushes_regularly(tmpdir, runner, runner_thread, send_udp): with freeze_time('2017-07-28') as frozen_time: runner_thread.start() time.sleep(1) send_udp('Alice'.encode()) time.sleep(5) file_content = tmpdir.join('data', '0').read() runner.stop() runner_thread.join(timeout=1) assert file_content == '127.0.0.1 Alice\n'
sonar_non443.py
import argparse import sys from multiprocessing import cpu_count, Process, Queue import logging from datetime import datetime from elasticsearch import Elasticsearch from elasticsearch.helpers import bulk, scan import hashlib import gzip import requests import os from helpers.certparser import process_cert from helpers.hostparser import proccess_host logger = logging.getLogger('SSLImporter') logger_format = logging.Formatter('\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():%(lineno)d %(asctime)s\033[0m| ' '%(message)s') stream_handler = logging.StreamHandler(sys.stdout) stream_handler.setFormatter(logger_format) logger.addHandler(stream_handler) elastic_logger = logging.getLogger('elasticsearch') elastic_logger.addHandler(stream_handler) DEFAULT_SERVER = u'localhost' DEFAULT_PORT = 9200 def parse_hosts_file(gzfile, queue): logger.warning("Opening file {f} at {d}".format(f=gzfile, d=datetime.now())) with gzip.open(gzfile, 'rb') as hostsfile: filedate = gzfile[0:8] for line in hostsfile: (host, port, certhash) = line.split(',', 2) host_data = dict() host_data['hash'] = certhash.strip('\n') host_data['host'] = host host_data['source'] = 'sonar' host_data['port'] = port host_data['last_seen'] = datetime.strptime(filedate, "%Y%m%d") queue.put(host_data) logger.warning("Closing file {f} at {d}".format(f=gzfile, d=datetime.now())) def process_hosts(q, es): """ :param q: The Queue object that hosts should be pulled off of :param es: An Elasticsearch connection. This way each worker has its own connection and you don't have to share it across multiple workers/processes :return: """ bulk_hosts = [] while True: line = q.get() if line == "DONE": bulk(es, bulk_hosts) return True host = proccess_host(line) cert_hash = hashlib.sha1(host['host']+host['hash']+host['source']+host['port']) cert_hash = cert_hash.hexdigest() action = {"_op_type": "update", "_index": 'passive-ssl-non443-hosts-sonar', "_type": "host", "_id": cert_hash, "doc": host, "doc_as_upsert": "true"} bulk_hosts.append(action) if len(bulk_hosts) == 500: bulk(es, bulk_hosts) bulk_hosts = [] def update_hosts(q, es): bulk_update_hosts = [] while True: hosts = q.get() if hosts == "DONE": bulk(es, bulk_update_hosts) return True last_seen = hosts['_source']['last_seen'] first_seen = last_seen action = {"_op_type": "update", "_index": "passive-ssl-hosts-sonar", "_type": "host", "_id": hosts['_id'], "doc": {'first_seen': first_seen}} bulk_update_hosts.append(action) if len(bulk_update_hosts) == 500: bulk(es, bulk_update_hosts) bulk_update_hosts = [] def process_scan_certs(q, es, port): """ :param q: The Queue object that certs should be pulled off of :param es: An Elasticsearch connection. 
This way each worker has its own connection and you don't have to share it across multiple workers/processes :param port: the port associated with the ssl scan that was done (25, 465, 993, 143 etc) :return: """ bulk_certs = [] while True: certs = q.get() if certs == "DONE": bulk(es, bulk_certs) return True newcert = process_cert(certs['certs']) if newcert: newcert['import_date'] = certs['time'] newcert['source'] = 'sonar' newcert['port'] = port cert_hash = hashlib.sha1(newcert['hash_id']+str(port)+newcert['source']) cert_hash = cert_hash.hexdigest() newcert_action = {"_index": "passive-ssl-non443-certs-sonar", "_type": "cert", '_id': cert_hash, '_source': newcert} bulk_certs.append(newcert_action) if len(bulk_certs) == 500: bulk(es, bulk_certs) bulk_certs = [] def parse_certs_file(gzfile, queue): filedate = gzfile[0:8] logger.warning("Opening file {f} at {d}".format(f=gzfile, d=datetime.now())) with gzip.open(gzfile, 'rb') as certfile: for line in certfile: raw_cert = dict() (certhash, cert) = line.split(',', 1) raw_cert['time'] = datetime.strptime(filedate, "%Y%m%d") raw_cert['certs'] = cert if raw_cert: queue.put(raw_cert) logger.warning("Closing file {f} at {d}".format(f=gzfile, d=datetime.now())) def main(argv): parser = argparse.ArgumentParser() parser.add_argument('--server', default=DEFAULT_SERVER, help=u'Elasticsearch hostname or IP (default {0})'.format(DEFAULT_SERVER)) parser.add_argument('--port', default=DEFAULT_PORT, help=u'Elasticsearch port (default {0})'.format(DEFAULT_PORT)) args = parser.parse_args(argv[1:]) workers = cpu_count() process_hosts_queue = Queue(maxsize=20000) process_certs_queue = Queue(maxsize=20000) update_hosts_queue = Queue(maxsize=20000) es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=60) imported_sonar = es.search(index='scansio-sonar-ssl-non443-imported', body={"size": 3000, "query": {"match_all": {}} }) imported_files = [] for f in imported_sonar['hits']['hits']: imported_files.append(f['_id']) scansio_feed = requests.get('https://scans.io/json') if scansio_feed.status_code == 200: feed = scansio_feed.json() if 'studies' in feed: for result in feed['studies']: if result['name'] == "More SSL Certificates (non-443)": for res in result['files']: scans_file = res['name'] if scans_file.endswith('certs.gz'): if 'smtp_25' in scans_file: certfile = scans_file[52:86] port = 25 if 'smtp_465' in scans_file: certfile = scans_file[52:82] port = 465 if 'imap_993' in scans_file: certfile = scans_file[52:82] port = 993 if 'imap_143' in scans_file: certfile = scans_file[52:87] port = 143 if 'pop3_995' in scans_file: certfile = scans_file[52:82] port = 995 if certfile not in imported_files: logger.warning("We don't have {file} imported lets download it".format(file=certfile)) phys_file = requests.get(scans_file, stream=True) with open('{f}'.format(f=certfile), 'wb') as newcerts: for chunk in phys_file.iter_content(chunk_size=1024): if chunk: newcerts.write(chunk) with open('{f}'.format(f=certfile), 'rb') as fh: h = hashlib.sha1() while True: data = fh.read(8192) if not data: break h.update(data) sha1 = h.hexdigest() if sha1 == res['fingerprint']: for w in xrange(workers): queue_es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=60) p = Process(target=process_scan_certs, args=(process_certs_queue, queue_es, port)) p.daemon = True p.start() logger.warning("Importing {f} at {d}".format(f=certfile, d=datetime.now())) parse_certs_file(certfile, process_certs_queue) for w in xrange(workers): process_certs_queue.put("DONE") 
logger.warning("Importing finished of {f} at {d}".format(f=certfile, d=datetime.now())) es.index(index='scansio-sonar-ssl-non443-imported', doc_type='imported-file', id=certfile, body={'file': certfile, 'imported_date': datetime.now(), 'sha1': sha1}) else: logger.error("SHA1 did not match for {f} it was not imported".format(f=certfile)) os.remove(certfile) if scans_file.endswith('endpoints.gz'): if 'smtp_25' in scans_file: hostsfile = scans_file[52:90] port = 25 if 'smtp_465' in scans_file: hostsfile = scans_file[52:86] port = 465 if 'imap_993' in scans_file: hostsfile = scans_file[52:86] port = 993 if 'imap_143' in scans_file: hostsfile = scans_file[52:91] port = 143 if 'pop3_995' in scans_file: hostsfile = scans_file[52:86] port = 995 if hostsfile not in imported_files: logger.warning("We don't have {file} imported lets download it".format(file=hostsfile)) phys_host_file = requests.get(scans_file) with open('{f}'.format(f=hostsfile), 'wb') as hf: for chunk in phys_host_file.iter_content(chunk_size=1024): if chunk: hf.write(chunk) with open('{f}'.format(f=hostsfile), 'rb') as fh: h = hashlib.sha1() while True: data = fh.read(8192) if not data: break h.update(data) sha1 = h.hexdigest() if sha1 == res['fingerprint']: for w in xrange(workers): queue_es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=60) p = Process(target=process_hosts, args=(process_hosts_queue, queue_es)) p.daemon = True p.start() logger.warning("Importing {f} at {d}".format(f=hostsfile, d=datetime.now())) parse_hosts_file(hostsfile, process_hosts_queue) logger.warning("Hosts updated for {f} now going back and updating first_seen" .format(f=hostsfile)) update_es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=60) # construct an elasticsearch query where the filter is looking for any entry # that is missing the field first_seen # adding a queue processing system here this should hopefully speed things up. 
                                for work in xrange(workers):
                                    p = Process(target=update_hosts, args=(update_hosts_queue, update_es))
                                    p.daemon = True
                                    p.start()
                                q = {'size': 500, "query": {"match_all": {}},
                                     "filter": {"missing": {"field": "first_seen"}}}
                                new_updates = update_es.search(index='passive-ssl-non443-hosts-sonar', body=q)
                                logger.warning("Number of hosts to update is {count}"
                                               .format(count=new_updates['hits']['total']))
                                # Scan across all the documents missing the first_seen field and bulk update them
                                missing_first_seen = scan(update_es, query=q, scroll='30m',
                                                          index='passive-ssl-non443-hosts-sonar')
                                for miss in missing_first_seen:
                                    update_hosts_queue.put(miss)
                                # for some stupid reason I keep missing some at the end of the scan/scroll
                                # so going to do them manually
                                new_updates = update_es.search(index='passive-ssl-non443-hosts-sonar', body=q)
                                logger.warning("Number of hosts to update is {count}"
                                               .format(count=new_updates['hits']['total']))
                                missing_first_seen_again = scan(update_es, query=q, scroll='30m',
                                                                index='passive-ssl-non443-hosts-sonar')
                                bulk_update_missed = []
                                for m in missing_first_seen_again:
                                    last_seen = m['_source']['last_seen']
                                    first_seen = last_seen
                                    action = {"_op_type": "update", "_index": "passive-ssl-non443-hosts-sonar",
                                              "_type": "host", "_id": m['_id'], "doc": {'first_seen': first_seen}}
                                    bulk_update_missed.append(action)
                                    if len(bulk_update_missed) == 500:
                                        bulk(update_es, bulk_update_missed)
                                        bulk_update_missed = []
                                # Flush the remaining actions (fewer than 500) once the loop has ended
                                bulk(update_es, bulk_update_missed)
                                for w in xrange(workers):
                                    update_hosts_queue.put("DONE")
                                logger.warning("Finished updating hosts at {d}".format(d=datetime.now()))
                                logger.warning("Importing finished of {f} at {d}".format(f=hostsfile,
                                                                                         d=datetime.now()))
                                es.index(index='scansio-sonar-ssl-non443-imported', doc_type='imported-file',
                                         id=hostsfile,
                                         body={'file': hostsfile, 'imported_date': datetime.now(), 'sha1': sha1})
                                os.remove(hostsfile)
        else:
            logger.error("The scans.io/json must have changed or is having issues. I didn't see any studies. Exiting")
            sys.exit()
    else:
        logger.error("There was an error connecting to https://scans.io. I did not get a 200 status code. Exiting")
        sys.exit()


if __name__ == "__main__":
    main(sys.argv)
    logger.warning("Indexes have been created. Start indexing scans.io ssl at will now :)")
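# Editor's sketch (illustrative): the first_seen backfill above boils down to a
# scan-then-bulk-update pass. Reduced to its essentials with the already-imported
# elasticsearch helpers:
def backfill_first_seen(es_conn, index_name):
    q = {"query": {"match_all": {}},
         "filter": {"missing": {"field": "first_seen"}}}
    actions = ({"_op_type": "update", "_index": index_name, "_type": "host",
                "_id": hit['_id'],
                "doc": {"first_seen": hit['_source']['last_seen']}}
               for hit in scan(es_conn, query=q, scroll='30m', index=index_name))
    bulk(es_conn, actions)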
pyusb_backend.py
# pyOCD debugger
# Copyright (c) 2006-2021 Arm Limited
# Copyright (c) 2020 Patrick Huesmann
# Copyright (c) 2021 mentha
# Copyright (c) Chris Reed
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import threading
from time import sleep
import platform
import errno

from .interface import Interface
from .common import (
    USB_CLASS_HID,
    filter_device_by_class,
    is_known_cmsis_dap_vid_pid,
    generate_device_unique_id,
    )
from ..dap_access_api import DAPAccessIntf

LOG = logging.getLogger(__name__)

try:
    import usb.core
    import usb.util
except ImportError:
    IS_AVAILABLE = False
else:
    IS_AVAILABLE = True

class PyUSB(Interface):
    """! @brief CMSIS-DAP USB interface class using pyusb for the backend."""

    isAvailable = IS_AVAILABLE

    did_show_no_libusb_warning = False

    def __init__(self):
        super(PyUSB, self).__init__()
        self.ep_out = None
        self.ep_in = None
        self.dev = None
        self.intf_number = None
        self.serial_number = None
        self.kernel_driver_was_attached = False
        self.closed = True
        self.thread = None
        self.rcv_data = []
        self.read_sem = threading.Semaphore(0)
        self.packet_size = 64

    def open(self):
        assert self.closed is True

        # Get device handle
        dev = usb.core.find(custom_match=FindDap(self.serial_number))
        if dev is None:
            raise DAPAccessIntf.DeviceError("Device %s not found" % self.serial_number)

        # get active config
        config = dev.get_active_configuration()

        # Get count of HID interfaces and create the matcher object
        hid_interface_count = len(list(usb.util.find_descriptor(config, find_all=True,
                                                                bInterfaceClass=USB_CLASS_HID)))
        matcher = MatchCmsisDapv1Interface(hid_interface_count)

        # Get CMSIS-DAPv1 interface
        interface = usb.util.find_descriptor(config, custom_match=matcher)
        if interface is None:
            raise DAPAccessIntf.DeviceError("Device %s has no CMSIS-DAPv1 interface" % self.serial_number)
        interface_number = interface.bInterfaceNumber

        # Find endpoints
        ep_in, ep_out = None, None
        for endpoint in interface:
            if endpoint.bEndpointAddress & usb.util.ENDPOINT_IN:
                ep_in = endpoint
            else:
                ep_out = endpoint

        # Detach kernel driver
        self.kernel_driver_was_attached = False
        try:
            if dev.is_kernel_driver_active(interface_number):
                LOG.debug("Detaching Kernel Driver of Interface %d from USB device (VID=%04x PID=%04x).",
                          interface_number, dev.idVendor, dev.idProduct)
                dev.detach_kernel_driver(interface_number)
                self.kernel_driver_was_attached = True
        except (NotImplementedError, usb.core.USBError) as e:
            # Some implementations don't have kernel attach/detach
            LOG.warning("USB Kernel Driver Detach Failed ([%s] %s). "
                        "Attached driver may interfere with pyOCD operations.",
                        e.errno, e.strerror)

        # Explicitly claim the interface
        try:
            usb.util.claim_interface(dev, interface_number)
        except usb.core.USBError as exc:
            raise DAPAccessIntf.DeviceError("Unable to open device") from exc

        # Update all class variables if we made it here
        self.ep_out = ep_out
        self.ep_in = ep_in
        self.dev = dev
        self.intf_number = interface_number

        # Start RX thread as the last step
        self.closed = False
        self.start_rx()

    def start_rx(self):
        # Flush the RX buffers by reading until timeout exception
        try:
            while True:
                self.ep_in.read(self.ep_in.wMaxPacketSize, 1)
        except usb.core.USBError:
            # USB timeout expected
            pass

        # Start RX thread
        self.thread = threading.Thread(target=self.rx_task)
        self.thread.daemon = True
        self.thread.start()

    def rx_task(self):
        try:
            while not self.closed:
                self.read_sem.acquire()
                if not self.closed:
                    self.rcv_data.append(self.ep_in.read(self.ep_in.wMaxPacketSize, 10 * 1000))
        finally:
            # Set last element of rcv_data to None on exit
            self.rcv_data.append(None)

    @staticmethod
    def get_all_connected_interfaces():
        """! @brief Returns all the connected CMSIS-DAP devices.

        returns an array of PyUSB (Interface) objects
        """
        # find all cmsis-dap devices
        try:
            all_devices = usb.core.find(find_all=True, custom_match=FindDap())
        except usb.core.NoBackendError:
            if not PyUSB.did_show_no_libusb_warning:
                LOG.warning("CMSIS-DAPv1 probes may not be detected because no libusb library was found.")
                PyUSB.did_show_no_libusb_warning = True
            return []

        # iterate on all devices found
        boards = []
        for board in all_devices:
            new_board = PyUSB()
            new_board.vid = board.idVendor
            new_board.pid = board.idProduct
            new_board.product_name = board.product or f"{board.idProduct:#06x}"
            new_board.vendor_name = board.manufacturer or f"{board.idVendor:#06x}"
            new_board.serial_number = board.serial_number \
                or generate_device_unique_id(board.idProduct, board.idVendor, board.bus, board.address)
            boards.append(new_board)

        return boards

    def write(self, data):
        """! @brief Write data on the OUT endpoint associated to the HID interface"""

        report_size = self.packet_size
        if self.ep_out:
            report_size = self.ep_out.wMaxPacketSize

        for _ in range(report_size - len(data)):
            data.append(0)

        self.read_sem.release()

        if not self.ep_out:
            bmRequestType = 0x21       # Host to device request of type Class of Recipient Interface
            bmRequest = 0x09           # Set_REPORT (HID class-specific request for transferring data over EP0)
            wValue = 0x200             # Issuing an OUT report
            wIndex = self.intf_number  # mBed Board interface number for HID
            self.dev.ctrl_transfer(bmRequestType, bmRequest, wValue, wIndex, data)
            return

        self.ep_out.write(data)

    def read(self):
        """! @brief Read data on the IN endpoint associated to the HID interface"""
        while len(self.rcv_data) == 0:
            sleep(0)

        if self.rcv_data[0] is None:
            raise DAPAccessIntf.DeviceError("Device %s read thread exited" % self.serial_number)

        return self.rcv_data.pop(0)

    def close(self):
        """! @brief Close the interface"""
        assert self.closed is False

        LOG.debug("closing interface")
        self.closed = True
        self.read_sem.release()
        self.thread.join()
        assert self.rcv_data[-1] is None
        self.rcv_data = []
        usb.util.release_interface(self.dev, self.intf_number)
        if self.kernel_driver_was_attached:
            try:
                self.dev.attach_kernel_driver(self.intf_number)
            except Exception as exception:
                LOG.warning('Exception attaching kernel driver: %s', str(exception))
        usb.util.dispose_resources(self.dev)
        self.ep_out = None
        self.ep_in = None
        self.dev = None
        self.intf_number = None
        self.kernel_driver_was_attached = False
        self.thread = None

class MatchCmsisDapv1Interface(object):
    """! @brief Match class for finding CMSIS-DAPv1 interface.

    This match class performs several tests on the provided USB interface descriptor, to
    determine whether it is a CMSIS-DAPv1 interface. These requirements must be met by the
    interface:

    1. If there is more than one HID interface on the device, the interface must have an interface
        name string containing "CMSIS-DAP".
    2. bInterfaceClass must be 0x03 (HID).
    3. bInterfaceSubClass must be 0.
    4. Must have interrupt in endpoint, with an optional interrupt out endpoint, in that order.
    """

    def __init__(self, hid_interface_count):
        """! @brief Constructor."""
        self._hid_count = hid_interface_count

    def __call__(self, interface):
        """! @brief Return True if this is a CMSIS-DAPv1 interface."""
        try:
            if self._hid_count > 1:
                interface_name = usb.util.get_string(interface.device, interface.iInterface)

                # This tells us whether the interface is CMSIS-DAP, but not whether it's v1 or v2.
                if (interface_name is None) or ("CMSIS-DAP" not in interface_name):
                    return False

            # Now check the interface class to distinguish v1 from v2.
            if (interface.bInterfaceClass != USB_CLASS_HID) \
                or (interface.bInterfaceSubClass != 0):
                return False

            # Must have either 1 or 2 endpoints.
            if interface.bNumEndpoints not in (1, 2):
                return False

            endpoint_attrs = [
                (usb.util.endpoint_direction(ep.bEndpointAddress),
                 usb.util.endpoint_type(ep.bmAttributes))
                for ep in interface
                ]

            # Possible combinations of endpoints
            ENDPOINT_ATTRS_ALLOWED = [
                # One interrupt endpoint IN
                [(usb.util.ENDPOINT_IN, usb.util.ENDPOINT_TYPE_INTR)],
                # Two interrupt endpoints, first one IN, second one OUT
                [(usb.util.ENDPOINT_IN, usb.util.ENDPOINT_TYPE_INTR),
                 (usb.util.ENDPOINT_OUT, usb.util.ENDPOINT_TYPE_INTR)],
                # Two interrupt endpoints, first one OUT, second one IN
                [(usb.util.ENDPOINT_OUT, usb.util.ENDPOINT_TYPE_INTR),
                 (usb.util.ENDPOINT_IN, usb.util.ENDPOINT_TYPE_INTR)],
                ]
            if endpoint_attrs not in ENDPOINT_ATTRS_ALLOWED:
                return False

            # All checks passed, this is a CMSIS-DAPv1 interface!
            return True
        except (UnicodeDecodeError, IndexError):
            # UnicodeDecodeError exception can be raised if the device has a corrupted interface name.
            # Certain versions of STLinkV2 are known to have this problem. If we can't read the
            # interface name, there's no way to tell if it's a CMSIS-DAPv1 interface.
            #
            # IndexError can be raised if an endpoint is missing.
            return False

class FindDap(object):
    """! @brief CMSIS-DAP match class to be used with usb.core.find"""

    def __init__(self, serial=None):
        """! @brief Create a new FindDap object with an optional serial number"""
        self._serial = serial

    def __call__(self, dev):
        """! @brief Return True if this is a DAP device, False otherwise"""
        # Check if the device class is a valid one for CMSIS-DAP.
        if filter_device_by_class(dev.idVendor, dev.idProduct, dev.bDeviceClass):
            return False

        try:
            # First attempt to get the active config. This produces a more direct error
            # when you don't have device permissions on Linux.
            config = dev.get_active_configuration()

            # Now read the product name string.
            device_string = dev.product
            if (device_string is None) or ("CMSIS-DAP" not in device_string):
                return False

            # Get count of HID interfaces.
            hid_interface_count = len(list(usb.util.find_descriptor(config, find_all=True,
                                                                    bInterfaceClass=USB_CLASS_HID)))

            # Find the CMSIS-DAPv1 interface.
            matcher = MatchCmsisDapv1Interface(hid_interface_count)
            cmsis_dap_interface = usb.util.find_descriptor(config, custom_match=matcher)
        except usb.core.USBError as error:
            if error.errno == errno.EACCES and platform.system() == "Linux":
                msg = ("%s while trying to interrogate a USB device "
                       "(VID=%04x PID=%04x). This can probably be remedied with a udev rule. "
                       "See <https://github.com/pyocd/pyOCD/tree/master/udev> for help."
                       % (error, dev.idVendor, dev.idProduct))

                # If we recognize this device as one that should be CMSIS-DAP, we can raise
                # the level of the log message since it's almost certainly a permissions issue.
                if is_known_cmsis_dap_vid_pid(dev.idVendor, dev.idProduct):
                    LOG.warning(msg)
                else:
                    LOG.debug(msg)
            else:
                LOG.debug("Error accessing USB device (VID=%04x PID=%04x): %s",
                          dev.idVendor, dev.idProduct, error)
            return False
        except (IndexError, NotImplementedError, ValueError, UnicodeDecodeError) as error:
            LOG.debug("Error accessing USB device (VID=%04x PID=%04x): %s", dev.idVendor,
                      dev.idProduct, error)
            return False

        if cmsis_dap_interface is None:
            return False

        if self._serial is not None:
            if self._serial == "" and dev.serial_number is None:
                return True
            if self._serial != dev.serial_number:
                return False

        return True
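# Editor's sketch (illustrative): enumerating CMSIS-DAP probes with the FindDap
# matcher above. Requires pyusb with a working libusb backend.
def list_dap_probes():
    for dev in usb.core.find(find_all=True, custom_match=FindDap()):
        print("%04x:%04x %s" % (dev.idVendor, dev.idProduct, dev.serial_number))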
NOPE.py
from tda.auth import easy_client from tda import auth import pandas as pd import numpy as np import os from datetime import datetime import atexit import json import itertools import threading import pandas_market_calendars as mcal from pprint import pprint from sqlalchemy import create_engine import psycopg2 as pg # Set Tickers to Pull Data For equity_tickers = ['SPY', 'PSTH', 'AMD', 'THCB', 'APHA', 'QQQ', 'AAPL', 'PLTR', 'TSM'] # Get Current Date for Both Folder Organization and Getting Market Calendar today = datetime.now().strftime('%Y-%m-%d') # Gets NYSE Market Calendar for Current Day nyse = mcal.get_calendar('NYSE') market_cal_today = nyse.schedule(start_date = today, end_date = today, tz = 'America/Chicago') # Checks if Market is Open Today, if Closed then Script Exits if market_cal_today.empty == True: import sys sys.exit('Market is Close Today') engine = create_engine('postgresql://haydenrose@localhost:5432/stock_data') # Opens TD Ameritrade Account Info and Sets Webdriver Path for Selenium account_id=open(r'/Users/haydenrose/Python/API Keys/TD/TD_ACCOUNT_ID.txt').read() consumer_key=open(r'/Users/haydenrose/Python/API Keys/TD/TD_CONSUMER_KEY.txt').read() redirect_uri='http://localhost' token_path=r'/Users/haydenrose/Python/API Keys/TD/ameritrade-credentials.json' geckodriver_path=r'/Users/haydenrose/Webdrivers/geckodriver' # Creates Webdriver for Selenium def make_webdriver(): # Import selenium here because it's slow to import from selenium import webdriver driver = webdriver.Firefox(executable_path = geckodriver_path) atexit.register(lambda: driver.quit()) return driver # Sets td-api Client Object. # Will Create Refresh Token with OAUTH and Grab With Selenium # if it Doesn't Exist in Working Folder. c = easy_client(consumer_key, redirect_uri, token_path, make_webdriver) def options_chain_cleaner(options_chain, only_type=False): """ Takes unformatted option chain csv and returns cleaned up df. Specify only_type='Calls' or 'Puts' if only wanting one or other, specify False if wanting both and 2 dataframes will be returned, calls first and puts second. i.e. calls, puts = func('file.csv') """ if only_type == 'Calls': Calls = options_chain['callExpDateMap'].values() call_option_list = [] for i in Calls: for j in i.values(): for k in j: call_option_list.append(k) Calls_df = pd.DataFrame(call_option_list) Calls_df.set_index('description', inplace=True) return Calls_df elif only_type == 'Puts': Puts = options_chain['putExpDateMap'].values() put_option_list = [] for i in Puts: for j in i.values(): for k in j: put_option_list.append(k) Puts_df = pd.DataFrame(put_option_list) Puts_df.set_index('description', inplace=True) return Puts_df elif only_type == False: Puts=options_chain['putExpDateMap'].values() Calls=options_chain['callExpDateMap'].values() call_option_list = [] for i in Calls: for j in i.values(): for k in j: call_option_list.append(k) Calls_df=pd.DataFrame(call_option_list) Calls_df.set_index('description', inplace=True) put_option_list = [] for i in Puts: for j in i.values(): for k in j: put_option_list.append(k) Puts_df = pd.DataFrame(put_option_list) Puts_df.set_index('description', inplace=True) return Calls_df, Puts_df else: raise ValueError('Incorrect only_type value') # Sets Dictionaries for Call, Put, and Equity Data # with format {'Ticker Symbol' : DataFrame of Data} call_chains = {} put_chains = {} equity_quotes = {} def get_option_chains(ticker): """ Gets option chains for specified symbols using given ticker symbol list named equity_tickers. 
Appends to dictionary as {'Ticker Symbol' : call chain in DataFrame}, {'Ticker Symbol' : put chain in DataFrame} """ options_chain = c.get_option_chain(symbol=ticker, strike_range=c.Options.StrikeRange.ALL) calls_chain, puts_chain = options_chain_cleaner(options_chain.json()) calls_chain['date'] = today puts_chain['date'] = today call_chains[ticker] = calls_chain[calls_chain['delta'] != -999.0] put_chains[ticker] = puts_chain[puts_chain['delta'] != -999.0] thread_list=[] def get_option_chains_threader(): for ticker in equity_tickers: threadProcess = threading.Thread(name='simplethread', target=get_option_chains, args=[ticker]) thread_list.append(threadProcess) for thread in thread_list: thread.start() for thread in thread_list: thread.join() def get_quotes(ticker): """ Gets quotes for specified equities using given ticker symbol list named equity_tickers. Appends to dictionary as {'Ticker Symbol' : quote in DataFrame}. """ quotes = c.get_quotes(symbols = ticker) equity_quotes[ticker] = pd.DataFrame(quotes.json()).T equity_quotes[ticker].drop(columns = ['52WkHigh', '52WkLow'], inplace = True) equity_quotes[ticker].replace({'': np.nan, ' ': np.nan}, inplace = True) thread_list2=[] def get_quotes_threader(): for ticker in equity_tickers: threadProcess = threading.Thread(name='simplethread', target=get_quotes, args=[ticker]) thread_list2.append(threadProcess) for thread in thread_list2: thread.start() for thread in thread_list2: thread.join() def NOPE(call_volumes: float or int, put_volumes: float or int, call_deltas: float, put_deltas: float, share_volume: float or int): """ Calculates NOPE, takes volumes and deltas as pandas Series and share volume as int. """ result = (sum((((call_volumes*100).mul(call_deltas*100, fill_value=0)).values-((put_volumes*100).mul(abs(put_deltas*100), fill_value=0)).values)))/share_volume return result def high_option_checker(option_volume: list or int or float, share_volume: int): """ Returns a number used to determine how "optioned" a ticker is. A value > say 0.4 means NOPE_MAD provides a fairly good window into predicting earnings behavior. """ result = np.nansum(option_volume)*100/share_volume return result # Set Dictionary for Call, Put, Share Volume and Delta Data call_deltas={} put_deltas={} call_volumes={} put_volumes={} share_volume = {} call_delta_sum = {} put_delta_sum = {} call_volume_sum = {} put_volume_sum = {} def delta_volumes(): # Appends Call, Put, Share Volume and Delta to Dictionaries # with format {'Ticker Symbol' : option delta or volume as series or share volume as int} for ticker in equity_tickers: call_deltas[ticker] = call_chains[ticker]['delta'].astype(float) put_deltas[ticker] = put_chains[ticker]['delta'].astype(float) call_volumes[ticker] = call_chains[ticker]['totalVolume'].astype(float) put_volumes[ticker] = put_chains[ticker]['totalVolume'].astype(float) share_volume[ticker] = int(equity_quotes[ticker]['totalVolume']) call_delta_sum[ticker]=call_chains[ticker]['delta'].astype(float).sum() put_delta_sum[ticker]=put_chains[ticker]['delta'].astype(float).sum() call_volume_sum[ticker]=call_chains[ticker]['totalVolume'].astype(float).sum() put_volume_sum[ticker]=put_chains[ticker]['totalVolume'].astype(float).sum() def _to_sql(): """ Stores option chains to csv using given ticker symbol list named equity_tickers to name. 
Saves with filename: {'Ticker Symbol'} Call Option Chain.csv or {'Ticker Symbol'} Put Option Chain.csv """ for ticker in equity_tickers: call_chain_to_sql = call_chains[ticker].copy() call_chain_to_sql.columns = call_chain_to_sql.columns.str.lower() call_chain_to_sql.to_sql('option_chains', con = engine, if_exists = 'append') put_chain_to_sql = put_chains[ticker].copy() put_chain_to_sql.columns = put_chain_to_sql.columns.str.lower() put_chain_to_sql.to_sql('option_chains', con = engine, if_exists = 'append') equity_quotes_to_sql = equity_quotes[ticker].copy() equity_quotes_to_sql.columns = equity_quotes_to_sql.columns.str.lower() equity_quotes_to_sql.to_sql('equity_quotes', con = engine, if_exists = 'append', index = False) # Finds NOPE Value Using Function and Appends to Dictionary # With Format {'Ticker Symbol' : NOPE value} NOPE_value = NOPE(call_deltas = call_deltas[ticker], put_deltas = put_deltas[ticker], call_volumes = call_volumes[ticker], put_volumes = put_volumes[ticker], share_volume = share_volume[ticker]) # Runs Function to Determine the Optioned Rate and append # to Dictionary with Form {'Ticker Symbol' : optioned rate} optionality = high_option_checker(option_volume = [call_volume_sum[ticker], put_volume_sum[ticker]], share_volume = share_volume[ticker]) #Creates dataframe with NOPE, optionality, and date data = {'nopevalue': NOPE_value, 'optionality': optionality, 'symbol': ticker, 'date': today} NOPE_valuedf = pd.DataFrame(data, index = [0]) #Saves to SQL table NOPE_to_sql = NOPE_valuedf NOPE_to_sql.to_sql('nope_values', con = engine, if_exists = 'append', index = False) get_option_chains_threader() get_quotes_threader() delta_volumes() _to_sql()
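# Editor's addition: a tiny worked example of NOPE(). Note the elementwise
# subtraction inside NOPE() assumes the call and put arrays have compatible
# shapes, so equal-length series are used here.
def _nope_example():
    """Calls: 10*100*50 + 5*100*25 = 62,500; puts: 8*100*40 + 4*100*20 = 40,000;
    NOPE = (62,500 - 40,000) / 1,000,000 = 0.0225"""
    calls_v = pd.Series([10.0, 5.0])
    calls_d = pd.Series([0.5, 0.25])
    puts_v = pd.Series([8.0, 4.0])
    puts_d = pd.Series([-0.4, -0.2])
    return NOPE(calls_v, puts_v, calls_d, puts_d, 1_000_000)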
tenet_stun.py
from threading import Thread
from time import sleep, time
from Tenet.sock_stun import udpSocket

'''
#### The Tenet library: a network transport library that implements
peer-to-peer transfer between clients connected to the Internet. ////
//// The computer's firewall may need to be disabled when using it. ////
####
'''

###### Tenet #####
class Tenet_stun:
    def __init__(self, server_addr=('127.0.0.1', 9084), group='A'):
        self.group = group
        self.server_addr = server_addr
        self.serve = True
        self.address = None
        self.nodes = []
        self.lock = False

    ### registration: send our info to the server and receive the heartbeat address ###
    def sign(self, info={'id': None, 'passward': None}, timeout=60):
        info['group'] = self.group
        sign_sock = udpSocket(data_addr=self.server_addr)
        ### keep time; print the connection status every few seconds ###
        last_time = time()
        start_time = time()
        while self.serve:
            sign_sock.send(info)
            data = sign_sock.recv()
            if type(data) == str:
                print(data)
                continue
            if type(data) == list:
                data[0] = self.server_addr[0]
                self.heart_addr = tuple(data)
                print('< mode: stun> connect to ' + str(self.server_addr) + ' success...')
                print('< mode: stun> heart_addr: ' + str(self.heart_addr))
                return True
            if time() - last_time >= 4:
                print('< mode: stun> trying to connect to ' + str(self.server_addr))
                last_time = time()
            if time() - start_time > timeout:
                print('< mode: stun> connect to ' + str(self.server_addr) + ' failed...')
                sign_sock.sock.close()
                self.serve = False
                return False
            sleep(0.2)

    def create_node(self, name):
        # create a new data transfer node
        node = udpSocket(heart_addr=self.heart_addr)
        node.name = name
        self.nodes.append(node)
        return node

    def delete_node(self, node):
        self.nodes.remove(node)
        del node

    def update(self):
        last_time = time()
        name = self.group + '-00'
        update_sock = udpSocket(heart_addr=self.heart_addr)
        update_sock.data_addr = self.heart_addr
        update_sock.name = name
        #####################
        while self.serve:
            update_sock.heartbit()
            data = update_sock.recv()
            sleep(0.2)
            if data != None:
                print('get address success: ' + str(data))
                self.address = data
                break
        ####################### initial node assignment
        flag = 'A'
        if self.group == 'A':
            flag = 'B'
        if self.group == 'B':
            flag = 'A'
        for node in self.nodes:
            name = node.name
            name = name.replace(name[0], flag)
            try:
                node.data_addr = self.address[name]
                node.serve = True
                # print('node_name:' + node.name + ' data_addr: ' + str(node.data_addr))
            except Exception as e:
                node.serve = False
                # print('address update error')
        ####################### initial node assignment end
        while self.serve:
            if time() - last_time >= 4:
                update_sock.heartbit()
                last_time = time()
            ########
            if self.lock == True:
                update_sock.send('lock')
            if self.lock == False:
                update_sock.send('unlock')
            data = update_sock.recv()
            if data == None:
                continue
            self.address = data

    def maintain(self):
        # maintainer: push the refreshed address book out to every node
        flag = 'A'
        if self.group == 'A':
            flag = 'B'
        if self.group == 'B':
            flag = 'A'
        for node in self.nodes:
            node.heartbit()
        ############
        while self.serve:
            sleep(32)
            for node in self.nodes:
                name = node.name
                name = name.replace(name[0], flag)
                try:
                    node.data_addr = self.address[name]
                    node.serve = True
                    # print('node_name:' + node.name + ' data_addr: ' + str(node.data_addr))
                except Exception as e:
                    node.serve = False
                    # print('address update error')
                node.heartbit()

    def run(self):
        if self.serve == False:
            return
        t1 = Thread(target=self.update)
        print('update node start success .....')
        sleep(0.2)
        t2 = Thread(target=self.maintain)
        print('maintain node start success .....')
        t1.start()
        t2.start()

    def __del__(self):
        self.serve = False
        try:
            for node in self.nodes:
                node.sock.close()
        except:
            pass
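# Editor's sketch: typical client-side wiring for Tenet_stun, based only on the
# methods defined above (sign -> create_node -> run). The server address, id and
# node name are placeholders.
def tenet_example():
    tenet = Tenet_stun(server_addr=('127.0.0.1', 9084), group='A')
    if not tenet.sign(info={'id': 'demo', 'passward': 'demo'}, timeout=30):
        return None
    node = tenet.create_node('A-01')  # the peer in group 'B' registers 'B-01'
    tenet.run()  # starts the update/maintain threads
    return node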
train.py
#!/usr/bin/env python """Train models.""" import os import signal import torch import onmt.opts as opts import onmt.utils.distributed from onmt.utils.logging import logger from onmt.train_single import main as single_main from onmt.utils.parse import ArgumentParser def main(opt): ArgumentParser.validate_train_opts(opt) ArgumentParser.update_model_opts(opt) ArgumentParser.validate_model_opts(opt) nb_gpu = len(opt.gpu_ranks) if opt.world_size > 1: mp = torch.multiprocessing.get_context('spawn') # Create a thread to listen for errors in the child processes. error_queue = mp.SimpleQueue() error_handler = ErrorHandler(error_queue) # Train with multiprocessing. procs = [] for device_id in range(nb_gpu): procs.append(mp.Process(target=run, args=( opt, device_id, error_queue, ), daemon=True)) procs[device_id].start() logger.info(" Starting process pid: %d " % procs[device_id].pid) error_handler.add_child(procs[device_id].pid) for p in procs: p.join() elif nb_gpu == 1: # case 1 GPU only single_main(opt, 0) else: # case only CPU single_main(opt, -1) def run(opt, device_id, error_queue): """ run process """ try: gpu_rank = onmt.utils.distributed.multi_init(opt, device_id) if gpu_rank != opt.gpu_ranks[device_id]: raise AssertionError("An error occurred in \ Distributed initialization") single_main(opt, device_id) except KeyboardInterrupt: pass # killed by parent, do nothing except Exception: # propagate exception to parent process, keeping original traceback import traceback error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc())) class ErrorHandler(object): """A class that listens for exceptions in children processes and propagates the tracebacks to the parent process.""" def __init__(self, error_queue): """ init error handler """ import signal import threading self.error_queue = error_queue self.children_pids = [] self.error_thread = threading.Thread( target=self.error_listener, daemon=True) self.error_thread.start() signal.signal(signal.SIGUSR1, self.signal_handler) def add_child(self, pid): """ error handler """ self.children_pids.append(pid) def error_listener(self): """ error listener """ (rank, original_trace) = self.error_queue.get() self.error_queue.put((rank, original_trace)) os.kill(os.getpid(), signal.SIGUSR1) def signal_handler(self, signalnum, stackframe): """ signal handler """ for pid in self.children_pids: os.kill(pid, signal.SIGINT) # kill children processes (rank, original_trace) = self.error_queue.get() msg = """\n\n-- Tracebacks above this line can probably be ignored --\n\n""" msg += original_trace raise Exception(msg) def _get_parser(): parser = ArgumentParser(description='train.py') opts.config_opts(parser) opts.model_opts(parser) opts.train_opts(parser) return parser if __name__ == "__main__": parser = _get_parser() opt = parser.parse_args() main(opt)
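# Editor's sketch (illustrative): the ErrorHandler pattern above in miniature --
# a child process pushes its traceback onto the shared queue, SIGUSR1 wakes the
# parent, and signal_handler() re-raises with the child's traceback. POSIX-only.
def _crashing_child(error_queue):
    try:
        raise RuntimeError("boom")
    except Exception:
        import traceback
        error_queue.put((0, traceback.format_exc()))

# mp = torch.multiprocessing.get_context("spawn")
# q = mp.SimpleQueue()
# handler = ErrorHandler(q)
# p = mp.Process(target=_crashing_child, args=(q,), daemon=True)
# p.start(); handler.add_child(p.pid); p.join()  # parent raises shortly after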
__init__.py
import subprocess import os, sys, threading, time from datetime import datetime CURRENT_DIR = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(CURRENT_DIR, "..", "..")) import constants APT_GET_UPDATES = "apt-get upgrade -s" PIP_UPDATES = "pip list -o" OUTDATED_NOTIFICATION = "There are %d outdated packages in %s" should_continue = True def get_num_apt_upgrades(): global APT_GET_UPDATES apt_update_string = subprocess.check_output(APT_GET_UPDATES.split(" ")) significant_data = None for line in apt_update_string.split("\n"): if "newly installed" in line and "to remove" in line: significant_data = line return int(significant_data.split(" upgraded")[0]) def get_num_pip_upgrades(): global PIP_UPDATES with open(os.devnull, 'w') as devnull: pip_update_string = subprocess.check_output(PIP_UPDATES.split(" "), stderr=devnull) return len(pip_update_string.strip().split("\n")) def display_notification(service, num): global OUTDATED_NOTIFICATION feedback = OUTDATED_NOTIFICATION % (num, service) os.system(constants.DISPLAY_NOTIFICATION % (feedback,)) def hourly_check(): global should_continue while should_continue: if datetime.now().minute == 0: apt_num = get_num_apt_upgrades() pip_num = get_num_pip_upgrades() if apt_num > 0: display_notification("Apt", apt_num) if pip_num > 0: display_notification("Pip", pip_num) time.sleep(60) GLOBAL_THREAD = None def start(): global GLOBAL_THREAD if GLOBAL_THREAD is not None: return GLOBAL_THREAD = threading.Thread(target=hourly_check) GLOBAL_THREAD.daemon = True GLOBAL_THREAD.start() def stop(): global should_continue should_continue = False if __name__ == "__main__": print get_num_apt_upgrades() print get_num_pip_upgrades()
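# ---------------------------------------------------------------------------
# Worked example of the parsing done in get_num_apt_upgrades() above:
# `apt-get upgrade -s` ends its simulation with a summary line of the form
# below, and the function reads the leading upgrade count from it. The
# sample line is illustrative, not captured from a real run.
# ---------------------------------------------------------------------------
SAMPLE_SUMMARY = "3 upgraded, 0 newly installed, 0 to remove and 5 not upgraded."
assert int(SAMPLE_SUMMARY.split(" upgraded")[0]) == 3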
sb-wa-stt.py
#
# K9 Conversation by Richard Hopkins using -
#
# Kitt-AI Snowboy for hotword recognition
# Watson Speech to Text (streaming to sockets)
# Watson Assistant for Conversation
# eSpeak Text to Speech
# Robot status displayed with Adafruit PWM Servo Driver driving LED brightness
#
# Snowboy elements derived from
# Kitt-AI/snowboy/examples/Python/demo.py
#
# March 2018
#
# Released under The Unlicense
#
from watson_developer_cloud import ConversationV1
from watson_developer_cloud import SpeechToTextV1
from watson_developer_cloud.websocket import RecognizeCallback
import Adafruit_PCA9685
import os, sys, signal, snowboydecoder, re, base64, json, ssl, subprocess, threading, time

variables = ['WCpassword', 'WCusername', 'WCworkspace', 'WTTSpassword', 'WTTSusername']
for variable in variables:
    if variable not in os.environ:
        print "Please set the environment variable " + variable
        sys.exit(1)

# Initialising STT global variables
speech_received = False  # has speech been returned by Watson?
transcript = "silence"   # default value for speech if nothing returned

# Initialise snowboy global variables
model = "./K9.pmdl"
interrupted = False

# Initialise conversation global variables
conversation = ConversationV1(
    username=os.environ['WCusername'],
    password=os.environ['WCpassword'],
    version='2018-02-16')
workspace_id = os.environ['WCworkspace']

print "Initialising speech to text..."
# Initialise speech to text global variable
stt_client = SpeechToTextV1(
    username=os.environ['WTTSusername'],
    password=os.environ['WTTSpassword'],
    url='https://stream.watsonplatform.net/speech-to-text/api')

# Initialise the PWM device using the default address
pwm = 0
# pwm = Adafruit_PCA9685.PCA9685()
# pwm.set_pwm_freq(100)  # Set frequency to 100 Hz

# Create names for each PWM channel
PWM_eye = 0
PWM_hover = 1

def signal_handler(signum, frame):
    global interrupted
    interrupted = True

def interrupt_callback():
    global interrupted
    return interrupted

signal.signal(signal.SIGINT, signal_handler)

detector = snowboydecoder.HotwordDetector(model, sensitivity=0.5)

# Web Socket callback object for streaming audio to the Watson
# Speech to Text service
class MyRecognizeCallback(RecognizeCallback):
    def __init__(self):
        RecognizeCallback.__init__(self)
        self.listening = False  # set before stream_audio() can read it

    def on_transcription(self, transcript):
        print(transcript)

    def on_connected(self):
        print('Connection was successful')
        self.stream_audio_thread = threading.Thread(target=self.stream_audio)
        self.stream_audio_thread.start()

    def on_error(self, error):
        global speech_received  # module-level flag read by speech_to_text()
        print('Error received: {}'.format(error))
        set_PWM(PWM_eye, 3)
        speech_received = True
        self.listening = False

    def on_inactivity_timeout(self, error):
        print('Inactivity timeout: {}'.format(error))

    def on_listening(self):
        self.listening = True
        set_PWM(PWM_eye, 100)
        print('Service is listening')

    def on_transcription_complete(self):
        global speech_received  # module-level flag read by speech_to_text()
        set_PWM(PWM_eye, 3)
        self.stream_audio_thread.join()
        self.listening = False
        speech_received = True
        print('Transcription completed')

    def on_hypothesis(self, hypothesis):
        print(hypothesis)

    def stream_audio(self):
        print "Waiting for audio..."
        while not self.listening:
            time.sleep(0.1)
        reccmd = ["arecord", "-f", "S16_LE", "-r", "16000", "-t", "raw"]
        print "Starting to send audio to STT..."
        p = subprocess.Popen(reccmd, stdout=subprocess.PIPE)
        while self.listening:
            data = p.stdout.read(1024)
            try:
                self.send_audio(bytearray(data))
            except ssl.SSLError:
                pass
        p.kill()
        print "Audio sent."
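# ---------------------------------------------------------------------------
# Worked numbers for the capture settings used in stream_audio() above:
# arecord is asked for raw S16_LE mono at 16 kHz, i.e. 2 bytes per sample,
# so each 1024-byte chunk pushed to the websocket covers 32 ms of audio.
# ---------------------------------------------------------------------------
BYTES_PER_SAMPLE = 2                       # S16_LE
SAMPLE_RATE = 16000                        # matches the arecord flags
CHUNK_BYTES = 1024
assert CHUNK_BYTES / float(BYTES_PER_SAMPLE * SAMPLE_RATE) == 0.032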
# K9 hotword has been detected
def K9_detected():
    global pwm
    global stop_now
    print "K9 hotword detected...\n"
    set_PWM(PWM_eye, 30)
    stop_now = True  # get the detector to terminate

def speech_to_text():
    global transcript
    global speech_received
    speech_received = False  # has speech been returned by Watson?
    transcript = "silence"   # default value for speech if nothing returned
    mycallback = MyRecognizeCallback()
    while not speech_received:
        time.sleep(0.1)
    return transcript

def stop_snowboy():
    global stop_now
    if stop_now:
        print "Snowboy stop interrupt."
    return stop_now

# Sets brightness of PWM lights from 0 to 100
def set_PWM(light, brightness):
    global pwm
    light = int(light)
    brightness = int(float(brightness) * 40.95)
    if light >= 0 and light <= 15:  # check that the PWM channel exists
        if brightness >= 0 and brightness <= 4095:  # check that the brightness is valid
            # pwm.set_pwm(0, light, brightness)
            print "Eye brightness set to: " + str(brightness)

# Initialise the eye lights at 3%
set_PWM(PWM_eye, 3)

go = True
while go:
    interrupted = False
    stop_now = False
    print "Listening for K9 keyword... press Ctrl+C to exit"
    detector.start(detected_callback=K9_detected,
                   interrupt_check=stop_snowboy,
                   sleep_time=0.03)
    detector.terminate()
    time.sleep(0.03)
    speech_received = False
    transcript = "silence"
    print "Calling speech_to_text"
    speech_to_text()
    print "To conversation: " + transcript
    response = conversation.message(workspace_id=workspace_id,
                                    input={'text': transcript})
    results = re.search('\], u\'text\': \[u\'(.*)\'\]\}, u\'alt', str(response))
    answer = results.group(1)
    answer = './tts ' + answer
    print str(answer)
    subprocess.call(answer, shell=True)
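# ---------------------------------------------------------------------------
# Worked example of the brightness scaling in set_PWM() above: a 0-100
# percentage maps onto the PCA9685's 12-bit duty-cycle range (0-4095) via
# the 40.95 factor, e.g.
#   int(float(3) * 40.95)   -> 122   (the idle eye level used above)
#   int(float(30) * 40.95)  -> 1228  (hotword acknowledgement)
#   int(float(100) * 40.95) -> 4095  (full brightness while listening)
# ---------------------------------------------------------------------------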
test_autograd.py
import gc import io import math import os import random import sys import tempfile import threading import time import unittest import uuid import warnings from copy import deepcopy from collections import OrderedDict from itertools import product, permutations from operator import mul from functools import reduce, partial import torch from torch import nn from torch._six import inf, nan from torch.autograd.function import once_differentiable from torch.autograd.profiler import (profile, record_function, emit_nvtx) from torch.autograd.profiler_util import (_format_time, EventList, FunctionEvent, FunctionEventAvg) import torch.autograd.functional as autogradF from torch.utils.checkpoint import checkpoint from torch.testing import make_tensor from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_utils import (TestCase, run_tests, skipIfNoLapack, suppress_warnings, slowTest, IS_WINDOWS, IS_MACOS, CudaMemoryLeakCheck, TEST_WITH_ROCM, disable_gc, gradcheck, gradgradcheck) from torch.autograd import Variable, Function, detect_anomaly, kineto_available from torch.autograd.function import InplaceFunction import torch.autograd.forward_ad as fwAD from torch.testing._internal.common_methods_invocations import mask_not_all_zeros from torch.testing._internal.common_device_type import (instantiate_device_type_tests, skipCUDAIfRocm, onlyCPU, onlyCUDA, onlyOnCPUAndCUDA, dtypes, dtypesIfCUDA, deviceCountAtLeast, skipCUDAIfCudnnVersionLessThan, skipCUDAIf, skipMeta) from torch.testing._internal.common_dtype import get_all_dtypes import pickle def graph_desc(fn): if fn is None: return 'None' result = type(fn).__name__ + '(' next_functions = fn.next_functions for next_fn, _ in next_functions: result += graph_desc(next_fn) result += ', ' if next_functions: result = result[:-2] return result + ')' class TestAutograd(TestCase): def test_tensor_grad_warnings(self): dummy = torch.empty(1) with warnings.catch_warnings(record=True) as w: # Accessing .grad on leaf dummy.requires_grad_() foo = dummy.grad self.assertEqual(len(w), 0) # Accessing .grad on non-leaf dummy = dummy.clone() foo = dummy.grad self.assertEqual(len(w), 1) # Accessing .grad on non-leaf that retains gradients dummy.retain_grad() foo = dummy.grad self.assertEqual(len(w), 1) def _function_test(self, cls): x = torch.randn(5, 5, requires_grad=True) y = torch.randn(5, 5, requires_grad=True) result = cls.apply(x, 2, y) go = torch.ones((), requires_grad=True) result.sum().backward(go, create_graph=True) self.assertEqual(x.grad, y + torch.ones(5, 5)) self.assertEqual(y.grad, x + torch.ones(5, 5) * 2) self.assertIsNotNone(x.grad.grad_fn) self.assertIsNotNone(y.grad.grad_fn) return x, y def test_function(self): class MyFunction(Function): @staticmethod def forward(ctx, tensor1, pyscalar, tensor2): ctx.pyscalar = pyscalar ctx.save_for_backward(tensor1, tensor2) return tensor1 + pyscalar * tensor2 + tensor1 * tensor2 @staticmethod def backward(ctx, grad_output): var1, var2 = ctx.saved_tensors # NOTE: self is the test case here self.assertIsInstance(var1, torch.Tensor) self.assertIsInstance(var2, torch.Tensor) self.assertIsInstance(grad_output, torch.Tensor) return (grad_output + grad_output * var2, None, grad_output * ctx.pyscalar + grad_output * var1) x, y = self._function_test(MyFunction) x_grad_desc = graph_desc(x.grad.grad_fn) y_grad_desc = graph_desc(y.grad.grad_fn) self.assertExpected(x_grad_desc, "x_grad_desc") self.assertExpected(y_grad_desc, "y_grad_desc") def test_once_differentiable(self): class 
MyFunction(Function): @staticmethod def forward(ctx, tensor1, pyscalar, tensor2): ctx.pyscalar = pyscalar ctx.save_for_backward(tensor1, tensor2) return tensor1 + pyscalar * tensor2 + tensor1 * tensor2 @staticmethod @once_differentiable def backward(ctx, grad_output): self.assertFalse(torch.is_grad_enabled()) t1, t2 = ctx.saved_tensors return (grad_output + grad_output * t2, None, grad_output * ctx.pyscalar + grad_output * t1) x, y = self._function_test(MyFunction) self.assertEqual(graph_desc(x.grad.grad_fn), 'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))') self.assertEqual(graph_desc(y.grad.grad_fn), 'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))') def test_function_returns_input(self): class MyFunction(Function): @staticmethod def forward(ctx, x): return x @staticmethod def backward(ctx, grad): return grad * 2 for shape in [(1,), ()]: v = torch.ones(shape, requires_grad=True) MyFunction.apply(v).backward() self.assertEqual(v.grad, torch.full(shape, 2.)) with torch.no_grad(): v.grad.zero_() MyFunction.apply(v.clone()).backward() self.assertEqual(v.grad, torch.full(shape, 2.)) def test_function_returns_undefined_tensor(self): class MyFunction(Function): @staticmethod def forward(ctx, x): return x * 2 @staticmethod def backward(ctx, grad): return None # Test that undefined tensors returned from custom backward function # are propagated as undefined and not tensor full of zeroes x = torch.ones(1, requires_grad=True) MyFunction.apply(x).backward() self.assertIsNone(x.grad) MyFunction.apply(x ** 2).backward() self.assertIsNone(x.grad) MyFunction.apply(x).sum().backward() self.assertIsNone(x.grad) self.assertIsNone(torch.autograd.grad(MyFunction.apply(x), x, allow_unused=True)[0]) def test_materialize_grads(self): class MyFunction(Function): @staticmethod def forward(ctx, x): return x @staticmethod def backward(ctx, grad): self.assertEqual(grad, torch.zeros(1)) return grad x = torch.ones(1, requires_grad=True) torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward() def test_dont_materialize_grads(self): class MyFunction(Function): @staticmethod def forward(ctx, x): ctx.set_materialize_grads(False) return x @staticmethod def backward(ctx, grad): self.assertIsNone(grad) return grad x = torch.ones(1, requires_grad=True) torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward() def test_legacy_function_deprecation_exception(self): # Trigger exception class MyFunction(Function): def forward(self, x): return x def backward(self, grad_output): return grad_output # Check exception occurs with self.assertRaisesRegex( RuntimeError, 'Legacy autograd function with non-static forward method is deprecated'): MyFunction()(torch.randn(3, 4)) class SimulateBackwardError(Function): @staticmethod def forward(ctx, input): return input.clone() @staticmethod @once_differentiable def backward(ctx, input): raise Exception("Simulate error on backward pass") def test_custom_function_exception(self): t1 = torch.rand((3, 3), requires_grad=True) t2 = torch.rand((3, 3), requires_grad=True) tmp = (t1 + t2) * (t1 + t2) t3 = TestAutograd.SimulateBackwardError.apply(tmp) with self.assertRaisesRegex(Exception, "Simulate error on backward pass"): t3.sum().backward() def test_custom_function_non_tensor_inputs_outputs(self): class MyFunction(Function): @staticmethod def forward(ctx, t1, t2, scale, t3): t4 = t1 + t2 * t3 t5 = t1 * t2 + t3 t4 *= scale t5 *= scale # Save scale ctx.scale = scale ctx.save_for_backward(t1, t2, t3) return scale, t4, None, True, t5, "bar", 
t1 @staticmethod @once_differentiable def backward(ctx, *grads): # Verify grads self.assertEqual(7, len(grads)) self.assertIsNone(grads[0]) self.assertIsNone(grads[2]) self.assertIsNone(grads[3]) self.assertIsNone(grads[5]) scale = ctx.scale var1, var2, var3 = ctx.saved_tensors return ( grads[1] * scale + grads[4] * var2 * scale + grads[6], grads[1] * var3 * scale + grads[4] * var1 * scale, None, grads[1] * var2 * scale + grads[4] * scale, ) t1 = torch.rand(10, dtype=torch.double, requires_grad=True) t2 = torch.rand(10, dtype=torch.double, requires_grad=True) t3 = torch.rand(10, dtype=torch.double) scale = random.randint(0, 10) res = MyFunction.apply(t1, t2, scale, t3) self.assertEqual(scale, res[0]) self.assertEqual((t1 + t2 * t3) * scale, res[1]) self.assertEqual(None, res[2]) self.assertEqual(True, res[3]) self.assertEqual((t1 * t2 + t3) * scale, res[4]) self.assertEqual("bar", res[5]) self.assertEqual(t1, res[6]) # Validate running backward. torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()]) self.assertIsNotNone(t1.grad) self.assertIsNotNone(t2.grad) self.assertIsNone(t3.grad) # Test gradcheck def foo(t1, t2, t3): res = MyFunction.apply(t1, t2, scale, t3) return res[1], res[4], res[6] gradcheck(foo, (t1, t2, t3)) def test_custom_function_no_tensors(self): class MyFunction(Function): @staticmethod def forward(ctx, t1, t2, scale, t3): t4 = t1 + t2 * t3 t5 = t1 * t2 + t3 t4 *= scale t5 *= scale return scale, t4, None, True, t5, "bar", t1 @staticmethod @once_differentiable def backward(ctx, *args): return (args[0], args[1], None, args[2]) t1 = random.random() t2 = random.random() t3 = random.random() scale = random.randint(0, 10) res = MyFunction.apply(t1, t2, scale, t3) self.assertEqual(scale, res[0]) self.assertEqual((t1 + t2 * t3) * scale, res[1]) self.assertEqual(None, res[2]) self.assertEqual(True, res[3]) self.assertEqual((t1 * t2 + t3) * scale, res[4]) self.assertEqual("bar", res[5]) self.assertEqual(t1, res[6]) def test_invalid_gradients(self): class MyFunction(Function): @staticmethod def forward(ctx, x): return x * 2 @staticmethod def backward(ctx, grad_output): return torch.randn(10, dtype=torch.float) with self.assertRaisesRegex(RuntimeError, 'expected shape'): input = torch.randn(5, 5, dtype=torch.float, requires_grad=True) MyFunction.apply(input).sum().backward() def test_unrelated_inputs(self): # test to ensure grad(grad)check runs successfully even if there is an # unrelated (but differentiable) inputs def my_function(x, y): return x * x x = torch.rand(10, dtype=torch.double, requires_grad=True) y = torch.rand(10, dtype=torch.double, requires_grad=True) gradcheck(my_function, (x, y)) gradgradcheck(my_function, (x, y)) def test_not_implemented_grad(self): a = torch.rand(2, requires_grad=True) # if grad for nextafter ends up being implemented, this should be changed y = torch.nextafter(a, a).sum() with self.assertRaisesRegex( NotImplementedError, 'the derivative for .* is not implemented'): y.backward() def test_not_implemented_fwad(self): x = torch.randn(3) v = torch.rand(3) mat = torch.randn(2, 3) with fwAD.dual_level(): dual_x = fwAD.make_dual(x, v) err_msg = r"Trying to use forward AD with .* that does not support it" hint_msg = "Running forward AD for an OP that does not implement it should raise a NotImplementedError" with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg): # if forward AD ends up being implemented for torch.mv, choose a different op res = torch.mv(mat, dual_x) def test_accumulate_grad(self): grad_output = torch.ones(5, 
5) def compute_grad(create_graph): x = torch.randn(5, 5, requires_grad=True) y = x + 2 y.backward(grad_output, retain_graph=True) x_grad = x.grad x_grad_clone = x.grad.clone() y.backward(grad_output, create_graph=create_graph) return x_grad, x_grad_clone # Accumulate in-place when create_graph is False x_grad, x_grad_clone = compute_grad(create_graph=False) self.assertEqual(x_grad, x_grad_clone * 2) # Accumulate out-of-place when create_graph is False x_grad, x_grad_clone = compute_grad(create_graph=True) self.assertEqual(x_grad, x_grad_clone) def test_accumulate_grad_tensor_reference(self): def _test_grad_tensor(params_grad_tensor, backward_grad_tensor, should_preserve_reference, create_graph): params = torch.tensor([1.5, 1.5]).requires_grad_() params.grad = params_grad_tensor grad_saved = params.grad params.backward(backward_grad_tensor, create_graph=create_graph) self.assertEqual(id(grad_saved) == id(params.grad), should_preserve_reference) for create_graph in (False, True): # Accumulate dense gradient to sparse gradient will change the `params.grad` reference _test_grad_tensor( torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])), torch.tensor([1.5, 1.5]), False, # never accumulates in-place create_graph) # Accumulate dense gradient to dense gradient will preserve the `params.grad` reference, # but only if create_graph=False. _test_grad_tensor( torch.tensor([1.5, 1.5]), torch.tensor([1.5, 1.5]), not create_graph, create_graph) # Accumulate sparse gradient to sparse gradient will preserve the `params.grad` reference, # but only if create_graph=False. _test_grad_tensor( torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])), torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])), not create_graph, create_graph) @skipIfNoLapack def test_slogdet_sign(self): a = torch.randn(3, 3, dtype=torch.double, requires_grad=True) s, logdet = a.slogdet() # test that sign should not require grad self.assertFalse(s.requires_grad) # test that backward through computation involving sign works def sign_mul_logdet(mat): s, logdet = mat.slogdet() return s * logdet u, s, v = a.detach().svd() s.abs_().clamp_(0.0001) for sign in (-1, 1): s[-1] = sign mat = torch.linalg.multi_dot([u, s.diag(), v.t()]).requires_grad_() gradcheck(sign_mul_logdet, mat) gradgradcheck(sign_mul_logdet, mat) def test_sum_to_with_empty_dim_grad(self): a = torch.rand(4, 0, requires_grad=True) b = torch.rand(4, 1, requires_grad=True) c = a + b assert c.shape == (4, 0) c.sum().backward() self.assertEqual(b.grad, torch.zeros(4, 1)) self.assertEqual(a.grad, torch.zeros(4, 0)) def test_hessian_vector(self): x = torch.randn(2, 2, requires_grad=True) y = torch.randn(2, 2, requires_grad=True) z = x ** 2 + y * x + y ** 2 z.backward(torch.ones(2, 2), create_graph=True) with torch.no_grad(): x_grad = 2 * x + y y_grad = x + 2 * y self.assertEqual(x.grad, x_grad) self.assertEqual(y.grad, y_grad) grad_sum = 2 * x.grad + y.grad grad_sum.backward(torch.ones(2, 2)) x_hv = torch.ones(2, 2) * 5 y_hv = torch.ones(2, 2) * 4 self.assertEqual(x.grad, x_grad + x_hv) self.assertEqual(y.grad, y_grad + y_hv) def test_grad(self): x = torch.randn(2, 2, requires_grad=True) y = torch.randn(2, 2, requires_grad=True) z = x ** 2 + y * x + y ** 2 z.backward(torch.ones(2, 2), create_graph=True) x_grad = 2 * x + y y_grad = x + 2 * y self.assertEqual(x.grad, x_grad) self.assertEqual(y.grad, y_grad) grad_sum = 2 * x.grad + y.grad x_hv = torch.autograd.grad( outputs=[grad_sum], grad_outputs=[torch.ones(2, 
2)], inputs=[x], create_graph=True) expected_x_hv = torch.ones(2, 2) * 5 expected_y_hv = torch.ones(2, 2) * 4 self.assertEqual(x_hv[0], expected_x_hv) self.assertEqual(x.grad, x_grad) self.assertEqual(y.grad, y_grad) # Test that grad_outputs and outputs have the same shape grad_out = torch.ones(2) try: torch.autograd.grad( outputs=[grad_sum], grad_outputs=[grad_out], inputs=[x], create_graph=True) self.assertFail() except RuntimeError as error: self.assertEqual(str(error), "Mismatch in shape: grad_output[0] has a shape of " + str(grad_out.shape) + " and output[0] has a shape of " + str(grad_sum.shape) + ".") def test_grad_nonleaf(self): x_init = torch.randn(2, 2, requires_grad=True) x = x_init y = torch.randn(2, 2, requires_grad=True) grad_output = torch.ones(2, 2) def fn(x): return x ** 2 + y * x + y ** 2 for _ in range(5): grad_x, = torch.autograd.grad( fn(x), x, grad_outputs=grad_output, create_graph=True) grad_x_expected = 2 * x + y self.assertIsNone(y.grad) self.assertIsNone(x.grad) self.assertEqual(grad_x, grad_x_expected) x = x + 0.05 * grad_x val_init = fn(x_init).sum() val_final = fn(x).sum() self.assertGreater(val_final, val_init) x.backward(grad_output) self.assertIsNotNone(y.grad) self.assertIsNotNone(x_init.grad) def test_grad_nonleaf_many_outputs(self): # This checks an edge case for function callbacks # We want to capture two grads of a function, but can only # register a single callback. x = torch.randn(4, 2, requires_grad=True) a, b = x.chunk(2) def hook(*grads): hook_called[0] = True hook_called = [False] x.register_hook(hook) go = torch.randn(2, 2) grad_a, grad_b = torch.autograd.grad( (a + 2 * b), [a, b], grad_outputs=go, create_graph=True) self.assertEqual(grad_a, go) self.assertEqual(grad_b, go * 2) self.assertFalse(hook_called[0]) self.assertIsNone(x.grad) def test_grad_nonleaf_register_hook(self): # This checks an edge case for register_hook. 
# We want to capture grad of a nonleaf tensor, # but avoid segfault during backward of other nonleaf tensors x = torch.randn(5, requires_grad=True) x_list = x.unbind() x0 = x_list[0] hook_results = [None] def hook(grad): hook_results[0] = grad x0.register_hook(hook) x_list[0].backward() self.assertEqual(hook_results[0], torch.tensor(1.)) expected_grad = torch.tensor([1., 0, 0, 0, 0]) self.assertEqual(x.grad, expected_grad) self.assertIsNone(x_list[0].grad) for i in range(1, 5, 1): x_list[i].backward() self.assertEqual(hook_results[0], None) expected_grad[i] = 1.0 self.assertEqual(x.grad, expected_grad) self.assertIsNone(x_list[i].grad) def test_hook_with_no_name(self): # Create a hook that do not have a __name__ attribute class MyHookClass: def __call__(self, grad): return grad.clone() x = torch.randn(5, requires_grad=True).clone() x.register_hook(MyHookClass()) x.sum().backward() # Should run fine def test_sharded_grad(self): leaves = [torch.zeros(5, 5, requires_grad=True) for _ in range(10)] intermediates = [l * i + l * l for i, l in enumerate(leaves)] loss = sum(v * i for i, v in enumerate(intermediates)).sum() # define a helper for dividing intermediates into groups def group(l, group_size): return (l[i:i + group_size] for i in range(0, len(l), group_size)) # Compute the d loss / d intermediates in chunks of shard_size shard_size = 2 d_intermediates = [d_i for intermediates_batch in group(intermediates, shard_size) for d_i in torch.autograd.grad(loss, intermediates_batch)] # Compute rest of backward pass torch.autograd.backward(intermediates, d_intermediates) for i, l in enumerate(leaves): self.assertEqual(l.grad, i * i * (1 + l)) def test_backward_badcalls(self): x = torch.ones(1) with self.assertRaisesRegex(RuntimeError, 'does not require grad'): x.backward() def test_grad_badcalls(self): x = torch.ones(1) y = x ** 2 with self.assertRaisesRegex(RuntimeError, 'does not require grad'): torch.autograd.grad(x, y) with self.assertRaisesRegex(RuntimeError, 'does not require grad'): torch.autograd.grad(y, x) x = torch.ones(1, requires_grad=True) y = x ** 2 torch.autograd.grad(y, x) # this should succeed now def test_grad_empty_inputs(self): x = torch.tensor([1.0], requires_grad=True) with self.assertRaisesRegex(ValueError, "grad requires non-empty inputs."): torch.autograd.grad(2 * x, [], grad_outputs=torch.tensor([1.0])) def test_grad_fn_badcalls(self): error_regex = 'expected .* arguments, got .* instead' x = torch.ones(1, requires_grad=True) y = x ** 2 with self.assertRaisesRegex(TypeError, error_regex): y.grad_fn(x.detach(), x.detach()) # too many with self.assertRaisesRegex(TypeError, error_regex): y.grad_fn() # too few y.grad_fn(x.detach()) # this should succeed def test_grad_unreachable(self): x = torch.ones(1, requires_grad=True) y = torch.ones(1, requires_grad=True) # Make sure x and y have grad accumulators allocated z = x * 2 w = y * 2 grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=True) self.assertEqual(grad_x, x * 2) self.assertIsNone(grad_y) # This is slightly different than the case above, because z doesn't even # have a grad accumulator allocated. 
z = torch.ones(1, requires_grad=True) grad_x, grad_z = torch.autograd.grad(x * 2, [x, z], allow_unused=True) self.assertEqual(grad_x, x * 2) self.assertIsNone(grad_z) # allow_unused=False, but grads contains None inside, should throw with self.assertRaisesRegex(RuntimeError, "Set allow_unused=True"): grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=False) def test_grad_unreachable_discovery(self): # Test that certain nodes are not erroneously executed when an input # is unreachable. See #39784 class MyFunc(torch.autograd.Function): @staticmethod def forward(ctx, x): return x @staticmethod def backward(ctx, x): self.fail("This node should not be executed!") x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2) y = torch.randn(1, requires_grad=True) (gY,) = torch.autograd.grad(x, (y, ), allow_unused=True) self.assertIsNone(gY) x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2) y = torch.randn(1, requires_grad=True) z = torch.randn(1, requires_grad=True) (gY, gZ) = torch.autograd.grad(x + z, (y, z), allow_unused=True) self.assertIsNone(gY) self.assertIsNotNone(gZ) x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2) y = torch.randn(1, requires_grad=True) torch.autograd.backward(x, inputs=(y, )) # allow_unused is implicitly True! self.assertIsNone(y.grad) def test_grad_batched_grad(self): x = torch.randn(2, 2, requires_grad=True) out = x.clone() # Size([2, 2]) batched_grad = torch.arange(3).expand(2, 2, 3).transpose(0, 2) # Size([3, 2, 2]) grad, = torch.autograd.grad(out, (x,), (batched_grad,), is_grads_batched=True) self.assertEqual(grad, torch.arange(3).expand(2, 2, 3).transpose(0, 2).to(dtype=grad.dtype)) # Detect shape mismatch grad_out = torch.ones(2, 2) with self.assertRaisesRegex(RuntimeError, "If `is_grads_batched=True`, we interpret the first"): torch.autograd.grad(outputs=out, grad_outputs=(grad_out,), inputs=(x,), is_grads_batched=True) # Scalar outputs out = x.sum() # Size([]) batched_grad = torch.arange(3) # Size([3]) grad, = torch.autograd.grad(out, (x,), (batched_grad,), is_grads_batched=True) self.assertEqual(grad, torch.arange(3).expand(2, 2, 3).transpose(0, 2).to(dtype=grad.dtype)) # We consider scalar and sized-1 to be a mismatch. This is consistent with current non-batched behavior. 
grad_out = torch.ones(2).unsqueeze(1) with self.assertRaisesRegex(RuntimeError, "If `is_grads_batched=True`, we interpret the first"): torch.autograd.grad(outputs=out, grad_outputs=(grad_out,), inputs=(x,), is_grads_batched=True) def test_hooks(self): x = torch.ones(5, 5, requires_grad=True) y = torch.ones(5, 5) * 4 y.requires_grad_(True) counter = [0] def bw_hook(inc, grad): self.assertIsInstance(grad, torch.Tensor) counter[0] += inc z = x ** 2 + x * 2 + x * y + y x.register_hook(lambda *args: bw_hook(0, *args)) test = z.register_hook(lambda *args: bw_hook(1, *args)) z.backward(torch.ones(5, 5), retain_graph=True) self.assertEqual(counter[0], 1) test2 = z.register_hook(lambda *args: bw_hook(2, *args)) z.backward(torch.ones(5, 5), retain_graph=True) self.assertEqual(counter[0], 4) test2.remove() z.backward(torch.ones(5, 5), retain_graph=True) self.assertEqual(counter[0], 5) def bw_hook_modify(grad): return grad.mul(2) test.remove() z.register_hook(bw_hook_modify) with torch.no_grad(): y.grad.zero_() z.backward(torch.ones(5, 5), retain_graph=True) self.assertEqual(y.grad, (x + 1) * 2) y.register_hook(bw_hook_modify) with torch.no_grad(): y.grad.zero_() z.backward(torch.ones(5, 5)) self.assertEqual(y.grad, (x + 1) * 4) def test_hooks_cpp(self): # Tests hooks for autograd function implemented in C++ bn = torch.nn.BatchNorm1d(5, affine=False) bn.double() bn.eval() counter = [0] def bw_hook(grad): counter[0] += 1 return grad * 2 x = torch.ones(5, 5, dtype=torch.double, requires_grad=True) z = bn(x) z.register_hook(bw_hook) z.sum().backward() self.assertEqual(counter[0], 1, msg='bw_hook not called') self.assertEqual(x.grad, torch.ones(5, 5, dtype=torch.double) * 2, atol=1e-5, rtol=0) def test_hook_none(self): # WARNING: this is a test for autograd internals. # You should never have to use such things in your code. 
class NoneGradientFunction(Function): @staticmethod def forward(ctx, x, y): assert ctx.needs_input_grad[0] assert not ctx.needs_input_grad[1] return x, y @staticmethod def backward(ctx, grad_x, grad_y): return grad_x, None was_called = [False] def hook(grad): self.assertIsNotNone(grad) was_called[0] = True x = torch.randn(5, 5, requires_grad=True) y = torch.randn(5, 5) rx, ry = NoneGradientFunction.apply(x, y) rx.register_hook(hook) ry.register_hook(hook) sum(rx, ry).sum().backward() self.assertTrue(was_called[0]) def test_retain_grad(self): input = torch.rand(1, 3, requires_grad=True) h1 = input * 3 out = (h1 * h1).sum() # It should be possible to call retain_grad() multiple times h1.retain_grad() h1.retain_grad() # Gradient should be accumulated out.backward(retain_graph=True) self.assertEqual(h1 * 2, h1.grad) out.backward(retain_graph=True) self.assertEqual(h1 * 4, h1.grad) with torch.no_grad(): input.grad.zero_() # It should be a no-op for leaves input.retain_grad() input.retain_grad() out.backward() self.assertEqual(input * 18, input.grad) def test_retain_grad_cycle(self): x = torch.ones(5, 5, requires_grad=True) def run_test(): y = x * 2 y.retain_grad() return y / 2, torch._C._WeakTensorRef(y) z, ref = run_test() self.assertTrue(ref.expired()) z.sum().backward() def test_backward(self): v = torch.randn(5, 5, requires_grad=True) x = torch.randn(5, 5, requires_grad=True) y = (torch.rand(5, 5) + 0.1).requires_grad_(True) z = torch.randn(5, 5, requires_grad=True) grad_output = torch.randn(5, 5) v.backward(grad_output) self.assertEqual(v.grad, grad_output) a = x + (y * z) + 4 * z ** 2 * x / y a.backward(grad_output) x_grad = 4 * z.pow(2) / y + 1 y_grad = z - 4 * x * z.pow(2) / y.pow(2) z_grad = 8 * x * z / y + y self.assertEqual(x.grad, x_grad * grad_output) self.assertEqual(y.grad, y_grad * grad_output) self.assertEqual(z.grad, z_grad * grad_output) def test_sparse_mm_backward(self): size = (3, 3) sparse = torch.sparse_coo_tensor(size, requires_grad=True) dense = torch.randn(size, requires_grad=True) with self.assertRaisesRegex( RuntimeError, "The backward pass for this operation requires the 'mat1' tensor to be strided,"): z = dense.addmm(sparse, dense) mm_test_cases = [ # a requires grad, a is sparse, b requires grad, b is sparse, error message (False, True, True, False, None), (False, False, True, True, "The backward pass for this operation requires the 'mat2'"), (False, True, True, True, "The backward pass for this operation requires the 'mat2'"), (True, False, True, True, "The backward pass for this operation requires the 'mat2'"), (True, True, False, False, "The backward pass for this operation requires the 'self'"), (True, True, True, False, "The backward pass for this operation requires the 'self'"), (True, True, True, True, "The backward pass for this operation requires the 'mat2'"), ] for a_req_grad, a_is_sparse, b_req_grad, b_is_sparse, err_msg in mm_test_cases: # We should only be testing cases with sparse inputs, and at least one # input needs to require grad so we can call a backward pass assert a_is_sparse or b_is_sparse assert a_req_grad or b_req_grad a = torch.randn(size, requires_grad=a_req_grad) if a_is_sparse: a = a.to_sparse() b = torch.randn(size, requires_grad=b_req_grad) if b_is_sparse: b = b.to_sparse() # If no error expected, check that sparse and dense cases match if err_msg is None: r = a.mm(b) r.sum().backward() a_grad = None if a.grad is None else a.grad.clone().detach() b_grad = None if b.grad is None else b.grad.clone().detach() # Redo with only dense 
tensors a = (a.to_dense() if a.is_sparse else a).clone().detach() a.requires_grad = a_req_grad b = (b.to_dense() if b.is_sparse else b).clone().detach() b.requires_grad = b_req_grad r = a.mm(b) r.sum().backward() self.assertEqual(a_grad, a.grad) self.assertEqual(b_grad, b.grad) else: with self.assertRaisesRegex(RuntimeError, err_msg): a.mm(b) def test_multi_backward(self): x = torch.randn(5, 5, requires_grad=True) y = torch.randn(5, 5, requires_grad=True) q = torch.randn(5, 5, requires_grad=True) a = torch.randn(5, 5, requires_grad=True) b = torch.randn(5, 5, requires_grad=True) q2 = q * 2 z = x + y + q2 c = a * b + q2 grad_z = torch.randn(5, 5) grad_c = torch.randn(5, 5) torch.autograd.backward([z, c], [grad_z, grad_c]) self.assertEqual(x.grad, grad_z) self.assertEqual(y.grad, grad_z) self.assertEqual(a.grad, grad_c * b) self.assertEqual(b.grad, grad_c * a) self.assertEqual(q.grad, (grad_c + grad_z) * 2) def test_multi_backward_no_grad(self): x = torch.randn(5, 5, requires_grad=True) y = torch.randn(5, 5, requires_grad=False) z = x + y q = y * 2 # NB: we currently raise an exception if any arguments to backwards # have requires_grad=False and don't have a grad_fn. We may want to # relax that check to a warning. def call_backwards(): torch.autograd.backward([z, q], [torch.ones(5, 5), torch.ones(5, 5)]) self.assertRaises(RuntimeError, call_backwards) def test_backward_with_inputs(self): x = torch.randn(2, 2, dtype=torch.double, requires_grad=True) y = torch.randn(2, 2, dtype=torch.double, requires_grad=True) def fn(): return x ** 2 + y * x + y ** 2 gradient = torch.ones(2, 2) x_grad_expected = 2 * x + y y_grad_expected = x + 2 * y @torch.no_grad() def reset_grad(): x.grad.zero_() y.grad.zero_() torch.autograd.backward(fn(), gradient, inputs=[x, y]) self.assertEqual(x.grad, x_grad_expected) self.assertEqual(y.grad, y_grad_expected) reset_grad() torch.autograd.backward(fn(), gradient, inputs=[x]) self.assertEqual(x.grad, x_grad_expected) self.assertEqual(y.grad, torch.zeros(2, 2), exact_dtype=False) reset_grad() torch.autograd.backward(fn(), gradient, inputs=[y]) self.assertEqual(y.grad, y_grad_expected) self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False) reset_grad() torch.autograd.backward(fn(), gradient, inputs=y) self.assertEqual(y.grad, y_grad_expected) self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False) reset_grad() self.assertRaisesRegex(RuntimeError, 'cannot be empty', lambda: torch.autograd.backward(fn(), gradient, inputs=[])) def test_backward_with_nonleaf_inputs(self): x = torch.randn(2, 2, dtype=torch.double, requires_grad=True) x_nonleaf = x * 1 y = torch.randn(2, 2, dtype=torch.double, requires_grad=True) z = torch.randn(2, 2, dtype=torch.double, requires_grad=True) out = x_nonleaf ** 2 + y * x_nonleaf + y ** 2 out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[x, y, x_nonleaf]) x_grad_expected = 2 * x + y y_grad_expected = x + 2 * y x_non_leaf_expected = 2 * x_nonleaf + y self.assertEqual(y.grad, y_grad_expected) self.assertEqual(x.grad, x_grad_expected) self.assertEqual(x_nonleaf.grad, x_non_leaf_expected) # backward doesn't have an allow_unused flag, so the behavior of backward # when variable is not part of the graph is as if allow_used were true # x.grad will simply be None. 
out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[z]) self.assertIsNone(z.grad) def test_dependent_backward(self): x = torch.randn(10, requires_grad=True) y = x ** 2 z = y ** 3 go_y = torch.randn(10) go_z = torch.randn(10) torch.autograd.backward([y, z], [go_y, go_z]) xd = x self.assertEqual(x.grad, 2 * xd * go_y + 6 * xd.pow(5) * go_z) def test_save_output_nr(self): x = torch.randn(10, requires_grad=True) class MultiOutputFn(Function): @staticmethod def forward(ctx, x): return x[:5], x[5:] @staticmethod def backward(ctx, *grad): return torch.cat(grad) a, b = MultiOutputFn.apply(x) self.assertEqual(b.output_nr, 1) class TestFn(Function): @staticmethod def forward(ctx, b): ctx.save_for_backward(b) return b * 2 @staticmethod def backward(ctx, grad_b): b, = ctx.saved_tensors self.assertEqual(b.output_nr, 1) TestFn.apply(b).sum().backward() def test_free_deep_graph(self): def scope(): depth = 150000 x = torch.randn(1, requires_grad=True) y = x.clone() # build a "chain" computation graph for _ in range(depth): y = y + y * 0.000001 # graph deletion occurs when the above locals go out of scope. # In this case `del y` will trigger it but it's easier to leave # it to Python to delete the locals. # Should not stack overflow scope() def test_free_deep_graph_complicated(self): def scope(): depth = 100000 randchoice = torch.randint(2, [depth, 2]) x = torch.randn(1, requires_grad=True) y = x.clone() # Hold the two previous values prev_values = [None, None] # Build a "chain with skip connections" graph for _ in range(depth): prev_tensors = [tensor for tensor in prev_values[:-1] if tensor is not None] prev_values.append(y) prev_values.pop(0) # Definitely pick one tensor to add y += y * 0.000001 # Possibly add other tensors nprev = len(prev_tensors) if nprev == 2: y += randchoice[depth].mul(torch.cat(prev_tensors)).sum() # graph deletion occurs when the above locals go out of scope. # Should not stack overflow scope() def test_free_deep_graph_pyfunction(self): class MyOp(Function): @staticmethod def forward(ctx, tensor1, tensor2): return tensor1 + tensor2 @staticmethod def backward(ctx, grad_output): return grad_output, grad_output def scope(): depth = 150000 x = torch.randn(1, requires_grad=True) y = x.clone() # build deeply nested computation graph for _ in range(depth): y = MyOp.apply(y, y) # graph deletion occurs when the above locals go out of scope. # Should not stack overflow scope() def test_no_unnecessary_save(self): # If we kept x in the derivative Function of x * 2 we would # get an error in the backward that would complain that we've # modified x, which was needed for gradient computation. # Since we should elide unnecessary saves, this test should pass. 
mu = torch.ones(1, requires_grad=True) x = torch.empty(1) loss = 0 for i in range(3): x.detach_() x.copy_(mu + i) ft = torch.tensor([float(i)]) multiplied = x * ft s = multiplied.sum() loss += s loss.backward() def test_no_grad(self): x = torch.ones(5, 5, requires_grad=True) y = torch.ones(5, 5) * 4 with torch.no_grad(): w = x + y @torch.no_grad() def adder(x, y): return x + y z = adder(x, y) self.assertFalse(w.requires_grad) self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5))) self.assertIsNone(w.grad_fn) self.assertFalse(z.requires_grad) self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5))) self.assertIsNone(z.grad_fn) # test nested decorator and with-statement on no_grad with torch.no_grad(): self.assertFalse(torch.is_grad_enabled()) w = adder(x, y) self.assertFalse(torch.is_grad_enabled()) def test_set_grad_generator_functions(self): @torch.no_grad() def gen_no_grad(): for i in range(10): self.assertEqual(torch.is_grad_enabled(), False) yield i with torch.enable_grad(): for _ in gen_no_grad(): self.assertEqual(torch.is_grad_enabled(), True) @torch.enable_grad() def gen_enable_grad(): for i in range(10): self.assertEqual(torch.is_grad_enabled(), True) yield i with torch.no_grad(): for _ in gen_enable_grad(): self.assertEqual(torch.is_grad_enabled(), False) def test_set_grad_generator_functions_recursive(self): # enable_grad_decorator_recursive and no_grad_decorator_recursive call each other # recursively, to ensure that the decorators preserve the caller's setting @torch.enable_grad() def enable_grad_decorator_recursive(depth): self.assertTrue(torch.is_grad_enabled()) if depth > 0: no_grad_decorator_recursive(depth - 1) self.assertTrue(torch.is_grad_enabled()) @torch.no_grad() def no_grad_decorator_recursive(depth): self.assertFalse(torch.is_grad_enabled()) if depth > 0: enable_grad_decorator_recursive(depth - 1) self.assertFalse(torch.is_grad_enabled()) # enable_grad_context_manager_recursive and no_grad_context_manager_recursive call # each other recursively, to ensure that the decorators preserve the caller's setting def enable_grad_context_manager_recursive(depth): with torch.enable_grad(): self.assertTrue(torch.is_grad_enabled()) if depth > 0: no_grad_context_manager_recursive(depth - 1) self.assertTrue(torch.is_grad_enabled()) def no_grad_context_manager_recursive(depth): with torch.no_grad(): self.assertFalse(torch.is_grad_enabled()) if depth > 0: enable_grad_context_manager_recursive(depth - 1) self.assertFalse(torch.is_grad_enabled()) with torch.enable_grad(): self.assertTrue(torch.is_grad_enabled()) enable_grad_decorator_recursive(10) self.assertTrue(torch.is_grad_enabled()) enable_grad_context_manager_recursive(10) self.assertTrue(torch.is_grad_enabled()) with torch.no_grad(): self.assertFalse(torch.is_grad_enabled()) enable_grad_decorator_recursive(10) self.assertFalse(torch.is_grad_enabled()) enable_grad_context_manager_recursive(10) self.assertFalse(torch.is_grad_enabled()) def test_set_grad_coroutines(self): @torch.no_grad() def coro_no_grad(n=10): self.assertFalse(torch.is_grad_enabled()) for i in range(n): self.assertFalse(torch.is_grad_enabled()) r = yield i self.assertFalse(torch.is_grad_enabled()) self.assertEqual(i, r) self.assertFalse(torch.is_grad_enabled()) @torch.enable_grad() def coro_enable_grad(n=10): self.assertTrue(torch.is_grad_enabled()) for i in range(n): self.assertTrue(torch.is_grad_enabled()) r = yield i self.assertTrue(torch.is_grad_enabled()) self.assertEqual(i, r) self.assertTrue(torch.is_grad_enabled()) with 
torch.enable_grad(): self.assertTrue(torch.is_grad_enabled()) coro, r = coro_no_grad(), None try: while True: self.assertTrue(torch.is_grad_enabled()) r = coro.send(r) self.assertTrue(torch.is_grad_enabled()) except StopIteration: pass with torch.no_grad(): self.assertFalse(torch.is_grad_enabled()) coro, r = coro_enable_grad(), None try: while True: self.assertFalse(torch.is_grad_enabled()) r = coro.send(r) self.assertFalse(torch.is_grad_enabled()) except StopIteration: pass def test_set_grad_coroutines_benign_exceptions(self): class RecoverableException(Exception): pass @torch.no_grad() def coro_no_grad(n=10): has_raised = False for i in range(n): try: self.assertFalse(torch.is_grad_enabled()) yield (-i if has_raised else i) except RecoverableException: self.assertFalse(torch.is_grad_enabled()) has_raised = True @torch.enable_grad() def coro_enable_grad(n=10): has_raised = False for i in range(n): try: self.assertTrue(torch.is_grad_enabled()) yield (-i if has_raised else i) except RecoverableException: self.assertTrue(torch.is_grad_enabled()) has_raised = True with torch.enable_grad(): coro = coro_no_grad() assert 0 == next(coro) try: while True: r = coro.throw(RecoverableException) self.assertLess(r, 0) except StopIteration: pass with torch.no_grad(): coro = coro_enable_grad() assert 0 == next(coro) try: while True: r = coro.throw(RecoverableException) self.assertLess(r, 0) except StopIteration: pass def test_set_grad_coroutines_critical_exceptions(self): class UnrecoverableException(Exception): pass class SecondaryException(Exception): pass @torch.no_grad() def coro_no_grad(n=10): has_raised = False for i in range(n): try: self.assertFalse(torch.is_grad_enabled()) yield (-i if has_raised else i) except UnrecoverableException: self.assertFalse(torch.is_grad_enabled()) raise SecondaryException @torch.enable_grad() def coro_enable_grad(n=10): has_raised = False for i in range(n): try: self.assertTrue(torch.is_grad_enabled()) yield (-i if has_raised else i) except UnrecoverableException: self.assertTrue(torch.is_grad_enabled()) raise SecondaryException with torch.enable_grad(): coro = coro_no_grad() assert 0 == next(coro) with self.assertRaises(SecondaryException): coro.throw(UnrecoverableException) with torch.no_grad(): coro = coro_enable_grad() assert 0 == next(coro) with self.assertRaises(SecondaryException): coro.throw(UnrecoverableException) def test_set_grad_coroutines_exit(self): @torch.no_grad() def coro_no_grad(state): for i in range(10): try: self.assertFalse(torch.is_grad_enabled()) yield i except GeneratorExit: self.assertFalse(torch.is_grad_enabled()) state.add('GeneratorExit') raise @torch.enable_grad() def coro_enable_grad(state): for i in range(10): try: self.assertTrue(torch.is_grad_enabled()) yield i except GeneratorExit: self.assertTrue(torch.is_grad_enabled()) state.add('GeneratorExit') raise state = set() with torch.enable_grad(): coro = coro_no_grad(state) for i in range(5): next(coro) coro.close() self.assertTrue('GeneratorExit' in state) state = set() with torch.no_grad(): coro = coro_enable_grad(state) for i in range(5): next(coro) coro.close() self.assertTrue('GeneratorExit' in state) def test_no_grad_python_function(self): """Python Functions should respect grad mode.""" x = torch.ones(5, 5, requires_grad=True) class MyOp(Function): @staticmethod def forward(self, x): return x + 1 @staticmethod def backward(self, dy): return dy with torch.no_grad(): y = MyOp.apply(x) self.assertFalse(y.requires_grad) def test_indexing(self): x = torch.arange(1., 17).view(4, 4) y = 
Variable(x, requires_grad=True) def compare(x, y, idx, indexed_tensor, indexed_var): indexed_var_t = indexed_var.data if not isinstance(indexed_tensor, torch.Tensor): indexed_var_t = indexed_var_t[0] self.assertEqual(indexed_tensor, indexed_var_t) indexed_var.sum().backward() expected_grad = torch.empty(x.size()).fill_(0) expected_grad[idx] = 1 self.assertEqual(y.grad, expected_grad) def check_index(x, y, idx): if y.grad is not None: with torch.no_grad(): y.grad.zero_() indexed_tensor = x[idx] indexed_var = y[idx] compare(x, y, idx, indexed_tensor, indexed_var) check_index(x, y, 1) check_index(x, y, (1, 1)) check_index(x, y, slice(1, None)) check_index(x, y, slice(None, 2)) check_index(x, y, (slice(None, 2), 2)) check_index(x, y, (slice(1, 2), 2)) check_index(x, y, (1, slice(2, None))) check_index(x, y, (slice(None, None), slice(2, None))) check_index(x, y, torch.LongTensor([0, 2])) check_index(x, y, torch.rand(4, 4).bernoulli().bool()) check_index(x, y, (Ellipsis, slice(2, None))) check_index(x, y, ([0], [0])) check_index(x, y, ([1, 2, 3], [0])) check_index(x, y, ([1, 2], [2, 1])) check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 3]])) check_index(x, y, ([slice(None), [2, 3]])) check_index(x, y, ([[2, 3], slice(None)])) # advanced indexing, with less dim, or ellipsis check_index(x, y, ([0])) check_index(x, y, ([0], )) x = torch.arange(1., 49).view(4, 3, 4) y = Variable(x, requires_grad=True) check_index(x, y, (slice(None), [0], [0])) check_index(x, y, ([0], [0], slice(None))) check_index(x, y, (slice(None), [0, 1, 2], [0])) check_index(x, y, ([0, 1, 2], [0], slice(None))) check_index(x, y, (slice(None), [1, 2], [2, 1])) check_index(x, y, ([1, 2], [2, 1], slice(None))) check_index(x, y, (slice(None), [[1, 2], [2, 0]], [[0, 1], [2, 3]])) check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 2]], slice(None))) check_index(x, y, (slice(None), slice(None), [2, 1])) check_index(x, y, (slice(None), [2, 1], slice(None))) check_index(x, y, ([2, 1], slice(None), slice(None))) # advanced indexing, with less dim, or ellipsis check_index(x, y, ([0], )) check_index(x, y, ([0], slice(None))) check_index(x, y, ([0], Ellipsis)) check_index(x, y, ([1, 2], [0, 1])) check_index(x, y, ([1, 2], [0, 1], Ellipsis)) check_index(x, y, (Ellipsis, [1, 2], [0, 1])) # advanced indexing, with a tensor wrapped in a variable z = torch.LongTensor([0, 1]) zv = Variable(z, requires_grad=False) seq = [z, Ellipsis] seqv = [zv, Ellipsis] if y.grad is not None: with torch.no_grad(): y.grad.zero_() indexed_tensor = x[seq] indexed_var = y[seqv] compare(x, y, seq, indexed_tensor, indexed_var) def test_indexing_duplicates(self): x = torch.arange(1., 17).view(4, 4) y = Variable(x, requires_grad=True) idx = torch.LongTensor([1, 1, 3, 2, 1, 2]) y[idx].sum().backward() expected_grad = torch.zeros(4, 4) for i in idx: expected_grad[i] += 1 self.assertEqual(y.grad, expected_grad) # with advanced indexing x = torch.arange(1., 17).view(4, 4) y = Variable(x, requires_grad=True) idx = [[1, 1, 3, 2, 1, 2], [0]] y[idx].sum().backward() expected_grad = torch.zeros(4, 4) for i in idx[0]: for j in idx[1]: expected_grad[i][j] += 1 self.assertEqual(y.grad, expected_grad) x = torch.arange(1., 17).view(4, 4) y = Variable(x, requires_grad=True) idx = [[[1, 2], [0, 0]], [[0, 1], [1, 1]]] y[idx].sum().backward() expected_grad = torch.tensor([[0., 2., 0., 0.], [1., 0., 0., 0.], [0., 1., 0., 0.], [0., 0., 0., 0.]]) self.assertEqual(y.grad, expected_grad) x = torch.arange(1., 65).view(4, 4, 4) y = Variable(x, requires_grad=True) idx = [[1, 1, 1], slice(None), 
slice(None)] y[idx].sum().backward() expected_grad = torch.empty(4, 4, 4).zero_() expected_grad[1].fill_(3) self.assertEqual(y.grad, expected_grad) def test_index_backward_does_not_save_tensor(self): # Example from https://github.com/pytorch/pytorch/issues/24853. # if `index(tensor, indices)` saves `tensor` for backwards, then it will # trigger a version check on `tensor` during the backward pass, which # will cause the following code to error because `tensor` gets modified # by the indexing line. a = torch.tensor([1., 0, 0]) b = torch.zeros(3, requires_grad=True) tensor = b + 0 tensor[a != 0] = tensor[a != 0] tensor.backward(torch.zeros_like(tensor)) def test_volatile_deprecated(self): v = torch.autograd.torch.randn(3, 3) with warnings.catch_warnings(record=True) as w: self.assertFalse(v.volatile) self.assertIn('volatile', str(w[0].message)) def test_saved_variables_deprecated(self): class MyFunction(Function): @staticmethod def forward(ctx, tensor1, tensor2): ctx.save_for_backward(tensor1, tensor2) return tensor1 + tensor2 @staticmethod def backward(ctx, grad_output): var1, var2 = ctx.saved_variables return (grad_output, grad_output) with warnings.catch_warnings(record=True) as warns: warnings.simplefilter("always") x = torch.randn((3, 3), requires_grad=True) y = torch.randn((3, 3), requires_grad=True) MyFunction.apply(x, y).sum().backward() has_deprecated = map(lambda warn: 'deprecated' in str(warn) and 'saved_variables' in str(warn), warns) has_deprecated = reduce(lambda x, y: x or y, has_deprecated) self.assertTrue(has_deprecated) def test_requires_grad(self): x = torch.randn(5, 5) y = torch.randn(5, 5) z = torch.randn(5, 5, requires_grad=True) a = x + y self.assertFalse(a.requires_grad) b = a + z self.assertTrue(b.requires_grad) def error(): raise RuntimeError # Make sure backward isn't called on these a._backward_hooks = OrderedDict() x._backward_hooks = OrderedDict() y._backward_hooks = OrderedDict() a._backward_hooks['test'] = error x._backward_hooks['test'] = error y._backward_hooks['test'] = error b.backward(torch.ones(5, 5)) def test_requires_grad_(self): x = torch.randn(5, 5) y = torch.randn(5, 5, requires_grad=True) self.assertIs(x, x.requires_grad_()) self.assertTrue(x.requires_grad) self.assertIs(y, y.requires_grad_()) self.assertTrue(y.requires_grad) self.assertIs(x, x.requires_grad_(True)) self.assertTrue(x.requires_grad) self.assertIs(y, y.requires_grad_(True)) self.assertTrue(y.requires_grad) z = x * y self.assertRaises(RuntimeError, lambda: z.requires_grad_(False)) self.assertIs(z, z.requires_grad_()) self.assertTrue(z.requires_grad) self.assertIs(z, z.requires_grad_(True)) self.assertTrue(z.requires_grad) self.assertIs(x, x.requires_grad_(False)) self.assertFalse(x.requires_grad) self.assertIs(y, y.requires_grad_(False)) self.assertFalse(y.requires_grad) def test_requires_grad_inplace(self): a = torch.randn(5, 5) b = torch.randn(5, 5, requires_grad=True) a += b self.assertTrue(a.requires_grad) # non-leaf a = torch.randn(5, 5) + 0 b = torch.randn(5, 5, requires_grad=True) a += b self.assertTrue(a.requires_grad) def test_no_requires_grad_inplace(self): # basic case, should be able to modify inplace while requires_grad is False a = torch.randn(2, 3) a.add_(5) a.requires_grad = True a.sum().backward() self.assertEqual(a.grad, torch.ones(2, 3)) # same but with a view a = torch.randn(2, 3) b = a[:] b.add_(5) a.requires_grad = True a.sum().backward() self.assertEqual(a.grad, torch.ones(2, 3)) # should fail if requires_grad = True when we modify inplace a = torch.randn(2, 3) 
b = a[:] a.requires_grad = True with self.assertRaises(RuntimeError): a.add_(5) with self.assertRaises(RuntimeError): b.add_(5) def test_attribute_deletion(self): x = torch.randn((5, 5), requires_grad=True) del x.grad self.assertIsNone(x.grad) with self.assertRaises(RuntimeError): del x.data with self.assertRaises(TypeError): x.data = None with self.assertRaises(RuntimeError): del x.requires_grad with self.assertRaises(RuntimeError): del x._grad_fn with self.assertRaises(RuntimeError): del x._backward_hooks def test_duplicate_backward_root(self): a = torch.randn(5, 5, requires_grad=True) b = torch.randn(5, 5, requires_grad=True) x = a * b grad_output = torch.randn_like(x) torch.autograd.backward([x, x], [grad_output, grad_output]) self.assertEqual(a.grad, b * grad_output * 2) self.assertEqual(b.grad, a * grad_output * 2) def test_backward_no_grad(self): a = torch.randn(5, 5, requires_grad=True) b = a + 2 with self.assertRaises(RuntimeError): torch.autograd.backward([b], [None]) def test_backward_twice_with_saved_values(self): b = torch.randn(3, requires_grad=True, dtype=torch.double) c = torch.zeros(3, dtype=torch.double) c[[1, 2]] = b[[1, 1]] c.backward(torch.tensor([1, 1, 1], dtype=torch.double)) self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True', lambda: c.backward(torch.tensor([1, 1, 1], dtype=torch.double))) def test_backward_twice_retained_graph_with_saved_values(self): b = torch.randn(3, requires_grad=True, dtype=torch.double) c = torch.zeros(3, dtype=torch.double) c[[1, 2]] = b[[1, 1]] c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True) c.backward(torch.tensor([1, 1, 1], dtype=torch.double)) def test_backward_twice_without_saved_values(self): b = torch.randn(3, requires_grad=True, dtype=torch.double) c = b + 1 c.backward(torch.tensor([1, 1, 1], dtype=torch.double)) c.backward(torch.tensor([1, 1, 1], dtype=torch.double)) def test_backward_twice_retained_graph_without_saved_values(self): b = torch.randn(3, requires_grad=True, dtype=torch.double) c = torch.zeros(3, dtype=torch.double) c[[1, 2]] = b[[1, 1]] c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True) c.backward(torch.tensor([1, 1, 1], dtype=torch.double)) def test_backward_create_graph_warns(self): try: prev = torch.is_warn_always_enabled() torch.set_warn_always(True) b = torch.randn(3, requires_grad=True, dtype=torch.double) c = b * b with warnings.catch_warnings(record=True) as ws: c.backward(torch.ones_like(c), create_graph=True) b.grad = None self.assertTrue(any('Using backward() with create_graph=True' in str(w.message) for w in ws)) # Should not warn for grad with warnings.catch_warnings(record=True) as ws: torch.autograd.grad(c, b, torch.ones_like(c), create_graph=True) self.assertFalse(any('Using backward() with create_graph=True' in str(w.message) for w in ws)) finally: torch.set_warn_always(prev) def test_next_functions(self): x = torch.randn(5, 5, requires_grad=True) y = torch.randn(5, 5, requires_grad=True) a = x + y self.assertIsNotNone(a.grad_fn) next_functions = a.grad_fn.next_functions self.assertEqual(len(next_functions), 2) self.assertIsInstance(next_functions[0][0], torch._C._functions.AccumulateGrad) self.assertEqual(next_functions[0][1], 0) self.assertIsInstance(next_functions[1][0], torch._C._functions.AccumulateGrad) self.assertEqual(next_functions[1][1], 0) b = a + 5 next_functions = b.grad_fn.next_functions self.assertEqual(len(next_functions), 2) self.assertIs(next_functions[0][0], a.grad_fn) self.assertIs(next_functions[1][0], None) def 
test_inplace(self):
        x = torch.ones(5, 5, requires_grad=True)
        y = Variable(torch.ones(5, 5) * 4, requires_grad=True)

        z = x * y
        q = z + y
        w = z * y
        z.add_(2)
        # Add doesn't need its inputs to do backward, so it shouldn't raise
        q.backward(torch.ones(5, 5), retain_graph=True)
        # Mul saves both inputs in forward, so it should raise
        self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))

        z = x * y
        q = z * y
        r = z + y
        w = z.add_(y)
        # w is the last expression, so this should succeed
        w.backward(torch.ones(5, 5), retain_graph=True)
        # r doesn't use the modified value in backward, so it should succeed
        r.backward(torch.ones(5, 5), retain_graph=True)
        # q uses dirty z, so it should raise
        self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))

        with torch.no_grad():
            x.grad.zero_()
        m = x / 2
        z = m + y / 8
        q = z * y
        r = z + y
        prev_version = z._version
        w = z.exp_()
        self.assertNotEqual(z._version, prev_version)
        r.backward(torch.ones(5, 5), retain_graph=True)
        self.assertEqual(x.grad, torch.ones(5, 5) / 2)
        w.backward(torch.ones(5, 5), retain_graph=True)
        self.assertEqual(x.grad, torch.empty(5, 5).fill_((1 + math.e) / 2))
        self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))

        leaf = torch.ones(5, 5, requires_grad=True)
        x = leaf.clone()
        x.add_(10)
        self.assertEqual(x, torch.ones(5, 5) * 11)
        # x should still be usable
        y = x + 2
        y.backward(torch.ones(5, 5))
        self.assertEqual(leaf.grad, torch.ones(5, 5))
        z = x * y
        x.add_(2)
        self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))

    def test_mark_non_differentiable(self):
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, input):
                output = input > 0
                ctx.mark_non_differentiable(output)
                return output

            @staticmethod
            def backward(ctx, grad_output):
                return (grad_output * 0).to(torch.double)

        x = torch.randn(5, 5, requires_grad=True)
        mask = MyFunction.apply(x)
        self.assertFalse(mask.requires_grad)
        y = x.masked_fill(mask, 0)
        y.sum().backward()

    def test_mark_non_differentiable_mixed(self):
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, input):
                a = input + 1
                b = input + 2
                ctx.mark_non_differentiable(a)
                return a, b

            @staticmethod
            def backward(ctx, grad_a, grad_b):
                self.assertTrue((grad_a == 0).all())
                self.assertTrue((grad_b == 1).all())
                return grad_b

        x = torch.randn(5, 5, requires_grad=True)
        a, b = MyFunction.apply(x)
        self.assertFalse(a.requires_grad)
        self.assertTrue(b.requires_grad)
        b.sum().backward()
        self.assertEqual(x.grad, torch.ones(5, 5))

    def test_mark_non_differentiable_none(self):
        # This used to segfault because MyFunction would send back null
        # gradients to MulBackward, which is implemented in C++. C++
        # implemented functions expect incoming grad_outputs to be non-null.
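        # (Because the output is marked non-differentiable, r below does not
        # require grad, so presumably the engine never has to feed this None
        # back into the C++ MulBackward node for x * x.)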
class MyFunction(Function):
            @staticmethod
            def forward(ctx, input):
                output = input.clone()
                ctx.mark_non_differentiable(output)
                return output

            @staticmethod
            def backward(ctx, grad_output):
                return None

        x = torch.randn(5, 5, requires_grad=True)
        r = MyFunction.apply(x * x)
        (r * x).sum().backward()

    def test_return_duplicate(self):
        class DoubleDuplicate(Function):
            @staticmethod
            def forward(ctx, x):
                output = x * 2
                return output, output

            @staticmethod
            def backward(ctx, grad1, grad2):
                return grad1 * 2 + grad2 * 2

        def fn(x):
            a, b = DoubleDuplicate.apply(x)
            self.assertIs(a, b)
            return a + b

        x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
        gradcheck(fn, [x])
        gradgradcheck(fn, [x])

    def test_return_duplicate_inplace(self):
        class DoubleInplace(Function):
            @staticmethod
            def forward(ctx, x):
                x.mul_(2)
                ctx.mark_dirty(x)
                return x, x

            @staticmethod
            def backward(ctx, grad1, grad2):
                return grad1 * 2 + grad2 * 2

        def inplace_fn(x):
            a, b = DoubleInplace.apply(x.clone())
            self.assertIs(a, b)
            return a + b

        x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
        gradcheck(inplace_fn, [x])
        gradgradcheck(inplace_fn, [x])

        # Can't modify leaf variables in-place
        self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x))
        # Functions which modify views in-place must return only one output
        self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x.clone()[0]))

    @suppress_warnings
    def test_resize(self):
        x = torch.ones(2, 3)
        self.assertTrue(x.resize(3, 2).size() == (3, 2))

    def _test_setitem(self, size, index):
        x = torch.ones(*size, requires_grad=True)
        y = x + 2
        y_version = y._version
        y[index] = 2
        self.assertNotEqual(y._version, y_version)
        y.backward(torch.ones(*size))
        expected_grad = torch.ones(*size)
        expected_grad[index] = 0
        self.assertEqual(x.grad, expected_grad)

    def _test_setitem_tensor(self, size, index):
        x = torch.ones(*size, requires_grad=True)
        y = x + 2
        y_version = y._version
        value = x.new(x[index].size()).fill_(7)
        value.requires_grad = True
        y[index] = value
        self.assertNotEqual(y._version, y_version)
        y.backward(torch.ones(*size))
        expected_grad_input = torch.ones(*size)
        expected_grad_input[index] = 0
        self.assertEqual(x.grad, expected_grad_input)
        self.assertEqual(value.grad, torch.ones_like(value))

        # case when x is broadcast to match the shape of y[1]
        x = torch.randn(4, requires_grad=True)
        y = torch.zeros(2, 3, 4)
        y[1] = x
        y.backward(torch.randn(2, 3, 4))
        self.assertEqual(x.size(), x.grad.size())

    def test_setitem(self):
        self._test_setitem((5, 5), 1)
        self._test_setitem((5,), 1)
        self._test_setitem((1,), 0)
        self._test_setitem((10,), [[0, 4, 2]])
        self._test_setitem((5, 5), [[0, 4], [2, 2]])
        self._test_setitem((5, 5, 5), [slice(None), slice(None), [1, 3]])
        self._test_setitem((5, 5, 5), [slice(None), [1, 3], slice(None)])
        self._test_setitem((5, 5, 5), [[1, 3], slice(None), slice(None)])
        self._test_setitem((5, 5, 5), [slice(None), [2, 4], [1, 3]])
        self._test_setitem((5, 5, 5), [[1, 3], [2, 4], slice(None)])
        self._test_setitem_tensor((5, 5), 3)
        self._test_setitem_tensor((5, 5), [[0, 1], [1, 0]])
        self._test_setitem_tensor((5,), 3)
        self._test_setitem_tensor((5,), Variable(torch.LongTensor([3]), requires_grad=False).sum())
        self._test_setitem_tensor((5,), [[0, 1, 2, 3]])
        self._test_setitem_tensor((5, 5, 5), [slice(None), slice(None), [1, 3]])
        self._test_setitem_tensor((5, 5, 5), [slice(None), [1, 3], slice(None)])
        self._test_setitem_tensor((5, 5, 5), [[1, 3], slice(None), slice(None)])
        self._test_setitem_tensor((5, 5, 5), [slice(None), [2, 4], [1, 3]])
        self._test_setitem_tensor((5, 5, 5), [[1, 3], [2, 4], slice(None)])
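        # Sketch of what the helpers above verify, e.g. for size=(5, 5), index=1:
        #   y = x + 2; y[1] = 2; y.backward(torch.ones(5, 5))
        # The overwritten elements get zero gradient, so x.grad[1] == 0 while
        # every other entry of x.grad is 1.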
self._test_setitem_tensor((5, 5, 5), [Variable(torch.LongTensor([1, 3]), requires_grad=False), [2, 4], slice(None)]) def test_setitem_mask(self): mask = torch.BoolTensor(5, 5).bernoulli_() self._test_setitem((5, 5), Variable(mask)) self._test_setitem((5,), Variable(mask[0])) self._test_setitem((1,), Variable(mask[0, 0:1])) self._test_setitem_tensor((5, 5), Variable(mask)) self._test_setitem_tensor((5,), Variable(mask[0])) def test_select_sum(self): # both select and sum return Scalars in ATen; ensure they work together. x = torch.randn(10, dtype=torch.double, requires_grad=True) def func(x): return x.select(0, 1).sum() gradcheck(func, [x]) gradgradcheck(func, [x]) def test_diagonal_expanded_v(self): value = torch.rand([]) v_expanded = torch.tensor(value).expand(10) a = torch.rand(10, 10, dtype=torch.double, requires_grad=True) result, = torch.autograd.grad(a.diagonal(), a, v_expanded) self.assertEqual(result, torch.eye(10, dtype=torch.double) * value) def test_select_expanded_v(self): v_expanded = torch.rand(10).expand(10, 10) a = torch.rand(10, 10, 10, requires_grad=True) result, = torch.autograd.grad(a[0], a, v_expanded) expected = torch.zeros(10, 10, 10) expected[0] = v_expanded self.assertEqual(result, expected) def test_slice_expanded_v(self): v_expanded = torch.rand(10, 1).expand(2, 10, 10) a = torch.rand(10, 10, 10, requires_grad=True) result, = torch.autograd.grad(a[3:5], a, v_expanded) expected = torch.zeros(10, 10, 10) expected[3:5] = v_expanded self.assertEqual(result, expected) # TODO: opinfo this or move to unbind's test suite def test_unbind(self): stacked = torch.randn(3, 10, 10, requires_grad=True) x, y, z = stacked.unbind() grad = torch.randn(3, 10, 10) torch.autograd.backward([x, y, z], grad.unbind()) self.assertEqual(stacked.grad, grad) # check that it works with only one gradient provided (#9977) for i in range(3): stacked = torch.randn(3, 10, 10, requires_grad=True) outs = stacked.unbind() gi = grad.unbind()[i] g, = torch.autograd.grad(outs[i], stacked, gi) g_expected = torch.stack([gi if j == i else torch.zeros_like(gi) for j in range(3)], dim=0) self.assertEqual(g, g_expected) # TODO: opinfo this or move to fill's test suite def test_fill(self): root = torch.randn(4, 5, requires_grad=True) def func(root): x = root.clone() x.fill_(2) return x gradcheck(func, [root]) gradgradcheck(func, [root]) def test_unused_output(self): x = torch.randn(10, 10, requires_grad=True) outputs = x.chunk(5) o = outputs[2] o = o * 4 + 2 o.sum().backward() expected_grad = torch.zeros(10, 10) expected_grad[4:6] = 4 self.assertEqual(x.grad, expected_grad) with torch.no_grad(): x.grad.zero_() grad_output = torch.randn(2, 10) outputs = x.chunk(5) outputs[0].backward(grad_output) expected_grad = torch.zeros(10, 10) expected_grad[:2] = grad_output self.assertEqual(x.grad, expected_grad) # TODO: opinfo this or move to the sparse test suite def _test_sparse_gather(self, size_x, size_ind, dim): x = torch.randn(size_x, requires_grad=True) if len(size_ind) > 0 and len(size_x) > 0: ind = torch.randint(x.size(dim), size_ind) else: ind = torch.zeros(size_ind, dtype=torch.int64) out = torch.gather(x, dim, ind, sparse_grad=False) grad = torch.rand_like(out) out.backward(grad) grad_dense = x.grad.clone() x.grad = None out = torch.gather(x, dim, ind, sparse_grad=True) out.backward(grad) self.assertEqual(grad_dense, x.grad.to_dense()) def test_sparse_gather_dim0(self): self._test_sparse_gather((10, 10), (5, 10), 0) def test_sparse_gather_dim1(self): self._test_sparse_gather((10, 10, 5), (10, 5, 5), 1) def 
test_sparse_gather_dim_neg(self):
        self._test_sparse_gather((10, 10, 5), (10, 10, 2), -1)

    def test_sparse_gather_ind_scalar(self):
        self._test_sparse_gather((10,), (), 0)

    def test_sparse_gather_x_scalar(self):
        self._test_sparse_gather((), (2,), 0)

    def test_sparse_gather_both_scalar(self):
        self._test_sparse_gather((), (), 0)

    def test_gc_in_destructor(self):
        """
        Previously, if a Function destructor triggered a garbage collection,
        the Variable's tp_dealloc handler would get called twice leading to
        a segfault.
        """
        class CollectOnDelete(Function):
            def forward(self, x):
                return x

            def backward(self, grad_output):
                return grad_output

            def __del__(self):
                gc.collect()

        for _ in range(10):
            CollectOnDelete().forward(torch.randn(1, requires_grad=True)).backward()

    def test_naughty_autograd_function_attribute_access(self):
        class Id(Function):
            @staticmethod
            def forward(ctx, x):
                return x

            @staticmethod
            def backward(ctx, grad_x):
                return grad_x

        with self.assertWarnsRegex(DeprecationWarning, "should not be instantiated"):
            f = Id()

        # After raising the warning, it should still return an instance
        self.assertIsInstance(f, Id)
        x = torch.zeros(1, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, "non-static forward method is deprecated"):
            f(x)
        t = Id.apply(x)
        self.assertEqual(t.grad_fn.name(), "IdBackward")

        # THPFunction is the base class of both grad_fn and autograd functions,
        # which means that a lot of accessors on them may segfault. Test that we
        # properly error in this case.
        t = torch.ones(1, requires_grad=True)
        t._backward_hooks = dict()
        with self.assertRaisesRegex(RuntimeError, "Attribute '_register_hook_dict' is invalid"):
            f._register_hook_dict(t)
        with self.assertRaisesRegex(RuntimeError, "Attribute 'register_hook' is invalid"):
            f.register_hook(lambda x, y: None)
        with self.assertRaisesRegex(RuntimeError, "Attribute 'next_functions' is invalid"):
            f.next_functions
        with self.assertRaisesRegex(RuntimeError, "Attribute 'name' is invalid"):
            f.name()
        with self.assertRaisesRegex(RuntimeError, "underlying PyNode has already been deallocated"):
            f.metadata

    @unittest.expectedFailure
    def test_naughty_anomaly_access(self):
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x

            @staticmethod
            def backward(ctx, g):
                return g

        x = torch.zeros(1, requires_grad=True)
        y = MyFunction.apply(x)
        y.backward()
        y.grad_fn.metadata
        g = y.grad_fn
        del y
        g.metadata  # this currently fails, but shouldn't

    def test_naughty_autograd_function_stashing_ctx(self):
        saved_ctx = []

        class Id(Function):
            @staticmethod
            def forward(ctx, x):
                ctx.save_for_backward(x)
                return x

            @staticmethod
            def backward(ctx, grad_x):
                saved_ctx.append(ctx)
                return ctx.saved_tensors

        p = torch.zeros(1, requires_grad=True)
        loss = Id.apply(p)
        loss.backward(retain_graph=True)
        del loss
        # At this point in time, it complains that the graph has been freed
        # (which is indeed true, although a somewhat indirect way of stating
        # the problem).
        self.assertRaises(RuntimeError, lambda: saved_ctx[0].saved_tensors)

    def test_custom_autograd_repeated_grad_grad(self):
        # This test failed the equality check in PR #22983; it's an interesting
        # and different test case worth enshrining. mult1 is not testing
        # anything that interesting, but mult2 is the interesting case.
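        # mult2 differs from mult1 in that its backward reads the tensors saved
        # in forward, so repeating grad-of-grad on a retained graph exercises
        # the saved-value bookkeeping that PR #22983 touched.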
def mult1(x): return x.prod(dim=-1).prod(dim=-1) class Mult(torch.autograd.Function): @staticmethod def forward(ctx, x): y = mult1(x) ctx.save_for_backward(x, y) return y @staticmethod def backward(ctx, grad_output): x, y = ctx.saved_tensors return (grad_output * y)[:, None, None] / x mult2 = Mult.apply def check_gradgrad_repeated(x, y): gy, = torch.autograd.grad(y[0], x, create_graph=True) ggy_1, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True) gy, = torch.autograd.grad(y[0], x, create_graph=True) ggy_2, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True) self.assertEqual(ggy_1[0, 0, 1], ggy_2[0, 0, 1]) x = torch.ones(2, 4, 4).requires_grad_() check_gradgrad_repeated(x, mult1(x)) check_gradgrad_repeated(x, mult2(x)) def test_custom_autograd_no_early_free(self): # This test failed complaining that buffers had already been freed # prior to #22983. Also pretty interesting test case. class Double(torch.autograd.Function): @staticmethod def forward(ctx, x): y = x ** 2 ctx.save_for_backward(x, y) return y @staticmethod def backward(ctx, grad_output): x, _ = ctx.saved_tensors return grad_output * 2 * x # this is equivalent, but uses the output of .forward() in .backward() class Double2(Double): @staticmethod def backward(ctx, grad_output): x, y = ctx.saved_tensors return grad_output * 2 * y / x double = Double.apply double2 = Double2.apply x = torch.tensor(2).double().requires_grad_() self.assertTrue(gradcheck(double, x)) self.assertTrue(gradgradcheck(double, x)) self.assertTrue(gradcheck(double2, x)) self.assertTrue(gradgradcheck(double2, x)) y = double(x) torch.autograd.grad(y, x, create_graph=True) torch.autograd.grad(y, x) y = double2(x) torch.autograd.grad(y, x, create_graph=True) torch.autograd.grad(y, x) # should not error! def test_detach(self): x = torch.randn(10, 10, requires_grad=True) y = x + 2 y = y.detach() z = y * 4 + 2 self.assertFalse(y.requires_grad) self.assertFalse(z.requires_grad) x = torch.randn(10, 10, requires_grad=True) y = x * 2 y = y.detach() self.assertFalse(y.requires_grad) self.assertIsNone(y.grad_fn) z = x + y z.sum().backward() # This is an incorrect gradient, but we assume that's what the user # wanted. detach() is an advanced option. 
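        # Concretely, z = x + y with y detached behaves like z = x + const,
        # so dz/dx is all ones even though y was originally computed from x.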
self.assertEqual(x.grad, torch.ones(10, 10))

        # in-place detach
        x = torch.randn(10, 10, requires_grad=True)
        y = torch.randn(10, 10, requires_grad=True)
        a = x * 2
        (y + a).sum().backward(retain_graph=True)
        a.detach_()
        self.assertFalse(a.requires_grad)
        (y + a).sum().backward()  # this won't backprop to x
        self.assertEqual(x.grad, torch.ones(10, 10) * 2)
        self.assertEqual(y.grad, torch.ones(10, 10) * 2)

        # in-place detach on a view raises an exception
        view = x.narrow(0, 1, 4)
        self.assertRaisesRegex(RuntimeError, 'view', lambda: view.detach_())

    def test_detach_base(self):
        "detaching base does not detach view"
        x = torch.randn(10, 10, requires_grad=True)
        view = x.narrow(0, 1, 4)
        x.detach_()
        self.assertFalse(x.requires_grad)
        self.assertTrue(view.requires_grad)
        self.assertIsNotNone(view.grad_fn)
        self.assertIs(view._base, x)

    def _test_type_conversion_backward(self, t):
        fvar = Variable(t(torch.randn(5, 5).float()), requires_grad=True)
        fvar.double().sum().backward()
        self.assertEqual(fvar.grad, torch.ones_like(fvar))
        self.assertEqual(type(fvar.grad), type(fvar))
        dvar = Variable(t(torch.randn(5, 5).double()), requires_grad=True)
        dvar.float().sum().backward()
        self.assertEqual(dvar.grad, torch.ones_like(dvar))
        self.assertEqual(type(dvar.grad), type(dvar))

    def test_type_conversions(self):
        x = torch.randn(5, 5)
        self.assertIsInstance(x.float(), torch.FloatTensor)
        self.assertIsInstance(x.int(), torch.IntTensor)
        if torch.cuda.is_available():
            self.assertIsInstance(x.float().cuda(), torch.cuda.FloatTensor)
            self.assertIsInstance(x.int().cuda(), torch.cuda.IntTensor)
            self.assertIsInstance(x.int().cuda().cpu(), torch.IntTensor)
            if torch.cuda.device_count() >= 2:
                x2 = x.float().cuda(1)
                self.assertIsInstance(x2, torch.cuda.FloatTensor)
                self.assertIs(x2.get_device(), 1)
                x2 = x.float().cuda()
                self.assertIsInstance(x2, torch.cuda.FloatTensor)
                self.assertIs(x2.get_device(), 0)
                x2 = x2.cuda(1)
                self.assertIsInstance(x2, torch.cuda.FloatTensor)
                self.assertIs(x2.get_device(), 1)
                y = Variable(torch.randn(5).cuda(1), requires_grad=True)
                y.cpu().sum().backward()
                self.assertIs(y.grad.get_device(), 1)
                self.assertIs(y.long().get_device(), 1)

        for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
            for y_var in (True, False):
                y = torch.randint(5, (5, 5), dtype=t.dtype)
                y = Variable(y) if y_var else y
                self.assertIsInstance(x.type(t), t)
                self.assertIsInstance(x.type_as(y), t)
                # TODO: t.dtype should work
                t_dtype = t().dtype
                self.assertIsInstance(x.type(t_dtype), t)
                self.assertIs(t_dtype, x.type(t_dtype).dtype)
                self.assertEqual(y.data_ptr(), y.type(t).data_ptr())
                if torch.cuda.is_available():
                    for x_cuda in (True, False):
                        for y_cuda in (True, False):
                            x_c = x.cuda() if x_cuda else x
                            y_c = y.cuda() if y_cuda else y
                            _, y_type = y_c.type().rsplit('.', 1)
                            y_typestr = ('torch.cuda.'
if y_cuda else 'torch.') + y_type
                            self.assertEqual(y_c.type(), x_c.type(y_typestr).type())
                            self.assertIs(y_c.dtype, x_c.type(y_c.dtype).dtype)
                            self.assertEqual(y_c.data_ptr(), y_c.cuda().data_ptr() if y_cuda else y_c.data_ptr())

        self._test_type_conversion_backward(lambda x: x)
        if torch.cuda.is_available():
            self._test_type_conversion_backward(lambda x: x.cuda())
            if torch.cuda.device_count() >= 2:
                # one of these has to be the non-default device
                self._test_type_conversion_backward(lambda x: x.cuda(0))
                self._test_type_conversion_backward(lambda x: x.cuda(1))

    def test_isolated_node(self):
        x = torch.randn(5, 5, requires_grad=True)
        y = torch.randn(5, 5, requires_grad=True)

        a = x + y
        b = torch.max(a, 1, True)[1].repeat(1, 5).double()
        o = (b + a).sum()
        o.backward()

    def test_shape(self):
        x = torch.randn(3, 4)
        self.assertEqual(2, len(x.shape))
        self.assertEqual(x.shape[0], 3)
        self.assertEqual(x.shape[1], 4)

    def test_numpy_requires_grad(self):
        x = torch.randn(2, 2, requires_grad=True)
        err_msg_outputs = r"Can't call numpy\(\) on Tensor that requires grad. Use tensor.detach\(\).numpy\(\) instead."
        with self.assertRaisesRegex(RuntimeError, err_msg_outputs):
            x.numpy()

        with torch.no_grad():
            x.numpy()

        x = torch.randn(2, 2)
        x.numpy()

        with torch.no_grad():
            x.numpy()

    def test_return_leaf(self):
        class Identity(Function):
            @staticmethod
            def forward(ctx, a, b):
                return a, a + b

            @staticmethod
            def backward(ctx, grad_a, grad_b):
                return grad_a + grad_b, grad_b

        hook_called = [False]
        x = torch.randn(5, 5, requires_grad=True)
        y = torch.randn(5, 5, requires_grad=True)

        q, p = Identity.apply(x, y)

        # Make sure hooks only receive grad from usage of q, not x.
        def hook(grad):
            hook_called[0] = True
            self.assertEqual(grad, torch.ones(5, 5))

        q.register_hook(hook)
        (q + p + x).sum().backward()
        self.assertEqual(x.grad, torch.ones(5, 5) * 3)
        self.assertEqual(y.grad, torch.ones(5, 5))
        self.assertTrue(hook_called[0])

    def test_return_leaf_inplace(self):
        class Inplace(InplaceFunction):
            @staticmethod
            def forward(ctx, a, b):
                ctx.mark_dirty(a)
                return a.add_(b), b + 2

            @staticmethod
            def backward(ctx, grad_a, grad_b):
                return grad_a, grad_a + grad_b

        x = torch.randn(5, 5)
        y = torch.randn(5, 5, requires_grad=True)

        q, p = Inplace.apply(x, y)
        self.assertIs(q, x)
        self.assertIs(q.grad_fn.__class__, Inplace._backward_cls)
        self.assertTrue(q.requires_grad)
        q.sum().backward()
        self.assertEqual(y.grad, torch.ones(5, 5))

    def test_leaf_assignment(self):
        x = torch.randn(5, 5)
        y = torch.randn(5, requires_grad=True)
        z = torch.randn(5, requires_grad=True)

        x[0] = y
        x[1] = 2 * z
        self.assertTrue(x.requires_grad)
        self.assertIsNot(x.grad_fn, None)
        x.sum().backward()
        self.assertEqual(y.grad, torch.ones(5))
        self.assertEqual(z.grad, torch.ones(5) * 2)

    def test_no_grad_assignment(self):
        x = torch.randn(5, 5, requires_grad=True)
        y = torch.randn(5)
        with torch.no_grad():
            x[0] = y

        self.assertTrue(x.requires_grad)
        self.assertIsNone(x.grad_fn)

    def test_no_grad_modifies_version(self):
        x = torch.randn(5, requires_grad=True)
        y = torch.randn(5, requires_grad=True)
        z = (x * y).sum()
        with torch.no_grad():
            x *= 2
        self.assertRaisesRegex(RuntimeError, 'modified by an inplace operation',
                               lambda: z.backward())

    def test_no_grad_input(self):
        class MyFunction(Function):
            @staticmethod
            def forward(self, x):
                return x

            @staticmethod
            def backward(self, grad_output):
                return grad_output

        x = torch.randn(5, requires_grad=True)
        with torch.no_grad():
            y = MyFunction.apply(x)

        self.assertTrue(x.requires_grad)
        self.assertIsNone(y.grad_fn)

    def test_backward_copy(self):
        # This test checks the backward engine for a
very subtle bug that appeared
        # in one of the initial versions of autograd. Gradient tensors were
        # simply stored in lists while the function waited for all its gradients
        # to be computed. However, sometimes an output was used multiple times,
        # so the gradients needed to be summed. The engine used to keep a need_copy
        # set of tensors that would need a clone upon the next addition, and removed
        # them from the set as soon as the clone was performed. However, this
        # could lead to incorrect results if the same gradient tensor was
        # buffered in three places in the graph:
        # 1. When accumulating gradients in one of these places it was cloned
        #    and removed from the need_copy set.
        # 2. When accumulating in the second place, it wasn't in the need_copy set,
        #    so the gradients were simply accumulated in-place (which already
        #    modified the grad in the 3rd place)
        # 3. When accumulating in the third place, it wasn't in the need_copy set
        #    either, so the incoming gradient was summed in-place, yielding
        #    incorrect results in all functions, except the first one.
        x = torch.ones(5, 5, requires_grad=True)
        y = torch.ones(5, 5, requires_grad=True)
        # Simulate that we're in the middle of the graph
        a = x + 2
        b = y + 2
        c = x + 2
        # This op will just return grad_output two times in backward
        add1 = a + b
        add2 = add1 + c
        # Simulate a long branch, so grad_output will get buffered.
        for _ in range(4):
            a = a * 2
            b = b * 2
            c = c * 2
        branch = a + b + c
        out = add2 + branch
        # expected gradients are:
        # for x: 34 (16 from final a, 16 from final c, 2 from add2)
        # for y: 17 (16 from final b, 1 from add2)
        grad_output = torch.ones(5, 5)
        out.backward(grad_output)
        self.assertEqual(x.grad, torch.ones(5, 5) * 34)
        self.assertEqual(y.grad, torch.ones(5, 5) * 17)

    def test_save_none_for_backward(self):
        test_case = self

        class MyFn(Function):
            @staticmethod
            def forward(ctx, input):
                ctx.save_for_backward(None, input, None)
                return input * input

            @staticmethod
            def backward(ctx, grad_output):
                n1, input, n2 = ctx.saved_tensors
                test_case.assertIsNone(n1)
                test_case.assertIsNone(n2)
                return 2 * input * grad_output

        x = torch.randn(5, 5, requires_grad=True)
        y = MyFn.apply(x)
        y.sum().backward()
        self.assertEqual(x.grad, 2 * x)

    def test_too_many_grads(self):
        class MyFn(Function):
            @staticmethod
            def forward(ctx, input):
                return input

            @staticmethod
            def backward(ctx, grad_output):
                return grad_output, None, None

        x = torch.randn(5, 5, requires_grad=True)
        y = MyFn.apply(x)
        y.sum().backward()
        self.assertEqual(x.grad, torch.ones_like(x))

    def test_pickle(self):
        x = torch.randn(10, 10, requires_grad=True)
        y = torch.randn(10, 10, requires_grad=False)

        def assert_strict_equal(var1, var2):
            self.assertEqual(var1, var2)
            self.assertEqual(var1.requires_grad, var2.requires_grad)

        serialized = [pickle.dumps([x, y], protocol=p) for p in range(3)]
        for dump in serialized:
            xc, yc = pickle.loads(dump)
            assert_strict_equal(xc, x)
            assert_strict_equal(yc, y)

    def test_dep_nograd(self):
        class F1(Function):
            @staticmethod
            def forward(ctx, input):
                out = torch.randn(input.size())
                ctx.mark_non_differentiable(out)
                return input, out

            @staticmethod
            def backward(ctx, grad_output, ignored):
                return grad_output

        class F2(Function):
            @staticmethod
            def forward(ctx, input, ignored):
                return input

            @staticmethod
            def backward(ctx, grad_output):
                return grad_output, None

        x = torch.randn(5, requires_grad=True)
        a, b = F1.apply(x)
        b = b + 1  # separate F1 from F2 by another op
        self.assertTrue(a.requires_grad)
        self.assertFalse(b.requires_grad)
        c = F2.apply(a, b)
        c.backward(torch.ones(c.size()))
        self.assertEqual(x.grad,
torch.ones(x.size()))

    def test_set_grad_enabled(self):
        x = torch.tensor([1.], requires_grad=True)
        with torch.set_grad_enabled(False):
            y = x * 2
        self.assertFalse(y.requires_grad)
        with torch.set_grad_enabled(True):
            y = x * 2
        self.assertTrue(y.requires_grad)
        with torch.set_grad_enabled(False):
            torch.set_grad_enabled(True)
            y = x * 2
        self.assertTrue(y.requires_grad)

    def test_simple_reentrant(self):
        y_data = torch.randn(2, 2)

        class Reenter(Function):
            @staticmethod
            def forward(ctx, x):
                with torch.enable_grad():
                    ctx.x = Variable(x, requires_grad=True)
                    ctx.y = Variable(y_data, requires_grad=True)
                    ctx.output_var = ctx.x * ctx.y
                return ctx.output_var.detach()

            @staticmethod
            def backward(ctx, grad_output):
                with torch.enable_grad():
                    ctx.output_var.sum().backward()
                return ctx.x.grad * grad_output

        # Reentrant starts on CPU thread, finishes on GPU thread
        x = torch.randn(2, 2, requires_grad=True)
        out = Reenter.apply(x)
        out.sum().backward()
        self.assertEqual(x.grad, y_data)

    def test_reentrant_child_error(self):
        # Parent graph.
        a = torch.rand(3, 3, requires_grad=True)
        c = a * a

        # Reentrant child graph.
        b = torch.rand(3, 3, requires_grad=True)
        e = b * b
        f = TestAutograd.SimulateBackwardError.apply(e)
        reentrant_root = f.sum()

        class ReentrantFunc(Function):
            @staticmethod
            def forward(ctx, inp):
                return inp.clone()

            @staticmethod
            def backward(ctx, grad):
                # Reentrant backward in child will throw an error.
                reentrant_root.backward()
                return grad

        d = ReentrantFunc.apply(c)
        with self.assertRaisesRegex(Exception, 'Simulate error'):
            d.sum().backward()

    def test_var_mean_differentiable(self):
        dim = [2, 4]
        keepdim = False
        input1 = torch.randn(3, 4, 5, 6, 2, 3, requires_grad=True)
        input2 = deepcopy(input1)
        var1, mean1 = torch.var_mean(input1, dim=dim, keepdim=keepdim)
        var2 = input2.var(dim=dim, keepdim=keepdim)
        mean2 = input2.mean(dim=dim, keepdim=keepdim)
        grad = torch.randn(3, 4, 6, 3, requires_grad=True)

        r1 = var1 * var1 * mean1 * mean1
        r2 = var2 * var2 * mean2 * mean2
        self.assertEqual(r1, r2, rtol=0.01, atol=0.0)

        torch.autograd.backward(r1, grad)
        torch.autograd.backward(r2, grad)
        self.assertEqual(input1.grad, input2.grad, rtol=0.01, atol=0.0)

    @slowTest
    @skipIfNoLapack
    def test_lobpcg(self):

        def func(k, A, largest=True, B=None):
            X_shape = list(A.shape)
            X_shape[-1] = k
            X = torch.eye(A.size(-2), k, dtype=A.dtype, device=A.device)
            if A.dim() > 2:
                X = X.expand(X_shape)

            D, U = torch.lobpcg(A=A, k=k, B=B, X=X)

            # LOBPCG uses a random initial eigenspace approximation
            # if parameter `X` is not provided.
            # This may cause a non-deterministic behavior
            # when it comes to the sign of an eigenvector
            # (note if v is an eigenvector, so is -v),
            # hence we eliminate this non-determinism
            # by making sure that each column of U
            # gets multiplied by the sign of its max (in absolute value) element.
            # Also, gradcheck changes the content of the input by +/- eps (default to 1e-06)
            # to compute the numerical gradient which can also cause the signs to flip.
            _, idx = U.abs().max(-2, keepdim=True)
            sign = U.gather(-2, idx).sign()
            U = U * sign
            return D, U

        # TODO: review if this can be ported to OpInfos or moved to test_linalg.py
        def run_symeig_test(k, sizes, largest=True):
            A = torch.rand(*sizes).double()
            A = A.matmul(A.transpose(-1, -2)) / 10
            A.requires_grad_(True)

            gradcheck(lambda A: func(k, A, largest), A, check_batched_grad=False)

            # Custom gradient vectors for better stability due to some
            # non-determinism in the lobpcg's forward.
            # Note it is not required if symeig is in forward instead (tested).
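            # (The 1/100 scaling of the random grad vectors below keeps them
            # small, which presumably helps gradgradcheck stay within its
            # relaxed atol=1e-4 given the noise in lobpcg's forward.)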
D_grad = torch.rand(*A.shape[:-2], k) / 100 U_grad = torch.rand(*A.shape[:-1], k) / 100 gradgradcheck(lambda A: func(k, A, largest), A, [D_grad, U_grad], atol=1e-4, check_batched_grad=False) # check whether A.grad is symmetric A = A.detach().requires_grad_(True) D, U = func(k, A, largest) (D.sum() + U.sum()).backward() self.assertEqual(A.grad, A.grad.transpose(-1, -2)) # the tests below take about 1-2 minutes to finish, # but we want to be extra sure that the backward is correct. for largest in [True, False]: run_symeig_test(1, (6, 6), largest=largest) run_symeig_test(1, (2, 6, 6), largest=largest) run_symeig_test(1, (2, 2, 6, 6), largest=largest) run_symeig_test(2, (6, 6), largest=largest) run_symeig_test(2, (2, 6, 6), largest=largest) run_symeig_test(2, (2, 2, 6, 6), largest=largest) run_symeig_test(3, (9, 9), largest=largest) run_symeig_test(3, (2, 9, 9), largest=largest) run_symeig_test(3, (2, 2, 9, 9), largest=largest) def test_variable_traverse(self): def get_out_and_unrefed_cycle(): inp = torch.randn(10, requires_grad=True) tmp = inp.view(10, 1) out = tmp.view(10) # Create a reference cycle that contains an # intermediary Variable in the graph my_list = [] my_list.append(tmp) my_list.append(my_list) return out out = get_out_and_unrefed_cycle() gc.collect() # This will segfault if things have been erroneously released out.backward(torch.randn(out.size())) def test_maximum_and_minimum_subgradient(self): def run_test(f, a, b, expected_a_grad, expected_b_grad): a = torch.tensor(a, requires_grad=True) b = torch.tensor(b, requires_grad=True) z = f(a, b) z.sum().backward() self.assertEqual(a.grad, expected_a_grad) self.assertEqual(b.grad, expected_b_grad) run_test(torch.maximum, [0., 1., 2.], [1., 1., 1.], [0., 0.5, 1.], [1., 0.5, 0.]) run_test(torch.minimum, [0., 1., 2.], [1., 1., 1.], [1., 0.5, 0.], [0., 0.5, 1.]) # TODO: norm is deprecated, update these tests and port them to OpInfos # or test_linalg.py def test_norm_subgradient(self): def run_test(input_size, norm_deg): input = torch.zeros(*input_size, requires_grad=True) input.norm(norm_deg).backward() self.assertEqual(input.grad.abs().sum(), 0) run_test((10,), 2) run_test((10, 10), 2) run_test((10,), 3) run_test((10,), 1) run_test((10,), 1.5) run_test((10,), inf) def test_norm_inf_subgradient(self): def run_test(input, expected, dim=None): x = torch.tensor(input, requires_grad=True) out = x.norm(inf, dim=dim, keepdim=True) out.backward(torch.ones(out.size())) self.assertEqual(x.grad, expected) run_test([0., 0., 0.], [0., 0., 0.]) run_test([1., 0., 1.], [0.5, 0., 0.5]) run_test([[1., 0., 1.], [0., 1., 1.]], [[0.25, 0., 0.25], [0., 0.25, 0.25]]) run_test([[1., 0., 1.], [0., 1., 0.]], [[0.5, 0., 0.5], [0., 1., 0.]], (1,)) run_test(torch.ones((2, 2, 2)), torch.full((2, 2, 2), 0.25), (0, 2)) # TODO: review porting these to OpInfo tests def test_pow_zero_tensor_gradient(self): def run_test(input_size, exponent): input = torch.zeros(*input_size, requires_grad=True) input.pow(exponent).sum().backward() self.assertEqual(input.grad.abs().sum(), 0) run_test((10,), torch.zeros(10)) run_test((10, 10), torch.zeros(10, 10)) run_test((10,), 0) def test_pow_scalar_base(self): a = torch.arange(1, 13, dtype=torch.double).view(3, 4).requires_grad_() gradcheck(lambda a: torch.pow(2, a), (a,)) def test_sinc(self): # The derivative of sinc(x) at x=0 has to be special cased. # A naive computation will result in 0/0 -> NaN. 
# We also need to be careful when we are very close to 0, as the # derivative's denominator is squared, and there are some floats # that are positive and whose squares are zero. a = torch.tensor([0.0, torch.finfo(torch.double).tiny, 1.0], dtype=torch.double, requires_grad=True) gradcheck(torch.sinc, a) def test_profiler(self): x = torch.randn(10, 10) with profile(use_kineto=kineto_available()) as p: self.assertTrue(torch.autograd._profiler_enabled()) y = x * 2 + 4 self.assertFalse(torch.autograd._profiler_enabled()) names = ['aten::mul', 'aten::add'] found_indices = set() for evt in p.function_events: if evt.name in names: found_indices.add(names.index(evt.name)) self.assertEquals(len(found_indices), len(names)) def test_profiler_seq_nr(self): with profile(use_kineto=kineto_available()) as p: x = torch.randn(10, 10, requires_grad=True) y = torch.randn(10, 10, requires_grad=True) z = x + y s = z.sum() s.backward() print(p.key_averages().table( sort_by="self_cpu_time_total", row_limit=-1)) # expecting aten::add, aten::sum to have the sequence numbers, # expecting the corresponding backward nodes to have the same numbers # as the forward ops add_seq_nr = -1 sum_seq_nr = -1 found_add = found_sum = False found_bwd_add = found_bwd_sum = False found_empty = False for e in p.function_events: # Ignore record_function user scope. if "autograd::engine::evaluate_function" in e.name: continue if e.name == "aten::add": add_seq_nr = e.sequence_nr self.assertFalse(found_add) found_add = True elif e.name == "aten::sum": sum_seq_nr = e.sequence_nr self.assertFalse(found_sum) found_sum = True elif "Add" in e.name and "Backward" in e.name: self.assertEqual(e.sequence_nr, add_seq_nr) self.assertFalse(found_bwd_add) found_bwd_add = True elif "Sum" in e.name and "Backward" in e.name: self.assertEqual(e.sequence_nr, sum_seq_nr) self.assertFalse(found_bwd_sum) found_bwd_sum = True # check that nested ops (e.g. 
empty) don't have
            # sequence number
            if e.name == "aten::empty":
                self.assertEqual(e.sequence_nr, -1)
                found_empty = True

        self.assertGreaterEqual(add_seq_nr, 0)
        self.assertGreaterEqual(sum_seq_nr, 0)
        self.assertNotEqual(add_seq_nr, sum_seq_nr)
        self.assertTrue(found_add)
        self.assertTrue(found_sum)
        self.assertTrue(found_bwd_add)
        self.assertTrue(found_bwd_sum)
        self.assertTrue(found_empty)

    def test_profiler_unboxed_only(self):
        x = torch.rand(3, 4)

        with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
            x.resize_([3, 2])

    def test_profiler_propagation(self):
        def foo(x):
            with record_function("in_foo") as rf:
                return x * 2

        x = torch.rand(3, 4)
        traced_foo = torch.jit.trace(foo, x)

        def bar(x):
            with record_function("in_bar") as rf:
                # we expect that the profiler will be able
                # to propagate across the fork
                fut = torch.jit._fork(traced_foo, x)
                y = torch.jit._wait(fut)
                # note: continuation (and rf's end) can
                # be executed in a different thread
                with record_function("in_bar_after_wait") as rf2:
                    y = y * 2
            return y

        traced_bar = torch.jit.trace(bar, x)

        with profile(use_kineto=kineto_available()) as p:
            traced_bar(x)

        found_foo = False
        found_bar = False
        found_bar_after_wait = False
        for info in p.function_events:
            if info.name == "in_foo":
                self.assertFalse(found_foo)
                found_foo = True
            elif info.name == "in_bar":
                self.assertFalse(found_bar)
                found_bar = True
            elif info.name == "in_bar_after_wait":
                self.assertFalse(found_bar_after_wait)
                found_bar_after_wait = True
        self.assertTrue(found_foo)
        self.assertTrue(found_bar)
        self.assertTrue(found_bar_after_wait)

    def test_record_function_callbacks(self):
        x = torch.randn(10, 10)
        with profile(use_kineto=kineto_available()) as p:
            with record_function("foo"):
                y = x * 2 + 4

        function_events = p.function_events
        foo_event = [event for event in function_events if "foo" in event.name][0]
        self.assertEqual(foo_event.count, 1)

    def test_profiler_aggregation_fake(self):
        events = EventList()
        id = [0]

        def get_id():
            id[0] = id[0] + 1
            return id[0]

        # [[thread_id, [(start, end, id), ....]], ...]
        # Using list instead of a dict so order is guaranteed for any Python
        # version
        threads = [
            [1, [(0, 1, get_id()), (1, 2, get_id())]],
            [0, [(0, 2, get_id()), (1, 2, get_id()), (1, 3, get_id())]],
        ]
        for thread, ranges in threads:
            for range in ranges:
                assert(len(range) == 3)
                events.append(
                    FunctionEvent(
                        id=range[2],
                        node_id=0,
                        name="",
                        thread=thread,
                        start_us=range[0],
                        end_us=range[1],
                    )
                )

        events._populate_cpu_children()

        # Note that [1, 3] pushes out [0, 2] first.
Then we record [1, 2] # as a child of [1, 3] res = [[], [], [], [], [4]] def get_children_ids(event): return [child.id for child in event.cpu_children] assert([get_children_ids(event) for event in events] == res) def test_profiler_aggregation_table(self): """ Test if the profiling result is aggregated for `str(prof)` See: https://github.com/pytorch/pytorch/issues/37500 """ x = torch.randn(1024) with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof: torch.einsum("i->", x) prof_str = str(prof) prof_table = prof.table() self.assertEqual(prof_table, prof_str) def test_profiler_function_event_avg(self): avg = FunctionEventAvg() avg.add(FunctionEvent(id=0, node_id=0, name="foo", thread=0, start_us=10, end_us=15)) avg.add(FunctionEvent(id=1, node_id=0, name="foo", thread=0, start_us=20, end_us=30)) avg.add(avg) self.assertEqual(avg.key, "foo") # aggregate stats self.assertEqual(avg.count, 4) self.assertEqual(avg.cpu_time_total, 30) self.assertEqual(avg.self_cpu_time_total, 30) self.assertEqual(avg.cuda_time_total, 0) # average stats self.assertEqual(avg.cpu_time, 7.5) self.assertEqual(avg.cuda_time_total, 0) def test_profiler_shapes(self): print("") layer1 = torch.nn.Linear(20, 30) layer2 = torch.nn.Linear(30, 40) input = torch.randn(128, 20) with profile(record_shapes=True, use_kineto=kineto_available()) as prof: layer2(layer1(input)) print(prof.function_events) linear_expected_shapes = [ [[128, 20], [30, 20], [30]], [[128, 30], [40, 30], [40]], ] found_indices = set() for event in prof.function_events: if event.name == "aten::linear": self.assertTrue(event.input_shapes in linear_expected_shapes) found_indices.add(linear_expected_shapes.index(event.input_shapes)) self.assertEqual(len(found_indices), len(linear_expected_shapes)) def test_profiler_aggregation_lstm(self): print("") rnn = torch.nn.LSTM(10, 20, 2) total_time_s = 0 with profile(record_shapes=True, use_kineto=kineto_available()) as prof: for i in range(20): input = torch.randn(5, 3, 10) h = torch.randn(2, 3, 20) c = torch.randn(2, 3, 20) start = time.time() rnn(input, (h, c)) end = time.time() total_time_s += end - start print(prof.table( sort_by="self_cpu_time_total", row_limit=10, header="TEST")) print(prof.key_averages(group_by_input_shape=True).table( sort_by="self_cpu_time_total", row_limit=10)) print(prof.table( sort_by="self_cpu_time_total", row_limit=10, max_src_column_width=300, header="TEST", top_level_events_only=True)) print(prof.key_averages(group_by_input_shape=True).table( sort_by="self_cpu_time_total", row_limit=10, top_level_events_only=True)) total_time_us = total_time_s * 1000.0 * 1000.0 # make it us which is profiler default print( "Total time based on python measurements: ", _format_time(total_time_us) ) print( "CPU time measurement python side overhead: {:.2f}%".format( (total_time_us / prof.self_cpu_time_total - 1.0) * 100.0 ) ) if sys.platform != "win32": with tempfile.NamedTemporaryFile() as trace_file: prof.export_chrome_trace(trace_file.name) def test_record_function(self): x = torch.randn(10, 10) def forward(x): with record_function("outer"): y = x * 2 + 4 with record_function("inner"): y = y - 1 y = y / 1 forward(x) with profile(use_kineto=kineto_available()) as p: forward(x) events = p.function_events important_events = [ 'outer', 'aten::mul', 'aten::add', 'inner', 'aten::sub', 'aten::div' ] idx = 0 for info in events: if info.name == important_events[idx]: idx = idx + 1 if idx == len(important_events): break self.assertEqual(idx, len(important_events)) # We can also use 
record_function to decorate arbitrary function @record_function('my_func') def f(x, y): return x + y with profile(use_kineto=kineto_available()) as p: f(1, 2) self.assertTrue('my_func' in str(p)) def test_record_function_multithreaded(self): rf = record_function("outer") rf.__enter__() with record_function("inner"): # test that exiting the record function after starting another one # doesn't throw. rf.__exit__(None, None, None) with record_function("inner"): rf.__enter__() # test that exiting the record function after ending another one # doesn't throw. rf.__exit__(None, None, None) def test_dir(self): x = torch.randn(10, 10) keys = dir(x) self.assertIn('shape', keys) # real and imag are only implemented for complex tensors. y = torch.randn(10, 10, dtype=torch.cfloat) for key in ['real', 'imag']: self.assertRaises(RuntimeError, lambda: hasattr(x, key)) self.assertTrue(hasattr(y, key)) keys.remove(key) for key in keys: self.assertTrue(hasattr(x, key)) def test_as_strided(self): def test(x, prepro_fn, size, strides, offset=None): x = x.to(torch.double).detach().requires_grad_() # Check that forward will **not** resize storage because it may # cause NaN in output and fail numerical Jacobian check consequently with torch.no_grad(): y = prepro_fn(x) if prepro_fn is not None else x max_offset = sum((si - 1) * st for si, st in zip(size, strides)) max_offset += offset if offset is not None else y.storage_offset() assert max_offset < len(y.storage()), "test case resizes storage" def closure(x): if prepro_fn is not None: x = prepro_fn(x) return x.as_strided(size, strides, offset) gradcheck(closure, [x]) gradgradcheck(closure, [x]) # test test(torch.arange(0, 25), lambda x: x.view(5, 5), [3, 3], [6, 2], 2) # test crazy stride at dim with size 1 case test(torch.randn(12), None, [1, 2, 1, 5], [0, 5, 100, 1], 2) # test expand case test(torch.randn(5), None, [3, 3, 3], [0, 1, 0], 2) test(torch.randn(5), None, [3, 3, 3], [0, 0, 0], 4) test(torch.randn(5), lambda x: x.expand(5, 5), [5, 5], [0, 1], 0) # test non-expand overlapping case test(torch.randn(35), None, [6, 6], [5, 1], 2) test(torch.randn(15), None, [3, 2], [3, 6], 2) # test transpose case test(torch.randn(3, 4), None, [4, 3], [1, 4]) # test "getting things outside the input" case x = torch.randn(6, 2) test(x[3:], None, [3, 2], [2, 1], 0) # should be all zeros self.assertEqual(x[3:].as_strided([3, 2], [2, 1], 0), x[:3]) # test select on expanded input case test(torch.randn(2, 3), lambda x: x.expand(10, 2, 3), [2, 3], [3, 1], 0) # TODO: see if these tests can be ported to OpInfos or moved to # test_tensor_creation_ops.py def _test_lerp_tensor_weights(self, cast): def construct_inputs(*shapes): start = cast(torch.randn(shapes[0], dtype=torch.double)).requires_grad_() end = cast(torch.randn(shapes[1], dtype=torch.double)).requires_grad_() weight = cast(torch.randn(shapes[2], dtype=torch.double)).requires_grad_() return [start, end, weight] all_test_shapes = [((3, 3, 3), (3, 3, 3), (3, 3, 3)), # no broadcasting ((3,), (3, 3, 3), (3, 3, 3)), # start broadcasting - 1 ((3, 3, 3), (3,), (3, 3, 3)), # end broadcasting - 1 ((3, 3, 3), (3, 3, 3), (3,)), # weight broadcasting - 1 ((), (3, 3, 3), (3, 3, 3)), # start broadcasting - 2 ((3, 3, 3), (), (3, 3, 3)), # end broadcasting - 2 ((3, 3, 3), (3, 3, 3), ()), # weight broadcasting - 2 ((3, 3), (3, 3, 3), (3,))] # all broadcasting for shapes in all_test_shapes: cur_inputs = construct_inputs(*shapes) gradcheck(torch.lerp, cur_inputs) gradgradcheck(torch.lerp, cur_inputs) def test_lerp_tensor_weights(self): 
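        # The identity cast below exercises every broadcasting combination on
        # plain CPU tensors; a hypothetical CUDA variant could pass
        # lambda t: t.cuda() as the cast instead.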
self._test_lerp_tensor_weights(lambda t: t) # TODO: see if these tests can be moved to OpInfos or test_reductions.py def test_reduce_dtype(self): def test_reduction(op, has_no_dim, takes_dtype=True): x = torch.randn(3, 3, dtype=torch.float, requires_grad=True) if has_no_dim: grad1, = torch.autograd.grad([op(x)], [x]) grad2, = torch.autograd.grad([op(x, dtype=torch.double)], [x]) self.assertEqual(grad1, grad2) self.assertEqual(grad2.dtype, torch.float) gi = torch.randn(op(x, dim=0).shape, dtype=torch.float) grad1, = torch.autograd.grad([op(x, dim=0)], [x], gi) if takes_dtype: grad2, = torch.autograd.grad([op(x, dim=0, dtype=torch.double)], [x], gi.double()) else: grad2, = torch.autograd.grad([op(x.double(), dim=0)], [x], gi.double()) self.assertEqual(grad1, grad2) self.assertEqual(grad2.dtype, torch.float) test_reduction(torch.sum, True) test_reduction(torch.prod, True) test_reduction(torch.cumsum, False) test_reduction(torch.cumprod, False) test_reduction(torch.logcumsumexp, False, takes_dtype=False) def test_inplace_on_view_saved_output(self): # Test an in-place operation on a view in which the in-place op saves # its output. Previously, this created a reference cycle. dealloc = [0] class IncrementOnDelete(object): def __del__(self): dealloc[0] += 1 def test(): root = torch.randn(3, 3, requires_grad=True) copy = root.clone() copy.grad_fn.register_hook(IncrementOnDelete()) view = copy.view(9) torch.nn.functional.relu(view, inplace=True) test() self.assertEqual(dealloc[0], 1) def test_inplace_on_view_leaf_errors(self): # Issue #21875: Fail faster (when we try to modify the view vs. in backward()) x = torch.zeros(1, requires_grad=True) y = x.view_as(x) with self.assertRaisesRegex(RuntimeError, "a view of a leaf Variable that " "requires grad is being used in " "an in-place operation."): y.add_(1) def test_inplace_on_view_backward(self): # Issue #10532: Make sure that this does not raise RuntimeError. net = nn.Sequential( nn.InstanceNorm2d(2), nn.ReLU(True) ) x = torch.tensor([[[[1.0, 1.0]]]], requires_grad=True) g, = torch.autograd.grad(net(x).pow(2), [x], grad_outputs=x.new_ones(x.shape) , create_graph=True) torch.autograd.grad(g.sum(), [x]) self.assertEqual(x, torch.tensor([[[[1.0, 1.0]]]])) # https://discuss.pytorch.org/t/freeing-buffer-strange-behavior/31955/8 inputs = torch.ones((1, 3, 256, 256), requires_grad=True) tmp1 = (inputs + 1).view_as(inputs) tmp2 = torch.nn.functional.threshold(tmp1, 0., 0., True) prob_interpolated = torch.sigmoid(tmp2) gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=inputs, grad_outputs=torch.ones(prob_interpolated.size()), create_graph=True, retain_graph=True)[0] gradient_penalty = gradients.sum() gradient_penalty.backward() fn = gradient_penalty.grad_fn.next_functions[0][0].next_functions[1][0] self.assertEqual(fn.name(), "ThresholdBackwardBackward0") def test_inplace_on_view_weak_grad_fn(self): # Issue 23502: Test that b's grad_fn is preserved. a = torch.arange(10.0, requires_grad=True) b = a.narrow(0, 0, 2).clone().view(-1) b.relu_() c = b.clone() del b gc.collect() s = c.sum() s.backward() self.assertEqual(s, torch.tensor(1.0)) # Issue #21875: Fail faster (when we try to modify the view vs. in backward()) a = torch.rand(10, requires_grad=True).narrow(0, 0, 10) with self.assertRaises(RuntimeError): b = a.relu_() # TODO: see if these tests can be moved to OpInfo or test_binary_ufuncs.py def test_mul_out(self): a = torch.randn(2, 2, requires_grad=True) b = torch.randn(2, 2, requires_grad=True) x = torch.zeros_like(a) # out=... 
functions don't support automatic differentiation currently
        self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))

        # the inputs can require grad if we're in no_grad() mode
        with torch.no_grad():
            torch.mul(a, b, out=x)
            self.assertEqual(x, a * b)

    def test_mul_out_result_requires_grad(self):
        a = torch.randn(2, 2)
        b = torch.randn(2, 2)
        x = torch.zeros(2, 2, requires_grad=True)
        # we should throw an exception if the output requires grad
        self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))

    # TODO: see if this test can be OpInfo'd or moved to diagonal's test suite
    def test_diagonal_derivative_requires_grad(self):
        # test that the backward requires grad
        # we do this because diagonal_backward uses inplace
        # operations and gradgradcheck does not catch whether
        # they work as expected (it will succeed even if
        # the gradient has requires_grad == False)
        a = torch.randn(5, 6, requires_grad=True)
        b = torch.diagonal(a)**2
        c = b.sum()
        d, = torch.autograd.grad(c, a, retain_graph=True, create_graph=True)
        self.assertTrue(d.requires_grad)

    def test_anomaly_detect_nan(self):
        size = 10

        class MyFunc(Function):
            @staticmethod
            def forward(ctx, inp1, inp2, fail_0th):
                ctx.fail_0th = fail_0th
                return inp1.sum(0, keepdim=True)

            @staticmethod
            def backward(ctx, gO):
                gI = gO.clone().expand(size)
                gI[0] = 0
                gI[0] /= 0  # Generate a nan
                if ctx.fail_0th:
                    return gI, None, None
                else:
                    return None, gI, None

        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, inp, True)
        out.backward()  # Should not fail

        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, inp, True)
        with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 0th output."):
            with warnings.catch_warnings(record=True) as w:
                with detect_anomaly():
                    out.backward()
            self.assertIn('No forward pass information', str(w[0].message))

        inp = torch.rand(size, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 1th output."):
            with warnings.catch_warnings(record=True) as w:
                with detect_anomaly():
                    out = MyFunc.apply(inp, inp, False)
                    out.backward()
            self.assertIn('MyFunc.apply', str(w[0].message))

    def test_nested_anomaly_detect_nan(self):
        size = 10

        class MyFunc(Function):
            @staticmethod
            def forward(ctx, inp1, fail_0th):
                ctx.fail_0th = fail_0th
                ctx.save_for_backward(inp1)
                return inp1.sum(0, keepdim=True)

            @staticmethod
            def backward(ctx, gO):
                inp, = ctx.saved_tensors
                fail_0th = ctx.fail_0th
                g = gO.clone().expand(size)
                gI = MyFunc2.apply(g * inp, g + inp, fail_0th)
                return gI, None

        class MyFunc2(Function):
            @staticmethod
            def forward(ctx, inp1, inp2, fail_0th):
                ctx.fail_0th = fail_0th
                return inp1 * 2.0 + inp2

            @staticmethod
            def backward(ctx, gO):
                fail_0th = ctx.fail_0th
                g1 = gO.clone()
                g2 = gO.clone()
                g1[0] = 0
                g2[0] = 0
                # generate a nan
                if fail_0th:
                    g1[0] /= 0
                else:
                    g2[0] /= 0
                return g1, g2, None

        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, True)
        ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
        gsum = ginp.sum()
        gsum.backward()  # should not fail

        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, True)
        ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
        gsum = ginp.sum()
        with warnings.catch_warnings(record=True) as w:
            with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
                with detect_anomaly():
                    gsum.backward()
        self.assertIn('No forward pass information', str(w[1].message))

        inp = torch.rand(size, requires_grad=True)
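        # Same setup, but with fail_0th=False the nan now comes out of MyFunc2's
        # second grad output; nested anomaly mode should then name both
        # MyFunc2.apply and the enclosing MyFunc.apply in its warnings, which
        # the assertions below check.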
with warnings.catch_warnings(record=True) as w: with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 1th output."): with detect_anomaly(): out = MyFunc.apply(inp, False) ginp, = torch.autograd.grad(out, (inp,), create_graph=True) gsum = ginp.sum() gsum.backward() self.assertIn('MyFunc2.apply', str(w[1].message)) self.assertIn('MyFunc.apply', str(w[2].message)) def test_anomaly_grad_warnings(self): # PyTorch won't throw warnings if there is an error # but we'd want to at least see them in stderr class StdErrDiverter: def __enter__(self): self.stderr_orig = sys.stderr self.stderr_new = io.StringIO() sys.stderr = self.stderr_new return self def __exit__(self, *args): self.captured = self.stderr_new.getvalue() sys.stderr = self.stderr_orig # if the warnings don't throw, they will be handled as regular warnings with self.assertRaisesRegex(RuntimeError, "one of the variables needed for gradient computation has been " "modified by an inplace operation"): with warnings.catch_warnings(record=True) as w: with detect_anomaly(): a = torch.randn(5, requires_grad=True) d1 = a + 1 d2 = d1 ** 2 d1 += 1 torch.autograd.grad(d2.sum(), a) self.assertEqual(len(w), 2) self.assertIn('Anomaly Detection has been enabled', str(w[0].message)) self.assertIn('Error detected in PowBackward0', str(w[1].message)) # if the warning throws, it will be printed to sys.stderr with self.assertRaisesRegex(RuntimeError, "one of the variables needed for gradient computation has been " "modified by an inplace operation"): with warnings.catch_warnings(record=True) as w: with detect_anomaly(): warnings.simplefilter("error") with StdErrDiverter() as s: a = torch.randn(5, requires_grad=True) d1 = a + 1 d2 = d1 ** 2 d1 += 1 torch.autograd.grad(d2.sum(), a) self.assertEqual(len(w), 1) self.assertIn('Anomaly Detection has been enabled', str(w[0].message)) self.assertIn('Error detected in PowBackward0', s.captured) def test_anomaly_assign_parent_cleanup(self): # Test that python objects created are properly cleaned up when assign_parent is called import weakref def get_ref(): # we use torch.exp here but any function that will construct a new node in its # backward call in grad mode will work x = torch.randn(2, 2, requires_grad=True) t = x.exp() # ExpBackward calls mul, creating the MulBackward node when create_graph=True. 
# In anomaly mode, a PyObject referencing MulBackward's "parent" ExpBackward is added to
            # MulBackward's anomaly metadata dict, creating the following reference chain:
            #
            # grad -> MulBackward -> PyObject -> ExpBackward
            #
            with detect_anomaly():
                grad = torch.autograd.grad(t, x, torch.ones_like(t), create_graph=True)

            # We add a weak reference to a new Foo object, which we insert into ExpBackward's metadata dict
            #
            # (PyObject) -> ExpBackward -> dict -> *Foo*
            #            t ----^        WeakRef ---^
            #
            # We want to test that, when grad goes out of scope at the end of this function, the PyObject is destroyed.
            # We can test this by seeing whether Foo is not kept alive once t is destroyed.
            class Foo(object):
                pass
            my_obj = Foo()
            meta_dict = t.grad_fn.metadata
            meta_dict[0] = my_obj
            ref = weakref.ref(my_obj)
            return t, ref

        t, ref = get_ref()
        self.assertIsNotNone(ref())
        del t
        self.assertIsNone(ref())

    def test_nested_anomaly_printstack_cleanup(self):
        # Test if metadata dict PyObject is properly destroyed
        import weakref

        def get_ref():
            # This is similar to the construction in test_anomaly_assign_parent_cleanup:
            #
            # MyFuncBackward2 -> PyObject -> MyFuncBackward -> dict -> Foo
            #                 out ---^            WeakRef ---^
            #
            # We want to check that Foo is still properly destroyed even when MyFunc2Backward's
            # AnomalyMetadata calls printstack, which does some python object manipulation.
            #
            # You might be wondering why we still need test_anomaly_assign_parent_cleanup,
            # since if the PyObject were not destroyed here, wouldn't this test detect that too?
            # The answer is that the custom function's PyObject (THPFunction) actually only holds
            # a weak reference to the C++ node!
            class MyFunc(Function):
                @staticmethod
                def forward(ctx, x):
                    ctx.save_for_backward(x)
                    return x

                @staticmethod
                def backward(ctx, gO):
                    x, = ctx.saved_tensors
                    return MyFunc2.apply(x)

            class MyFunc2(Function):
                @staticmethod
                def forward(ctx, x):
                    return x

                @staticmethod
                def backward(ctx, gO):
                    return gO + float("NaN")

            inp = torch.rand(1, requires_grad=True)
            out = MyFunc.apply(inp)
            ginp, = torch.autograd.grad(out, (inp,), create_graph=True)

            with warnings.catch_warnings(record=True) as w:
                with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
                    with detect_anomaly():
                        ginp.backward()

            class Foo(object):
                pass
            my_obj = Foo()
            meta_dict = out.grad_fn.metadata
            meta_dict[0] = my_obj
            ref = weakref.ref(my_obj)
            return out, ref

        t, ref = get_ref()
        self.assertIsNotNone(ref())
        del t
        self.assertIsNone(ref())

    # TODO: update these tests to use the linalg module and move to test_linalg.py
    @skipIfNoLapack
    def test_eig_no_eigenvectors(self):
        A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
        w, v = torch.eig(A, eigenvectors=False)
        with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
            torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])

    @skipIfNoLapack
    def test_eig_complex_eigenvalues(self):
        A = torch.tensor([[0., -1.], [1., 0.]], dtype=torch.float32, requires_grad=True)
        w, v = torch.eig(A, eigenvectors=True)
        with self.assertRaisesRegex(RuntimeError, 'does not support complex eigenvalues'):
            torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])

    @skipIfNoLapack
    def test_symeig_no_eigenvectors(self):
        A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
        w, v = torch.symeig(A, eigenvectors=False)
        with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
            torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])

    @skipIfNoLapack
    def
test_svd_no_singularvectors(self):
        A = torch.randn(2, 2, dtype=torch.float32, requires_grad=True)
        u, s, v = torch.svd(A, compute_uv=False)
        with self.assertRaisesRegex(RuntimeError, 'cannot compute backward'):
            torch.autograd.backward([u, s, v], [torch.ones_like(u), torch.ones_like(s), torch.ones_like(v)])

    def test_no_grad_copy(self):
        # create autograd function that saves grad pointer as class static
        class MyFunc(Function):
            static_grad_ptr = None

            @staticmethod
            def forward(ctx, inp1, inp2):
                return inp1 + inp2

            @staticmethod
            def backward(ctx, grad):
                MyFunc.static_grad_ptr = grad.data_ptr()
                return grad, grad

        class NonContGradFunc(Function):
            @staticmethod
            def forward(ctx, inp1):
                ctx.size = inp1.size()
                return torch.tensor([1.])

            @staticmethod
            def backward(ctx, grad):
                return torch.ones(1).expand(ctx.size)

        a = torch.randn(5, 6, requires_grad=True)
        b = torch.randn(5, 6, requires_grad=True)
        # non-contiguous grad should be copied
        NonContGradFunc.apply(MyFunc.apply(a, b)).backward()
        self.assertFalse(a.grad.data_ptr() == MyFunc.static_grad_ptr)
        self.assertFalse(b.grad.data_ptr() == MyFunc.static_grad_ptr)
        # test case that should trigger no copy for one of a, b
        a.grad = b.grad = None
        MyFunc.apply(a, b)[1][0].backward()
        p_g = MyFunc.static_grad_ptr
        p_a = a.grad.data_ptr()
        p_b = b.grad.data_ptr()
        # check that a and b use different grad buffers
        self.assertFalse(p_a == p_b)
        # check that one of them is using the computed buffer
        self.assertTrue(p_a == p_g or p_b == p_g)

    def test_no_grad_copy_sparse(self):
        # create autograd function that saves grad pointer as class static
        class MyFunc(Function):
            static_grad_ptr = None

            @staticmethod
            def forward(ctx, inp1, inp2):
                return inp1 + inp2

            @staticmethod
            def backward(ctx, grad):
                MyFunc.static_grad_ptr = grad._values().data_ptr()
                return grad, grad

        class NonContGradFunc(Function):
            static_grad_ptr = None

            @staticmethod
            def forward(ctx, inp1, inp2):
                return inp1 + inp2

            @staticmethod
            def backward(ctx, grad):
                # Create a sparse tensor with non-contiguous indices and values
                # and return as grad.
                v = torch.rand(1, 3)
                i = torch.ones(1, 1, dtype=torch.long)
                nv = v.expand(8, 3)
                ni = i.expand(1, 8)
                ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
                NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
                return ngrad, ngrad

        a = torch.randn(10, 3, requires_grad=True)
        b = torch.randn(10, 3, requires_grad=True)
        input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
        offsets = torch.tensor([0, 4])
        import torch.nn.functional as F

        # test case that should trigger no copy for one of a, b
        emb_matrix = MyFunc.apply(a, b)
        loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
        loss.backward(retain_graph=True)
        p_g = MyFunc.static_grad_ptr
        p_a = a.grad._values().data_ptr()
        p_b = b.grad._values().data_ptr()
        # check that a and b use different grad buffers
        self.assertFalse(p_a == p_b)
        # check that one of them is using the computed buffer
        self.assertTrue(p_a == p_g or p_b == p_g)
        # Run backwards multiple times to ensure accumulation works.
        for i in range(10):
            loss.backward(retain_graph=True)

        # non-contiguous indices and values; we should trigger a copy.
        a.grad = b.grad = None
        emb_matrix = NonContGradFunc.apply(a, b)
        loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
        loss.backward(retain_graph=True)
        p_g = NonContGradFunc.static_grad_ptr
        p_a = a.grad._values().data_ptr()
        p_b = b.grad._values().data_ptr()
        # check that a and b use different grad buffers
        self.assertFalse(p_a == p_b)
        # Verify we cloned both grads.
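        # (The sparse grad returned by NonContGradFunc has non-contiguous
        # indices and values, so autograd cannot reuse its buffer directly;
        # both accumulated grads must therefore live in fresh copies.)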
        self.assertFalse(p_a == p_g)
        self.assertFalse(p_b == p_g)
        # Run backward multiple times to ensure accumulation works.
        for i in range(10):
            loss.backward(retain_graph=True)

    def test_gradcheck_single_input(self):
        def check(fast_mode):
            def f(inp):
                return inp.mul(5)

            gradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
            gradgradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
        check(fast_mode=True)
        check(fast_mode=False)

    def test_gradcheck_sparse_input(self):
        def check(fast_mode):
            def fn(sparse):
                return torch.sparse.sum(sparse)

            gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=True,
                      check_batched_grad=False, fast_mode=fast_mode)
            with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'):
                gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=False,
                          check_batched_grad=False, fast_mode=fast_mode)
        check(fast_mode=True)
        check(fast_mode=False)

    def test_gradcheck_nondeterministic(self):
        class NonDetFunc(Function):
            @staticmethod
            def forward(ctx, x, jitter=0.0):
                ctx._jitter = jitter
                return x

            @staticmethod
            def backward(ctx, grad_out):
                return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None

        def check(fast_mode):
            inp = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
            gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, check_batched_grad=False, fast_mode=fast_mode)
            with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
                gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, check_batched_grad=False, fast_mode=fast_mode)
            with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
                gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, check_batched_grad=False, fast_mode=fast_mode)
            gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, nondet_tol=1e-5, check_batched_grad=False,
                      fast_mode=fast_mode)
            gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, nondet_tol=1e-5, check_batched_grad=False,
                      fast_mode=fast_mode)
            gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, nondet_tol=1e-5, check_batched_grad=False,
                          fast_mode=fast_mode)
        check(fast_mode=True)
        check(fast_mode=False)

    def test_gradcheck_validates_inputs(self):
        def check(fast_mode):
            # when inputs are not dense, but check_sparse_nnz is false
            x = torch.rand(10, requires_grad=True).to_sparse()
            with self.assertRaisesRegex(RuntimeError, 'dense when check_sparse_nnz is set to False.'):
                gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False, check_batched_grad=False,
                          fast_mode=fast_mode)
            self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False,
                                       check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))

            # when none of the inputs require grad (always raises even if raise_exception=False)
            x = torch.rand(10, requires_grad=False)
            with self.assertRaisesRegex(ValueError, 'at least one input tensor to require gradient'):
                gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)

            # (warning) when inputs are not double precision
            x = torch.ones(1, dtype=torch.float32, requires_grad=True)
            with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
                self.assertTrue(gradcheck(lambda x: x, (x,), atol=1e-1, fast_mode=fast_mode))

            # when layout is not mkldnn (aka has strides) and input has a dimension with stride 0. (always raises
            # even if raise_exception=False)
            x = torch.ones(1, dtype=torch.float64, requires_grad=True)
            x = x.expand((2, 2))
            with self.assertRaisesRegex(RuntimeError, 'The 0th input has a dimension with stride 0'):
                gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
        check(fast_mode=True)
        check(fast_mode=False)

    @unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
    def test_gradcheck_validates_input_mkldnn(self):
        # with mkldnn inputs, forward mode testing is not allowed
        # Update tolerances below to make sure the gradients match even with single precision floats
        # Use the warning assert to hide the float32 warning
        x = torch.ones(1).to_mkldnn().requires_grad_()
        with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
            with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
                gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=False,
                          check_forward_ad=True, atol=1e-1, rtol=1e-1)

        with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
            with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
                gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=True,
                          check_forward_ad=True, atol=1e-1, rtol=1e-1)

    @unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
    def test_gradcheck_test_outputs(self):
        def check(fast_mode):
            # when sparse outputs (always raises even if raise_exception=False)
            x = torch.rand(10, requires_grad=True).to_sparse()
            with self.assertRaisesRegex(ValueError, 'Sparse output is not supported at gradcheck yet'):
                gradcheck(lambda x: x, (x,), check_sparse_nnz=True, check_batched_grad=False, raise_exception=False,
                          fast_mode=fast_mode)

            # when mkldnn outputs (always raises even if raise_exception=False)
            root = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)
            with self.assertRaisesRegex(ValueError, 'MKLDNN output is not supported at gradcheck yet'):
                gradcheck(lambda x: x.to_mkldnn(), (root,), check_batched_grad=False, raise_exception=False,
                          fast_mode=fast_mode)
        check(fast_mode=True)
        check(fast_mode=False)

    def test_gradcheck_check_no_differentiable_outputs(self):
        def check(fast_mode):
            # When none of the outputs are differentiable, but the numerical gradient is not zero
            x = torch.ones((1,), requires_grad=True)
            with self.assertRaisesRegex(RuntimeError, 'Numerical gradient for function expected to be zero'):
                gradcheck(lambda x: torch.tensor([x]), x)
            self.assertFalse(gradcheck(lambda x: torch.tensor([x]), x, raise_exception=False, fast_mode=fast_mode))

            # succeed when there are no outputs at all
            self.assertTrue(gradcheck(lambda x: (), (x,), fast_mode=fast_mode))
        check(fast_mode=True)
        check(fast_mode=False)

    def test_gradcheck_check_batched_grad(self):
        def check(fast_mode):
            x = torch.rand(10, dtype=torch.double, requires_grad=True).to_sparse()
            # runtime error while computing batched grad (print big error)
            with self.assertRaisesRegex(RuntimeError, 'gradcheck or gradgradcheck failed while testing batched gradient'):
                gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True,
                          fast_mode=fast_mode)
            self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True,
                                       raise_exception=False, fast_mode=fast_mode))
        check(fast_mode=True)
        check(fast_mode=False)

    def test_gradcheck_backward_mul_by_grad_output(self):
        # when grad_input is sparse and has incorrect sparse_dim/dense_dim
        def check(fast_mode):
            def fn(x):
                def hook(grad):
                    if grad is not None:
                        return grad.to_dense().to_sparse(1)
                    return grad
                y = x.clone()
                y.register_hook(hook)
                return y.to_dense()
            x = torch.ones((2, 2), dtype=torch.double, requires_grad=True).to_sparse()
            with self.assertRaisesRegex(RuntimeError, 'grad is sparse tensor, but has incorrect sparse_dim'):
                gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
            self.assertFalse(gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
                                       raise_exception=False, fast_mode=fast_mode))

            # when backward is not multiplied by grad_output (non-sparse case)
            def fn2(x):
                y = x.clone()
                y.register_hook(lambda x: x + 1e-2)
                return y
            x = torch.ones(1, dtype=torch.double, requires_grad=True)
            with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
                gradcheck(fn2, (x,), atol=1e-1, fast_mode=fast_mode)
            self.assertFalse(gradcheck(fn2, (x,), atol=1e-1, raise_exception=False, fast_mode=fast_mode))

            # when backward is not multiplied by grad_output (sparse case)
            def fn3(x):
                y = x.clone().to_dense()
                y.register_hook(lambda x: x + 1e-2)
                return y
            x = torch.ones(1, dtype=torch.double, requires_grad=True).to_sparse()
            with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
                gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
            self.assertFalse(gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
                                       raise_exception=False, fast_mode=fast_mode))

            # when the layout of grad_input is not the same as the input
            class Test(Function):
                @staticmethod
                def forward(ctx, x):
                    return x

                @staticmethod
                def backward(ctx, x):
                    return x.to_sparse()
            x = torch.ones(1, dtype=torch.double, requires_grad=True)
            with self.assertRaisesRegex(RuntimeError, 'grad is incorrect layout'):
                gradcheck(Test.apply, (x,), check_batched_grad=False, fast_mode=fast_mode)
            self.assertFalse(gradcheck(Test.apply, (x,), check_batched_grad=False, raise_exception=False,
                                       fast_mode=fast_mode))
        check(fast_mode=True)
        check(fast_mode=False)

    def test_gradcheck_undefined_grad(self):
        def check(fast_mode):
            # when encountering a runtime error while running backward
            def fn(x):
                def hook(x):
                    if x is None:
                        raise RuntimeError("x is undefined")
                y = x.clone()
                y.register_hook(hook)
                return y
            x = torch.ones(1, dtype=torch.double, requires_grad=True)
            with self.assertWarnsRegex(UserWarning, "Backwards compatibility: New undefined gradient support checking feature"):
                with self.assertRaisesRegex(RuntimeError, 'Expected backward function to handle undefined output grads'):
                    gradcheck(fn, (x,), fast_mode=fast_mode)
                self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
        check(fast_mode=True)
        check(fast_mode=False)

    def test_gradcheck_jacobian_mismatch(self):
        def check(fast_mode):
            def fn(x):  # R -> R, C -> C
                y = x.clone()
                y.register_hook(lambda x: x + 1e-2)
                return y
            x = torch.ones(2, 2, requires_grad=True)
            with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
                gradcheck(fn, (x,), fast_mode=fast_mode)
            self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))

            x_c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
            with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
                gradcheck(fn, (x_c,), fast_mode=False)
            self.assertFalse(gradcheck(fn, (x_c,), raise_exception=False, fast_mode=False))

            def fn2(x):  # R -> C
                y = torch.complex(x, x)
                y.register_hook(lambda x: x + 1e-2)
                return y
            x = torch.ones(2, 2, requires_grad=True)
            with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
                gradcheck(fn2, (x,), fast_mode=False)
            self.assertFalse(gradcheck(fn2, (x,), raise_exception=False, fast_mode=False))

            def fn3(x):  # C -> R
                y = torch.real(x)
                y.register_hook(lambda x: x + 1e-2)
                return y
            with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
                gradcheck(fn3, (x_c,), fast_mode=False)
            self.assertFalse(gradcheck(fn3, (x_c,), raise_exception=False, fast_mode=False))
        check(fast_mode=True)
        check(fast_mode=False)

    def test_gradcheck_dense_and_sparse_inputs(self):
        def check(fast_mode):
            def fn(x, y):
                return x * y.coalesce().to_dense()
            a = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
            b = torch.rand(2, 2, dtype=torch.double).to_sparse().requires_grad_(True)
            self.assertTrue(gradcheck(fn, (a, b), check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode))
        check(fast_mode=True)
        check(fast_mode=False)

    @unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
    def test_gradcheck_multiple_mkldnn_inputs(self):
        def check(fast_mode):
            def fn(x, y):
                return x + y.to_dense()
            a = torch.rand(10, requires_grad=True)
            b = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
            self.assertTrue(gradcheck(fn, (a, b), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))

            def fn2(x, y):
                return x.to_dense() + y.to_dense()
            c = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
            self.assertTrue(gradcheck(fn, (a, c), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
        check(fast_mode=True)
        check(fast_mode=False)

    def test_gradcheck_output_shape_or_dtype_depend_on_values(self):
        def check(fast_mode):
            def fn(x):
                if torch.all(x >= 1):
                    return torch.cat([x, x])
                else:
                    return x
            a = torch.ones(1, dtype=torch.double, requires_grad=True)
            with self.assertRaisesRegex(AssertionError, 'return outputs with the same shape when inputs are perturbed'):
                self.assertTrue(gradcheck(fn, (a,), fast_mode=fast_mode))

            def fn2(x):
                if torch.all(x >= 1):
                    return x.to(torch.float32)
                else:
                    return x
            with self.assertRaisesRegex(AssertionError, 'return outputs with the same dtype when inputs are perturbed'):
                self.assertTrue(gradcheck(fn2, (a,), fast_mode=fast_mode))
        check(fast_mode=True)
        check(fast_mode=False)

    def test_gradcheck_complex_non_complex_outputs(self):
        def fn(x, y):
            z = torch.complex(x, y)
            return z, x + 1
        a = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
        b = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
        self.assertTrue(gradcheck(fn, (a, b)))

        def fn2(z):
            return z, torch.real(z)
        c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
        self.assertTrue(gradcheck(fn2, (c)))

    def test_gradcheck_get_numerical_jacobian(self):
        # get_numerical_jacobian is deprecated and no longer used internally by gradcheck
        from torch.autograd.gradcheck import get_numerical_jacobian

        def fn(inputs):
            # get_numerical_jacobian requires fn to take inputs as a tuple
            # and returns the jacobian wrt the first output
            x = inputs[0]
            y = inputs[1]
            return 2 * x + y, x + 2 * y
        a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
        b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)

        with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
            jacobian = get_numerical_jacobian(fn, (a, b), target=a, eps=1e-6)
        self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))

        with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
"get_numerical_jacobian was part of PyTorch's private API"): jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6) self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double)) self.assertEqual(jacobian[1], 1 * torch.eye(4, dtype=torch.double)) with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"): jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6, grad_out=2.0) def test_gradcheck_get_analytical_jacobian(self): from torch.autograd.gradcheck import get_analytical_jacobian def fn(x, y): return 2 * x + y, x + 2 * y a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64) b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64) outputs = fn(a, b) with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"): jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a, b), outputs[0]) self.assertEqual(jacobians[0], 2 * torch.eye(4, dtype=torch.double)) self.assertEqual(jacobians[1], 1 * torch.eye(4, dtype=torch.double)) self.assertTrue(reentrant) class NonDetFunc(Function): @staticmethod def forward(ctx, x, jitter=0.0): ctx._jitter = jitter return x @staticmethod def backward(ctx, grad_out): return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None outputs = NonDetFunc.apply(a, 1e-6) with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"): jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a,), outputs) self.assertFalse(reentrant) with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"): jacobians, _, _, _ = get_analytical_jacobian((a,), outputs, grad_out=2.0) def test_gradcheck_custom_error(self): from torch.autograd.gradcheck import GradcheckError def check(fast_mode): def fn(x): y = x.clone() y.register_hook(lambda x: x + 1e-2) return y x = torch.ones(2, 2, requires_grad=True) with self.assertRaisesRegex(GradcheckError, 'Jacobian mismatch for output 0 with respect to input 0'): gradcheck(fn, (x,), fast_mode=fast_mode) with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'): gradcheck(fn, (x,), fast_mode=fast_mode) self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode)) def fn2(x): raise RuntimeError("Not a GradcheckError!") # Checks that when raise_exception=False, non-GradcheckErrors are not caught by gradcheck with self.assertRaisesRegex(RuntimeError, "Not a GradcheckError!"): gradcheck(fn2, (x,), fast_mode=fast_mode, raise_exception=False) check(fast_mode=True) check(fast_mode=False) def test_gradcheck_forward_ad(self): def fn(x, y): return x + y, y def bad_fn(x, y): # Hacky way to check if we're currently inside a forward ad level is_running_forward_ad = fwAD._current_level >= 0 if is_running_forward_ad: y_p, y_d = fwAD.unpack_dual(y) y = fwAD.make_dual(y_p, y_d * 1.1) return x + y, y err_msg = "Jacobian computed with forward mode mismatch for output 0 with respect to input 1" for fast_mode in [True, False]: # Test for all inputs and outputs being real x = torch.rand(2, dtype=torch.double, requires_grad=True) y = torch.rand(2, dtype=torch.double, requires_grad=True) gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode) with self.assertRaisesRegex(RuntimeError, err_msg): gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode) def basic_mul(x): return torch.view_as_real(torch.resolve_conj(x * 1j)) gradcheck(basic_mul, x, check_forward_ad=True, 

    def test_gradcheck_forward_ad(self):
        def fn(x, y):
            return x + y, y

        def bad_fn(x, y):
            # Hacky way to check if we're currently inside a forward ad level
            is_running_forward_ad = fwAD._current_level >= 0

            if is_running_forward_ad:
                y_p, y_d = fwAD.unpack_dual(y)
                y = fwAD.make_dual(y_p, y_d * 1.1)

            return x + y, y

        err_msg = "Jacobian computed with forward mode mismatch for output 0 with respect to input 1"

        for fast_mode in [True, False]:
            # Test for all inputs and outputs being real
            x = torch.rand(2, dtype=torch.double, requires_grad=True)
            y = torch.rand(2, dtype=torch.double, requires_grad=True)

            gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
            with self.assertRaisesRegex(RuntimeError, err_msg):
                gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)

            def basic_mul(x):
                return torch.view_as_real(torch.resolve_conj(x * 1j))
            gradcheck(basic_mul, x, check_forward_ad=True, fast_mode=fast_mode)

            # Test for one input and one output being complex
            x = torch.rand(2, dtype=torch.cdouble, requires_grad=True)

            gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
            with self.assertRaisesRegex(RuntimeError, err_msg):
                gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)

            # Test for all inputs and outputs being complex
            y = torch.rand(2, dtype=torch.cdouble, requires_grad=True)

            gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
            with self.assertRaisesRegex(RuntimeError, err_msg):
                gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)

    def test_gradcheck_check_forward_or_backward_only(self):
        """Depending on settings for check_forward_ad and check_backward_ad, the
        correct codepaths should be reached (or not reached)
        """
        fwd_fail_err_msg = "FAIL FWD"
        bwd_fail_err_msg = "FAIL BWD"

        class UserFn(Function):
            @staticmethod
            def forward(ctx, foo, fwd_bad, bwd_bad):
                ctx.fwd_bad = fwd_bad
                ctx.bwd_bad = bwd_bad
                return foo * 2

            @staticmethod
            def vjp(ctx, gO):
                if ctx.bwd_bad:
                    raise RuntimeError(bwd_fail_err_msg)
                else:
                    return 2 * gO, None, None

            @staticmethod
            def jvp(ctx, gI, _1, _2):
                if ctx.fwd_bad:
                    raise RuntimeError(fwd_fail_err_msg)
                else:
                    return 2 * gI

        for fast_mode in (True, False):
            for check_forward_ad in (True, False):
                for check_backward_ad in (True, False):
                    for fwd_bad in (True, False):
                        for bwd_bad in (True, False):
                            fwd_should_fail = fwd_bad and check_forward_ad
                            bwd_should_fail = bwd_bad and check_backward_ad

                            def run():
                                gradcheck(UserFn.apply, (x, fwd_bad, bwd_bad),
                                          check_forward_ad=check_forward_ad, check_backward_ad=check_backward_ad,
                                          check_undefined_grad=check_backward_ad, check_batched_grad=check_backward_ad,
                                          fast_mode=fast_mode)

                            x = torch.rand(2, dtype=torch.double, requires_grad=True)

                            if not check_forward_ad and not check_backward_ad:
                                with self.assertRaisesRegex(AssertionError, "Expected at least one of"):
                                    run()
                                continue

                            if not fwd_should_fail and not bwd_should_fail:
                                run()
                            else:
                                # If both fail, backward AD failure "hides" forward AD failure
                                if fwd_should_fail:
                                    fail_msg = fwd_fail_err_msg
                                if bwd_should_fail:
                                    fail_msg = bwd_fail_err_msg
                                with self.assertRaisesRegex(RuntimeError, fail_msg):
                                    run()

    def test_version_counter(self):
        x = torch.randn(1, 2)

        # In-place op bumps version
        x_saved_version = x._version
        x.add_(1).add_(1)
        self.assertTrue(x._version > x_saved_version)

        # Differentiable view shares version counter
        xz = x[:]
        self.assertTrue(x._version == xz._version)
        xz.add_(1)
        self.assertTrue(x._version == xz._version)

        # `x.data = y` preserves the version counter of `x`
        x_saved_version = x._version
        x.data = torch.randn(2, 3)
        self.assertTrue(x._version == x_saved_version)
        x.add_(1)
        self.assertTrue(x._version > x_saved_version)
        # Make sure `x` is still using the same version counter it shares with `xz`
        self.assertTrue(x._version == xz._version)

        # In-place op on `xz` also updates the version of `x`,
        # because they share the version counter
        xz.add_(1)
        self.assertTrue(x._version == xz._version)

    def test_set_data_tensorimpl_type(self):
        # Dense tensor has impl of type `TensorImpl`, while sparse tensor has impl
        # of type `SparseTensorImpl`.
        x = torch.randn(1, 2)
        x_s = torch.sparse_coo_tensor(torch.zeros([1, 1]), torch.ones([1]))
        with self.assertRaisesRegex(RuntimeError, 'incompatible tensor type'):
            x.data = x_s

    def test_set_data_preserve_pyobj(self):
        a = torch.randn(1, 2)
        b = torch.randn(1, 2)
        b_id_saved = id(b)
        b.data = a
        self.assertTrue(b_id_saved == id(b))

    @unittest.skipIf(IS_WINDOWS, "Skipping because doesn't work for windows")
    def test_thread_shutdown(self):
        code = """import torch
from torch.autograd import Function

class MyFunction(Function):
    @staticmethod
    def forward(ctx, x):
        return x

    @staticmethod
    def backward(ctx, grad):
        return grad

for shape in [(1,), ()]:
    v = torch.ones(shape, requires_grad=True)
    MyFunction.apply(v).backward()
"""
        s = TestCase.runWithPytorchAPIUsageStderr(code)
        self.assertRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")

    @unittest.skipIf(IS_MACOS, "Fails with SIGBUS on macOS; https://github.com/pytorch/pytorch/issues/25941")
    def test_deep_reentrant(self):
        class DeepReentrant(Function):
            @staticmethod
            def forward(ctx, x):
                with torch.enable_grad():
                    ctx.x = Variable(x.detach(), requires_grad=True)
                    ctx.x = ctx.x - 1
                return ctx.x.detach()

            @staticmethod
            def backward(ctx, x):
                if ctx.x < 0:
                    return x
                with torch.enable_grad():
                    DeepReentrant.apply(ctx.x).sum().backward()
                return x

        # Test stack overflow escape mechanism
        v = torch.tensor(2000.0, requires_grad=True)
        # This will cause stack overflow if reentrant calls are handled
        # in the same thread recursively
        DeepReentrant.apply(v).sum().backward()

        # Test the stack overflow escape mechanism multiple times
        # to ensure reusing workers in the pool works fine
        v2 = torch.tensor(200.0, requires_grad=True)
        DeepReentrant.apply(v2).sum().backward()

    def test_reentrant_priority(self):
        order = []

        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x

            @staticmethod
            def backward(ctx, x):
                order.append("MyFunction")
                return x

        class Reentrant(Function):
            @staticmethod
            def forward(ctx, x):
                with torch.enable_grad():
                    ctx.x = Variable(x.detach(), requires_grad=True)
                    ctx.x = ctx.x - 1
                return ctx.x.detach()

            @staticmethod
            def backward(ctx, x):
                order.append("Reentrant")
                if ctx.x < 0:
                    return x
                with torch.enable_grad():
                    Reentrant.apply(ctx.x).backward()
                return x

        a = MyFunction.apply(torch.tensor(6.0, requires_grad=True))
        b = Reentrant.apply(torch.tensor(9.0, requires_grad=True))
        v = a * b
        v.backward()
        # The tasks for the Reentrant and MyFunction backward() will be added
        # to the queue in the autograd engine at the same time. The backward
        # for Reentrant will be executed first, which will then add other
        # backward tasks to the queue.
        # We want to ensure all the reentrant tasks are prioritized over the
        # MyFunction backward task regardless of their sequence numbers.
        self.assertEqual(len(order), 11)
        self.assertEqual(order.count("Reentrant"), 10)
        self.assertEqual(order[-1], "MyFunction")

    @slowTest
    def test_checkpointing(self):
        num_inp = 2000
        nz_inp = 10
        nz_out = 10
        nz_bottleneck = 1000

        # small proxy network for some complex reasoning we want to do per input
        module = nn.Sequential(
            nn.Linear(nz_inp, nz_bottleneck),
            nn.ReLU(),
            nn.Linear(nz_bottleneck, nz_inp)
        )

        feat_combined = []
        for r in range(num_inp):
            data_r = torch.empty(1, nz_inp)
            data_r.uniform_()
            data_r.requires_grad = True
            feat_r = checkpoint(module, data_r)
            feat_combined.append(feat_r)

        # compute mean as a proxy for some joint reasoning
        mean_combined = torch.stack(feat_combined).mean()
        mean_combined.backward()

    def test_checkpoint_valid_reset_on_error(self):
        a = torch.randn(2, 2, requires_grad=True)

        with self.assertRaisesRegex(Exception, "Checkpointing is not compatible with .grad()"):
            b = checkpoint(torch.exp, a).sum()
            torch.autograd.grad(b, (a,))

        c = checkpoint(torch.exp, a).sum()
        c.backward()

    def test_callback_adds_callback(self):
        called = [0]

        def callback_final():
            called[0] += 1

        def callback_adds_callback():
            called[0] += 1
            Variable._execution_engine.queue_callback(callback_final)

        class MyFunc(Function):
            @staticmethod
            def forward(ctx, input):
                return input

            @staticmethod
            @once_differentiable
            def backward(ctx, grad):
                Variable._execution_engine.queue_callback(callback_adds_callback)
                return grad

        a = torch.rand((3, 3), requires_grad=True)
        b = MyFunc.apply(a)
        b.sum().backward()

        self.assertEqual(called[0], 2)

    def _test_reentrant_with_callbacks(self, install_callbacks_in_depths):
        counter = {}
        counter["inner"] = 0
        counter["outer"] = 0

        def inc_inner_counter():
            counter["inner"] += 1

        def inc_outer_counter():
            counter["outer"] += 1

        class MyFunc(Function):
            @staticmethod
            def forward(ctx, input):
                return input

            @staticmethod
            @once_differentiable
            def backward(ctx, input):
                if 1 in install_callbacks_in_depths:
                    # Add a callback to execute.
                    Variable._execution_engine.queue_callback(inc_inner_counter)
                return input

        class MyReentrantFunc(Function):
            @staticmethod
            def forward(ctx, input):
                return input

            @staticmethod
            @once_differentiable
            def backward(ctx, input):
                if 0 in install_callbacks_in_depths:
                    # Add a callback to execute.
                    Variable._execution_engine.queue_callback(inc_outer_counter)
                # Reentrant backward call.
                tmp_inp = input.detach().requires_grad_()
                with torch.enable_grad():
                    tmp_out = (MyFunc.apply(tmp_inp)).sum()
                tmp_out.backward()
                return input

        t1 = torch.rand((3, 3), requires_grad=True)
        t2 = MyReentrantFunc.apply(t1)
        t3 = t2.sum()
        torch.autograd.backward([t3])

        return counter

    def test_reentrant_with_callbacks_depth_0(self):
        # Verify callback is called only once.
        ret = self._test_reentrant_with_callbacks([0])
        self.assertEqual(1, ret["outer"])
        self.assertEqual(0, ret["inner"])

    def test_reentrant_with_callbacks_depth_1(self):
        # Verify callback is called only once.
        ret = self._test_reentrant_with_callbacks([1])
        self.assertEqual(0, ret["outer"])
        self.assertEqual(1, ret["inner"])

    def test_reentrant_with_callbacks_both_depths(self):
        # Verify callback is called twice.
        ret = self._test_reentrant_with_callbacks([0, 1])
        self.assertEqual(1, ret["outer"])
        self.assertEqual(1, ret["inner"])

    def test_reentrant_with_leaf_variable_hook(self):
        handle = None
        param = torch.rand(10, requires_grad=True)

        def add_gradient_penalty_to_grad(grad):
            handle.remove()
            old_param_grad = grad
            param.grad = None
            # Add some sort of gradient penalty by directly updating the gradients
            with torch.enable_grad():
                g = grad.detach().requires_grad_()
                new_param = param.detach().requires_grad_()
                out = ((g * 2) + new_param).sum()
                out.backward()
            res = g.grad + grad
            param.grad = old_param_grad
            return res

        handle = param.register_hook(add_gradient_penalty_to_grad)
        # Forward pass
        tmp = (param * param)
        loss = tmp.sum()
        # Compute the gradients
        loss.backward()

    def test_reentrant_with_non_leaf_variable_hook(self):
        handle = None
        param = torch.rand(10, requires_grad=True)

        def manual_increase_gradient(grad):
            handle.remove()
            # Add some sort of gradient penalty by directly updating the gradients
            with torch.enable_grad():
                g = grad.detach().requires_grad_()
                out = ((g * 2) + 5).sum()
                out.backward()
            res = g.grad + grad
            return res

        # Forward pass
        tmp = (param * param)
        handle = tmp.register_hook(manual_increase_gradient)
        loss = tmp.sum()
        # Compute the gradients
        loss.backward()
        self.assertEqual(param.grad, 6 * param)

    def test_grad_fn_attr_bindings(self):
        # Check that the getter of each type returns what we want
        # See `gen_autograd_functions.py` for how the getters are generated
        #
        # This test is only meant to check if the codegen'd bindings work
        # Please help update this test if you update the names of any of the fields we check!
        #
        a = torch.ones(1, requires_grad=True)
        b = torch.ones(1, requires_grad=True)
        out = torch.stack([a, b], dim=0)
        self.assertEqual(out.grad_fn._saved_tensors, (a, b))              # TensorList -> Tuple[Tensor]
        self.assertIsInstance(out.grad_fn._saved_tensors[0], torch.Tensor)
        self.assertIsInstance(out.grad_fn._raw_saved_tensors[0], torch._C._autograd.SavedTensor)
        self.assertEqual(out.grad_fn._saved_dim, 0)                       # int64_t -> int
        self.assertIsInstance(out.grad_fn._saved_dim, int)

        out.grad_fn._raw_saved_tensors[0].register_hooks(lambda x: x, lambda x: x)

        out.sum().backward()
        with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
            out.grad_fn._saved_tensors
        with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
            out.grad_fn._raw_saved_tensors
        self.assertEqual(out.grad_fn._saved_dim, 0)

        a = torch.ones(2, 2, requires_grad=True)
        indices = torch.tensor([0, 1])
        out = a[:, indices]
        self.assertEqual(out.grad_fn._saved_indices, (None, indices))    # c10::List<c10::optional<Tensor>> -> Tuple[Tensor?]
        self.assertIsInstance(out.grad_fn._saved_indices[1], torch.Tensor)
        self.assertIsInstance(out.grad_fn._raw_saved_indices[1], torch._C._autograd.SavedTensor)
        self.assertEqual(out.grad_fn._saved_self_sizes, a.shape)          # IntArrayRef -> Tuple[int]
        self.assertIsInstance(out.grad_fn._saved_self_sizes[0], int)

        out.grad_fn._raw_saved_indices[1].register_hooks(lambda x: x, lambda x: x)
        with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
            out.grad_fn._raw_saved_indices[0].register_hooks(lambda x: x, lambda x: x)

        a = torch.ones(2, 2, requires_grad=True)
        out = a * a
        out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
        out.sum().backward()
        with self.assertRaisesRegex(RuntimeError, "after it has been freed"):
            out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)

        a = torch.ones(1, 1, 2, requires_grad=True)
        out = torch.nn.functional.interpolate(a, 4, mode="linear")
        self.assertEqual(out.grad_fn._saved_output_size, (4,))            # c10::optional<IntArrayRef> -> int[]?
        self.assertIsInstance(out.grad_fn._saved_output_size[0], int)
        self.assertEqual(out.grad_fn._saved_align_corners, False)         # bool -> bool
        self.assertIsInstance(out.grad_fn._saved_align_corners, bool)
        self.assertIsNone(out.grad_fn._saved_scale_factors)               # c10::optional<ArrayRef<double>> -> float[]?

        out = torch.nn.functional.interpolate(a, scale_factor=0.5, mode="linear")
        self.assertIsNone(out.grad_fn._saved_output_size)
        self.assertEqual(out.grad_fn._saved_scale_factors, (0.5,))
        self.assertIsInstance(out.grad_fn._saved_scale_factors[0], float)

        a = torch.ones(2, 2, requires_grad=True)
        out = torch.pdist(a, p=1)
        self.assertEqual(out.grad_fn._saved_p, 1.)                        # double -> float
        self.assertIsInstance(out.grad_fn._saved_p, float)

        a = torch.ones(1, 1, 2, requires_grad=True)
        out = torch.logit(a, 1.)
        self.assertEqual(out.grad_fn._saved_eps, 1.)                      # c10::optional<double> -> float?
        self.assertIsInstance(out.grad_fn._saved_eps, float)
        out = torch.logit(a)
        self.assertIsNone(out.grad_fn._saved_eps)

        if torch._C.has_lapack:
            a = torch.ones(1, 1, requires_grad=True)
            q, r = torch.linalg.qr(a, mode="reduced")
            self.assertEqual(q.grad_fn._saved_mode, "reduced")            # std::string -> str

        a = torch.tensor([1.], requires_grad=True)
        out = torch.div(a, 2., rounding_mode="trunc")
        self.assertEqual(out.grad_fn._saved_rounding_mode, "trunc")       # c10::optional<std::string> -> str?
        out = torch.div(a, 2., rounding_mode=None)
        self.assertIsNone(out.grad_fn._saved_rounding_mode)               # c10::optional<std::string> -> str?

        x = torch.zeros(5, requires_grad=True)
        out = torch.threshold(x, threshold=(1 + 0j), value=(1 + 0j))
        self.assertIsInstance(out.grad_fn._saved_threshold, complex)      # Scalar(complex double) -> complex
        cfloat = torch.tensor(1 + 0j, dtype=torch.complex64)
        out = torch.threshold(x, threshold=cfloat, value=(1 + 0j))
        self.assertIsInstance(out.grad_fn._saved_threshold, complex)      # Scalar(complex float) -> complex
        out = torch.threshold(x, threshold=1., value=1.)
        self.assertIsInstance(out.grad_fn._saved_threshold, float)        # Scalar(floating point) -> float
        out = torch.threshold(x, threshold=1, value=1)
        self.assertIsInstance(out.grad_fn._saved_threshold, int)          # Scalar(integral) -> int
        out = torch.threshold(x, threshold=False, value=False)
        self.assertIsInstance(out.grad_fn._saved_threshold, bool)         # Scalar(bool) -> bool

        a = torch.ones(2, 2, requires_grad=True)
        out = a.as_strided((3,), (1,), 1)
        self.assertEqual(out.grad_fn._saved_storage_offset, 1)            # c10::optional<int64_t> -> int?
        self.assertIsInstance(out.grad_fn._saved_storage_offset, int)
        out = a.as_strided((3,), (1,))
        self.assertIsNone(out.grad_fn._saved_storage_offset)

        a = torch.ones(2, requires_grad=True)
        out = torch.tanh(a)
        self.assertEqual(out, out.grad_fn._saved_result)                  # saved variable when output

        a = torch.randn(3, 5, requires_grad=True)
        b = torch.tensor([1, 0, 4])
        loss = nn.NLLLoss()
        out = loss(a, b)
        self.assertIsNone(out.grad_fn._saved_weight)
        loss = nn.NLLLoss(weight=torch.ones((5,)))
        out = loss(a, b)
        self.assertEqual(out.grad_fn._saved_weight, torch.ones((5,)))     # c10::optional<Tensor> -> Tensor?

        out.sum().backward()
        with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
            out.grad_fn._saved_weight

    def test_cant_create_saved_tensors(self):
        with self.assertRaisesRegex(RuntimeError, "Trying to create a SavedTensor object from Python is forbidden"):
            torch.autograd.SavedTensor()

    def test_custom_function_saved_tensors(self):
        def getFn(save=True):
            class MyFn(Function):
                @staticmethod
                def forward(ctx, x):
                    if save:
                        ctx.save_for_backward(x, None)
                    return x

                @staticmethod
                def backward(ctx, g):
                    return g
            return MyFn

        a = torch.randn(5, requires_grad=True)

        y = getFn(True).apply(a)
        self.assertEqual((a, None), y.grad_fn.saved_tensors)
        saved = y.grad_fn._raw_saved_tensors
        self.assertIsInstance(saved[0], torch._C._autograd.SavedTensor)
        # We can't tell the underlying tensor is None without unpacking it
        self.assertIsInstance(saved[1], torch._C._autograd.SavedTensor)
        # We catch that error when the user calls register_hooks on it
        with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
            saved[1].register_hooks(lambda x: x, lambda x: x)

        with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
            saved[0].register_hooks(lambda x: x)
        with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
            saved[0].register_hooks(1, 1)
        saved[0].register_hooks(lambda x: x, lambda x: x)
        with self.assertRaisesRegex(RuntimeError, "already been set"):
            saved[0].register_hooks(lambda x: x, lambda x: x)
        y.sum().backward()

        # Using a reference to the SavedTensor object after the
        # saved variables have been released can lead to undefined behavior
        del saved
        with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
            y.grad_fn._raw_saved_tensors
        with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
            y.grad_fn.saved_tensors

        y = getFn(False).apply(a)
        self.assertEqual(y.grad_fn.saved_tensors, ())
        self.assertEqual(y.grad_fn._raw_saved_tensors, ())
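
    # --- Illustrative sketch, not part of the original suite: the `_raw_saved_*`
    # register_hooks calls exercised above are the per-SavedTensor form of the
    # public pack/unpack mechanism. Assuming torch.autograd.graph.saved_tensors_hooks
    # is available (PyTorch >= 1.10), the same pack/unpack pair can be installed
    # for a whole forward region:
    def _sketch_public_saved_tensors_hooks(self):
        from torch.autograd.graph import saved_tensors_hooks

        def pack(x):
            # Could offload to CPU, compress, etc.; here we keep the tensor as-is
            return x

        def unpack(x):
            return x

        a = torch.randn(5, requires_grad=True)
        with saved_tensors_hooks(pack, unpack):
            y = a * a  # `a` is saved for backward through pack()
        y.sum().backward()
        self.assertEqual(a.grad, 2 * a)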

    def test_autograd_views_codegen(self):
        # This is not necessarily the absolute correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these tests will be BC-breaking and should be done carefully.

        # This test checks the behavior of two codegen functions (view_as and unbind)
        # with respect to view tracking and inplace operations on the output.
        def run_test(grad_mode, requires_grad, is_view, should_raise_tuple):
            def maybe_check_raise(fn, should_raise):
                self.assertTrue(should_raise is None or isinstance(should_raise, str))
                if should_raise is not None:
                    with self.assertRaisesRegex(RuntimeError, should_raise):
                        fn()
                else:
                    fn()

            inp = torch.rand(2, requires_grad=requires_grad).clone()
            with torch.set_grad_enabled(grad_mode):
                out = inp.view_as(inp)
            # Are they differentiable views?
            self.assertTrue(out._is_view() == is_view)
            # Are inplace operations allowed?
            maybe_check_raise(lambda: out.add_(1), should_raise_tuple[0])

            inp = torch.rand(2, requires_grad=requires_grad).clone()
            with torch.set_grad_enabled(grad_mode):
                out = inp.unbind()
            # Are they differentiable views?
            self.assertTrue(out[0]._is_view() == is_view)
            self.assertTrue(out[1]._is_view() == is_view)
            # Are inplace operations allowed?
            maybe_check_raise(lambda: out[0].add_(1), should_raise_tuple[1])
            maybe_check_raise(lambda: out[1].add_(1), should_raise_tuple[2])

        # should_raise contains None if it should not raise
        # should_raise contains a string of the error if it should raise
        # The 3 elements are for view_as, first output of unbind and second output of unbind
        run_test(grad_mode=True, requires_grad=False, is_view=True,
                 should_raise_tuple=(None, None, None))
        inp_change_err = "Output {} of UnbindBackward0 is a view and is being modified inplace."
        run_test(grad_mode=True, requires_grad=True, is_view=True,
                 should_raise_tuple=(None, inp_change_err.format("0"), inp_change_err.format("1")))
        leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
        run_test(grad_mode=False, requires_grad=True, is_view=True,
                 should_raise_tuple=(leaf_grad_err, leaf_grad_err, leaf_grad_err))
        run_test(grad_mode=False, requires_grad=False, is_view=True,
                 should_raise_tuple=(None, None, None))

    def test_inplace_not_requires_grad(self):
        class MyFn(torch.autograd.Function):
            @staticmethod
            def forward(ctx, inp):
                return inp.view_as(inp)

            @staticmethod
            def backward(ctx, grad):
                return grad

        # Original Tensor does not require grad
        a = torch.rand(1, 2)

        # Tensor being written does require grad
        b = torch.rand(1, requires_grad=True)

        # Take an invalid view on 'a' that should raise an error (warns during deprecation)
        view_a = MyFn.apply(a)

        with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
            view_a += b

        # Extra test for copy_ that is a manual implementation and could be easily
        # forgotten when the codegen is updated (warns during deprecation)
        a = torch.rand(1, 2)
        b = torch.rand(1, requires_grad=True)
        view_a = MyFn.apply(a)

        with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
            view_a.copy_(b)

        # Functions that should throw must properly throw
        a = torch.rand(1, 2)
        b = torch.rand(1, requires_grad=True)
        view_a = a.unbind()[0]
        with self.assertRaisesRegex(RuntimeError, "This view is the output of a function that returns "
                                                  "multiple views."):
            view_a.copy_(b)

        # Sanity check that views that should work still work
        a = torch.rand(1, 2)
        b = torch.rand(1, requires_grad=True)
        a.select(1, 0).copy_(b)

    def _do_test_autograd_simple_views_python(self, dtype):
        # This is not necessarily the absolute correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these tests will be BC-breaking and should be done carefully.

        # This checks the autograd.Function behavior when we return one or multiple outputs
        # while one of these is an input, a view of an input or of a temporary tensor.
        # This indicator is used to track how many times the backward function was called
        bw_called = [0]
        # This indicator is used to check if the argument `ga` contains non-zero values
        ga_nz = [False]

        class IdOneOutput(Function):
            @staticmethod
            def forward(ctx, a, b, make_view):
                if make_view:
                    a = a.narrow(0, 0, 2)
                else:
                    a = a.clone()
                return a

            @staticmethod
            def backward(ctx, ga):
                bw_called[0] += 1
                return ga, None, None

        class IdTwoOutput(Function):
            @staticmethod
            def forward(ctx, a, b, make_view):
                if make_view:
                    a = a.narrow(0, 0, 2)
                else:
                    a = a.clone()
                return a, a + b

            @staticmethod
            def backward(ctx, ga, gab):
                bw_called[0] += 1
                if ga.eq(0).all():
                    ga_nz[0] = False
                else:
                    ga_nz[0] = True
                return ga + gab, gab, None

        class ViewOfTemp(Function):
            @staticmethod
            def forward(ctx, a, make_view):
                ctx.save_for_backward(a)
                if make_view:
                    a = a.narrow(0, 0, 2)
                else:
                    a = a.clone()
                b = a.clone()
                return b.select(0, 0)

            @staticmethod
            def backward(ctx, grad):
                bw_called[0] += 1
                a, = ctx.saved_tensors
                res = torch.zeros_like(a)
                res.select(0, 0).copy_(grad)
                return res, None

        fn_id_to_inplace_on_view_err_msg = {
            "one_output": ("Output 0 of IdOneOutputBackward is a view and is being "
                           "modified inplace. This view was created inside a custom Function"),
            "two_output": ("Output 0 of IdTwoOutputBackward is a view and is being modified inplace."
                           " This view is the output of a function that returns multiple views."),
            "view_of_temp": ("Output 0 of ViewOfTempBackward is a view and is being "
                             "modified inplace. This view was created inside a custom Function")
        }

        for fn_id in ["one_output", "two_output", "view_of_temp"]:
            for inplace in [True, False]:
                for make_view in [True, False]:
                    # Used for special casing the tests below
                    output_is_a_view = (make_view or fn_id == "view_of_temp")

                    def fn(a, b):
                        # never modify a, b inplace for gradcheck
                        a = a.clone()
                        b = b.clone()
                        if fn_id == "two_output":
                            tmp1, tmp2 = IdTwoOutput.apply(a, b, make_view)
                            if inplace:
                                tmp1 += 3
                                tmp2 += 3
                            else:
                                tmp1 = tmp1 + 3
                                tmp2 = tmp2 + 3
                            tmp = tmp1 * tmp2
                        else:
                            if fn_id == "one_output":
                                tmp = IdOneOutput.apply(a, b, make_view)
                            else:
                                tmp = ViewOfTemp.apply(a + b, make_view)
                            if inplace:
                                tmp += 3
                            else:
                                tmp = tmp + 3

                        return tmp.sum()

                    a = torch.ones(2, dtype=dtype, requires_grad=True)
                    b = torch.ones(2, dtype=dtype, requires_grad=True)

                    err_msg = fn_id_to_inplace_on_view_err_msg[fn_id]

                    if not inplace or not output_is_a_view:
                        gradcheck(fn, (a, b), check_batched_grad=False)

                    # Was the custom backward called properly
                    bw_called[0] = 0
                    ga_nz[0] = True  # For the case where the backward is called

                    if inplace and output_is_a_view:
                        with self.assertRaisesRegex(RuntimeError, err_msg):
                            fn(a, b)
                    else:
                        fn(a, b).backward()

                    expected_called = 1
                    expected_ga_nz = True

                    if output_is_a_view and inplace:
                        expected_called = 0

                    self.assertTrue(bw_called[0] == expected_called)
                    self.assertTrue(ga_nz[0] == expected_ga_nz)

    def test_autograd_simple_views_python(self):
        self._do_test_autograd_simple_views_python(torch.double)
        self._do_test_autograd_simple_views_python(torch.cdouble)

    def test_autograd_inplace_views_creation_meta(self):
        # Tests that creation_meta is properly handled for inplace views

        class Func(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x):
                return x.view_as(x)

            @staticmethod
            def backward(ctx, x):
                return x
        view_custom = Func.apply

        def run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2):
            # This test checks the behavior of inplace-view functions when
            # the views are created in grad mode or not
            base = torch.rand(2, 3, requires_grad=requires_grad).clone()
            # 1. Create a view with `grad_mode=grad_mode_view`
            with torch.set_grad_enabled(grad_mode_view):
                if fn_type == "multi_view":
                    inp = base.unbind()[0]
                elif fn_type == "custom":
                    inp = view_custom(base)
                else:
                    inp = base.view_as(base)

            # 2. Perform an inplace view with `grad_mode=grad_mode_iview`
            with torch.set_grad_enabled(grad_mode_iview):
                if error1 is not None:
                    with self.assertRaisesRegex(RuntimeError, error1):
                        fn(inp)
                    return
                else:
                    # If error is None, check that it runs without error
                    fn(inp)

            # 3. Do an inplace operation on the (new) view
            if error2 is not None:
                with self.assertRaisesRegex(RuntimeError, error2):
                    inp.add_(1)
            else:
                # If error is None, check that it runs without error
                inp.add_(1)

        no_grad_err = "A view was created in no_grad mode"
        multi_view_err = "function that returns multiple views"
        custom_err = "view was created inside a custom Function"

        def run_tests(fn):
            for fn_type in ("normal", "multi_view", "custom"):
                for grad_mode_view in (True, False):
                    for grad_mode_iview in (True, False):
                        for requires_grad in (True, False):
                            error1 = None  # expected error when we do inplace_view on the original view
                            error2 = None  # expected error when we do inplace on the resulting view

                            if requires_grad:
                                if not grad_mode_view and grad_mode_iview:
                                    error1 = no_grad_err
                                if not grad_mode_view and not grad_mode_iview:
                                    error2 = no_grad_err

                                if fn_type == "multi_view":
                                    if grad_mode_view and grad_mode_iview:
                                        error1 = multi_view_err
                                    if grad_mode_view and not grad_mode_iview:
                                        error2 = multi_view_err

                                if fn_type == "custom":
                                    if grad_mode_view and grad_mode_iview:
                                        error1 = custom_err
                                    if grad_mode_view and not grad_mode_iview:
                                        error2 = custom_err

                            run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2)

        # This list was created by logging gen_inplace_or_view_type.py
        # detach_ is excluded for this test because it cannot be applied to
        # views and thus does not return a view
        run_tests(lambda v: v.as_strided_((1, 0), (2, 2)))
        run_tests(lambda v: v.transpose_(0, 0))
        run_tests(lambda v: v.t_())
        run_tests(lambda v: v.squeeze_(0))
        run_tests(lambda v: v.unsqueeze_(0))
        run_tests(lambda v: v.swapdims_(0, 0))
        run_tests(lambda v: v.swapaxes_(0, 0))

    # TODO: this is not the correct behavior -
    # See https://github.com/pytorch/pytorch/issues/49825#issuecomment-794466627
    def test_autograd_inplace_views_cross_dtype(self):
        # This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
        a = a_orig.clone()
        b = torch.view_as_real(a)
        b = b.transpose(0, 1)
        b += 1
        b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
        non_inplace_grad = a_orig.grad

        a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
        a = a_orig.clone()
        b = torch.view_as_real(a)
        b.transpose_(0, 1)
        b += 1
        b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
        inplace_grad = a_orig.grad

        # TODO: this is a bug!
        # once this is fixed, it should have the transpose removed:
        # self.assertEqual(non_inplace_grad, inplace_grad)
        self.assertEqual(non_inplace_grad.T, inplace_grad)

    def test_autograd_multiple_views_python(self):
        # This is not necessarily the absolute correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these tests will be BC-breaking and should be done carefully.
        # This checks that multiple views in the forward are properly traced and how they
        # behave with respect to inplace operations.

        # This indicator is used to track how many times the backward function was called
        bw_called = [0]

        class ComplexView(Function):
            @staticmethod
            def forward(ctx, a, idx):
                res = a.narrow(0, idx, 1)
                res = a.select(0, idx)
                ctx.save_for_backward(a)
                ctx.idx = idx
                return res

            @staticmethod
            def backward(ctx, grad):
                bw_called[0] += 1
                a, = ctx.saved_tensors
                res = torch.zeros_like(a)
                res.select(0, ctx.idx).copy_(grad)
                return res, None

        a = torch.ones(2, requires_grad=True)
        idx = 1

        bw_called[0] = 0
        out = ComplexView.apply(a.clone(), idx)
        out.sum().backward()
        self.assertTrue(bw_called[0] == 1)

        out = ComplexView.apply(a.clone(), idx)
        with self.assertRaisesRegex(RuntimeError,
                                    "Output 0 of ComplexViewBackward is a view and is being modified inplace"):
            out += 1

    def test_autograd_python_custom_function_inplace(self):
        # This is not necessarily the absolute correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these tests will be BC-breaking and should be done carefully.

        # This test checks custom autograd.Functions that perform inplace operations
        bw_called = [0]

        # I) Single output
        class MyAdder(Function):
            @staticmethod
            def forward(ctx, a, b):
                a.add_(b)
                ctx.mark_dirty(a)
                return a

            @staticmethod
            def backward(ctx, grad):
                bw_called[0] += 1
                return grad, grad

        a = torch.ones(2, requires_grad=True)
        b = torch.ones(2, requires_grad=True)

        # No extra inplace
        c = MyAdder.apply(a.clone(), b)
        c.sum().backward()
        self.assertTrue(bw_called[0] == 1)

        # With extra inplace on the output
        bw_called[0] = 0
        c = MyAdder.apply(a.clone(), b)
        c += 2
        c.sum().backward()
        self.assertTrue(bw_called[0] == 1)

        # The input is a view
        bw_called[0] = 0
        c = MyAdder.apply(a.clone().view_as(a), b)
        c.sum().backward()
        self.assertTrue(bw_called[0] == 1)

        # Should not give non-inputs to mark_dirty
        class MyAdderBad(Function):
            @staticmethod
            def forward(ctx, a, b):
                c = 3 * a
                c.add_(b)
                ctx.mark_dirty(c)
                return c

            @staticmethod
            def backward(ctx, grad):
                bw_called[0] += 1
                grad = 3 * grad
                return grad, grad

        a = torch.ones(2, requires_grad=True)
        b = torch.ones(2, requires_grad=True)

        with warnings.catch_warnings(record=True) as w:
            MyAdderBad.apply(a.clone(), b)
        self.assertEqual(len(w), 1)

        # II) Multiple outputs
        class MyBadAdder(Function):
            @staticmethod
            def forward(ctx, a, b):
                a.add_(b)
                ctx.mark_dirty(a)
                return a, a + b

            @staticmethod
            def backward(ctx, ga, gab):
                bw_called[0] += 1
                return ga + gab, ga + gab

        # No extra inplace
        bw_called[0] = 0
        c, d = MyBadAdder.apply(a.clone(), b)
        (c * d).sum().backward()
        self.assertTrue(bw_called[0] == 1)

        # With extra inplace on the output
        bw_called[0] = 0
        c, d = MyBadAdder.apply(a.clone(), b)
        c += 2
        (c * d).sum().backward()
        self.assertTrue(bw_called[0] == 1)

        # The input is a view
        inplace_on_view_err = "your Function modifies inplace an input that is a view of another Tensor"
        with self.assertRaisesRegex(RuntimeError, inplace_on_view_err):
            c, d = MyBadAdder.apply(a.clone().view_as(a), b)

        # III) Inplace + other op
        class MyOutPlaceAdder(Function):
            @staticmethod
            def forward(ctx, a, b):
                a.add_(b)
                ctx.mark_dirty(a)
                return a.clone(), a + b

            @staticmethod
            def backward(ctx, ga, gab):
                bw_called[0] += 1
                return ga + gab, ga + 2 * gab

        # We don't reuse the input
        def fn(a, b):
            orig_a = a.clone().view_as(a)
            c, d = MyOutPlaceAdder.apply(orig_a, b)
            return (c * d).sum()
        bad_mark_dirty_err = "Some elements marked as dirty during the forward method were not returned as output."
        with self.assertRaisesRegex(RuntimeError, bad_mark_dirty_err):
            fn(a, b)

    def test_named_tensor_for_complex_views(self):
        names = ["batch", "height", "width", "complex"]
        z = torch.ones((5, 12, 14, 2), requires_grad=True)
        z_named = z.refine_names(*names)
        z_complex = torch.view_as_complex(z_named.rename(None)).refine_names(*names[:-1])
        z_complex.sum().backward()
        self.assertEqual(z.grad, torch.view_as_real(torch.ones_like(z_complex).rename(None)))

    def test_custom_function_return_view_in_nograd(self):
        class Alias(Function):
            @staticmethod
            def forward(ctx, x):
                return x[:]

            @staticmethod
            def backward(ctx, gx):
                return gx

        inp = torch.rand(2, requires_grad=True)

        with torch.no_grad():
            output = Alias.apply(inp)

        with torch.no_grad():
            expected_output = inp[:]

        # Calling the custom function should operate as if we called an equivalent op
        self.assertEqual(output.requires_grad, expected_output.requires_grad)

        # Check that in-place modification on the view throws
        leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
        with self.assertRaisesRegex(RuntimeError, leaf_grad_err):
            output.zero_()

    def test_grad_mode_restored_reentrant(self):
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, inp):
                return inp.clone()

            @staticmethod
            def backward(ctx, go):
                original = torch._C.is_grad_enabled()
                with torch.enable_grad():
                    self.assertTrue(torch._C.is_grad_enabled())
                    foo = torch.rand(go.size(), requires_grad=True)
                    grad, = torch.autograd.grad(
                        foo ** 3, foo, grad_outputs=go
                    )
                    self.assertTrue(torch._C.is_grad_enabled())
                self.assertTrue(torch._C.is_grad_enabled() == original)
                return grad

        inp = torch.rand(3, requires_grad=True)

        # Case where original == False
        MyFunction.apply(inp).sum().backward()
        # Case where original == True
        MyFunction.apply(inp).sum().backward(create_graph=True)

    def test_power_function(self):
        a = torch.tensor([0., 0., 0.])
        b = torch.tensor([-1., 0., 1.], requires_grad=True)
        c = torch.sum(a**b)
        c.backward()
        self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))

        s = 0
        b = torch.tensor([-1., 0., 1.], requires_grad=True)
        c = torch.sum(s**b)
        c.backward()
        self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))

    def test_nansum_with_nans(self):
        a = torch.randn(2, 2, 2, 2, dtype=torch.double)
        with torch.no_grad():
            a[a < 0.2] = float('nan')
        a.requires_grad = True

        # No args
        gradcheck(lambda x: x.nansum(), a)
        gradgradcheck(lambda x: x.nansum(), a)

        # Single dim
        gradcheck(lambda x: x.nansum((0)), a)
        gradgradcheck(lambda x: x.nansum((0)), a)

        # Multi dim
        gradcheck(lambda x: x.nansum((0, 2)), a)
        gradgradcheck(lambda x: x.nansum((0, 2)), a)

        gradcheck(lambda x: x.nansum((0, -1)), a)
        gradgradcheck(lambda x: x.nansum((0, -1)), a)

        # With keep-dim
        gradcheck(lambda x: x.nansum((0, -1), True), a)
        gradgradcheck(lambda x: x.nansum((0, -1), True), a)

    def test_nansum_dtype(self):
        inp = torch.randn(2, 2, 2, 2)
        with torch.no_grad():
            inp[inp < 0.2] = float('nan')

        def test(inp, inp_dtype, out_dtype):
            with torch.no_grad():
                a = inp.to(inp_dtype)
            a.requires_grad = True
            b = torch.sum(a, dtype=out_dtype)
            b.backward()
            self.assertEqual(a.dtype, a.grad.dtype)

        test(inp, torch.float, torch.double)
        test(inp, torch.double, torch.float)

    def test_nan_to_num(self):
        a = torch.randn(3, 3, 3, 3, dtype=torch.double)
        with torch.no_grad():
            a[torch.rand_like(a) < 0.2] = float('nan')
            a[torch.rand_like(a) < 0.2] = float('inf')
            a[torch.rand_like(a) < 0.2] = -float('inf')
        a.requires_grad = True

        gradcheck(lambda x: x.nan_to_num(), a)
        gradgradcheck(lambda x: x.nan_to_num(), a)

        gradcheck(lambda x: x.nan_to_num(nan=1.2), a)
        gradgradcheck(lambda x: x.nan_to_num(nan=1.2), a)

        gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a)
        gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a)

        gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a)
        gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a)

        gradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a)
        gradgradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a)

        gradcheck(lambda x: x.nan_to_num(neginf=-2.0), a)
        gradgradcheck(lambda x: x.nan_to_num(neginf=-2.0), a)

    def test_custom_function_error(self):
        class BadFw(Function):
            @staticmethod
            def backward(ctx, foo):
                return foo

        class BadBw(Function):
            @staticmethod
            def forward(ctx, foo):
                return foo.clone()

        class BadBw2(Function):
            @staticmethod
            def forward(ctx, foo):
                return foo.clone()

            @staticmethod
            def backward(ctx, foo):
                return foo

            @staticmethod
            def vjp(ctx, foo):
                return foo

        class BadJvp(Function):
            @staticmethod
            def forward(ctx, foo):
                return foo.clone()

        inp = torch.rand(1, requires_grad=True)
        with self.assertRaisesRegex(NotImplementedError, "must implement the forward"):
            BadFw.apply(inp)

        with self.assertRaisesRegex(RuntimeError, "must implement either the backward"):
            BadBw.apply(inp).sum().backward()

        with self.assertRaisesRegex(RuntimeError, "Implementing both 'backward' and 'vjp'"):
            BadBw2.apply(inp).sum().backward()

        with self.assertRaisesRegex(RuntimeError, "must implement the jvp function"):
            with fwAD.dual_level():
                d = fwAD.make_dual(inp, torch.rand_like(inp))
                res = BadJvp.apply(d)

    def test_custom_function_forward_mode_view_checks(self):
        flag_to_error = {
            "ok": None,
            "not_a_view": "jvp is not returning a view",
            "not_a_view_of_inp": "jvp is not returning a view of the given",
            "not_a_view_of_inp_base": "jvp is not returning a view of the same base",
        }

        class ViewFn(Function):
            @staticmethod
            def forward(ctx, foo, flag):
                ctx.flag = flag
                ctx.size = foo.size()
                return foo.narrow(0, 0, 2)

            @staticmethod
            def vjp(ctx, gO):
                gI = gO.new_zeros(ctx.size)
                gI.narrow(0, 0, 2).copy_(gO)
                return gI, None

            @staticmethod
            def jvp(ctx, gI, _):
                res = gI.narrow(0, 0, 2)
                if ctx.flag != "ok":
                    # Break the view in the gradients!
                    res = res.clone()
                if ctx.flag in ["not_a_view_of_inp", "not_a_view_of_inp_base"]:
                    # Result should be a view, just of the wrong thing
                    res = res.view_as(res)
                return res

        inp = torch.rand(4, 4, dtype=torch.double, requires_grad=True)

        for flag, msg in flag_to_error.items():
            def test_fn(inp):
                if flag == "not_a_view_of_inp_base":
                    inp = inp.view_as(inp)
                return ViewFn.apply(inp, flag)

            if msg is None:
                gradcheck(test_fn, inp, check_forward_ad=True)
            else:
                with self.assertRaisesRegex(RuntimeError, msg):
                    gradcheck(test_fn, inp, check_forward_ad=True)

    def test_custom_function_forward_mode_inplace_checks(self):
        class InplaceFn(Function):
            @staticmethod
            def forward(ctx, foo, flag):
                ctx.mark_dirty(foo)
                ctx.flag = flag
                foo.mul_(2)
                return foo

            @staticmethod
            def vjp(ctx, gO):
                return 2 * gO, None

            @staticmethod
            def jvp(ctx, gI, _):
                if ctx.flag:
                    # Don't do the change inplace
                    return 2 * gI
                else:
                    gI.mul_(2)
                    return gI

        inp = torch.rand(4, 4, dtype=torch.double, requires_grad=True)

        def test_fn(inp, flag):
            inp = inp.clone()
            return InplaceFn.apply(inp, flag)

        gradcheck(test_fn, (inp, False), check_forward_ad=True)

        with self.assertRaisesRegex(RuntimeError,
                                    "inplace custom Function is not modifying the forward mode gradients inplace"):
            gradcheck(test_fn, (inp, True), check_forward_ad=True)

    def test_custom_function_forward_mode_wrong_formula(self):
        class UserFn(Function):
            @staticmethod
            def forward(ctx, foo, should_fail):
                ctx.should_fail = should_fail
                return foo * 2

            @staticmethod
            def vjp(ctx, gO):
                return 2 * gO, None

            @staticmethod
            def jvp(ctx, gI, _):
                if ctx.should_fail:
                    # Wrong gradient formula
                    return 3 * gI
                else:
                    return 2 * gI

        inp = torch.rand(10, dtype=torch.double, requires_grad=True)
        gradcheck(UserFn.apply, (inp, False), check_forward_ad=True)

        with self.assertRaisesRegex(RuntimeError,
                                    "Jacobian computed with forward mode mismatch for output 0"):
            gradcheck(UserFn.apply, (inp, True), check_forward_ad=True)

    def test_custom_function_local_inplace(self):
        class MyFn(torch.autograd.Function):
            @staticmethod
            def forward(ctx, inp, inplace):
                view = inp.clone()[:3]
                if inplace:
                    view += 2
                return view

            @staticmethod
            def backward(ctx, grad):
                return grad, None

        base = torch.rand(10, requires_grad=True)

        foo = MyFn.apply(base, False)
        self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")

        foo = MyFn.apply(base, True)
        self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
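
    # --- Illustrative sketch, not part of the original suite: a minimal custom
    # Function carrying both a vjp (backward mode) and a jvp (forward mode) formula
    # of the kind the tests above try to break. For f(x) = x**2, the vjp is
    # gO * 2x and the jvp is 2x * gI; gradcheck can validate both paths.
    def _sketch_custom_function_vjp_and_jvp(self):
        class Square(Function):
            @staticmethod
            def forward(ctx, x):
                # plain attribute: a detached copy is visible from both vjp and jvp
                ctx.x = x.detach()
                return x ** 2

            @staticmethod
            def vjp(ctx, gO):
                return gO * 2 * ctx.x

            @staticmethod
            def jvp(ctx, gI):
                return 2 * ctx.x * gI

        inp = torch.rand(4, dtype=torch.double, requires_grad=True)
        gradcheck(Square.apply, (inp,), check_forward_ad=True)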
torch.unique(inp, sorted=sort, return_inverse=return_inverse, return_counts=return_counts) assert_only_first_requires_grad(res) res = torch.unique(inp, sorted=sort, return_inverse=return_inverse, return_counts=return_counts, dim=0) assert_only_first_requires_grad(res) res = torch.unique_consecutive(inp, return_inverse=return_inverse, return_counts=return_counts) assert_only_first_requires_grad(res) res = torch.unique_consecutive(inp, return_inverse=return_inverse, return_counts=return_counts, dim=0) assert_only_first_requires_grad(res) # Here we test the internal functions to make sure all of them are # covered on top of the public API res = torch._unique(inp, sorted=sort, return_inverse=return_inverse) assert_only_first_requires_grad(res) # This looks public but is actually manually deleted from the # torch namespace in torch/functional.py res = torch._VF.unique_dim(inp, dim=0, sorted=sort, return_inverse=return_inverse, return_counts=return_counts) assert_only_first_requires_grad(res) # We don't test `unique_dim_consecutive` here. # It looks public but the python binding is actually manually disabled in # tools/autograd/gen_python_functions.py res = torch._unique2(inp, sorted=sort, return_inverse=return_inverse, return_counts=return_counts) assert_only_first_requires_grad(res) def test_custom_function_cycle(self): class MyFn(Function): @staticmethod def forward(ctx, x, metadata): x = x.clone() ctx.meta = metadata ctx.save_for_backward(x) return x @staticmethod def backward(ctx, gO): x, = ctx.saved_tensors self.assertEqual(x, 3.14) self.assertEqual(ctx.meta["foo"], 3.14) return gO * x, None def get_refs(with_backward): a = torch.tensor(3.14, requires_grad=True) metadata = {} out = MyFn.apply(a, metadata) metadata["foo"] = out if with_backward: out.sum().backward() self.assertEqual(a.grad, a) return torch._C._WeakTensorRef(out) with disable_gc(): ref = get_refs(False) self.assertFalse(ref.expired()) gc.collect() self.assertTrue(ref.expired()) # The backward clears the saved_variables but not the __dict__ with disable_gc(): ref = get_refs(True) self.assertFalse(ref.expired()) gc.collect() self.assertTrue(ref.expired()) def test_input_buffer_accum(self): leaf = torch.rand(2, 2, requires_grad=True) # An op that returns sparse gradients ind = torch.tensor([[0, 0]], dtype=torch.long) out2 = leaf.gather(0, ind, sparse_grad=True) # An op that returns the gradients as-is out1 = leaf.clone() grad_out1_original = torch.rand_like(out1) grad_out1 = grad_out1_original.clone() grad_out2 = torch.rand_like(out2) torch.autograd.backward((out1, out2), (grad_out1, grad_out2)) # Given gradients should not be modified inplace self.assertEqual(grad_out1, grad_out1_original) def test_no_unnecessary_unwrapping(self): a = torch.randn(5, requires_grad=True) a_orig = a.detach().clone() b = a * a c = a * b d = torch.exp(a) # a is leaf self.assertIs(b.grad_fn._saved_self, a) self.assertIs(b.grad_fn._saved_other, a) self.assertIs(c.grad_fn._saved_self, a) # b is not an output self.assertIs(c.grad_fn._saved_other, b) # d is an output self.assertEqual(d.grad_fn._saved_result, d) self.assertIsNot(d.grad_fn._saved_result, d) c.sum().backward() with self.assertRaisesRegex(RuntimeError, "after they have already been freed"): c.grad_fn._saved_self # a is left untouched self.assertEqual(a, a_orig) def test_saved_variable_version_counter(self): a = torch.rand(2, requires_grad=True) b = torch.exp(a) b_unpacked = b.grad_fn._saved_result self.assertEqual(b, b_unpacked) self.assertEqual(b._version, b_unpacked._version) with 
torch.no_grad(): b += 1 self.assertEqual(b, b_unpacked) self.assertEqual(b._version, b_unpacked._version) def test_saved_variable_packing_unpacking_saved_original_with_hooks(self): # Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks # The saved_original / did_not_save_original distinction corresponds to the `save_original` # attribute of `SavedVariable`. def test(get_input, is_leaf): a = get_input() grad_fn = a.grad_fn y = a * a y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x / 2) self.assertEqual(a, y.grad_fn._saved_self) if not is_leaf: self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn) y.sum().backward() else: y.sum().backward() self.assertEqual(2 * a, a.grad) a = get_input() grad_fn = a.grad_fn y = a * a y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x) self.assertEqual(2 * a, y.grad_fn._saved_self) if not is_leaf: self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn) y.sum().backward() else: y.sum().backward() self.assertEqual(3 * a, a.grad) # double backward a = get_input() grad_fn = a.grad_fn y = a ** 3 y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x) s = torch.sum(y) g, = torch.autograd.grad(s, (a, ), create_graph=True) if not is_leaf: self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn) g.sum().backward() else: g.sum().backward() self.assertEqual(6 * a, a.grad) a = get_input() y = a * a y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: 1) with self.assertRaisesRegex(TypeError, "Output of saved tensor unpack_hook expected to be a Tensor"): print(y.grad_fn._saved_self) a = get_input() y = a * a with self.assertRaisesRegex(TypeError, "missing 1 required positional argument"): y.grad_fn._raw_saved_self.register_hooks(lambda x, b: x, lambda x: x) a = get_input() y = a * a with self.assertRaisesRegex(TypeError, "missing 1 required positional argument"): y.grad_fn._raw_saved_self.register_hooks(lambda x, b: (x, b), lambda x: x) def inplace_double(x): x *= 2 return x a = get_input() t = a * a with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."): t.grad_fn._raw_saved_self.register_hooks(inplace_double, lambda x: x / 2) # leaf test(lambda: torch.randn(5, requires_grad=True), True) # not leaf, not output test(lambda: (1 + torch.randn(5, requires_grad=True)), False) def test_saved_variable_packing_unpacking_did_not_save_original_with_hooks(self): # Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks # The saved_original / did_not_save_original distinction corresponds to the `save_original` # attribute of `SavedVariable`. a = torch.randn(5, requires_grad=True) y = torch.exp(a) y.grad_fn._raw_saved_result.register_hooks(lambda x: x, lambda x: x) self.assertEqual(y, y.grad_fn._saved_result) self.assertIs(y.grad_fn, y.grad_fn._saved_result.grad_fn) y.sum().backward() self.assertEqual(a.grad, y) def test_saved_variable_packing_unpacking_saved_original_with_default_hooks(self): # Tests that default hooks are properly registered, used and reset # The saved_original / did_not_save_original distinction corresponds to the `save_original` # attribute of `SavedVariable`. 
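        # (Roughly: `save_original=True` means the input tensor itself was stored in the
        # SavedVariable, while `save_original=False` means the op stored its own output,
        # as `exp` does via `_saved_result` in the tests further down.)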
        # See also:
        # - test_saved_variable_packing_unpacking_saved_original_with_hooks
        def pack(x):
            warnings.warn("pack")
            return x

        with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
            a = torch.ones(5, requires_grad=True)

            warnings.simplefilter('always')
            with warnings.catch_warnings(record=True) as w:
                y = a * a
                # two warnings should be raised, because `a` is saved twice
                # (once as self and once as other)
                self.assertEqual(len(w), 2)

        with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
            a = torch.randn(5, requires_grad=True)
            y = a * a
            self.assertEqual(a, y.grad_fn._saved_self)
            self.assertEqual(a, y.grad_fn._saved_other)
            y.sum().backward()
            self.assertEqual(2 * a, a.grad)

        with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x / 2):
            a = torch.randn(5, requires_grad=True)
            y = a * a
            self.assertEqual(a, y.grad_fn._saved_self)
            self.assertEqual(a, y.grad_fn._saved_other)
            y.sum().backward()
            self.assertEqual(2 * a, a.grad)

        with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
            a = torch.randn(5, requires_grad=True)
            y = a * a
            self.assertEqual(2 * a, y.grad_fn._saved_self)
            self.assertEqual(2 * a, y.grad_fn._saved_other)
            y.sum().backward()
            self.assertEqual(4 * a, a.grad)

        # the default hooks were properly reset when their context manager exited
        a = torch.randn(5, requires_grad=True)
        y = a * a
        self.assertEqual(a, y.grad_fn._saved_self)
        self.assertEqual(a, y.grad_fn._saved_other)
        y.sum().backward()
        self.assertEqual(2 * a, a.grad)

    def test_saved_variable_packing_unpacking_did_not_save_original_with_default_hooks(self):
        # See also test_saved_variable_packing_unpacking_did_not_save_original_with_hooks
        with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
            a = torch.randn(5, requires_grad=True)
            y = torch.exp(a)
            self.assertEqual(y, y.grad_fn._saved_result)
            y.sum().backward()
            self.assertEqual(a.grad, y)

    def test_setting_default_saved_variable_hooks_twice_should_fail(self):
        with self.assertRaisesRegex(RuntimeError, "Setting default hooks but they have already been set. 
"): with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x): with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x): pass def test_pack_hook_with_inplace_modification_should_fail(self): a = torch.randn(5, requires_grad=True) def inc(x): x += 1 return x with torch.autograd.graph.saved_tensors_hooks(inc, lambda x: x): with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."): y = torch.exp(a) y = torch.exp(a) with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."): y.grad_fn._raw_saved_result.register_hooks(inc, lambda x: x) def test_saving_variable_to_disk(self): with tempfile.TemporaryDirectory() as tmp_dir: def pack(x): name = os.path.join(tmp_dir, str(uuid.uuid4())) torch.save(x, name) return name def unpack(name): return torch.load(name) with torch.autograd.graph.saved_tensors_hooks(pack, unpack): a = torch.ones(5, requires_grad=True) y = a * a self.assertEqual(a, y.grad_fn._saved_self) y.sum().backward() self.assertEqual(2 * a, a.grad) def test_default_saved_variable_hooks_double_backward(self): with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x): a = torch.randn(5, requires_grad=True) y = a ** 3 s = torch.sum(y) g, = torch.autograd.grad(s, (a, ), create_graph=True) g.sum().backward() self.assertEqual(6 * a, a.grad) with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x): a = torch.randn(5, requires_grad=True) y = a ** 3 s = torch.sum(y) g, = torch.autograd.grad(s, (a, ), create_graph=True) g.sum().backward() # factor 2 because only a is saved once self.assertEqual(6 * 2 * a, a.grad) a = torch.randn(5, requires_grad=True) y = a ** 3 s = torch.sum(y) with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x): g, = torch.autograd.grad(s, (a, ), create_graph=True) g.sum().backward() # factor 4 because pow_backward is grad * (exp * self.pow(exp - 1)) # so grad is saved and self (i.e. 
a) is saved self.assertEqual(6 * 4 * a, a.grad) with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x): a = torch.randn(5, requires_grad=True) y = a ** 3 s = torch.sum(y) g, = torch.autograd.grad(s, (a, ), create_graph=True) g.sum().backward() # combining the two above blocks: 2 * 4 = 8 # note that in that sense, a is saved twice self.assertEqual(6 * 8 * a, a.grad) def test_graph_save_on_cpu(self): def test(get_input, cuda, pin_memory): with torch.autograd.graph.save_on_cpu(pin_memory): a = get_input() if cuda: a.cuda() y = a * a self.assertEqual(a, y.grad_fn._saved_self) self.assertEqual(a, y.grad_fn._saved_other) self.assertEqual(a.dtype, y.grad_fn._saved_self.dtype) self.assertEqual(a.layout, y.grad_fn._saved_self.layout) if y.is_sparse: y = y.to_dense() y.sum().backward() self.assertEqual(2 * a, a.grad) for cuda in [False] + ([True] if torch.cuda.is_available() else []): for pin_memory in [True, False]: # FloatTensor test(lambda: torch.randn(5, requires_grad=True), cuda, pin_memory) # DoubleTensor test(lambda: torch.randn(5, requires_grad=True, dtype=torch.double), cuda, pin_memory) # Sparse tensor x = torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.]), requires_grad=True) test(lambda: x, cuda, pin_memory) @unittest.skipIf(not TEST_CUDA, "test requires CUDA") def test_graph_save_on_cpu_cuda(self): def f(x): a = x + 1 return a * a # with grad a = torch.ones(1, requires_grad=True, device="cuda") y = f(a) memory_with_grad = torch.cuda.memory_allocated() del a del y # without grad a = torch.ones(1, requires_grad=True, device="cuda") with torch.no_grad(): y = f(a) memory_without_grad = torch.cuda.memory_allocated() self.assertGreater(memory_with_grad, memory_without_grad) del a del y # with hooks with torch.autograd.graph.save_on_cpu(): a = torch.ones(1, requires_grad=True, device="cuda") y = f(a) memory_with_hooks = torch.cuda.memory_allocated() self.assertEqual(memory_with_hooks, memory_without_grad) def index_perm_variable(shape, max_indices): if not isinstance(shape, tuple): shape = (shape,) index = torch.randperm(max_indices).narrow(0, 0, reduce(mul, shape)).view(shape) return index def bernoulli_scalar(): return torch.tensor(0, dtype=torch.uint8).bernoulli_() class TestAutogradFunctional(TestCase): def _assert_same_struct(self, res, base): # base and res should be Tensors or tuple of Tensors with the same size if isinstance(base, torch.Tensor): self.assertTrue(isinstance(res, torch.Tensor)) self.assertEqual(base.size(), res.size()) elif isinstance(base, tuple): self.assertTrue(isinstance(res, tuple)) self.assertEqual(len(base), len(res)) for el_base, el_res in zip(base, res): self.assertTrue(isinstance(el_base, torch.Tensor)) self.assertTrue(isinstance(el_res, torch.Tensor)) self.assertEqual(el_base.size(), el_res.size()) else: # Wrong base raise RuntimeError("The base given to `_assert_same_struct` doesn't have" " the right structure.") def _assert_interleaved_struct(self, res, base1, base2): # base1 and base2 can be Tensors or tuples of Tensors. # If they are tuples, res should be a tuple as well. 
        # The indexing works as follows for base1, base2 being
        # - tuple, tuple: res[i][j][k][l] = (base1[i][k], base2[j][l])
        # - tuple, Tensor: res[i][k][l] = (base1[i][k], base2[l])
        # - Tensor, tuple: res[i][j][l] = (base1[i], base2[j][l])
        # - Tensor, Tensor: res[k][l] = (base1[k], base2[l])
        if isinstance(base1, torch.Tensor) and isinstance(base2, torch.Tensor):
            self.assertTrue(isinstance(res, torch.Tensor))
            self.assertEqual(res.size(), base1.size() + base2.size())
        elif isinstance(base1, tuple) and isinstance(base2, torch.Tensor):
            self.assertTrue(isinstance(res, tuple))
            self.assertEqual(len(res), len(base1))
            for el_res, el_base1 in zip(res, base1):
                self.assertTrue(isinstance(el_res, torch.Tensor))
                self.assertTrue(isinstance(el_base1, torch.Tensor))
                self.assertEqual(el_res.size(), el_base1.size() + base2.size())
        elif isinstance(base1, torch.Tensor) and isinstance(base2, tuple):
            self.assertTrue(isinstance(res, tuple))
            self.assertEqual(len(res), len(base2))
            for el_res, el_base2 in zip(res, base2):
                self.assertTrue(isinstance(el_res, torch.Tensor))
                self.assertTrue(isinstance(el_base2, torch.Tensor))
                self.assertEqual(el_res.size(), base1.size() + el_base2.size())
        elif isinstance(base1, tuple) and isinstance(base2, tuple):
            self.assertTrue(isinstance(res, tuple))
            self.assertEqual(len(res), len(base1))
            for el_res, el_base1 in zip(res, base1):
                self.assertTrue(isinstance(el_res, tuple))
                # each inner tuple must have one entry per element of base2
                self.assertEqual(len(el_res), len(base2))
                for el_el_res, el_base2 in zip(el_res, base2):
                    self.assertTrue(isinstance(el_el_res, torch.Tensor))
                    self.assertTrue(isinstance(el_base2, torch.Tensor))
                    self.assertEqual(el_el_res.size(), el_base1.size() + el_base2.size())
        else:
            # Wrong bases
            raise RuntimeError("The bases given to `_assert_interleaved_struct` don't have"
                               " the right structure.")

    def test_vjp_err_check(self):
        def foo(a):
            return 3 * a.narrow(0, 0, 3)

        def bar(a):
            return 3 * a.narrow(0, 0, 3), "bar"

        inp = torch.rand(4)
        v = torch.ones(3)
        with self.assertRaisesRegex(TypeError, "The inputs given to vjp must be either a Tensor"):
            res = autogradF.vjp(foo, (inp, 2), v)

        with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vjp must"):
            res = autogradF.vjp(bar, inp, v)

        with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the user-provided function returns"):
            res = autogradF.vjp(foo, inp)

        with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
            res = autogradF.vjp(foo, inp, (torch.ones_like(inp), torch.ones_like(inp)))

        with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
            res = autogradF.vjp(foo, inp, v[:2])

        res = autogradF.vjp(foo, inp, v)[1]
        self._assert_same_struct(res, inp)

    def test_vjp_err_check_strict(self):
        def foo(a):
            return a.detach()

        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone()

        inp = torch.rand(4)
        v = torch.rand(4)
        with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
            res = autogradF.vjp(foo, inp, v, strict=True)
        res = autogradF.vjp(foo, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)

        with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
            res = autogradF.vjp(bar, inp, v, strict=True)
        res = autogradF.vjp(bar, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)
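        # strict=True also interacts with create_graph=True: if the computed vjp does not
        # itself depend on the input (constant Jacobian, e.g. the identity below), an
        # error is raised instead of silently returning a non-differentiable result.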
# The Jacobian does not depend on the input def foo(a): return a.clone() inp.requires_grad_() with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."): res = autogradF.vjp(foo, inp, v, create_graph=True, strict=True) res = autogradF.vjp(foo, inp, v, create_graph=True, strict=False) self._assert_same_struct(res[1], inp) self.assertEqual(res[1], v) def test_vjp_no_grad(self): def reducer(x): return x.sum(dim=1) inputs = torch.rand(4, 4) v = torch.ones(4) with torch.no_grad(): res = autogradF.vjp(reducer, inputs, v) self.assertIsNone(res[0].grad_fn) self.assertIsNone(res[1].grad_fn) self.assertNotEqual(res[1], torch.zeros(4, 4)) inputs.requires_grad_() v.requires_grad_() with torch.no_grad(): res = autogradF.vjp(reducer, inputs, v, create_graph=True) self.assertIsNotNone(res[0].grad_fn) self.assertIsNotNone(res[1].grad_fn) self.assertNotEqual(res[1], torch.zeros(4, 4)) def test_vjp_output(self): def reducer(x): return x.sum(dim=1) inputs = torch.rand(4, 4) v = torch.ones(4) res = autogradF.vjp(reducer, inputs, v) self._assert_same_struct(res[1], inputs) self.assertIsNone(res[0].grad_fn) self.assertIsNone(res[1].grad_fn) def adder(x, y): return 2 * x + 3 * y inputs = (torch.rand(2), torch.rand(2)) v = torch.ones(2) out, vjp_val = autogradF.vjp(adder, inputs, v) self._assert_same_struct(vjp_val, inputs) self.assertIsNone(out.grad_fn) self.assertIsNone(vjp_val[0].grad_fn) self.assertIsNone(vjp_val[1].grad_fn) def adder(x, y): return 2 * x + 3 * y, x + y inputs = (torch.rand(2), torch.rand(2)) v = (torch.tensor([1., 0.]), torch.tensor([1., 0.])) out, vjp_val = autogradF.vjp(adder, inputs, v) self._assert_same_struct(vjp_val, inputs) self.assertIsNone(out[0].grad_fn) self.assertIsNone(out[1].grad_fn) self.assertIsNone(vjp_val[0].grad_fn) self.assertIsNone(vjp_val[1].grad_fn) def test_vjp_scalar(self): def reducer(x): return x.sum() inputs = torch.rand(4, 4) v = torch.ones([]) res = autogradF.vjp(reducer, inputs, v) self._assert_same_struct(res[0], v) self._assert_same_struct(res[1], inputs) res = autogradF.vjp(reducer, inputs) self._assert_same_struct(res[0], v) self._assert_same_struct(res[1], inputs) def expander(x): return x.unsqueeze(0).repeat(4) inputs = torch.rand([]) v = torch.ones(4) res = autogradF.vjp(expander, inputs, v) self._assert_same_struct(res[0], v) self._assert_same_struct(res[1], inputs) def test_vjp_create_graph(self): def reducer(x): return x.sum(dim=1) inputs = torch.rand(2, 2, dtype=torch.double) v = torch.ones(2, dtype=torch.double) inputs.requires_grad_() v.requires_grad_() res = autogradF.vjp(reducer, inputs, v, create_graph=True) self._assert_same_struct(res[1], inputs) self.assertIsNotNone(res[0].grad_fn) self.assertIsNotNone(res[1].grad_fn) gradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v)) gradgradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v)) def adder(x, y): return 2 * x + 3 * y, x * y inputs = (torch.rand(2, dtype=torch.double, requires_grad=True), torch.rand(2, dtype=torch.double, requires_grad=True)) v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True), torch.tensor([1., 0.], dtype=torch.double, requires_grad=True)) gradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v) gradgradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v) def foo(*args): x, y = args[:2] v = args[2:] x = x.cos() val, grad = autogradF.vjp(adder, (x, y), 
v, create_graph=True) return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp() gradcheck(foo, inputs + v) gradgradcheck(foo, inputs + v) def test_jvp_err_check(self): def foo(a): return 3 * a.narrow(0, 0, 3) def bar(a): return 3 * a.narrow(0, 0, 3), "bar" inp = torch.rand(4) v = torch.rand(4) with self.assertRaisesRegex(TypeError, "The inputs given to jvp must be either a Tensor"): res = autogradF.jvp(foo, (inp, 2), v) with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jvp must"): res = autogradF.jvp(bar, inp, v) with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the input to the user-provided function"): res = autogradF.jvp(foo, inp) with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."): res = autogradF.jvp(foo, inp, (v, v)) with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"): res = autogradF.jvp(foo, inp, v[:2]) res = autogradF.jvp(foo, inp, v)[1] self._assert_same_struct(res, foo(inp)) def test_jvp_err_check_strict(self): def foo(a): return a.detach() def bar(a): # Make a non-leaf Tensor that requires_grad but that is not connected to the input return a.long().float().requires_grad_().clone() inp = torch.rand(4) v = torch.rand(4) with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."): res = autogradF.jvp(foo, inp, v, strict=True) res = autogradF.jvp(foo, inp, v, strict=False) self._assert_same_struct(res[1], res[0]) self.assertEqual(res[1].abs().sum(), 0.) with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"): res = autogradF.jvp(bar, inp, v, strict=True) res = autogradF.jvp(bar, inp, v, strict=False) self._assert_same_struct(res[1], res[0]) self.assertEqual(res[1].abs().sum(), 0.) 
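        # Same strict/create_graph interaction as in test_vjp_err_check_strict above,
        # this time for the forward-mode jvp.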
# The Jacobian does not depend on the input def foo(a): return a.clone() inp.requires_grad_() with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."): res = autogradF.jvp(foo, inp, v, create_graph=True, strict=True) res = autogradF.jvp(foo, inp, v, create_graph=True, strict=False) self._assert_same_struct(res[1], inp) self.assertEqual(res[1], v) def test_jvp_no_grad(self): def reducer(x): return x.sum(dim=1) inputs = torch.rand(4, 4) v = torch.ones(4, 4) with torch.no_grad(): res = autogradF.jvp(reducer, inputs, v) self.assertIsNone(res[0].grad_fn) self.assertIsNone(res[1].grad_fn) self.assertNotEqual(res[1], torch.zeros(4, 4)) inputs.requires_grad_() v.requires_grad_() with torch.no_grad(): res = autogradF.jvp(reducer, inputs, v, create_graph=True) self.assertIsNotNone(res[0].grad_fn) self.assertIsNotNone(res[1].grad_fn) self.assertNotEqual(res[1], torch.zeros(4, 4)) def test_jvp_output(self): def reducer(x): return x.sum(dim=1) inputs = torch.rand(4, 4) v = torch.ones(4, 4) res = autogradF.jvp(reducer, inputs, v) self._assert_same_struct(res[1], res[0]) self.assertIsNone(res[0].grad_fn) self.assertIsNone(res[1].grad_fn) def adder(x, y): return 2 * x + 3 * y inputs = (torch.rand(2), torch.rand(2)) v = (torch.ones(2), torch.ones(2)) out, jvp_val = autogradF.jvp(adder, inputs, v) self._assert_same_struct(jvp_val, out) self.assertIsNone(out.grad_fn) self.assertIsNone(jvp_val[0].grad_fn) self.assertIsNone(jvp_val[1].grad_fn) def adder(x, y): return 2 * x + 3 * y, x + y inputs = (torch.rand(2), torch.rand(2)) v = (torch.tensor([1., 0.]), torch.tensor([1., 0.])) out, jvp_val = autogradF.jvp(adder, inputs, v) self._assert_same_struct(jvp_val, out) self.assertIsNone(out[0].grad_fn) self.assertIsNone(out[1].grad_fn) self.assertIsNone(jvp_val[0].grad_fn) self.assertIsNone(jvp_val[1].grad_fn) def test_jvp_scalar(self): def reducer(x): return x.sum() inputs = torch.rand(4, 4) v = torch.ones(4, 4) res = autogradF.jvp(reducer, inputs, v) self._assert_same_struct(res[0], torch.zeros([])) self._assert_same_struct(res[1], res[0]) def expander(x): return x.unsqueeze(0).repeat(4) inputs = torch.rand([]) v = torch.ones([]) res = autogradF.jvp(expander, inputs, v) self._assert_same_struct(res[0], torch.zeros(4)) self._assert_same_struct(res[1], res[0]) res = autogradF.jvp(expander, inputs) self._assert_same_struct(res[0], torch.zeros(4)) self._assert_same_struct(res[1], res[0]) def test_jvp_create_graph(self): def reducer(x): return x.sum(dim=1) inputs = torch.rand(2, 2, dtype=torch.double) v = torch.ones(2, 2, dtype=torch.double) inputs.requires_grad_() v.requires_grad_() res = autogradF.jvp(reducer, inputs, v, create_graph=True) self._assert_same_struct(res[1], res[0]) self.assertIsNotNone(res[0].grad_fn) self.assertIsNotNone(res[1].grad_fn) gradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v)) gradgradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v)) def adder(x, y): return 2 * x + 3 * y, x * y inputs = (torch.rand(2, dtype=torch.double, requires_grad=True), torch.rand(2, dtype=torch.double, requires_grad=True)) v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True), torch.tensor([1., 0.], dtype=torch.double, requires_grad=True)) gradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v) gradgradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v) def foo(*args): x, y = args[:2] v = 
args[2:] x = x.cos() val, grad = autogradF.jvp(adder, (x, y), v, create_graph=True) return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp() gradcheck(foo, inputs + v) gradgradcheck(foo, inputs + v) def _test_construct_standard_basis_for(self, inputs): numels = tuple(tensor.numel() for tensor in inputs) results = autogradF._construct_standard_basis_for(inputs, numels) for result, inp in zip(results, inputs): self.assertEqual(result.dtype, inp.dtype) self.assertEqual(result.device, inp.device) results = torch.cat([result.to(device='cpu', dtype=torch.float) for result in results], dim=1) expected = torch.eye(results[0].shape[0], dtype=torch.float) self.assertEqual(results, expected) def test_construct_standard_basis_for(self): test_cases = [ (torch.randn(2, 3),), (torch.randn(1),), (torch.randn([]),), (torch.randn(1), torch.randn([]), torch.randn([])), (torch.randn(2), torch.randn(3), torch.randn([])), (torch.randn(2), torch.randn([]), torch.randn(3)), (torch.randn(2, 3), torch.randn(3), torch.randn(3, 4, 2)), (torch.randn(2, dtype=torch.float64), torch.randn(3, dtype=torch.float32)), ] for inputs in test_cases: self._test_construct_standard_basis_for(inputs) @unittest.skipIf(not TEST_CUDA, "test requires CUDA") def test_construct_standard_basis_for_cuda(self): test_cases = [ (torch.randn(2), torch.randn(3, device='cuda')), (torch.randn(3, device='cuda'), torch.randn(2)), ] for inputs in test_cases: self._test_construct_standard_basis_for(inputs) def _test_vectorize_raises_no_warnings(self, api): # vmap is an experimental prototype. When someone calls torch.vmap, # it raises a python warning. This test checks that # autogradF.{jacobian, hessian} don't raise that experimental prototype # warning; it is not nice for a public-facing API to raise a warning # no matter how it is called. 
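        # (When vectorize=True, these APIs push the basis vectors through torch.vmap
        # internally; that internal call is what would emit the prototype warning.)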
def foo(a): return (a ** 2).sum() x = torch.randn(3) with warnings.catch_warnings(record=True) as wa: result = api(foo, x, vectorize=True) self.assertEqual(len(wa), 0) def test_jacobian_vectorize_raises_no_warnings(self): return self._test_vectorize_raises_no_warnings(autogradF.jacobian) def test_hessian_vectorize_raises_no_warnings(self): return self._test_vectorize_raises_no_warnings(autogradF.hessian) def _test_jacobian_err_check(self, vectorize): def foo(a): return 3 * a.narrow(0, 0, 3) def bar(a): return 3 * a.narrow(0, 0, 3), "bar" inp = torch.rand(4) with self.assertRaisesRegex(TypeError, "The inputs given to jacobian must be either a Tensor"): res = autogradF.jacobian(foo, (inp, 2), vectorize=vectorize) with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jacobian must"): res = autogradF.jacobian(bar, inp, vectorize=vectorize) res = autogradF.jacobian(foo, inp, vectorize=vectorize) self._assert_interleaved_struct(res, foo(inp), inp) def foo(a, b): return b, 3 * a.narrow(0, 0, 3) inp = (torch.rand(4), torch.rand(5)) res = autogradF.jacobian(foo, inp, vectorize=vectorize) self._assert_interleaved_struct(res, foo(*inp), inp) def test_jacobian_err_check(self): return self._test_jacobian_err_check(vectorize=False) def test_jacobian_err_check_vectorize(self): return self._test_jacobian_err_check(vectorize=True) def test_jacobian_err_check_strict(self): def foo(a): return a.detach() def bar(a): # Make a non-leaf Tensor that requires_grad but that is not connected to the input return a.long().float().requires_grad_().clone() inp = torch.rand(4) with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."): res = autogradF.jacobian(foo, inp, strict=True) res = autogradF.jacobian(foo, inp, strict=False) self._assert_interleaved_struct(res, foo(inp), inp) self.assertEqual(res.abs().sum(), 0.) with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function is independent of input 0."): res = autogradF.jacobian(bar, inp, strict=True) res = autogradF.jacobian(bar, inp, strict=False) self._assert_interleaved_struct(res, foo(inp), inp) self.assertEqual(res.abs().sum(), 0.) 
# The Jacobian does not depend on the input def foo(a): return a.clone() inp.requires_grad_() with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."): res = autogradF.jacobian(foo, inp, create_graph=True, strict=True) res = autogradF.jacobian(foo, inp, create_graph=True, strict=False) self._assert_interleaved_struct(res, inp, inp) self.assertEqual(res, torch.eye(4)) def test_jacobian_err_check_strict_vectorize(self): def foo(x): return x inp = torch.rand(4) with self.assertRaisesRegex(RuntimeError, "not supported together"): res = autogradF.jacobian(foo, inp, strict=True, vectorize=True) def test_jacobian_no_grad(self): def exp_reducer(x): return x.exp().sum(dim=1) inputs = torch.rand(4, 4) with torch.no_grad(): res = autogradF.jacobian(exp_reducer, inputs) self.assertIsNone(res.grad_fn) self.assertNotEqual(res, torch.zeros(4, 4)) with torch.no_grad(): res = autogradF.jacobian(exp_reducer, inputs, create_graph=True) self.assertIsNotNone(res.grad_fn) self.assertNotEqual(res, torch.zeros(4, 4)) def _test_jacobian_output(self, vectorize): def exp_reducer(x): return x.exp().sum(dim=1) inputs = torch.rand(4, 4) res = autogradF.jacobian(exp_reducer, inputs, vectorize=vectorize) self._assert_interleaved_struct(res, exp_reducer(inputs), inputs) self.assertIsNone(res.grad_fn) def identity(x): return x.clone() inputs = torch.rand(4) res = autogradF.jacobian(identity, inputs, vectorize=vectorize) self._assert_interleaved_struct(res, identity(inputs), inputs) self.assertIsNone(res.grad_fn) self.assertEqual(res, torch.eye(4)) def add_exp_reducer(x, y): return (x + y.exp()).sum(dim=1) inputs = (torch.rand(4, 4), torch.rand(4, 4)) res = autogradF.jacobian(add_exp_reducer, inputs, vectorize=vectorize) self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs) self.assertIsNone(res[0].grad_fn) self.assertIsNone(res[1].grad_fn) def test_jacobian_output(self): self._test_jacobian_output(vectorize=False) def test_jacobian_output_vectorize(self): self._test_jacobian_output(vectorize=True) def _test_jacobian_scalar(self, vectorize): def reducer(x): return x.sum() inputs = torch.rand(4, 4) res = autogradF.jacobian(reducer, inputs, vectorize=vectorize) self._assert_same_struct(res, inputs) def expander(x): return x.unsqueeze(0).repeat(4) inputs = torch.rand([]) res = autogradF.jacobian(expander, inputs, vectorize=vectorize) self._assert_same_struct(res, torch.zeros(4)) def test_jacobian_scalar(self): self._test_jacobian_scalar(vectorize=False) def test_jacobian_scalar_vectorize(self): self._test_jacobian_scalar(vectorize=True) def _test_jacobian_create_graph(self, vectorize): def exp_reducer(x): return x.exp().sum(dim=1) inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True) res = autogradF.jacobian(exp_reducer, inputs, create_graph=True, vectorize=vectorize) self._assert_interleaved_struct(res, exp_reducer(inputs), inputs) self.assertIsNotNone(res.grad_fn) gradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs) gradgradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs) def add_exp_reducer(x, y): return (x + y).exp().sum(dim=1) inputs = (torch.rand(4, 4, dtype=torch.double, requires_grad=True), torch.rand(4, 4, dtype=torch.double, requires_grad=True)) res = autogradF.jacobian(add_exp_reducer, inputs, create_graph=True, vectorize=vectorize) self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs) 
self.assertIsNotNone(res[0].grad_fn) self.assertIsNotNone(res[1].grad_fn) gradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs) gradgradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs) def foo(x, y): x = x.cos() val, jac = autogradF.jacobian(add_exp_reducer, (x, y), create_graph=True, vectorize=vectorize) res = val[0].exp().sum() + val[1].exp().sum() + jac[0].exp().sum() res = res + jac[1].exp().sum() + x.exp().sum() + y.exp().sum() return res gradcheck(foo, inputs) gradgradcheck(foo, inputs) def test_jacobian_create_graph(self): self._test_jacobian_create_graph(vectorize=False) def test_jacobian_create_graph_vectorize(self): self._test_jacobian_create_graph(vectorize=True) def _check_jacobian_vectorize_correctness(self, f, inputs): expected = autogradF.jacobian(f, inputs, vectorize=False) result = autogradF.jacobian(f, inputs, vectorize=True) self.assertEqual(result, expected) def test_jacobian_vectorize_correctness_simple(self): def f(x): return 3 * x ** 2 x = torch.randn(2, 3, 5) self._check_jacobian_vectorize_correctness(f, x) def test_jacobian_vectorize_correctness_multi_input(self): def f(x, y): return (x.cos() * x) @ y.sin() x = torch.randn(2, 3) y = torch.randn(3, 5) self._check_jacobian_vectorize_correctness(f, (x, y)) def test_jacobian_vectorize_correctness_multi_input_multi_output(self): def f(x, y): return (x * x) @ y, x @ (x.sum(1) * y), y.sum() x = torch.randn(5, 3) y = torch.randn(3, 5) self._check_jacobian_vectorize_correctness(f, (x, y)) def test_jacobian_vectorize_correctness_unrelated_outputs(self): def f(x, y): return x, y, x, y x = torch.randn(2) y = torch.randn(3) self._check_jacobian_vectorize_correctness(f, (x, y)) def test_jacobian_vectorize_correctness_zero_dim(self): # zero-dim output def f(x, y): return x.sum(), y.sum(), x * y x = torch.randn(3) y = torch.randn(3) self._check_jacobian_vectorize_correctness(f, (x, y)) # zero-dim input def g(x): return torch.stack([x, x, x]) x = torch.randn([]) self._check_jacobian_vectorize_correctness(g, x) # Mixed zero-dim input / zero-dim output def h(x, y): return y.sum(), x * y x = torch.randn([]) y = torch.randn(1) self._check_jacobian_vectorize_correctness(h, (x, y)) @unittest.skipIf(not TEST_CUDA, "test requires CUDA") def test_jacobian_vectorize_correctness_different_devices(self): def f(x, y): return x * y, (x * y).cuda() x = torch.randn(3) y = torch.randn(3) self._check_jacobian_vectorize_correctness(f, (x, y)) def test_jacobian_vectorize_correctness_different_dtype(self): def f(x, y): return (x * y).float(), (x * y).double() x = torch.randn(3) y = torch.randn(3) self._check_jacobian_vectorize_correctness(f, (x, y)) def _check_hessian_vectorize_correctness(self, f, inputs): expected = autogradF.hessian(f, inputs, vectorize=False) result = autogradF.hessian(f, inputs, vectorize=True) self.assertEqual(result, expected) def test_hessian_vectorize_correctness_simple(self): def f(x): return (3 * x ** 2).sum() x = torch.randn(2, 3, 5) self._check_hessian_vectorize_correctness(f, x) def test_hessian_vectorize_correctness_multi_input(self): def f(x, y, z): return ((x.relu() * x) @ y.sin() @ z).sum() x = torch.randn(2, 3) y = torch.randn(3, 5) z = torch.randn(5, 5) self._check_hessian_vectorize_correctness(f, (x, y, z)) def test_hessian_vectorize_correctness_unrelated_outputs(self): # output unrelated to one input def f(x, y): return (x ** 2).sum() x = torch.randn(2) y = torch.randn(3) 
self._check_hessian_vectorize_correctness(f, (x, y)) # output unrelated to all inputs def f(x, y): return torch.randn([]) x = torch.randn(2) y = torch.randn(3) self._check_hessian_vectorize_correctness(f, (x, y)) def _test_hessian_err_check(self, vectorize): def foo(a): return 3 * a.narrow(0, 0, 3).exp().sum() def bar(a): return 3 * a.narrow(0, 0, 3), "bar" def bar2(a): return 3 * a.narrow(0, 0, 3) def bar3(a): return 3 * a.narrow(0, 0, 3), 3 * a.narrow(0, 0, 3) inp = torch.rand(4) with self.assertRaisesRegex(TypeError, "The inputs given to hessian must be either a Tensor"): res = autogradF.hessian(foo, (inp, 2), vectorize=vectorize) with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hessian must"): res = autogradF.hessian(bar, inp, vectorize=vectorize) err_msg_out = "The Tensor returned by the function given to hessian should contain a single element" with self.assertRaisesRegex(RuntimeError, err_msg_out): res = autogradF.hessian(bar2, inp, vectorize=vectorize) with self.assertRaisesRegex(RuntimeError, "The function given to hessian should return a single Tensor"): res = autogradF.hessian(bar3, inp, vectorize=vectorize) res = autogradF.hessian(foo, inp, vectorize=vectorize) self._assert_interleaved_struct(res, inp, inp) def foo(a, b): return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum() inp = (torch.rand(4), torch.rand(5)) res = autogradF.hessian(foo, inp, vectorize=vectorize) self._assert_interleaved_struct(res, inp, inp) def test_hessian_err_check(self): self._test_hessian_err_check(vectorize=False) def test_hessian_err_check_vectorize(self): self._test_hessian_err_check(vectorize=True) def test_hessian_err_check_strict(self): def foo(a): return a.detach().sum() def bar(a): # Make a non-leaf Tensor that requires_grad but that is not connected to the input return a.long().float().requires_grad_().clone().sum() def bar2(a): # A Linear function for which the jacobian is independent of the input return (3 * a).sum() inp = torch.rand(4) with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."): res = autogradF.hessian(foo, inp, strict=True) res = autogradF.hessian(foo, inp, strict=False) self._assert_interleaved_struct(res, inp, inp) self.assertEqual(res.abs().sum(), 0.) with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0"): res = autogradF.hessian(bar, inp, strict=True) res = autogradF.hessian(bar, inp, strict=False) self._assert_interleaved_struct(res, inp, inp) self.assertEqual(res.abs().sum(), 0.) with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"): res = autogradF.hessian(bar2, inp, strict=True) res = autogradF.hessian(bar2, inp, strict=False) self._assert_interleaved_struct(res, inp, inp) self.assertEqual(res.abs().sum(), 0.) 
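    # As with jacobian, vectorize=True cannot be combined with strict=True.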
def test_hessian_err_check_strict_vectorize(self): def foo(x): return (x ** 3).sum() inp = torch.rand(4) with self.assertRaisesRegex(RuntimeError, "not supported together"): res = autogradF.hessian(foo, inp, strict=True, vectorize=True) def test_hessian_no_grad(self): def pow_reducer(x): return x.pow(3).sum() inputs = torch.rand(2, 2) with torch.no_grad(): res = autogradF.hessian(pow_reducer, inputs) self.assertIsNone(res[0][0].grad_fn) self.assertIsNone(res[0][1].grad_fn) self.assertIsNone(res[1][0].grad_fn) self.assertIsNone(res[1][1].grad_fn) self.assertNotEqual(res, torch.zeros(2, 2, 2)) with torch.no_grad(): res = autogradF.hessian(pow_reducer, inputs, create_graph=True) self.assertIsNotNone(res[0][0].grad_fn) self.assertIsNotNone(res[0][1].grad_fn) self.assertIsNotNone(res[1][0].grad_fn) self.assertIsNotNone(res[1][1].grad_fn) self.assertNotEqual(res, torch.zeros(2, 2, 2)) def _test_hessian_output(self, vectorize): def pow_reducer(x): return x.pow(3).sum() inputs = torch.rand(2, 2) res = autogradF.hessian(pow_reducer, inputs, vectorize=vectorize) self._assert_interleaved_struct(res, inputs, inputs) self.assertIsNone(res.grad_fn) def add_pow_reducer(x, y): return (x + y).pow(3).sum() inputs = (torch.rand(2, 2), torch.rand(2, 2)) res = autogradF.hessian(add_pow_reducer, inputs, vectorize=vectorize) self._assert_interleaved_struct(res, inputs, inputs) self.assertIsNone(res[0][0].grad_fn) self.assertIsNone(res[0][1].grad_fn) self.assertIsNone(res[1][0].grad_fn) self.assertIsNone(res[1][1].grad_fn) def test_hessian_output(self): self._test_hessian_output(vectorize=False) def test_hessian_output_vectorize(self): self._test_hessian_output(vectorize=True) def _test_hessian_scalar(self, vectorize): def reducer(x): return x.sum() inputs = torch.rand(4, 4) res = autogradF.hessian(reducer, inputs, vectorize=vectorize) self._assert_interleaved_struct(res, inputs, inputs) inputs = torch.rand([]) res = autogradF.hessian(reducer, inputs, vectorize=vectorize) self._assert_same_struct(res, inputs) def bad_reducer(x): return x.sum().view(1, 1, 1) inputs = torch.rand(4, 4) res = autogradF.hessian(bad_reducer, inputs, vectorize=vectorize) self._assert_interleaved_struct(res, inputs, inputs) def test_hessian_scalar(self): return self._test_hessian_scalar(vectorize=False) def test_hessian_scalar_vectorize(self): return self._test_hessian_scalar(vectorize=True) def _test_hessian_create_graph(self, vectorize): def pow_reducer(x): return x.pow(3).sum() inputs = torch.rand(2, 2, dtype=torch.double, requires_grad=True) res = autogradF.hessian(pow_reducer, inputs, create_graph=True, vectorize=vectorize) self._assert_interleaved_struct(res, inputs, inputs) self.assertIsNotNone(res.grad_fn) gradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs) gradgradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs) def add_pow_reducer(x, y): return (x + y).pow(3).sum() inputs = (torch.rand(2, 2, dtype=torch.double, requires_grad=True), torch.rand(2, 2, dtype=torch.double, requires_grad=True)) res = autogradF.hessian(add_pow_reducer, inputs, create_graph=True, vectorize=vectorize) self._assert_interleaved_struct(res, inputs, inputs) self.assertIsNotNone(res[0][0].grad_fn) self.assertIsNotNone(res[0][1].grad_fn) self.assertIsNotNone(res[1][0].grad_fn) self.assertIsNotNone(res[1][1].grad_fn) def flatten(inp): return tuple(el_lvl2 for el_lvl1 in inp for el_lvl2 in el_lvl1) gradcheck(lambda *inp: 
flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs) gradgradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs) def foo(x, y): x = x.cos() val, hess = autogradF.hessian(add_pow_reducer, (x, y), create_graph=True, vectorize=vectorize) res = val[0].cos().sum() + val[1].cos().sum() + hess[0].cos().sum() res = res + hess[1].cos().sum() + x.cos().sum() + y.cos().sum() return res gradcheck(foo, inputs) gradgradcheck(foo, inputs) def test_hessian_create_graph(self): self._test_hessian_create_graph(vectorize=False) def test_hessian_create_graph_vectorize(self): self._test_hessian_create_graph(vectorize=True) def test_vhp_err_check(self): def foo(a): return 3 * a.narrow(0, 0, 3).exp().sum() def bar(a): return 3 * a.narrow(0, 0, 3), "bar" def bar2(a): return 3 * a.narrow(0, 0, 3) inp = torch.rand(4) v = torch.rand(4) with self.assertRaisesRegex(TypeError, "The inputs given to vhp must be either a Tensor"): res = autogradF.vhp(foo, (inp, 2), v) with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vhp must"): res = autogradF.vhp(bar, inp, v) err_msg_out = "The Tensor returned by the function given to vhp should contain a single element" with self.assertRaisesRegex(RuntimeError, err_msg_out): res = autogradF.vhp(bar2, inp, v) with self.assertRaisesRegex(RuntimeError, "v has invalid size:"): res = autogradF.vhp(foo, inp, torch.rand(5)) with self.assertRaisesRegex(TypeError, "The v given to vhp must be either a Tensor or a tuple of Tensors"): res = autogradF.vhp(foo, inp, (v, 2)) res = autogradF.vhp(foo, inp, v) self._assert_same_struct(res[1], inp) def foo(a, b): return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum() inp = (torch.rand(4), torch.rand(5)) v = (torch.rand(4), torch.rand(5)) res = autogradF.vhp(foo, inp, v) self._assert_same_struct(res[1], inp) def test_vhp_err_check_strict(self): def foo(a): return a.detach().sum() def bar(a): # Make a non-leaf Tensor that requires_grad but that is not connected to the input return a.long().float().requires_grad_().clone().sum() def bar2(a): # A Linear function for which the jacobian is independent of the input return (3 * a).sum() inp = torch.rand(4) v = torch.rand(4) with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."): res = autogradF.vhp(foo, inp, v, strict=True) res = autogradF.vhp(foo, inp, v, strict=False) self._assert_same_struct(res[1], inp) self.assertEqual(res[1].abs().sum(), 0.) with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"): res = autogradF.vhp(bar, inp, v, strict=True) res = autogradF.vhp(bar, inp, v, strict=False) self._assert_same_struct(res[1], inp) self.assertEqual(res[1].abs().sum(), 0.) with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"): res = autogradF.vhp(bar2, inp, v, strict=True) res = autogradF.vhp(bar2, inp, v, strict=False) self._assert_same_struct(res[1], inp) self.assertEqual(res[1].abs().sum(), 0.) 
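    # The vhp tests below mirror the vjp ones, with the Hessian of a scalar function
    # taking the place of the Jacobian.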
def test_vhp_no_grad(self): def reducer(x): return x.exp().sum() inputs = torch.rand(4, 4) v = torch.ones(4, 4) with torch.no_grad(): res = autogradF.vhp(reducer, inputs, v) self.assertIsNone(res[0].grad_fn) self.assertIsNone(res[1].grad_fn) self.assertNotEqual(res[1], torch.zeros(4, 4)) with torch.no_grad(): res = autogradF.vhp(reducer, inputs, v, create_graph=True) self.assertIsNotNone(res[0].grad_fn) self.assertIsNotNone(res[1].grad_fn) self.assertNotEqual(res[1], torch.zeros(4, 4)) def test_vhp_output(self): def foo(a): return 3 * a.narrow(0, 0, 3).exp().sum() inputs = torch.rand(4, 4) v = torch.ones(4, 4) res = autogradF.vhp(foo, inputs, v) self._assert_same_struct(res[1], inputs) self.assertIsNone(res[0].grad_fn) self.assertIsNone(res[1].grad_fn) def bar(a, b): return (a + 3 * b.narrow(0, 0, 3)).exp().sum() inputs = (torch.rand(3), torch.rand(4)) v = (torch.ones(3), torch.ones(4)) out, vhp_val = autogradF.vhp(bar, inputs, v) self._assert_same_struct(vhp_val, inputs) self.assertIsNone(out.grad_fn) self.assertIsNone(vhp_val[0].grad_fn) self.assertIsNone(vhp_val[1].grad_fn) def test_vhp_scalar(self): def reducer(x): return x.sum() inputs = torch.rand(4, 4) v = torch.ones(4, 4) res = autogradF.vhp(reducer, inputs, v) self._assert_same_struct(res[1], inputs) inputs = torch.rand([]) v = torch.rand([]) res = autogradF.vhp(reducer, inputs, v) self._assert_same_struct(res[1], inputs) res = autogradF.vhp(reducer, inputs) self._assert_same_struct(res[1], inputs) def bad_reducer(x): return x.sum().view(1, 1, 1) inputs = torch.rand(4, 4) v = torch.rand(4, 4) res = autogradF.vhp(bad_reducer, inputs, v) self._assert_same_struct(res[1], inputs) def test_vhp_create_graph(self): def foo(a): return 3 * a.narrow(0, 0, 3).exp().sum() inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True) v = torch.ones(4, 4, dtype=torch.double, requires_grad=True) res = autogradF.vhp(foo, inputs, v, create_graph=True) self._assert_same_struct(res[1], inputs) self.assertIsNotNone(res[0].grad_fn) self.assertIsNotNone(res[1].grad_fn) gradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v)) gradgradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v)) def bar(a, b): return (a + 3 * b.narrow(0, 0, 3)).exp().sum() inputs = (torch.rand(3, dtype=torch.double, requires_grad=True), torch.rand(4, dtype=torch.double, requires_grad=True)) v = (torch.ones(3, dtype=torch.double, requires_grad=True), torch.ones(4, dtype=torch.double, requires_grad=True)) out, vhp_val = autogradF.vhp(bar, inputs, v, create_graph=True) self._assert_same_struct(vhp_val, inputs) self.assertIsNotNone(out.grad_fn) self.assertIsNotNone(vhp_val[0].grad_fn) self.assertIsNotNone(vhp_val[1].grad_fn) gradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v) gradgradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v) def foo(*args): x, y = args[:2] v = args[2:] x = x.cos() val, grad = autogradF.vhp(bar, (x, y), v, create_graph=True) return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos() gradcheck(foo, inputs + v) gradgradcheck(foo, inputs + v) def test_hvp_err_check(self): def foo(a): return 3 * a.narrow(0, 0, 3).exp().sum() def bar(a): return 3 * a.narrow(0, 0, 3), "bar" def bar2(a): return 3 * a.narrow(0, 0, 3) inp = torch.rand(4) v = torch.rand(4) res = autogradF.hvp(foo, inp, v) with self.assertRaisesRegex(TypeError, "The inputs given to hvp must be either a Tensor"): res = 
autogradF.hvp(foo, (inp, 2), v) with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hvp must"): res = autogradF.hvp(bar, inp, v) err_msg_out = "The Tensor returned by the function given to hvp should contain a single element" with self.assertRaisesRegex(RuntimeError, err_msg_out): res = autogradF.hvp(bar2, inp, v) with self.assertRaisesRegex(RuntimeError, "v has invalid size:"): res = autogradF.hvp(foo, inp, torch.rand(5)) with self.assertRaisesRegex(TypeError, "The v given to hvp must be either a Tensor or a tuple of Tensors"): res = autogradF.hvp(foo, inp, (v, 2)) res = autogradF.hvp(foo, inp, v) self._assert_same_struct(res[1], inp) def foo(a, b): return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum() inp = (torch.rand(4), torch.rand(5)) v = (torch.rand(4), torch.rand(5)) res = autogradF.hvp(foo, inp, v) self._assert_same_struct(res[1], inp) def test_hvp_err_check_strict(self): def foo(a): return a.detach().sum() def bar(a): # Make a non-leaf Tensor that requires_grad but that is not connected to the input return a.long().float().requires_grad_().clone().sum() def bar2(a): # A Linear function for which the jacobian is independent of the input return (3 * a).sum() inp = torch.rand(4) v = torch.rand(4) with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."): res = autogradF.hvp(foo, inp, v, strict=True) res = autogradF.hvp(foo, inp, v, strict=False) self._assert_same_struct(res[1], inp) self.assertEqual(res[1].abs().sum(), 0.) with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"): res = autogradF.hvp(bar, inp, v, strict=True) res = autogradF.hvp(bar, inp, v, strict=False) self._assert_same_struct(res[1], inp) self.assertEqual(res[1].abs().sum(), 0.) with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"): res = autogradF.hvp(bar2, inp, v, strict=True) res = autogradF.hvp(bar2, inp, v, strict=False) self._assert_same_struct(res[1], inp) self.assertEqual(res[1].abs().sum(), 0.) 
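    # hvp is the transposed counterpart of vhp: for a scalar function f with Hessian H,
    #   hvp(f, x, v)[1] == H @ v    while    vhp(f, x, v)[1] == v @ H
    # (this correspondence is asserted explicitly in test_hessian_match_vhp_hvp below).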
def test_hvp_no_grad(self): def reducer(x): return x.exp().sum() inputs = torch.rand(4, 4) v = torch.ones(4, 4) with torch.no_grad(): res = autogradF.hvp(reducer, inputs, v) self.assertIsNone(res[0].grad_fn) self.assertIsNone(res[1].grad_fn) self.assertNotEqual(res[1], torch.zeros(4, 4)) with torch.no_grad(): res = autogradF.hvp(reducer, inputs, v, create_graph=True) self.assertIsNotNone(res[0].grad_fn) self.assertIsNotNone(res[1].grad_fn) self.assertNotEqual(res[1], torch.zeros(4, 4)) def test_hvp_output(self): def foo(a): return 3 * a.narrow(0, 0, 3).exp().sum() inputs = torch.rand(4, 4) v = torch.ones(4, 4) res = autogradF.hvp(foo, inputs, v) self._assert_same_struct(res[1], inputs) self.assertIsNone(res[0].grad_fn) self.assertIsNone(res[1].grad_fn) def bar(a, b): return (a + 3 * b.narrow(0, 0, 3)).exp().sum() inputs = (torch.rand(3), torch.rand(4)) v = (torch.ones(3), torch.ones(4)) out, hvp_val = autogradF.hvp(bar, inputs, v) self._assert_same_struct(hvp_val, inputs) self.assertIsNone(out.grad_fn) self.assertIsNone(hvp_val[0].grad_fn) self.assertIsNone(hvp_val[1].grad_fn) def test_hvp_scalar(self): def reducer(x): return x.exp().sum() inputs = torch.rand(4, 4) v = torch.ones(4, 4) res = autogradF.hvp(reducer, inputs, v) self._assert_same_struct(res[1], inputs) inputs = torch.rand([]) v = torch.rand([]) res = autogradF.hvp(reducer, inputs, v) self._assert_same_struct(res[1], inputs) res = autogradF.hvp(reducer, inputs) self._assert_same_struct(res[1], inputs) def bad_reducer(x): return x.exp().sum().view(1, 1, 1) inputs = torch.rand(4, 4) v = torch.rand(4, 4) res = autogradF.hvp(bad_reducer, inputs, v) self._assert_same_struct(res[1], inputs) def test_hvp_create_graph(self): def foo(a): return 3 * a.narrow(0, 0, 3).exp().sum() inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True) v = torch.ones(4, 4, dtype=torch.double, requires_grad=True) res = autogradF.hvp(foo, inputs, v, create_graph=True) self._assert_same_struct(res[1], inputs) self.assertIsNotNone(res[0].grad_fn) self.assertIsNotNone(res[1].grad_fn) gradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v)) gradgradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v)) def bar(a, b): return (a + 3 * b.narrow(0, 0, 3)).exp().sum() inputs = (torch.rand(3, dtype=torch.double, requires_grad=True), torch.rand(4, dtype=torch.double, requires_grad=True)) v = (torch.ones(3, dtype=torch.double, requires_grad=True), torch.ones(4, dtype=torch.double, requires_grad=True)) out, hvp_val = autogradF.hvp(bar, inputs, v, create_graph=True) self._assert_same_struct(hvp_val, inputs) self.assertIsNotNone(out.grad_fn) self.assertIsNotNone(hvp_val[0].grad_fn) self.assertIsNotNone(hvp_val[1].grad_fn) gradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v) gradgradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v) def foo(*args): x, y = args[:2] v = args[2:] x = x.cos() val, grad = autogradF.hvp(bar, (x, y), v, create_graph=True) return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos() gradcheck(foo, inputs + v) gradgradcheck(foo, inputs + v) def test_jacobian_match_vjp_jvp(self): def foo(x): return x ** 3 + x.sum() inputs = torch.rand(4) v = torch.rand(4) jac = autogradF.jacobian(foo, inputs) jvp = autogradF.jvp(foo, inputs, v)[1] vjp = autogradF.vjp(foo, inputs, v)[1] self.assertEqual(jvp, torch.mm(jac, v.unsqueeze(1)).squeeze(1)) self.assertEqual(vjp, torch.mm(v.unsqueeze(0), 
jac).squeeze(0)) def test_hessian_match_vhp_hvp(self): def foo(a): return 3 * a.narrow(0, 0, 3).exp().sum() inputs = torch.rand(4) v = torch.rand(4) hes = autogradF.hessian(foo, inputs) hvp = autogradF.hvp(foo, inputs, v)[1] vhp = autogradF.vhp(foo, inputs, v)[1] self.assertEqual(hvp, torch.mm(hes, v.unsqueeze(1)).squeeze(1)) self.assertEqual(vhp, torch.mm(v.unsqueeze(0), hes).squeeze(0)) class TestAutogradForwardMode(TestCase): def tearDown(self): # Ensure that a failing test won't make others fail while fwAD._current_level >= 0: fwAD.exit_dual_level() super().tearDown() def test_forward_level_cleanup(self): def get_tensor_and_weak_ref(): # Create a new Tensor and weak reference t = torch.rand(2, requires_grad=True) return t, torch._C._WeakTensorRef(t) # Sanity check that the helper function works as expected t, t_ref = get_tensor_and_weak_ref() self.assertFalse(t_ref.expired()) del t self.assertTrue(t_ref.expired()) # Main test code foo = torch.rand(2) with fwAD.dual_level(): tangent, tangent_ref = get_tensor_and_weak_ref() self.assertFalse(tangent_ref.expired()) dual = fwAD.make_dual(foo, tangent) self.assertFalse(tangent_ref.expired()) # Make sure that the tangent we provided has been re-used as is self.assertTrue(fwAD.unpack_dual(dual)[1] is tangent) # Make sure that dual is keeping the tangent alive del tangent self.assertFalse(tangent_ref.expired()) # Make sure that the dual level does not keep the c++ # version of the tangent alive del dual self.assertTrue(tangent_ref.expired()) def test_size_check(self): foo = torch.rand(2) tangent = torch.rand(3) with fwAD.dual_level(): with self.assertRaisesRegex(RuntimeError, "Trying to set a forward gradient that has a different size"): dual = fwAD.make_dual(foo, tangent) dual = fwAD.make_dual(foo, tangent[1:]) # The following test functions want to ensure all the following behaviors: # - Ensure that default level system in the python binding works # - Ensure that only level 0 exists and nesting is properly disabled # - Ensure that printing works fine # - Ensure that basic packing/unpacking works # - Ensure that advanced packing/unpacking works # - For memory / version counter share # - For backward AD (regular ops) # - Ensure that view + inplace for both modes work fine # - Ensure we do proper cleanup on exit of a level def test_default_level(self): foo = torch.rand(2) bar = torch.rand(2) with fwAD.dual_level(): baz = fwAD.make_dual(foo, bar) baz_primal, baz_tangent = fwAD.unpack_dual(baz) self.assertEqual(baz_primal, foo) # We don't actually need to enforce that these two are the exact same python # object, feel free to relax in the future self.assertIs(baz_tangent, bar) baz_primal, baz_tangent = fwAD.unpack_dual(baz) self.assertEqual(baz_primal, foo) self.assertEqual(baz_tangent, None) def test_nested_level(self): with fwAD.dual_level() as level: # For now only level 0 exists self.assertEqual(level, 0) with fwAD.dual_level(): with self.assertRaisesRegex(RuntimeError, "Nested forward mode AD is not supported at the moment"): nest_level = fwAD.enter_dual_level() def test_print(self): with fwAD.dual_level() as level: a = torch.rand(3) self.assertFalse("tangent=" in str(a)) b = fwAD.make_dual(a, torch.rand(3)) self.assertFalse("tangent=" in str(a)) self.assertTrue("tangent=" in str(b)) b_primal, b_tangent = fwAD.unpack_dual(b) self.assertFalse("tangent=" in str(b_primal)) self.assertFalse("tangent=" in str(b_tangent)) def test_basic_packing_unpacking(self): foo = torch.rand(2) bar = torch.rand(2) with fwAD.dual_level(): baz = 
    def test_basic_packing_unpacking(self):
        foo = torch.rand(2)
        bar = torch.rand(2)

        with fwAD.dual_level():
            baz = fwAD.make_dual(foo, bar)
            baz_primal, baz_tangent = fwAD.unpack_dual(baz)
            self.assertEqual(baz_primal, foo)
            self.assertIs(baz_tangent, bar)

            # Check that packing/unpacking did not change the input
            foo_primal, foo_tangent = fwAD.unpack_dual(foo)
            self.assertEqual(foo_primal, foo)
            self.assertIsNone(foo_tangent)

    def test_advanced_packing_unpacking(self):
        foo = torch.rand(2)
        bar = torch.ones(2)

        # Memory and version counter check
        with fwAD.dual_level():
            dual = fwAD.make_dual(foo, bar)

            # Ensure that they are sharing memory and version counter
            self.assertEqual(dual.storage().data_ptr(), foo.storage().data_ptr())

            # Ensure we properly share the version counter
            self.assertEqual(foo._version, dual._version)
            foo.add_(1)
            self.assertEqual(foo._version, dual._version)

            # Unpacking should only create aliases as well
            dual_primal, dual_tangent = fwAD.unpack_dual(dual)
            self.assertEqual(dual_primal.storage().data_ptr(), foo.storage().data_ptr())
            self.assertEqual(dual_tangent.storage().data_ptr(), bar.storage().data_ptr())
            # And the tangent is actually re-used as-is so it is still the same Tensor
            self.assertIs(dual_tangent, bar)

            # Ensure we properly share the version counter
            self.assertEqual(foo._version, dual_primal._version)
            foo.add_(1)
            self.assertEqual(foo._version, dual_primal._version)
            self.assertEqual(bar._version, dual_tangent._version)
            bar.add_(1)
            self.assertEqual(bar._version, dual_tangent._version)

        # backward mode check
        with fwAD.dual_level():
            foo.requires_grad_()
            bar.requires_grad_()

            # Check that backward gradients properly propagate through packing/unpacking
            dual = fwAD.make_dual(foo, bar)
            p, t = fwAD.unpack_dual(dual)

            gfoo, gbar = torch.autograd.grad(p.sum(), (foo, bar), retain_graph=True, allow_unused=True)
            self.assertEqual(gfoo, torch.ones_like(foo))
            self.assertIsNone(gbar)

            gfoo, gbar = torch.autograd.grad(t.sum(), (foo, bar), retain_graph=True, allow_unused=True)
            self.assertIsNone(gfoo)
            self.assertEqual(gbar, torch.ones_like(bar))

            # Check that forward gradients are impacted by detach()
            detached_dual = dual.detach()
            out = detached_dual * 2
            p, t = fwAD.unpack_dual(out)
            self.assertFalse(p.requires_grad)
            self.assertEqual(p, foo * 2)
            self.assertIsNone(t)

            # Check that forward gradients are not impacted by no_grad
            with torch.no_grad():
                out = dual * 3
            p, t = fwAD.unpack_dual(out)
            self.assertFalse(p.requires_grad)
            self.assertFalse(t.requires_grad)
            self.assertEqual(p, foo * 3)
            self.assertEqual(t, bar * 3)

            # Check that forward gradients are not impacted by inplace detach
            dual = dual.clone()
            dual.detach_()
            out = dual * 2
            p, t = fwAD.unpack_dual(out)
            self.assertFalse(p.requires_grad)
            self.assertEqual(p, foo * 2)
            self.assertIsNone(t)

    def test_view_inplace_non_differentiable_views(self):
        original_foo = torch.rand(2, dtype=torch.double)
        original_bar = torch.ones(2, dtype=torch.double)

        # Do clones to be able to compare the values updated inplace
        # with the original content of these Tensors
        foo = original_foo.clone()
        bar = original_bar.clone()

        with fwAD.dual_level():
            # Note that in this test, we use "update" to mean computing the right tangent for the dual
            # All the inplace operations here are expected to update the primal value of the Tensors but
            # not always their tangents.
            # Also all mentions of "non differentiable view" here mean non forward differentiable view
            # unless specified otherwise.
            # See note [Forward Grad View/inplace] for more details on how these views work.
            # Check that inplace ops do not update non-differentiable views
            # Non differentiable view
            dual = fwAD.make_dual(foo, bar)
            dual *= 2
            # Check that non differentiable view's tangent was not updated
            self.assertIsNone(fwAD.unpack_dual(foo)[1])
            # Check that the computed result is correct
            self.assertEqual(bar, original_bar * 2)
            self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
            self.assertEqual(foo, original_foo * 2)
            self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 2)
            # Other non differentiable view
            dual_primal, dual_tangent = fwAD.unpack_dual(dual)
            self.assertIsNone(fwAD.unpack_dual(dual_primal)[1])
            self.assertIsNone(fwAD.unpack_dual(dual_tangent)[1])
            dual_primal *= 2
            # Ensure dual's tangent did not change
            self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
            self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
            dual_tangent *= 2
            # Ensure dual's primal did not change
            self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
            self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 4)

    def test_view_inplace_differentiable_views(self):
        original_foo = torch.rand(2)
        original_bar = torch.ones(2)

        # Do clones to be able to compare the values updated inplace
        # with the original content of these Tensors
        foo = original_foo.clone()
        bar = original_bar.clone()

        with fwAD.dual_level():
            # Check that inplace ops do update differentiable view but stop at non differentiable ones
            # A non differentiable view
            dual = fwAD.make_dual(foo, bar)
            # A differentiable view
            view = dual.narrow(0, 0, 1)
            view *= 2
            # Check that non differentiable view was not updated
            self.assertIsNone(fwAD.unpack_dual(foo)[1])
            # Check that differentiable view was updated
            self.assertEqual(fwAD.unpack_dual(dual)[1], torch.tensor([2., 1.]))
            self.assertEqual(fwAD.unpack_dual(view)[1], torch.tensor([2.]))

            # Check that we track differentiable view even for Tensors that are not dual
            baz = torch.rand(2)
            baz += dual
            self.assertEqual(fwAD.unpack_dual(baz)[1], fwAD.unpack_dual(dual)[1])
            # Updates through a view should be tracked as well
            baz = torch.rand(2)
            baz[0] = dual[0]
            self.assertEqual(fwAD.unpack_dual(baz)[1][0], fwAD.unpack_dual(dual)[1][0])
            # Unused values get a gradient of 0
            self.assertEqual(fwAD.unpack_dual(baz)[1][1], 0.)
            # Check that forward non-differentiable views do prevent gradient update
            baz = torch.rand(2)
            view = baz.detach()
            view += dual
            self.assertIsNone(fwAD.unpack_dual(baz)[1])

    def test_grad_cleanup(self):
        foo = torch.rand(2)
        bar = torch.rand(2)
        baz = torch.rand(2)

        with fwAD.dual_level():
            dual = fwAD.make_dual(foo, bar)
            self.assertIsNone(fwAD.unpack_dual(foo)[1])
            self.assertIs(fwAD.unpack_dual(dual)[1], bar)

        self.assertIsNone(fwAD.unpack_dual(dual)[1])

        with fwAD.dual_level():
            self.assertIsNone(fwAD.unpack_dual(foo)[1])
            new_dual = fwAD.make_dual(foo, baz)

            dual_primal, dual_tangent = fwAD.unpack_dual(dual)
            new_dual_primal, new_dual_tangent = fwAD.unpack_dual(new_dual)
            self.assertEqual(dual_primal, new_dual_primal)
            self.assertIsNone(dual_tangent)
            self.assertEqual(new_dual_tangent, baz)

    def test_detach_view_tracking(self):
        # Default detach is both forward and backward non-differentiable
        foo = torch.rand(2)
        foo_weak = torch._C._WeakTensorRef(foo)

        out = foo.detach()

        del foo
        self.assertTrue(foo_weak.expired())

    def test_out_variant(self):
        with fwAD.dual_level():
            foo = fwAD.make_dual(torch.rand(2), torch.rand(2))
            bar = torch.rand(2)

            with self.assertRaisesRegex(RuntimeError, "out= function"):
                torch.add(bar, bar, out=foo)

            with self.assertRaisesRegex(RuntimeError, "out= function"):
                torch.add(foo, bar, out=bar)


# Generic device type autograd tests.
class TestAutogradDeviceType(TestCase):

    def test_min_max_median_backprops_to_all_values(self, device):
        for f in [torch.min, torch.max, torch.median, torch.nanmedian]:
            x1 = torch.tensor([1., 0., 1., 0., 1., 0.], device=device, requires_grad=True)
            x2 = torch.tensor([float('nan'), float('nan'), float('nan')], requires_grad=True)
            for x in [x1, x2]:
                y = f(x)
                y.backward()
                self.assertEqual(x.grad.sum(), 1.)
                self.assertEqual((x.grad == 1 / 3).sum(), 3)

    def test_cdist(self, device):
        def _test_euclidean_large_cdist(sizex, sizey=None):
            if sizey is None:
                sizey = sizex
            x = torch.randn(sizex, device=device, dtype=torch.float)
            y = torch.randn(sizey, device=device, dtype=torch.float)
            eps = 1e-6
            # to avoid extremum
            x = x - (((x - y) < eps).float() * 2 * eps)
            x.requires_grad = True
            y.requires_grad = True
            dist = torch.cdist(x, y, p=2)
            # Do a backward pass to check that it is valid for large
            # matrices
            loss = dist.sum()
            loss.backward()

        _test_euclidean_large_cdist((2000, 5))

    # Ensure that cdist backward with p<1 does not produce NaNs
    def test_cdist_grad_p_lt_1_no_nan(self, device):
        for p in [0.99, 0.7, 0.5, 0.1, 0.01]:
            x = torch.randn(1, 2, device=device)
            y = x.clone().detach() + torch.tensor([[1., 0.]], device=device)
            x.requires_grad = True
            y.requires_grad = True
            result = torch.cdist(x, y, p=p)
            result.backward(torch.ones_like(result))
            self.assertFalse(torch.isnan(x.grad).any())
            self.assertFalse(torch.isnan(y.grad).any())

    def test_cdist_same_inputs(self, device):
        # Test to detect issues in cdist gradient calculation
        # when the distances are 0
        sizex = (1, 27, 32)
        for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
            x = torch.randn(sizex, device=device, dtype=torch.float)
            dist_grad = torch.randn((1, 27, 27), device=device, dtype=torch.float)
            y = x.clone()
            eps = 1e-6
            x.requires_grad = True
            d = torch.cdist(x, y)
            d.backward(dist_grad)
            # Check that the backward pass does not contain invalid
            # values such as nan or inf
            assert torch.isfinite(x.grad).all()

    def test_parameter_resize(self, device):
        asd = torch.nn.Parameter(torch.ones(16, dtype=torch.double, device=device))

        for i in range(2):
            with torch.no_grad():
                asd.set_(asd[1:])
                asd.grad = None

            m = torch.cat((asd, asd))
            m.sum().backward()
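    # gradcheck / gradgradcheck, used heavily below, compare analytic gradients
    # against finite differences; double precision keeps the numeric comparison
    # tight. Illustrative sketch only (not collected by the runner):
    @staticmethod
    def _example_gradcheck_sketch():
        x = torch.randn(3, dtype=torch.double, requires_grad=True)
        # both return True (or raise) when analytic and numeric gradients match
        gradcheck(lambda t: (t * t).sum(), (x,))
        gradgradcheck(lambda t: (t * t).sum(), (x,))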
    @dtypes(torch.double, torch.cdouble)
    def test_sparse_ctor_getter_backward(self, device, dtype):
        # See NOTE [ Sparse: autograd and API ] on the expected behavior of this test
        def _test(size, sparse_dim, nnz, device):
            v_size = [nnz] + list(size[sparse_dim:])
            i = torch.rand(sparse_dim, nnz)
            i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
            i = i.to(torch.long)

            inp = torch.randn(v_size, dtype=torch.double, device=device, requires_grad=True)
            other = self.genSparseTensor(size, sparse_dim, nnz, is_uncoalesced=True, device=device,
                                         dtype=dtype)[0]

            def fn(v):
                x = torch.sparse_coo_tensor(i, v, size, dtype=dtype, device=device)
                y = (x + other).coalesce()
                yv = y.values()
                new_v = yv.tanh()
                z = torch.sparse_coo_tensor(y.indices(), new_v, y.size())
                return z.coalesce().values()

            gradcheck(fn, (inp,), check_batched_grad=False)
            # FIXME: make gradgradcheck work.
            # gradgradcheck(fn, (inp,), check_batched_grad=False)

            # assert that _values is non-differentiable
            with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"):
                other.detach().requires_grad_()._values().backward(torch.ones_like(other._values()))

        for empty_i, empty_v, empty_nnz in product([True, False], repeat=3):
            sparse_size = [] if empty_i else [2, 1]
            dense_size = [1, 0, 2] if empty_v else [1, 2]
            nnz = 0 if empty_nnz else 5
            _test(sparse_size + dense_size, len(sparse_size), nnz, device)

    @skipMeta
    @dtypes(torch.double, torch.cdouble)
    def test_sparse_backward(self, device, dtype):
        class FixedGradientFunction(Function):
            @staticmethod
            def forward(ctx, x, grad_x):
                ctx.save_for_backward(grad_x)
                return x

            @staticmethod
            def backward(ctx, grad_x):
                saved_grad_x, = ctx.saved_tensors
                return saved_grad_x, None

        size = torch.Size([6, 3, 2])
        i1 = torch.tensor([
            [0, 3, 4],
            [0, 2, 2],
        ], dtype=torch.long)
        v1 = make_tensor([3, 2], dtype=dtype, device=device)
        sparse_grad1 = torch.sparse_coo_tensor(i1, v1, size, dtype=dtype, device=device)
        i2 = torch.tensor([
            [0, 1, 3, 4],
            [0, 1, 2, 2],
        ], dtype=torch.long)
        v2 = make_tensor([4, 2], dtype=dtype, device=device)
        sparse_grad2 = torch.sparse_coo_tensor(i2, v2, size, dtype=dtype, device=device)
        dense_grad = torch.rand(size, device=device, dtype=dtype)
        fn = FixedGradientFunction

        # sparse first
        x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
        (fn.apply(x, sparse_grad1) + fn.apply(x, dense_grad) + fn.apply(x, sparse_grad2)).sum().backward()
        self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
        # dense first
        x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
        (fn.apply(x, dense_grad) + fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
        self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
        # sparse only
        x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
        (fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
        self.assertEqual(x.grad, sparse_grad1 + sparse_grad2)

    # autograd tests via common_method_invocations don't allow input tensors to
    # be sparse (RuntimeError: gradcheck expects all tensor inputs are dense when
    # check_sparse_nnz is set to False.)
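    # FixedGradientFunction above is a standard custom autograd Function. The
    # bare-bones pattern, as an illustrative sketch (not collected by the runner):
    @staticmethod
    def _example_custom_function_sketch():
        class Double(Function):
            @staticmethod
            def forward(ctx, x):
                return x * 2

            @staticmethod
            def backward(ctx, grad_out):
                # d(2x)/dx = 2, so scale the incoming gradient
                return grad_out * 2

        x = torch.randn(3, requires_grad=True)
        Double.apply(x).sum().backward()
        return x.grad  # == torch.full((3,), 2.)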
    def test_sparse_mask_autograd(self, device):
        tensor = torch.randn(3, requires_grad=True, device=device)
        mask = torch.ones(3, device=device)
        mask[1] = 0
        mask = mask.to_sparse()
        converted = tensor.sparse_mask(mask).to_dense()
        converted.sum().backward()
        self.assertEqual(tensor.grad, mask.to_dense())

    def test_pyscalar_conversions(self, device):
        def _test_pyscalar_conversions(t, integral_conv):
            # integral -> integral
            l = t(torch.zeros(1, 1, 1, dtype=torch.long))
            pyscalar = -12345
            l[0] = pyscalar
            self.assertEqual(integral_conv(l), pyscalar)

            # floating point -> floating point
            f = Variable(t(torch.randn(1, 1, dtype=torch.double)))
            pyscalar = -12345.1
            f[0] = pyscalar
            self.assertEqual(float(f), pyscalar)
            f[0] = nan
            self.assertTrue(math.isnan(float(f)))
            f[0] = inf
            self.assertEqual(float(f), inf)
            f[0] = -inf
            self.assertEqual(float(f), -inf)

            # integral -> floating point
            # check we can convert something that loses precision
            pyscalar = 1234567890123456789
            self.assertNotEqual(pyscalar, integral_conv(float(pyscalar)))
            l[0] = pyscalar
            self.assertEqual(float(l), float(pyscalar))

            # floating point -> integral
            f[0] = nan
            self.assertRaises(ValueError, lambda: integral_conv(f[0]))
            f[0] = inf
            self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
            f[0] = -inf
            self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
            f[0] = sys.float_info.max
            self.assertEqual(integral_conv(f), sys.float_info.max)

            # bool, nonzero
            def test_nonzero(tensor, value, expected):
                tensor[0] = value
                self.assertEqual(expected, bool(tensor))
                self.assertEqual(expected, True if tensor else False)

            test_nonzero(l, 0, False)
            test_nonzero(l, -2, True)
            test_nonzero(f, 0.0, False)
            test_nonzero(f, sys.float_info.min, True)
            test_nonzero(f, nan, bool(nan))
            test_nonzero(f, inf, bool(inf))
            test_nonzero(f, -inf, bool(-inf))

        _test_pyscalar_conversions(lambda x: x.to(device), lambda x: int(x))

    @dtypesIfCUDA(torch.half, torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
    @dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
    def test_set_requires_grad_only_for_floats(self, device, dtype):
        def f1():
            a = torch.ones(1, dtype=dtype, device=device)
            a.requires_grad_()

        def f2():
            a = torch.ones(1, dtype=dtype, device=device)
            a.requires_grad = True

        def f3():
            torch.ones(1, dtype=dtype, device=device, requires_grad=True)

        a = torch.ones(1, dtype=dtype, device=device)
        a.requires_grad = False  # should always work
        a.requires_grad_(False)

        for f in [f1, f2, f3]:
            if dtype.is_floating_point:
                f()
            else:
                with self.assertRaisesRegex(RuntimeError, 'floating point',
                                            msg="dt: {} device: {}".format(a.dtype, a.device)):
                    f()

    @onlyCUDA
    def test_advanced_indexing_backwards_large(self, device):
        # See https://github.com/pytorch/pytorch/issues/22843
        n = (1 << 16)
        x = torch.rand(n, 1, device=device, requires_grad=True)
        a = x[:, [0]]
        a.sum().backward()
        self.assertEqual(x.grad, torch.ones(n, 1, device=device))

    def test_advanced_indexing_backwards_memory_format(self, device):
        # See https://github.com/pytorch/pytorch/issues/36956
        shape = (2, 8, 1, 2)
        i = torch.randint(1, shape, device=device).contiguous(memory_format=torch.channels_last)
        x = torch.randn(shape, requires_grad=True, device=device)
        x[i].sum().backward()
    def _test_reentrant_parent_error_on_cpu(self, device):
        t1 = torch.rand([3, 3], requires_grad=True)
        t2 = torch.rand([3, 3], device=device, requires_grad=True)
        t3 = torch.rand([3, 3], device=device, requires_grad=True)

        # Parent graph cpu graph.
        t4 = t1 * t1
        t5 = TestAutograd.SimulateBackwardError.apply(t4)

        # Child gpu graph (much longer than parent graph).
        prev = t2 * t2
        for i in range(10):
            prev = prev * t2
        reentrant_root = prev

        class ReentrantFunc(Function):
            @staticmethod
            def forward(ctx, inp):
                return inp.clone()

            @staticmethod
            def backward(ctx, grad):
                # Reentrant backward in child will take much longer.
                reentrant_root.backward()
                return grad

        # Parent gpu graph.
        t6 = ReentrantFunc.apply(t3)
        t7 = t6 * t6

        # Parent graph will error out first, while child graph will continue executing.
        with self.assertRaisesRegex(Exception, "Simulate error"):
            torch.autograd.backward([t5.sum(), t7.sum()])

        # No grads should be accumulated since child graph will stop execution
        # after parent receives error.
        self.assertIsNone(t2.grad)
        self.assertIsNone(t1.grad)
        self.assertIsNone(t3.grad)

    @onlyCUDA
    def test_reentrant_parent_error_on_cpu(self, device):
        before = CudaMemoryLeakCheck.get_cuda_memory_usage()

        # Run as separate function so that gc can clean up everything when we
        # check for memory usage.
        self._test_reentrant_parent_error_on_cpu(device)

        # Wait for autograd thread to cleanup failed tasks.
        after = CudaMemoryLeakCheck.get_cuda_memory_usage()
        start = time.time()
        while before != after and time.time() - start < 30:
            time.sleep(0.1)
            after = CudaMemoryLeakCheck.get_cuda_memory_usage()

        self.assertEqual(before, after)

    # test for backward in https://github.com/pytorch/pytorch/issues/15511
    # TODO: opinfo pdist
    def test_pdist_large(self, device):
        def func(x):
            return torch.pdist(x, p=2)

        # shape[0] should be able to be (roughly) arbitrarily large, but the kernel
        # is currently limited to smaller sizes (see issue above); this is just testing
        # a floor.
        shape = (1000, 1)
        x = torch.randn(shape, device=device).requires_grad_()
        output = torch.pdist(x, p=2)
        # just run a single backward, as gradcheck/gradgradcheck is expensive here
        output.sum().backward()

    # TODO: see if these tests can be ported to OpInfos or moved to where's test suite
    def test_where_functional(self, device):
        x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
        y = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
        cond = mask_not_all_zeros((5, 5)).to(device=device)

        def where(cond, x, y):
            return torch.where(cond, x, y)

        gradcheck(where, [cond, x, y], raise_exception=True)
        gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, device=device)])

        x = torch.randn(5, 1, 5, dtype=torch.double, device=device, requires_grad=True)
        y = torch.randn(5, 5, 1, dtype=torch.double, device=device, requires_grad=True)
        gradcheck(where, [cond, x, y], raise_exception=True)
        gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, 5, device=device)])
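    # torch.where routes the incoming gradient to whichever branch each element
    # selected. Illustrative sketch only (not collected by the runner):
    @staticmethod
    def _example_where_grad_sketch():
        cond = torch.tensor([True, False])
        x = torch.zeros(2, requires_grad=True)
        y = torch.zeros(2, requires_grad=True)
        torch.where(cond, x, y).sum().backward()
        return x.grad, y.grad  # ([1., 0.], [0., 1.])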
    def test_where_scalar(self, device):
        x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
        scalar = 4.
        cond = mask_not_all_zeros((5, 5)).to(device=device)

        def where_scalar_first(cond, x):
            return torch.where(cond, scalar, x)

        def where_scalar_second(cond, x):
            return torch.where(cond, x, scalar)

        gradcheck(where_scalar_first, (cond, x))
        gradgradcheck(where_scalar_first, (cond, x))

        gradcheck(where_scalar_second, (cond, x))
        gradgradcheck(where_scalar_second, (cond, x))

    @skipCUDAIf(True, """Test is flaky on Linux and Windows, typical error message:
                         https://github.com/pytorch/pytorch/issues/34870""")
    def test_ctc_loss(self, device):
        batch_size = 64
        num_labels = 101
        target_length = 15
        gradcheck_input_size = 10

        ZERO_NONE = 0
        ZERO_SOME = 1
        ZERO_ALL = 2

        # input_length, vary_lengths, zero_lengths
        tests = [(150, False, ZERO_NONE),
                 (150, True, ZERO_NONE),
                 (50, True, ZERO_SOME),
                 (50, True, ZERO_ALL)]

        if 'cuda' in device:
            tests += [(50, False, ZERO_NONE),
                      (50, True, ZERO_NONE),
                      (150, True, ZERO_SOME),
                      (150, True, ZERO_ALL)]

        for input_length, vary_lengths, zero_mode in tests:
            targets = torch.randint(1, num_labels, (batch_size, target_length),
                                    device=device, dtype=torch.long)
            x = torch.randn(gradcheck_input_size, dtype=torch.double, device=device, requires_grad=True)
            tile_factors = torch.randn(input_length * batch_size * num_labels // gradcheck_input_size + 1,
                                       device=device)
            input_lengths = [(torch.randint(input_length // 2, input_length + 1, ()).item()
                              if vary_lengths or i == 0 else input_length) for i in range(batch_size)]
            if zero_mode == ZERO_ALL:
                target_lengths = [0 for _ in range(batch_size)]
            else:
                target_lengths = [(torch.randint(target_length // 2, target_length + 1, ()).item()
                                   if vary_lengths else target_length) for _ in range(batch_size)]
                if zero_mode == ZERO_SOME:
                    idxes = torch.randint(0, batch_size, (10,))
                    for i in idxes:
                        target_lengths[i] = 0

            def ctc_after_softmax(x):
                x_full = ((x[:, None] * tile_factors[None, :]).view(-1)[:input_length * batch_size * num_labels]
                          .view(input_length, batch_size, num_labels))
                log_probs = torch.log_softmax(x_full, 2)
                return torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)

            gradcheck(ctc_after_softmax, [x])

    @onlyCUDA
    @skipCUDAIfRocm
    @skipCUDAIfCudnnVersionLessThan(7600)
    def test_ctc_loss_cudnn(self, device):
        batch_size = 16
        input_length = 30
        num_labels = 101
        target_length = 15

        targets = torch.randint(1, num_labels, (batch_size * target_length,),
                                device='cuda', dtype=torch.long)
        log_probs = torch.log_softmax(torch.randn(input_length, batch_size, num_labels,
                                                  device='cuda', dtype=torch.float), 2)
        log_probs.requires_grad_()

        input_lengths = batch_size * [input_length]
        target_lengths = batch_size * [target_length]
        grad_out = torch.randn(batch_size, device='cuda', dtype=torch.float)
        with torch.backends.cudnn.flags(enabled=False):
            loss_native = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths,
                                                       target_lengths, reduction='none')
            grad_native, = torch.autograd.grad(loss_native, log_probs, grad_out)
        loss_cudnn = torch.nn.functional.ctc_loss(log_probs, targets.to('cpu', torch.int32),
                                                  input_lengths, target_lengths, reduction='none')
        self.assertTrue("Cudnn" in str(loss_cudnn.grad_fn))
        grad_cudnn, = torch.autograd.grad(loss_cudnn, log_probs, grad_out)
        self.assertEqual(grad_cudnn, grad_native, atol=1e-4, rtol=0)
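    # F.ctc_loss takes (T, N, C) log-probabilities plus per-sample input and
    # target lengths; the call shape used above, reduced to an illustrative
    # sketch (not collected by the runner):
    @staticmethod
    def _example_ctc_loss_sketch():
        T, N, C, S = 30, 4, 10, 5  # time, batch, classes, target length
        log_probs = torch.randn(T, N, C).log_softmax(2).requires_grad_()
        targets = torch.randint(1, C, (N, S), dtype=torch.long)
        loss = torch.nn.functional.ctc_loss(log_probs, targets, [T] * N, [S] * N)
        loss.backward()
        return loss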
    def test_leaky_relu_inplace_with_neg_slope(self, device):
        a = torch.tensor([-1., 1.], device=device, requires_grad=True)
        b = torch.nn.functional.leaky_relu_(a.clone(), -2)
        with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
            b.backward(torch.ones(2, device=device))

        a = torch.tensor([-1., 1.], device=device, requires_grad=True)
        b = torch.nn.functional.rrelu_(a.clone(), -5.0, 1.0)
        with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
            b.backward(torch.ones(2, device=device))

    def test_leaky_relu_inplace_with_zero_slope(self, device):
        a = torch.tensor([-2., 0., 2.], device=device, requires_grad=True)
        b = torch.nn.functional.leaky_relu_(a.clone(), 0.0)
        b.backward(torch.ones(3, device=device))
        expected = torch.tensor([0., 0., 1.], device=device)
        self.assertEqual(a.grad, expected)

        a_bf16 = torch.tensor([-2., 0., 2.], device=device, dtype=torch.bfloat16, requires_grad=True)
        b_bf16 = torch.nn.functional.leaky_relu_(a_bf16.clone(), 0.0)
        b_bf16.backward(torch.ones(3, device=device))
        expected_bf16 = torch.tensor([0., 0., 1.], device=device, dtype=torch.bfloat16)
        self.assertEqual(a_bf16.grad, expected_bf16)

    @onlyOnCPUAndCUDA
    def test_elu_inplace_with_neg_alpha(self, device):
        a = torch.tensor([-1., 1.], device=device, requires_grad=True)
        b = torch.nn.functional.elu_(a.clone(), alpha=-2)
        with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
            b.backward(torch.ones(2, device=device))

        a = torch.tensor([-1., 1.], device=device, requires_grad=True)
        b = torch.nn.functional.celu_(a.clone(), alpha=-2)
        with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
            b.backward(torch.ones(2, device=device))

    @onlyCUDA
    def test_free_unneeded_tensor(self, device):
        x = torch.randn(2, 3, 10, 10, device=device, requires_grad=True)
        m = torch.randn(1, 3, 1, 1, device=device)

        z = x.sum()
        base_mem = torch.cuda.memory_allocated()
        z = ((x + 2) * m).sum()
        end_mem = torch.cuda.memory_allocated()

        # In the end the memory usage should remain equal, because neither of
        # (x + 2) and ((x + 2) * m) should be kept alive for backward, while the
        # previous allocation of z had the same size as the current one.
        self.assertEqual(base_mem, end_mem)

    @onlyCUDA
    def test_pin_memory(self, device):
        x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
        self.assertEqual(x, x.pin_memory())
        self.assertIsNot(x, x.pin_memory())
        self.assertTrue(x.pin_memory().requires_grad)
        gradcheck(lambda x: x.pin_memory(), [x])
        gradgradcheck(lambda x: x.pin_memory(), [x])
    @skipCUDAIfRocm
    @onlyCUDA
    def test_profiler_emit_nvtx(self, device):
        # This test is not intended to ensure correctness of nvtx ranges.
        # That would require something a great deal more complex (you'd have to create a
        # profile in a subprocess, open it, and parse the sql somehow).
        # This test is merely intended to catch if emit_nvtx breaks on construction.
        a = torch.tensor([1, 2, 3], dtype=torch.float32, device=device)
        with torch.cuda.profiler.profile():
            with emit_nvtx():
                a.add(1.0)

    @onlyCUDA
    def test_rnn_backward_to_input_but_not_parameters(self, device):
        # this checks whether it is possible to not require
        # weight parameters, but require inputs, see #7722
        l = torch.nn.LSTM(2, 3).to(device)
        for p in l.parameters():
            p.requires_grad = False
        s = torch.randn(1, 1, 2, requires_grad=True, device=device)
        out, _ = l(s)
        out.sum().backward()
        self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)

    @onlyCUDA
    def test_lstmcell_backward_only_one_output_grad(self, device):
        # checks that undefined gradients don't hamper the backward
        # see #11872
        l = torch.nn.LSTMCell(2, 3).to(device).double()
        s = torch.randn(1, 2, device=device, dtype=torch.double, requires_grad=True)
        for i in range(2):
            out = l(s)[i]
            out.sum().backward()
            self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)

    def _test_rnn_mod(self, mod, inp):
        def flatten_out(mod, inp):
            out = mod(inp)
            return tuple([t if isinstance(t, torch.Tensor) else tt for t in out for tt in t])
        gradcheckfunc = partial(flatten_out, mod)
        with torch.backends.cudnn.flags(enabled=False):
            gradcheck(gradcheckfunc, inp, check_batched_grad=False)
            gradgradcheck(gradcheckfunc, inp, check_batched_grad=False)

        if inp.is_cuda and not TEST_WITH_ROCM:
            # Assert that we have good error message around unsupported CuDNN double backward
            # NB: we trigger double backward using .backward() instead of autograd.grad due to
            # https://github.com/pytorch/pytorch/issues/37874
            with torch.backends.cudnn.flags(enabled=True):
                result = gradcheckfunc(inp)
                result[0].sum().backward(create_graph=True)
                grad0 = next(mod.parameters()).grad
                with self.assertRaisesRegex(RuntimeError,
                                            "please disable the CuDNN backend temporarily"):
                    grad0.sum().backward()

                # Here we avoid the backward(create_graph=True) memory leak
                # described in https://github.com/pytorch/pytorch/issues/7343
                for param in mod.parameters():
                    param.grad = None
                inp.grad = None

    @skipMeta  # LSTM cell reuses output which was resized
    def test_LSTM_grad_and_gradgrad(self, device):
        hsize = 4
        inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
        for bias in [True, False]:
            mod = torch.nn.LSTM(hsize, hsize, bias=bias).to(device).to(torch.float64)
            self._test_rnn_mod(mod, inp)

    @skipMeta  # GRU cell reuses output which was resized
    def test_GRU_grad_and_gradgrad(self, device):
        hsize = 4
        inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
        for bias in [True, False]:
            mod = torch.nn.GRU(hsize, hsize, bias=bias).to(device).to(torch.float64)
            self._test_rnn_mod(mod, inp)
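    # _test_rnn_mod wraps a module so that gradcheck sees a pure function of
    # the input; the same wrapping works for any nn.Module. Illustrative
    # sketch only (not collected by the runner):
    @staticmethod
    def _example_module_gradcheck_sketch():
        mod = torch.nn.Linear(3, 3).double()
        inp = torch.randn(2, 3, dtype=torch.double, requires_grad=True)
        gradcheck(lambda x: mod(x), (inp,))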
    def test_copysign_subgradient(self, device):
        # Input is 0.0
        x = torch.tensor([0.0, 0.0, 0.0], dtype=torch.float, device=device, requires_grad=True)
        y = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
        out = torch.copysign(x, y)
        out.sum().backward()
        self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
        self.assertEqual(y.grad.tolist(), [0.0] * 3)

        # Input is -0.0
        x = torch.tensor([-0.0, -0.0, -0.0], dtype=torch.float, device=device, requires_grad=True)
        y = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
        out = torch.copysign(x, y)
        out.sum().backward()
        self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
        self.assertEqual(y.grad.tolist(), [0.0] * 3)

        # Other is 0.0
        x = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
        y = torch.tensor([0.0, 0.0, 0.0], dtype=torch.float, device=device, requires_grad=True)
        out = torch.copysign(x, y)
        out.sum().backward()
        self.assertEqual(x.grad.tolist(), [-1.0, 0.0, 1.0])
        self.assertEqual(y.grad.tolist(), [0.0] * 3)

        # Other is -0.0
        x = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
        y = torch.tensor([-0.0, -0.0, -0.0], dtype=torch.float, device=device, requires_grad=True)
        out = torch.copysign(x, y)
        out.sum().backward()
        self.assertEqual(x.grad.tolist(), [1.0, 0.0, -1.0])
        self.assertEqual(y.grad.tolist(), [0.0] * 3)

    @deviceCountAtLeast(1)
    def test_grad_assignment(self, devices):
        x = torch.randn(5, 5, device=devices[0])

        # Tests that the wrong type raises
        with self.assertRaisesRegex(TypeError, "expected to be a Tensor or None"):
            x.grad = 0

        # Tests that the wrong shape raises
        with self.assertRaises(RuntimeError):
            x.grad = torch.randn(2, 2, device=devices[0])

        # Tests that the wrong dtype raises
        with self.assertRaises(RuntimeError):
            x.grad = torch.randn(5, 5, dtype=torch.long, device=devices[0])

        # Tests that self-assignment raises
        with self.assertRaises(RuntimeError):
            x.grad = x

        # Tests device -> cpu grad assignment raises
        if self.device_type != 'cpu':
            with self.assertRaises(RuntimeError):
                t_cpu = torch.rand(5, 5)
                t_cpu.grad = torch.randn(5, 5, device=devices[0])

        # Tests half type on CUDA
        if self.device_type == 'cuda':
            x = x.to(dtype=torch.half, device=devices[0])
            x.grad = torch.zeros_like(x)

        # Tests cross-device assignment raises
        if len(devices) > 1:
            x = torch.randn(5, 5, device=devices[0])
            with self.assertRaises(RuntimeError):
                x.grad = torch.randn(5, 5, device=devices[1])

    @deviceCountAtLeast(1)
    @dtypes(torch.float, torch.double)
    def test_requires_grad_factory(self, devices, dtype):
        fns = [torch.ones_like, torch.randn_like]
        x = torch.randn(2, 3, dtype=dtype, device=devices[0])

        for fn in fns:
            for requires_grad in [True, False]:
                output = fn(x, dtype=dtype, device=devices[0], requires_grad=requires_grad)
                self.assertEqual(requires_grad, output.requires_grad)
                self.assertIs(dtype, output.dtype)
                self.assertEqual(devices[0], str(x.device))

    @deviceCountAtLeast(2)
    def test_unused_output_device(self, devices):
        from torch.nn.parallel._functions import Broadcast
        x = torch.randn(5, 5, dtype=torch.float, device=devices[0], requires_grad=True)
        outputs = Broadcast.apply(list(range(len(devices))), x)
        y = outputs[-1] * 2
        y.sum().backward()
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(x.grad, torch.ones(5, 5) * 2)

    @deviceCountAtLeast(2)
    def test_backward_device(self, devices):
        # check that current device matches the variable's device
        device = [None]

        class Identity(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x):
                return x.clone()

            @staticmethod
            def backward(ctx, grad_output):
                device[0] = grad_output.device
                return grad_output.clone()

        v = torch.randn(1, device=devices[1], requires_grad=True)
        Identity.apply(v).backward()
        self.assertEqual(str(device[0]), devices[1])

    @deviceCountAtLeast(2)
    def test_inputbuffer_add_multidevice(self, devices):
        input = torch.randn(1, device=devices[0], requires_grad=True)
        output = input.to(device=devices[1]) + input.to(device=devices[1])
        output.backward()

    @onlyCPU
    def test_copy_(self, device):
        # At the time of writing this test, copy_ is not generated from native_functions.yaml
        # there was a bug that bfloat16 was not recognized as floating.
        x = torch.randn(10, device=device, requires_grad=True)
        floating_dt = [dt for dt in get_all_dtypes() if dt.is_floating_point]
        for dt in floating_dt:
            y = torch.empty(10, device=device, dtype=dt)
            y.copy_(x)
            self.assertTrue(y.requires_grad)
            z = x.to(torch.bfloat16)
            self.assertTrue(z.requires_grad)

    @onlyCUDA
    def test_simple_reentrant_cross_device(self, device):
        class ReentrantFunc(Function):
            _cpu_mode = True

            @staticmethod
            def forward(ctx, x):
                return x * (x + 2)

            @staticmethod
            def backward(ctx, grad_output):
                with torch.enable_grad():
                    if ReentrantFunc._cpu_mode:
                        new_param = torch.randn(2, 2, requires_grad=True)
                        (new_param ** 2).sum().backward()
                    else:
                        new_param = torch.randn(2, 2, device=device, requires_grad=True)
                        (new_param ** 2).sum().backward()
                return grad_output

        # Reentrant starts on GPU thread, finishes on GPU thread
        x = torch.randn(2, 2, device=device, requires_grad=True)
        out = ReentrantFunc.apply(x)
        out.sum().backward()

        # Reentrant starts on CPU thread, finishes on GPU thread
        x = torch.randn(2, 2, requires_grad=True)
        # set ReentrantFunc node to GPU to emit tasks to GPU queue
        ReentrantFunc._cpu_mode = False
        out = ReentrantFunc.apply(x)
        out.sum().backward()

        # Reentrant starts on GPU thread, finishes on CPU thread
        x = torch.randn(2, 2, device=device, requires_grad=True)
        # set ReentrantFunc node to CPU to emit tasks to CPU queue
        ReentrantFunc._cpu_mode = True
        out = ReentrantFunc.apply(x)
        out.sum().backward()
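    # torch.utils.checkpoint, used in the next test, trades memory for compute:
    # the wrapped function runs without saving intermediates and is re-executed
    # during backward. Illustrative sketch only (not collected by the runner):
    @staticmethod
    def _example_checkpoint_sketch():
        x = torch.randn(4, requires_grad=True)
        y = checkpoint(lambda t: (t.sin() * 2).cos(), x)
        y.sum().backward()  # the lambda is re-run here to rebuild intermediates
        return x.grad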
    @onlyCUDA
    def test_cross_device_reentrant_autograd(self, device):
        # Output on gpu so that this task will be associated with the gpu thread
        def fn_on_gpu(inp):
            # Artificially increase the priority of the next op to make sure it runs
            # as soon as we reach it before the ops of branch1.
            dummy = inp * 2 * 2 * 2 * 2
            return inp.to(device=device)

        def parent_on_cpu(inp):
            # Slow branch of ops on gpu so that the work queue for the gpu thread
            # won't empty too quickly. They also have smaller priorities than the
            # ones created by fn_on_gpu
            branch1 = inp.to(device=device)
            branch1 = branch1 / branch1
            branch1 = branch1 / branch1
            branch1 = branch1 / branch1
            # Perform checkpoint on cpu tensors. So the last op performed in the reentrant
            # autograd is an AccumulateGrad that runs on the cpu thread for the gpu thread.
            # So the cpu thread will notify the gpu thread with an empty NodeTask.
            branch2 = checkpoint(fn_on_gpu, inp)
            out = branch2 + branch1
            return out

        inp = torch.rand(2, requires_grad=True)
        out = parent_on_cpu(inp)
        # This will segfault if the empty NodeTask is not handled properly in the
        # gpu thread ReadyQueue
        out.sum().backward()

    def test_inplace_on_view_backprop_base(self, device):
        # modify view and back-prop through base
        root = torch.randn(2, 2, device=device, requires_grad=True)
        x = root.clone()
        v1 = x.narrow(0, 0, 1)
        v1.mul_(2)
        x.sum().backward()
        self.assertEqual(root.grad.tolist(), [[2, 2], [1, 1]])

    def test_inplace_on_view_backprop_view_of_view(self, device):
        # modify view and backprop through view-of-view
        root = torch.randn(2, 2, device=device, requires_grad=True)
        x = root.clone()
        v1 = x.narrow(0, 0, 1)
        v2 = x.narrow(0, 0, 1)
        v1.mul_(2)
        v2.sum().backward()
        self.assertEqual(root.grad.tolist(), [[2, 2], [0, 0]])

    def test_inplace_on_view_of_view(self, device):
        # modify view-of-view and backprop through base
        root = torch.randn(2, 2, device=device, requires_grad=True)
        x = root.clone()
        v1 = x.narrow(0, 0, 1)
        v2 = v1.narrow(1, 1, 1)
        v2.mul_(2)
        x.sum().backward()
        self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1]])

    def test_inplace_on_view_then_no_grad(self, device):
        # Perform an in-place operation on a view of a non-leaf variable.
        a = torch.ones(3, 1, dtype=torch.double, device=device, requires_grad=True)
        b = a * 2
        c = b.view_as(b)
        c[0][0] = 3

        # Force a graph update with grad disabled.
        with torch.no_grad():
            c.grad_fn

        c.sum().backward()

    def test_inplace_on_view_gradcheck(self, device):
        # gradcheck modifications to views
        a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
        b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)

        def func(root, b):
            x = root.clone()
            x.narrow(1, 2, 2).narrow(0, 1, 2).mul_(b)
            x.narrow(1, 0, 2).narrow(0, 1, 2).mul_(b)
            return x

        gradcheck(func, [a, b], raise_exception=True)
        go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
        gradgradcheck(func, (a, b), (go,))

    def test_inplace_on_view_multiple_outputs(self, device):
        root = torch.arange(9., dtype=torch.double).reshape(3, 3).requires_grad_()
        x = root.clone()
        v1 = x.unbind()
        with self.assertRaises(RuntimeError):
            v1[0].mul_(2)

    def test_inplace_on_view_of_multiple_output_view(self, device):
        a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
        b = a.unbind(0)
        c = b[0].view_as(b[0])
        with self.assertRaises(RuntimeError):
            c.mul_(2)

    def test_inplace_multiple_output_view_of_view(self, device):
        a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
        b = a.view_as(a)
        c = b.unbind(0)
        with self.assertRaises(RuntimeError):
            c[0].mul_(2)

    def test_inplace_on_view_makes_base_require_grad(self, device):
        # in-place modification to view makes base require grad
        a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=False)
        b = torch.randn(4, 2, dtype=torch.double, device=device, requires_grad=True)

        def func(root, b):
            x = root.clone()
            self.assertFalse(x.requires_grad)
            x.narrow(1, 2, 2).mul_(b)
            self.assertTrue(x.requires_grad)
            return x

        gradcheck(func, [a, b], raise_exception=True)
        go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
        gradgradcheck(func, (a, b), (go,))

    def test_inplace_on_view_backprop_view(self, device):
        # modify view and backprop through view
        a = torch.tensor([2., 5.], device=device, requires_grad=False)
        b = torch.tensor([3.], device=device, requires_grad=True)
        res = a.narrow(0, 1, 1).mul_(b)
        res.sum().backward()
        self.assertEqual(b.grad.tolist(), [5])
        self.assertIsNone(a.grad)

    def test_inplace_on_view_modify_base(self, device):
        # Test that an in-place operation on a base that forced it to require
        # grad also forces any previous views to require grad and backprop
        # correctly
        r = torch.ones(1, dtype=torch.double, device=device, requires_grad=True)

        def fn(r):
            x = torch.ones(5, dtype=torch.double, device=device)
            v = x.select(0, 1)
            self.assertFalse(v.requires_grad)
            self.assertIsNone(v.grad_fn)
            x.add_(r)  # v is now dependent on r due to the in-place op on x
            self.assertTrue(v.requires_grad)
            return v

        gradcheck(fn, [r])
        gradgradcheck(fn, [r])

    def test_inplace_on_view_python(self, device):
        # in-place modifications of Python-autograd created view
        a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
        b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)

        class PyAdd(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x, y):
                ctx.mark_dirty(x)
                x.add_(y)
                return x

            @staticmethod
            def backward(ctx, grad):
                return grad, grad

        def func(root, b):
            x = root.clone()
            PyAdd.apply(x.narrow(1, 2, 2).narrow(0, 1, 2), b)
            PyAdd.apply(x.narrow(1, 0, 2).narrow(0, 1, 2), b)
            return x

        gradcheck(func, [a, b], raise_exception=True)
        go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
        gradgradcheck(func, (a, b), (go,))

    def test_inplace_on_view_non_contig(self, device):
        root = torch.ones(2, 3, 2, device=device).select(2, 1).t().requires_grad_(True)
        x = root.clone()
        v1 = x.narrow(0, 0, 1)
        v2 = v1.narrow(1, 1, 1)
        v2.mul_(2)
        x.sum().backward()
        self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1], [1, 1]])

    def test_inplace_on_view_multi_output_unsafe(self, device):
        for f in [lambda t: t.unsafe_split(1),
                  lambda t: t.unsafe_split_with_sizes((1, 1, 1)),
                  lambda t: t.unsafe_chunk(3)]:
            a = torch.randn(3, 3, device=device, requires_grad=True)
            b = a + a
            s1, s2, s3 = f(b)
            s1.mul_(s2)
            s1.sum().backward()
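    # The tests above rely on autograd "rebasing" a base tensor's history when
    # one of its views is modified in place. Illustrative sketch only (not
    # collected by the runner):
    @staticmethod
    def _example_view_inplace_sketch():
        root = torch.zeros(2, 2, requires_grad=True)
        x = root.clone()
        x.narrow(0, 0, 1).mul_(2)  # in-place op on a view of x
        x.sum().backward()         # gradient flows through the rebased graph
        return root.grad  # [[2., 2.], [1., 1.]]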
    def test_inplace_on_view_multi_output_safe(self, device):
        for f in [lambda t: t.split(1),
                  lambda t: t.split_with_sizes((1, 1, 1)),
                  lambda t: t.chunk(3)]:
            a = torch.randn(3, 3, device=device, requires_grad=True)
            b = a + a
            s1, s2, s3 = f(b)
            error_msg = 'This view is the output of a function that returns multiple views.'
            with self.assertRaisesRegex(RuntimeError, error_msg):
                s1.mul_(s2)

    def test_mv_grad_stride_0(self, device):
        # Reference: https://github.com/pytorch/pytorch/issues/38315
        mat = torch.randn(2, 2, dtype=torch.double, device=device)
        vec = torch.randn(1, dtype=torch.double, device=device).requires_grad_(True)

        def fn(vec):
            # Expand inside the function to make sure the input to
            # gradcheck does not have overlapping memory
            vec = vec.expand(2)
            return (mat @ vec).sum()

        gradcheck(fn, (vec))
        gradgradcheck(fn, (vec))

    @onlyCUDA
    def test_gradcheck_input_output_different_device(self, device):
        x = torch.ones((1,), dtype=torch.double, device="cuda", requires_grad=True)
        gradcheck(lambda x: x.to("cpu"), (x,))

        x = torch.ones((1,), dtype=torch.double, device="cpu", requires_grad=True)
        gradcheck(lambda x: x.to("cuda"), (x,))

    # TODO: see if this can be OpInfo'd or moved to test_reductions.py
    def test_logcumsumexp_large_value(self, device):
        a = torch.rand(4, 4, 4, dtype=torch.double, requires_grad=True)
        with torch.no_grad():
            # Large Number
            a[0] = 10000

        gradcheck(lambda x: x.logcumsumexp(0), a)
        gradgradcheck(lambda x: x.logcumsumexp(0), a)

        gradcheck(lambda x: x.logcumsumexp(1), a)
        gradgradcheck(lambda x: x.logcumsumexp(1), a)

        gradcheck(lambda x: x.logcumsumexp(2), a)
        gradgradcheck(lambda x: x.logcumsumexp(2), a)

    def test_strided_leaf_grad_layout(self, device):
        # (1) If leaf is non-overlapping and dense, grad's layout should match its leaf.
        for fmt_a in (torch.contiguous_format, torch.channels_last):
            for fmt_b in (torch.contiguous_format, torch.channels_last):
                a = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_a)
                b = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_b)
                a.requires_grad_()
                b.requires_grad_()
                # checks (1) for broadcasted gradients
                a.sum().backward()
                self.assertEqual(a.grad.stride(), a.stride())
                b.sum().backward()
                self.assertEqual(b.grad.stride(), b.stride())
                # checks (1) for non-broadcasted gradients
                a.grad = None
                b.grad = None
                (a * b).sum().backward()
                self.assertEqual(a.grad.stride(), a.stride())
                self.assertEqual(b.grad.stride(), b.stride())

        # (2) If leaf isn't dense, checks that grads are rowmajor contiguous.
        c = torch.empty_strided((2, 2), (4, 2), device=device).copy_(torch.rand((2, 2), device=device))
        c.requires_grad_()
        d = torch.rand((2, 2), device=device)
        # checks (2) for broadcasted gradients
        c.sum().backward()
        self.assertEqual(c.grad.stride(), (2, 1))
        # checks (2) for non-broadcasted gradients
        c.grad = None
        (c * d).sum().backward()
        self.assertEqual(c.grad.stride(), (2, 1))

    # TODO: OpInfo this or move to atleast's test suite
    def _test_atleast(self, device, torch_fn):
        # 0-dim
        s = torch.tensor(0.5, dtype=torch.double, requires_grad=True)

        gradcheck(lambda x: torch_fn(x), s)
        gradgradcheck(lambda x: torch_fn(x), s)

        # 1-dim
        a = torch.rand(4, dtype=torch.double, requires_grad=True)

        gradcheck(lambda x: torch_fn(x), a)
        gradgradcheck(lambda x: torch_fn(x), a)

        # 2,3,4-dim
        b = torch.rand(4, 3, dtype=torch.double, requires_grad=True)
        c = torch.rand(4, 3, 2, dtype=torch.double, requires_grad=True)
        d = torch.rand(4, 3, 2, 1, dtype=torch.double, requires_grad=True)

        input_tuple = (s, a, b, c, d)
        gradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
        gradgradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)

    def test_atleast(self, device):
        self._test_atleast(device, torch.atleast_1d)
        self._test_atleast(device, torch.atleast_2d)
        self._test_atleast(device, torch.atleast_3d)

    # TODO: opinfo this or move to test_binary_ufuncs.py
    def test_xlogy(self, device):

        def _tensor_tensor_helper(x, y):
            gradcheck(lambda x, y: torch.xlogy(x, y), (x, y))
            gradgradcheck(lambda x, y: torch.xlogy(x, y), (x, y))

            with torch.no_grad():
                x = x.clone()
                x[torch.rand_like(x) > 0.5] = 0

            gradcheck(lambda y: torch.xlogy(x, y), (y))
            gradgradcheck(lambda y: torch.xlogy(x, y), (y))

        shapes = ((4,), (1, 4), (1, 1, 4), (1, 1, 1, 4))

        # For broadcastable shapes and scalar.
        for x_shape, y_shape in permutations(shapes, 2):
            x = torch.rand(*x_shape, dtype=torch.double, device=device, requires_grad=True)
            y = torch.rand(*y_shape, dtype=torch.double, device=device, requires_grad=True)

            _tensor_tensor_helper(x, y)
            _tensor_tensor_helper(y, x)

            gradcheck(lambda y: torch.xlogy(0, y), (y))
            gradgradcheck(lambda y: torch.xlogy(0, y), (y))

            gradcheck(lambda y: torch.xlogy(2, y), (y))
            gradgradcheck(lambda y: torch.xlogy(2, y), (y))
            gradcheck(lambda y: torch.xlogy(y, 2), (y))
            gradgradcheck(lambda y: torch.xlogy(y, 2), (y))

        # Different shape
        x = torch.rand(2, 3, 4, 5, dtype=torch.double, device=device, requires_grad=True)
        y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
        _tensor_tensor_helper(x, y)
        _tensor_tensor_helper(y, x)
        _tensor_tensor_helper(x, x)
        _tensor_tensor_helper(y, y)

        # Same shape
        x = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
        y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
        _tensor_tensor_helper(x, y)
        _tensor_tensor_helper(y, x)
        _tensor_tensor_helper(x, x)
        _tensor_tensor_helper(y, y)

    def test_copy_r_to_c(self, device):
        out_c = torch.empty(3, 2, dtype=torch.cdouble, device=device)
        inp_r = torch.randn(3, 2, dtype=torch.double, device=device,
                            requires_grad=True)

        def do_test():
            out_c.copy_(inp_r)
            out_c.sum().backward()
            self.assertEqual(inp_r.grad, torch.ones_like(inp_r))

        self.assertNotWarn(do_test)
    def test_non_differentiable_ops(self, device):
        # Just make sure the op doesn't raise an error
        # and resulting tensor has requires_grad=False.
        x = torch.tensor([[1, 2], [3, 4.]], requires_grad=True, device=device)
        out = torch.isin(x, torch.tensor([2, 3], device=device))
        self.assertFalse(out.requires_grad)

        x = torch.randn(3, 3, requires_grad=True)
        out = torch.signbit(x)
        self.assertFalse(out.requires_grad)


class TestAutogradInferenceMode(TestCase):
    def _is_inference_tensor(self, tensor):
        try:
            err_msg = "Inference tensors do not track version counter"
            with self.assertRaisesRegex(RuntimeError, err_msg):
                tensor._version
            return True
        except AssertionError as e:
            return False

    def test_inference_mode_context_manager(self):
        self.assertFalse(torch.is_inference_mode_enabled())
        with torch.inference_mode():
            self.assertTrue(torch.is_inference_mode_enabled())
            with torch.inference_mode(False):
                self.assertFalse(torch.is_inference_mode_enabled())
            self.assertTrue(torch.is_inference_mode_enabled())
        self.assertFalse(torch.is_inference_mode_enabled())

    def test_inference_mode_decorator(self):
        @torch.inference_mode()
        def func(x):
            self.assertTrue(torch.is_inference_mode_enabled())
            return x * x

        for requires_grad in (True, False):
            c = torch.ones(1, 2, 3, requires_grad=requires_grad)
            d = func(c)
            self.assertTrue(torch.is_inference(d))
            self.assertFalse(d.requires_grad)

    def test_inference_mode_tensor_creation(self):
        with torch.inference_mode():
            # new tensors created through constructors are inference tensors
            c = torch.ones(1, 2, 3)
            self.assertFalse(c.requires_grad)
            self.assertTrue(torch.is_inference(c))

            # requires_grad doesn't change inference tensor behavior in InferenceMode
            tmp = torch.ones(1, 2, 3, requires_grad=True)
            self.assertTrue(tmp.requires_grad)
            self.assertTrue(torch.is_inference(tmp))

            tmp = torch.ones(1, 2, 3).requires_grad_(False)
            self.assertFalse(tmp.requires_grad)
            self.assertTrue(torch.is_inference(tmp))

    def test_inference_mode_existing_autograd_session(self):
        s = torch.ones(1, 2, 3, requires_grad=True)
        a = s.clone()

        # `a` gets saved outside of inference mode
        out = a * a
        with torch.inference_mode():
            a.add_(2)

        self.assertFalse(torch.is_inference(a))
        # tensors created outside of inference mode aren't
        # inference tensors, so they will still have their
        # version counters tracked
        err_msg = ("one of the variables needed for gradient computation has been "
                   "modified by an inplace operation")
        with self.assertRaisesRegex(RuntimeError, err_msg):
            out.backward(torch.ones_like(out))

    def test_inference_mode_inf_tensor_in_inf_mode_functional_op(self):
        def functional_op(x):
            return x * x

        with torch.inference_mode():
            for requires_grad in (True, False):
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)

                # performing a non-view operation produces an inference tensor
                # that does not require grad
                func_out = functional_op(c)
                self.assertTrue(torch.is_inference(func_out))
                self.assertFalse(func_out.requires_grad)

    def test_inference_mode_inf_tensor_in_inf_mode_inplace_op(self):
        @torch.inference_mode()
        def run_test(fn):
            for requires_grad in (True, False):
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)

                # after performing an inplace operation, the tensor is still
                # an inference tensor
                fn(c)
                self.assertTrue(torch.is_inference(c))
                self.assertEqual(c.requires_grad, requires_grad)
        run_test(lambda x: x.add_(2))
        run_test(lambda x: x.transpose_(0, 1))

    def test_inference_mode_inf_tensor_in_inf_mode_view_op(self):
        with torch.inference_mode():
            for requires_grad in (True, False):
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)

                # performing a view operation produces an inference tensor
                # that does not require grad
                view_out = c.view(-1)
                self.assertTrue(torch.is_inference(view_out))
                self.assertFalse(view_out.requires_grad)
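    # torch.inference_mode is a stricter sibling of torch.no_grad: tensors
    # created inside it skip version-counter and view tracking entirely, so
    # they can never be saved for backward later. Illustrative sketch only
    # (not collected by the runner):
    @staticmethod
    def _example_inference_mode_sketch():
        with torch.inference_mode():
            out = torch.ones(2) * 2
        assert torch.is_inference(out)
        return out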
    def test_inference_mode_inf_tensor_in_normal_mode_functional_op(self):
        def functional_op(x):
            return x * x

        for requires_grad in (True, False):
            with torch.inference_mode():
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)

            func_out = functional_op(c)
            self.assertFalse(torch.is_inference(func_out))
            self.assertFalse(func_out.requires_grad)
            self.assertTrue(func_out.is_leaf)

    def test_inference_mode_inf_tensor_in_normal_mode_inplace_op(self):
        def run_test(fn):
            for requires_grad in (False, True):
                with torch.inference_mode():
                    c = torch.ones(1, 2, 3, requires_grad=requires_grad)

                if requires_grad:
                    # leaf variable that requires grad is being used in an inplace
                    # operation when requires_grad=True
                    pass
                else:
                    err_msg = "Inplace update to inference tensor outside InferenceMode"
                    with self.assertRaisesRegex(RuntimeError, err_msg):
                        fn(c)
        run_test(lambda x: x.add_(2))
        run_test(lambda x: x.transpose_(0, 1))

    def test_inference_mode_inf_tensor_in_normal_mode_view_op(self):
        for requires_grad in (True, False):
            with torch.inference_mode():
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)

            out = c.view(-1)
            self.assertTrue(torch.is_inference(out))
            self.assertFalse(out.requires_grad)
            self.assertFalse(out._is_view())
            self.assertTrue(out.is_leaf)

    def test_normal_tensor_inplace_output_in_inference_mode(self):
        def run_test(fn):
            for requires_grad in (True, False):
                s = torch.ones(1, 2, 3, requires_grad=requires_grad)
                a = s.clone()

                with torch.inference_mode():
                    fn(a)
                    self.assertFalse(torch.is_inference(a))
                    self.assertEqual(a.requires_grad, requires_grad)

                    # inplace -> inplace
                    fn(a)
                    self.assertFalse(torch.is_inference(a))
                    self.assertEqual(a.requires_grad, requires_grad)

                    # inplace -> inplace -> view
                    view_out = a.view(-1)
                    self.assertFalse(torch.is_inference(view_out))
                    self.assertEqual(view_out.requires_grad, requires_grad)
        run_test(lambda x: x.add_(2))
        run_test(lambda x: x.transpose_(0, 1))

    def test_normal_tensor_inplace_output_in_normal_mode(self):
        def run_test(fn):
            for requires_grad in (True, False):
                s = torch.ones(1, 2, 3, requires_grad=requires_grad)
                a = s.clone()

                with torch.inference_mode():
                    fn(a)
                    self.assertFalse(torch.is_inference(a))
                    self.assertEqual(a.requires_grad, requires_grad)

                fn(a)
                self.assertFalse(torch.is_inference(a))
                self.assertEqual(a.requires_grad, requires_grad)

                # inplace -> inplace
                fn(a)
                self.assertFalse(torch.is_inference(a))
                self.assertEqual(a.requires_grad, requires_grad)

                # inplace -> inplace -> view
                view_out = a.view(-1)
                self.assertFalse(torch.is_inference(view_out))
                self.assertEqual(view_out.requires_grad, requires_grad)
        run_test(lambda x: x.add_(2))
        run_test(lambda x: x.transpose_(0, 1))

    def test_normal_tensor_view_output_in_inference_mode(self):
        for requires_grad in (True, False):
            s = torch.ones(1, 2, 3, requires_grad=requires_grad)
            a = s.clone()

            with torch.inference_mode():
                out = a.view(-1)
                self.assertFalse(torch.is_inference(out))
                self.assertEqual(out.requires_grad, requires_grad)
                self.assertTrue(out._is_view())

                # view -> view
                tmp = out.view(-1)
                self.assertFalse(torch.is_inference(tmp))
                self.assertEqual(tmp.requires_grad, requires_grad)
                self.assertTrue(tmp._is_view())
                self.assertTrue(tmp.is_leaf)

                # view -> view -> inplace
                self.assertTrue(torch.is_inference_mode_enabled())
                tmp.add_(2)
                self.assertFalse(torch.is_inference(tmp))
                self.assertEqual(tmp.requires_grad, requires_grad)
                # Accessing is_leaf in python tries to update grad_fn and raises:
                # A view was created in inference mode and its base or
                # another view of its base has been modified inplace in normal mode
                # tmp.is_leaf
                self.assertEqual(a._version, tmp._version)

    def test_normal_tensor_view_output_in_normal_mode(self):
        def functional_op(x):
            return x * x

        for requires_grad in (True, False):
            s = torch.ones(1, 2, 3, requires_grad=requires_grad)
            a = s.clone()

            with torch.inference_mode():
                out = a.view(-1)
                self.assertFalse(torch.is_inference(out))
                self.assertEqual(out.requires_grad, requires_grad)
                self.assertTrue(out._is_view())
                self.assertTrue(out.is_leaf)

            tmp = functional_op(out)
            self.assertFalse(torch.is_inference(tmp))
            self.assertEqual(tmp.requires_grad, requires_grad)

            if requires_grad:
                err_msg = "A view was created in inference mode and is being modified inplace"
                with self.assertRaisesRegex(RuntimeError, err_msg):
                    out.add_(2)
                pass
            else:
                out.add_(2)

            tmp = out.view(2, 3)
            self.assertFalse(torch.is_inference(tmp))
            self.assertEqual(tmp.requires_grad, requires_grad)

    def test_mix_inference_and_normal_tensor_functional_op(self):
        for requires_grad in (True, False):
            s = torch.ones(1, 2, 3, requires_grad=requires_grad)

            with torch.inference_mode():
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)

            # add is safe since it doesn't save any variable for backward
            out = c.add(s)
            self.assertFalse(torch.is_inference(out))
            self.assertEqual(out.requires_grad, requires_grad)
            if requires_grad:
                # leaf inference tensor with requires_grad=True can still have gradient
                out.backward(torch.ones_like(out))
                self.assertEqual(c.grad, torch.ones_like(c))

            if requires_grad:
                err_msg = "Inference tensors cannot be saved for backward"
                with self.assertRaisesRegex(RuntimeError, err_msg):
                    c * s

                # inference tensor in TensorList input
                inputs = [s, c]
                with self.assertRaisesRegex(RuntimeError, err_msg):
                    torch.stack(inputs)

    def test_mix_inference_and_normal_tensor_inplace_op(self):
        for requires_grad in (True, False):
            s = torch.ones(1, 2, 3, requires_grad=requires_grad)
            a = s.clone()

            with torch.inference_mode():
                c = torch.ones(1, 2, 3)

            self.assertTrue(torch.is_inference(c))
            if requires_grad:
                err_msg = "Inference tensors cannot be saved for backward"
                with self.assertRaisesRegex(RuntimeError, err_msg):
                    a.mul_(c)

                # inference tensor in TensorList input
                err_msg = ("out=... arguments don't support automatic differentiation, "
                           "but one of the arguments requires grad")
                with self.assertRaisesRegex(RuntimeError, err_msg):
                    torch.mul(s, s, out=c)
            else:
                a.mul_(c)
                err_msg = "Inplace update to inference tensor outside InferenceMode is not allowed"
                with self.assertRaisesRegex(RuntimeError, err_msg):
                    torch.mul(s, s, out=c)

    def test_mix_inference_and_normal_tensor_view_op(self):
        for requires_grad in (True, False):
            s = torch.ones(1, 2, 3, requires_grad=requires_grad)

            with torch.inference_mode():
                c = torch.ones(1, 2, 3)

            # view_as is a composite op which calls view with only one
            # tensor argument. So there isn't a mixed inference and normal
            # tensor inputs for view ops
            tmp1 = c.view_as(s)
            self.assertTrue(torch.is_inference(tmp1))
            self.assertFalse(tmp1.requires_grad)

            # this is fine since it's equivalent to s.view(c.sizes()), which
            # isn't a mixed input scenario
            tmp2 = s.view_as(c)
            self.assertFalse(torch.is_inference(tmp2))
            self.assertEqual(tmp2.requires_grad, requires_grad)

    def test_inference_mode_handle_direct_view_on_rebase(self):
        def run_test(fn):
            for requires_grad in (True, False):
                s = torch.ones(1, 2, 3, requires_grad=requires_grad)
                a = s.clone()

                with torch.inference_mode():
                    view_out = a.view_as(a)

                if requires_grad:
                    err_msg = "A view was created in inference mode and is being modified inplace"
                    with self.assertRaisesRegex(RuntimeError, err_msg):
                        fn(view_out)
                    pass
                else:
                    fn(view_out)
        run_test(lambda x: x.add_(2))
        run_test(lambda x: x.transpose_(0, 1))

    def test_inference_mode_handle_indirect_view_on_rebase(self):
        def run_test(fn):
            for requires_grad in (True, False):
                s = torch.ones(1, 2, 3, requires_grad=requires_grad)
                a = s.clone()

                with torch.inference_mode():
                    view_out = a.view(-1)

                fn(a)
                if requires_grad:
                    err_msg = "A view was created in inference mode and its base or another view "
                    with self.assertRaisesRegex(RuntimeError, err_msg):
                        view_out.grad_fn
                    pass
                else:
                    view_out.grad_fn
        run_test(lambda x: x.add_(2))
        run_test(lambda x: x.transpose_(0, 1))


class TestMultithreadAutograd(TestCase):
    def _run_py_multithread_fn(self, fn, args=(), num_threads=10, kwargs=None):
        class PropagatingThread(threading.Thread):
            '''Helper class to propagate exceptions from a child thread to the
            main thread on join.

            Reference: https://stackoverflow.com/a/31614591/5602957
            '''

            def run(self):
                self.exception = None
                try:
                    self.ret = super(PropagatingThread, self).run()
                except Exception as e:
                    self.exception = e

            def join(self, timeout=None):
                super(PropagatingThread, self).join(timeout)
                if self.exception:
                    raise self.exception from self.exception
                return self.ret

        threads = []
        for _ in range(num_threads):
            p = PropagatingThread(target=fn, args=args)
            p.start()
            threads.append(p)

        for p in threads:
            p.join()

    def test_multithreaded_exception_propagation(self):
        # Test whether exceptions in a child thread
        # are propagated to the main thread.
        def fn():
            self.assertTrue(False)

        with self.assertRaises(AssertionError):
            self._run_py_multithread_fn(fn)

    def test_simple_backward(self):
        # simple multithreaded backward that creates threads at the beginning of training
        # and everything else is trained separately, i.e. inputs, operations, etc.
        def train_fn():
            x = torch.ones(5, 5, requires_grad=True)
            y = (x + 3) * (x + 4) * 0.5
            y.sum().backward()
            self.assertEqual(x.grad, x + 3.5)

        self._run_py_multithread_fn(train_fn)
This is common # for things like Hogwild multithreaded training with multiple CPU threads) def train_fn_backward(x): y = (x + 3) * (x + 4) * 0.5 y.sum().backward() x = torch.ones(5, 5, requires_grad=True) self._run_py_multithread_fn(train_fn_backward, (x,)) # Since we are calling backward from multiple threads # and all threads share the same input, when we do backward # concurrently, different backwards will all accumulate to # the same .grad for each input, and the gradients should # be equal to num_threads * gradient self.assertEqual(x.grad, 10 * (x + 3.5)) def train_fn_grad(x): y = (x + 3) * (x + 4) * 0.5 grads = torch.autograd.grad(y.sum(), x) self.assertEqual(len(grads), 1) self.assertEqual(grads[0], x + 3.5) # since we use functional grad() api, gradients will not # be accumulate to the same place and should be the same self._run_py_multithread_fn(train_fn_grad, (x,)) def test_multithread_saved_tensors_hooks(self): def pack(x): warnings.warn("pack") return x def registers_hooks_for_each_thread(): with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x): x = torch.ones(5, 5, requires_grad=True) with warnings.catch_warnings(record=True) as w: y = x * x # should raise two warnings from x being saved twice self.assertEqual(len(w), 2) y.sum().backward() def test_dataparallel_saved_tensors_hooks(self): def pack(x): warnings.warn("pack") return x _self = self class Model(torch.nn.Module): def forward(self, x): with warnings.catch_warnings(record=True) as w: y = x * x if torch.cuda.device_count() >= 2: # DataParallel is calling the forward in different threads # without progating TLS, so hooks should not be called here _self.assertEqual(len(w), 0) else: # DataParallel only uses one thread # so hooks should be called here _self.assertGreater(len(w), 0) x = torch.ones(5, 5, requires_grad=True) model = torch.nn.DataParallel(Model()) with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x): model(x) with warnings.catch_warnings(record=True) as w: y = x * x # hooks should be called here _self.assertGreater(len(w), 0) def test_python_thread_in_middle(self): # User might write a network that starts on one CPU thread, then runs its second half # concurrently with other threads (either via python threading or fork/join calls), # then calls backward()/grad() on BOTH threads, like a Y pattern from input at the # bottom to output at the top. This way part of the GraphTask is being shared across # different threads and we need to ensure user specify retain_graph=True, otherwise # error out with the correct error message # Case 1: multiple backward with python threads, retain_graph=False # should throw error in some threads with no retain_graph. 
success_vs_raises = [0, 0] def train_fn_no_retain_graph(x): y = x + x ** 2 try: y.sum().backward() success_vs_raises[0] += 1 except RuntimeError as error: success_vs_raises[1] += 1 self.assertRegex(str(error), "Specify retain_graph=True") x_no_retain = torch.ones(5, 5, requires_grad=True) y_no_retain = x_no_retain + x_no_retain ** 2 self._run_py_multithread_fn(train_fn_no_retain_graph, (y_no_retain,), num_threads=5) # at least one thread will be success in this case, all other threads should raise # with the error that throw to user to recommend them specify retain_graph=True self.assertTrue(success_vs_raises[0] >= 1) # multiple backward with python threads, no error with retain_graph=True def train_fn_retain_graph(x): y = x + x ** 2 y.sum().backward(retain_graph=True) x_retain = torch.ones(5, 5, requires_grad=True) y_retain = x_retain + x_retain ** 2 self._run_py_multithread_fn(train_fn_retain_graph, (y_retain,), num_threads=5) # result should equal to num_thread * gradients self.assertEqual(x_retain.grad, 5 * (4 * x_retain ** 3 + 6 * (x_retain ** 2) + 4 * x_retain + 1)) def test_fork_join_in_middle(self): # multiple backward with jit threads (fork/join primitive) # similar to test_python_thread_in_middle, we test with retain_graph=False/True # Case 1: multiple grad() calls with jit threads, retain_graph=False # should throw error in some threads with no retain_graph. @torch.jit.script def train_fn_jit_no_retain(middle, orig_x): y = middle + middle ** 2 return torch.autograd.grad([y.sum()], [orig_x]) @torch.jit.script def train_fn_fork_join_calls_no_retain(x): y_no_retain = (x + 3) * (x + 4) * 0.5 fut = torch.jit._fork(train_fn_jit_no_retain, y_no_retain, x) grad_hat = train_fn_jit_no_retain(y_no_retain, x) grad = torch.jit._wait(fut) return grad, grad_hat try: train_fn_fork_join_calls_no_retain(torch.randn(5, 5, requires_grad=True)) except RuntimeError as error: self.assertRegex(str(error), "Specify retain_graph=True") # Case 2: no error with retain_graph=True @torch.jit.script def train_fn_jit_retain(middle, orig_x): y = middle + middle ** 2 return torch.autograd.grad([y.sum()], [orig_x], retain_graph=True) @torch.jit.script def train_fn_fork_join_calls_retain(x): y_retain = (x + 3) * (x + 4) * 0.5 fut1 = torch.jit._fork(train_fn_jit_retain, y_retain, x) fut2 = torch.jit._fork(train_fn_jit_retain, y_retain, x) grad = train_fn_jit_retain(y_retain, x) grad1 = torch.jit._wait(fut1) grad2 = torch.jit._wait(fut2) return grad, grad1, grad2 grad, grad1, grad2 = train_fn_fork_join_calls_retain(torch.randn(5, 5, requires_grad=True)) self.assertEqual(grad, grad1) self.assertEqual(grad, grad2) def test_preserve_backtrace(self): class Foo(torch.autograd.Function): @staticmethod def forward(ctx, input): return input @staticmethod def backward(ctx, *grad): raise ValueError("something") t = torch.rand(10, requires_grad=True) try: Foo.apply(t).sum().backward() except Exception: import traceback tb = sys.exc_info()[2] tb_str = "\n".join(traceback.format_tb(tb)) self.assertTrue('raise ValueError("something")' in tb_str) # TODO(@anjali411): add an OpInfo based test for torch.cat # Issue: https://github.com/pytorch/pytorch/issues/51627 def test_cat_r_to_c(self): inp_c = torch.rand(3, 2, dtype=torch.cdouble, requires_grad=True) inp_r = torch.randn(3, 2, dtype=torch.double, requires_grad=True) def fn(x1, x2): return torch.cat((x1, x2), dim=-1) torch.autograd.gradcheck(fn, [inp_r, inp_c], check_forward_ad=True) torch.autograd.gradcheck(fn, [inp_c, inp_r], check_forward_ad=True) # Import test cases from below 
autograd/ here. These are found # implicitly by the loader, so Flake8 thinks they are unused, hence # the suppressions. from autograd.test_complex import TestAutogradComplex # noqa: F401 # e.g., TestAutogradDeviceTypeCPU and TestAutogradDeviceTypeCUDA instantiate_device_type_tests( TestAutogradDeviceType, globals(), except_for=None ) if __name__ == '__main__': run_tests()
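
# For quick reference, the core contract exercised by the inference-mode tests
# above, as a minimal standalone sketch (assumes only `import torch`; kept in
# comments so it is not collected as a test):
#
#   import torch
#   with torch.inference_mode():
#       t = torch.ones(2, 2)        # created inside the mode -> inference tensor
#   assert torch.is_inference(t)
#   try:
#       t.add_(1)                   # inplace update outside InferenceMode
#   except RuntimeError:
#       pass                        # "Inplace update to inference tensor outside
#                                   #  InferenceMode is not allowed"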
eval.py
import argparse
import logging
import os
import pickle
import time

import torch.multiprocessing as mp
from dataloader import EvalDataset, TrainDataset, get_dataset

backend = os.environ.get("DGLBACKEND", "pytorch")  # default to the PyTorch backend when unset
if backend.lower() == "mxnet":
    from train_mxnet import load_model_from_checkpoint, test
else:
    from train_pytorch import load_model_from_checkpoint, test


class ArgParser(argparse.ArgumentParser):
    def __init__(self):
        super(ArgParser, self).__init__()

        self.add_argument(
            "--model_name",
            default="TransE",
            choices=[
                "TransE",
                "TransH",
                "TransR",
                "TransD",
                "RESCAL",
                "DistMult",
                "ComplEx",
                "RotatE",
                "pRotatE",
            ],
            help="model to use",
        )
        self.add_argument("--data_path", type=str, default="data", help="root path of all datasets")
        self.add_argument(
            "--dataset", type=str, default="FB15k", help="dataset name, under data_path"
        )
        self.add_argument("--format", type=str, default="1", help="the format of the dataset.")
        self.add_argument(
            "--model_path", type=str, default="ckpts", help="the place where models are saved"
        )
        self.add_argument(
            "--batch_size", type=int, default=8, help="batch size used for eval and test"
        )
        self.add_argument(
            "--neg_sample_size", type=int, default=-1, help="negative sampling size for testing"
        )
        self.add_argument(
            "--hidden_dim", type=int, default=256, help="hidden dim used by relation and entity"
        )
        self.add_argument("-g", "--gamma", type=float, default=12.0, help="margin value")
        self.add_argument(
            "--eval_percent", type=float, default=1, help="sample some percentage for evaluation."
        )
        self.add_argument("--gpu", type=int, default=-1, help="use GPU")
        self.add_argument("--mix_cpu_gpu", action="store_true", help="mix CPU and GPU training")
        self.add_argument(
            "-de", "--double_ent", action="store_true", help="double entity dim for complex number"
        )
        self.add_argument(
            "-dr",
            "--double_rel",
            action="store_true",
            help="double relation dim for complex number",
        )
        self.add_argument("--seed", type=int, default=0, help="set random seed for reproducibility")
        self.add_argument(
            "--num_worker", type=int, default=16, help="number of workers used for loading data"
        )
        self.add_argument("--num_proc", type=int, default=1, help="number of processes used")

    def parse_args(self):
        args = super().parse_args()
        return args


def get_logger(args):
    if not os.path.exists(args.model_path):
        raise Exception("No existing model_path: " + args.model_path)

    log_file = os.path.join(args.model_path, "eval.log")
    logging.basicConfig(
        format="%(asctime)s %(levelname)-8s %(message)s",
        level=logging.INFO,
        datefmt="%Y-%m-%d %H:%M:%S",
        filename=log_file,
        filemode="w",
    )

    logger = logging.getLogger(__name__)
    print("Logs are being recorded at: {}".format(log_file))
    return logger


def main(args):
    # load dataset and samplers
    dataset = get_dataset(args.data_path, args.dataset, args.format)
    args.pickle_graph = False
    args.train = False
    args.valid = False
    args.test = True
    args.batch_size_eval = args.batch_size

    logger = get_logger(args)
    # Here we want to use the regular negative sampler because we need to ensure that
    # all positive edges are excluded.
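    # A negative --neg_sample_size is treated as "rank against every entity":
    # neg_sample_size_test is reset below to the full node count of eval_dataset.g.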
eval_dataset = EvalDataset(dataset, args) args.neg_sample_size_test = args.neg_sample_size if args.neg_sample_size < 0: args.neg_sample_size_test = args.neg_sample_size = eval_dataset.g.number_of_nodes() if args.num_proc > 1: test_sampler_tails = [] test_sampler_heads = [] for i in range(args.num_proc): test_sampler_head = eval_dataset.create_sampler( "test", args.batch_size, args.neg_sample_size, mode="PBG-head", num_workers=args.num_worker, rank=i, ranks=args.num_proc, ) test_sampler_tail = eval_dataset.create_sampler( "test", args.batch_size, args.neg_sample_size, mode="PBG-tail", num_workers=args.num_worker, rank=i, ranks=args.num_proc, ) test_sampler_heads.append(test_sampler_head) test_sampler_tails.append(test_sampler_tail) else: test_sampler_head = eval_dataset.create_sampler( "test", args.batch_size, args.neg_sample_size, mode="PBG-head", num_workers=args.num_worker, rank=0, ranks=1, ) test_sampler_tail = eval_dataset.create_sampler( "test", args.batch_size, args.neg_sample_size, mode="PBG-tail", num_workers=args.num_worker, rank=0, ranks=1, ) # load model n_entities = dataset.n_entities n_relations = dataset.n_relations ckpt_path = args.model_path model = load_model_from_checkpoint(logger, args, n_entities, n_relations, ckpt_path) if args.num_proc > 1: model.share_memory() # test args.step = 0 args.max_step = 0 if args.num_proc > 1: procs = [] for i in range(args.num_proc): proc = mp.Process( target=test, args=(args, model, [test_sampler_heads[i], test_sampler_tails[i]]) ) procs.append(proc) proc.start() for proc in procs: proc.join() else: test(args, model, [test_sampler_head, test_sampler_tail]) if __name__ == "__main__": args = ArgParser().parse_args() main(args)
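
# Example invocation (a sketch; the flags mirror ArgParser above, and the data
# and checkpoint paths are hypothetical):
#
#   python eval.py --model_name TransE --dataset FB15k --data_path data \
#       --model_path ckpts --batch_size 16 --neg_sample_size -1 --num_proc 1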
inkscape_control.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import paste
import subprocess
from multiprocessing import Process
from pathlib import Path
import tkinter.messagebox as messagebox
from shutil import copy
from appdirs import user_config_dir
import logging as log

from globe import Globe as globe
from util import StrUtil as strutil
import workspace

SYSTEM = globe.SYSTEM
if SYSTEM == "Darwin":
    from pynput import keyboard
elif SYSTEM == "Windows":
    import keyboard
    import mouse as w_mouse

user_dir = Path(user_config_dir("project", "ww"))
if not user_dir.is_dir():
    user_dir.mkdir(parents=True)
roots_file = user_dir / 'roots'
template = user_dir / 'template.svg'
config = user_dir / 'config.py'
if not template.is_file():
    source = str(Path(__file__).parent / 'template.svg')
    destination = str(template)
    copy(source, destination)


def inkscape(path):
    log.info("Inkscape function started")
    #
    # def for_canonical(f):
    #     log.info("for_canonical")
    #     return lambda k: f(l.canonical(k))
    # hotkey = keyboard.HotKey(
    #     keyboard.HotKey.parse('<cmd>+u'),
    #     on_activate)
    if SYSTEM == "Darwin":
        processOpen = subprocess.Popen(['/Applications/Inkscape.app/Contents/MacOS/inkscape', str(path)])
        log.info("Opening file")
    elif SYSTEM == "Windows":
        processOpen = subprocess.Popen(['inkscape', str(path)])
        log.info("Opening file")
    # with keyboard.GlobalHotKeys({'<cmd>+i': paste.open_vim}) as hotkey:
    #     hotkey.join()
    # l = keyboard.Listener(
    #     on_press=for_canonical(hotkey.press),
    #     on_release=for_canonical(hotkey.release),
    #     # suppress=True
    # )
    # l.start()
    processOpen.wait()
    log.info("Inkscape terminated")
    if SYSTEM == "Darwin":
        version = os.popen('/Applications/Inkscape.app/Contents/MacOS/inkscape --version').readlines()
        if '4035a4f' not in str(version):
            messagebox.showinfo('Warning!',
                                'This Inkscape version may be incompatible and may not produce a file '
                                'that LaTeX can recognize; please check that it is 1.0 (4035a4f, 2020-05-01)')
        inkscape_name = '/Applications/Inkscape.app/Contents/MacOS/inkscape'
        subprocess.Popen([inkscape_name, str(path), '-o', str(path.with_suffix(".pdf")), '--export-latex'])
        # else:
        #     os.system('/Applications/Inkscape.app/Contents/MacOS/inkscape ' + str(path) + ' --export-file=' + str(path.with_suffix(".pdf")) + ' --export-latex')
    elif SYSTEM == "Windows":
        subprocess.Popen(['inkscape', str(path), '-o', str(path.with_suffix(".pdf")), '--export-latex'])
    log.info("Export to pdf_tex process and InkscapeProcess terminated")


def create(factor):
    # """
    # Creates a figure.
    # First argument is the title of the figure
    # Second argument is the figure directory.
    # """
    # title = title.strip()
    # file_name = title.replace(' ', '-').lower() + '.svg'
    # figures = root + os.path.sep + 'figures' + os.path.sep
    # figure_path = figures + file_name
    # # If a file with this name already exists, append a '2'.
    # if Path(figure_path).exists():
    #     title = title + '-2'
    #     create(title, root)
    # else:
    #     figure_path = Path(figure_path).absolute()
    #     inkscape(figure_path)
    """
    Creates a figure.
    First argument is the title of the figure
    Second argument is the figure directory.
    """
    workspace.sub('figures')
    log.debug("File name without extension " + factor['fileName'])
    file_fullname = factor['fileName'] + '.svg'
    log.debug("File name " + file_fullname)
    figures_dir = Path(globe.workspace['sub']['figures'])
    figure_path = figures_dir / file_fullname
    # If a file with this name already exists, edit it instead of creating a new one
    # TODO: the duplicate check should be done in paste; the logic could be
    # wrapped into a helper and placed in util
    if figure_path.exists():
        log.warning("{} already exists. Editing the existing file instead of creating a new one.".format(str(figure_path)))
    else:
        copy(str(template), str(figure_path))
        log.info("Template copied")
    log.info("Starting Inkscape")
    process_inkscape = Process(target=inkscape, args=(figure_path,))
    process_inkscape.start()
    return
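
# Example usage (a sketch; create() only reads the 'fileName' key, and the
# name below is hypothetical):
#
#   create({'fileName': 'lorenz-attractor'})
#
# This copies template.svg to <workspace>/figures/lorenz-attractor.svg (unless
# the file already exists) and opens it in Inkscape in a separate process.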
generate_trajectories.py
import os import sys sys.path.append(os.path.join('/Users/jiasenl/Code/alfred')) sys.path.append(os.path.join('/Users/jiasenl/Code/alfred', 'gen')) import time import multiprocessing as mp import json import random import shutil import argparse import numpy as np import pandas as pd from collections import OrderedDict from datetime import datetime import glob import constants from agents.deterministic_planner_agent import DeterministicPlannerAgent from env.thor_env import ThorEnv from game_states.task_game_state_full_knowledge import TaskGameStateFullKnowledge from utils.video_util import VideoSaver from utils.dataset_management_util import load_successes_from_disk, load_fails_from_disk # params RAW_IMAGES_FOLDER = 'raw_images/' DATA_JSON_FILENAME = 'traj_data.json' DEPTH_IMAGES_FOLDER = 'depth_images/' # video saver video_saver = VideoSaver() # structures to help with constraint enforcement. goal_to_required_variables = {"pick_and_place_simple": {"pickup", "receptacle", "scene"}, "pick_two_obj_and_place": {"pickup", "receptacle", "scene"}, "look_at_obj_in_light": {"pickup", "receptacle", "scene"}, "pick_clean_then_place_in_recep": {"pickup", "receptacle", "scene"}, "pick_heat_then_place_in_recep": {"pickup", "receptacle", "scene"}, "pick_cool_then_place_in_recep": {"pickup", "receptacle", "scene"}, "pick_and_place_with_movable_recep": {"pickup", "movable", "receptacle", "scene"}} goal_to_pickup_type = {'pick_heat_then_place_in_recep': 'Heatable', 'pick_cool_then_place_in_recep': 'Coolable', 'pick_clean_then_place_in_recep': 'Cleanable'} goal_to_receptacle_type = {'look_at_obj_in_light': "Toggleable"} goal_to_invalid_receptacle = {'pick_heat_then_place_in_recep': {'Microwave'}, 'pick_cool_then_place_in_recep': {'Fridge'}, 'pick_clean_then_place_in_recep': {'SinkBasin'}, 'pick_two_obj_and_place': {'CoffeeMachine', 'ToiletPaperHanger', 'HandTowelHolder'}} scene_id_to_objs = {} obj_to_scene_ids = {} scenes_for_goal = {g: [] for g in constants.GOALS} scene_to_type = {} def sample_task_params(succ_traj, full_traj, fail_traj, goal_candidates, pickup_candidates, movable_candidates, receptacle_candidates, scene_candidates, inject_noise=10): # Get the current conditional distributions of all variables (goal/pickup/receptacle/scene). goal_weight = [(1 / (1 + np.random.randint(0, inject_noise + 1) + succ_traj.loc[ (succ_traj['pickup'].isin(pickup_candidates) if 'pickup' in goal_to_required_variables[c] else True) & (succ_traj['movable'].isin(movable_candidates) if 'movable' in goal_to_required_variables[c] else True) & (succ_traj['receptacle'].isin(receptacle_candidates) if 'receptacle' in goal_to_required_variables[c] else True) & (succ_traj['scene'].isin(scene_candidates) if 'scene' in goal_to_required_variables[c] else True)] ['goal'].tolist().count(c))) # Conditional. * (1 / (1 + succ_traj['goal'].tolist().count(c))) # Prior. 
for c in goal_candidates] goal_probs = [w / sum(goal_weight) for w in goal_weight] pickup_weight = [(1 / (1 + np.random.randint(0, inject_noise + 1) + sum([succ_traj.loc[ succ_traj['goal'].isin([g]) & (succ_traj['movable'].isin(movable_candidates) if 'movable' in goal_to_required_variables[g] else True) & (succ_traj['receptacle'].isin(receptacle_candidates) if 'receptacle' in goal_to_required_variables[g] else True) & (succ_traj['scene'].isin(scene_candidates) if 'scene' in goal_to_required_variables[g] else True)] ['pickup'].tolist().count(c) for g in goal_candidates]))) * (1 / (1 + succ_traj['pickup'].tolist().count(c))) for c in pickup_candidates] pickup_probs = [w / sum(pickup_weight) for w in pickup_weight] movable_weight = [(1 / (1 + np.random.randint(0, inject_noise + 1) + sum([succ_traj.loc[ succ_traj['goal'].isin([g]) & (succ_traj['pickup'].isin(pickup_candidates) if 'pickup' in goal_to_required_variables[g] else True) & (succ_traj['receptacle'].isin(receptacle_candidates) if 'receptacle' in goal_to_required_variables[g] else True) & (succ_traj['scene'].isin(scene_candidates) if 'scene' in goal_to_required_variables[g] else True)] ['movable'].tolist().count(c) for g in goal_candidates]))) * (1 / (1 + succ_traj['movable'].tolist().count(c))) for c in movable_candidates] movable_probs = [w / sum(movable_weight) for w in movable_weight] receptacle_weight = [(1 / (1 + np.random.randint(0, inject_noise + 1) + sum([succ_traj.loc[ succ_traj['goal'].isin([g]) & (succ_traj['pickup'].isin(pickup_candidates) if 'pickup' in goal_to_required_variables[g] else True) & (succ_traj['movable'].isin(movable_candidates) if 'movable' in goal_to_required_variables[g] else True) & (succ_traj['scene'].isin(scene_candidates) if 'scene' in goal_to_required_variables[g] else True)] ['receptacle'].tolist().count(c) for g in goal_candidates]))) * (1 / (1 + succ_traj['receptacle'].tolist().count(c))) for c in receptacle_candidates] receptacle_probs = [w / sum(receptacle_weight) for w in receptacle_weight] scene_weight = [(1 / (1 + np.random.randint(0, inject_noise + 1) + sum([succ_traj.loc[ succ_traj['goal'].isin([g]) & (succ_traj['pickup'].isin(pickup_candidates) if 'pickup' in goal_to_required_variables[g] else True) & (succ_traj['movable'].isin(movable_candidates) if 'movable' in goal_to_required_variables[g] else True) & (succ_traj['receptacle'].isin(receptacle_candidates) if 'receptacle' in goal_to_required_variables[g] else True)] ['scene'].tolist().count(c) for g in goal_candidates]))) * (1 / (1 + succ_traj['scene'].tolist().count(c))) for c in scene_candidates] scene_probs = [w / sum(scene_weight) for w in scene_weight] # Calculate the probability difference between each value and the maximum so we can iterate over them to find a # next-best candidate to sample subject to the constraints of knowing which will fail. 
diffs = [("goal", goal_candidates[idx], goal_probs[idx] - min(goal_probs)) for idx in range(len(goal_candidates)) if len(goal_candidates) > 1] diffs.extend([("pickup", pickup_candidates[idx], pickup_probs[idx] - min(pickup_probs)) for idx in range(len(pickup_candidates)) if len(pickup_candidates) > 1]) diffs.extend([("movable", movable_candidates[idx], movable_probs[idx] - min(movable_probs)) for idx in range(len(movable_candidates)) if len(movable_candidates) > 1]) diffs.extend([("receptacle", receptacle_candidates[idx], receptacle_probs[idx] - min(receptacle_probs)) for idx in range(len(receptacle_candidates)) if len(receptacle_candidates) > 1]) diffs.extend([("scene", scene_candidates[idx], scene_probs[idx] - min(scene_probs)) for idx in range(len(scene_candidates)) if len(scene_candidates) > 1]) # Iteratively pop the next biggest difference until we find a combination that is valid (e.g., not already # flagged as impossible by the simulator). variable_value_by_diff = {} diffs_as_keys = [] # list of diffs; index into list will be used as key values. for _, _, diff in diffs: already_keyed = False for existing_diff in diffs_as_keys: if np.isclose(existing_diff, diff): already_keyed = True break if not already_keyed: diffs_as_keys.append(diff) for variable, value, diff in diffs: key = None for kidx in range(len(diffs_as_keys)): if np.isclose(diffs_as_keys[kidx], diff): key = kidx if key not in variable_value_by_diff: variable_value_by_diff[key] = [] variable_value_by_diff[key].append((variable, value)) for key, diff in sorted(enumerate(diffs_as_keys), key=lambda x: x[1], reverse=True): variable_value = variable_value_by_diff[key] random.shuffle(variable_value) for variable, value in variable_value: # Select a goal. if variable == "goal": gtype = value # print("sampled goal '%s' with prob %.4f" % (gtype, goal_probs[goal_candidates.index(gtype)])) _goal_candidates = [gtype] _pickup_candidates = pickup_candidates[:] _movable_candidates = movable_candidates[:] _receptacle_candidates = receptacle_candidates[:] _scene_candidates = scene_candidates[:] # Select a pickup object. elif variable == "pickup": pickup_obj = value # print("sampled pickup object '%s' with prob %.4f" % # (pickup_obj, pickup_probs[pickup_candidates.index(pickup_obj)])) _pickup_candidates = [pickup_obj] _goal_candidates = goal_candidates[:] _movable_candidates = movable_candidates[:] _receptacle_candidates = receptacle_candidates[:] _scene_candidates = scene_candidates[:] # Select a movable object. elif variable == "movable": movable_obj = value # print("sampled movable object '%s' with prob %.4f" % # (movable_obj, movable_probs[movable_candidates.index(movable_obj)])) _movable_candidates = [movable_obj] _goal_candidates = [g for g in goal_candidates if g == 'pick_and_place_with_movable_recep'] _pickup_candidates = pickup_candidates[:] _receptacle_candidates = receptacle_candidates[:] _scene_candidates = scene_candidates[:] # Select a receptacle. elif variable == "receptacle": receptacle_obj = value # print("sampled receptacle object '%s' with prob %.4f" % # (receptacle_obj, receptacle_probs[receptacle_candidates.index(receptacle_obj)])) _receptacle_candidates = [receptacle_obj] _goal_candidates = goal_candidates[:] _pickup_candidates = pickup_candidates[:] _movable_candidates = movable_candidates[:] _scene_candidates = scene_candidates[:] # Select a scene. 
else: sampled_scene = value # print("sampled scene %s with prob %.4f" % # (sampled_scene, scene_probs[scene_candidates.index(sampled_scene)])) _scene_candidates = [sampled_scene] _goal_candidates = goal_candidates[:] _pickup_candidates = pickup_candidates[:] _movable_candidates = movable_candidates[:] _receptacle_candidates = receptacle_candidates[:] # Perform constraint propagation to determine whether this is a valid assignment. propagation_finished = False while not propagation_finished: assignment_lens = (len(_goal_candidates), len(_pickup_candidates), len(_movable_candidates), len(_receptacle_candidates), len(_scene_candidates)) # Constraints on goal. _goal_candidates = [g for g in _goal_candidates if (g not in goal_to_pickup_type or len(set(_pickup_candidates).intersection( # Pickup constraint. constants.VAL_ACTION_OBJECTS[goal_to_pickup_type[g]])) > 0) and (g not in goal_to_receptacle_type or np.any([r in constants.VAL_ACTION_OBJECTS[goal_to_receptacle_type[g]] for r in _receptacle_candidates])) # Valid by goal receptacle const. and (g not in goal_to_invalid_receptacle or len(set(_receptacle_candidates).difference( goal_to_invalid_receptacle[g])) > 0) # Invalid by goal receptacle const. and len(set(_scene_candidates).intersection( scenes_for_goal[g])) > 0 # Scene constraint ] # Define whether to consider constraints for each role based on current set of candidate goals. pickup_constrained = np.any(["pickup" in goal_to_required_variables[g] for g in _goal_candidates]) movable_constrained = np.any(["movable" in goal_to_required_variables[g] for g in _goal_candidates]) receptacle_constrained = np.any(["receptacle" in goal_to_required_variables[g] for g in _goal_candidates]) scene_constrained = np.any(["scene" in goal_to_required_variables[g] for g in _goal_candidates]) # Constraints on pickup obj. _pickup_candidates = [p for p in _pickup_candidates if np.any([g not in goal_to_pickup_type or p in constants.VAL_ACTION_OBJECTS[goal_to_pickup_type[g]] for g in _goal_candidates]) # Goal constraint. and (not movable_constrained or np.any([p in constants.VAL_RECEPTACLE_OBJECTS[m] for m in _movable_candidates])) # Movable constraint. and (not receptacle_constrained or np.any([r in constants.VAL_ACTION_OBJECTS["Toggleable"] or p in constants.VAL_RECEPTACLE_OBJECTS[r] for r in _receptacle_candidates])) # Receptacle constraint. and (not scene_constrained or np.any([s in obj_to_scene_ids[constants.OBJ_PARENTS[p]] for s in _scene_candidates])) # Scene constraint ] # Constraints on movable obj. _movable_candidates = [m for m in _movable_candidates if 'pick_and_place_with_movable_recep' in _goal_candidates # Goal constraint and (not pickup_constrained or np.any([p in constants.VAL_RECEPTACLE_OBJECTS[m] for p in _pickup_candidates])) # Pickup constraint. and (not receptacle_constrained or np.any([r in constants.VAL_RECEPTACLE_OBJECTS and m in constants.VAL_RECEPTACLE_OBJECTS[r] for r in _receptacle_candidates])) # Receptacle constraint. and (not scene_constrained or np.any([s in obj_to_scene_ids[constants.OBJ_PARENTS[m]] for s in _scene_candidates])) # Scene constraint ] # Constraints on receptacle obj. _receptacle_candidates = [r for r in _receptacle_candidates if np.any([(g not in goal_to_receptacle_type or r in constants.VAL_ACTION_OBJECTS[goal_to_receptacle_type[g]]) and (g not in goal_to_invalid_receptacle or r not in goal_to_invalid_receptacle[g]) for g in _goal_candidates]) # Goal constraint. 
and (not receptacle_constrained or r in constants.VAL_ACTION_OBJECTS["Toggleable"] or np.any([p in constants.VAL_RECEPTACLE_OBJECTS[r] for p in _pickup_candidates])) # Pickup constraint. and (not movable_constrained or r in constants.VAL_ACTION_OBJECTS["Toggleable"] or np.any([m in constants.VAL_RECEPTACLE_OBJECTS[r] for m in _movable_candidates])) # Movable constraint. and (not scene_constrained or np.any([s in obj_to_scene_ids[constants.OBJ_PARENTS[r]] for s in _scene_candidates])) # Scene constraint ] # Constraints on scene. _scene_candidates = [s for s in _scene_candidates if np.any([s in scenes_for_goal[g] for g in _goal_candidates]) # Goal constraint. and (not pickup_constrained or np.any([obj_to_scene_ids[constants.OBJ_PARENTS[p]] for p in _pickup_candidates])) # Pickup constraint. and (not movable_constrained or np.any([obj_to_scene_ids[constants.OBJ_PARENTS[m]] for m in _movable_candidates])) # Movable constraint. and (not receptacle_constrained or np.any([obj_to_scene_ids[constants.OBJ_PARENTS[r]] for r in _receptacle_candidates])) # Receptacle constraint. ] if assignment_lens == (len(_goal_candidates), len(_pickup_candidates), len(_movable_candidates), len(_receptacle_candidates), len(_scene_candidates)): propagation_finished = True candidate_lens = {"goal": len(_goal_candidates), "pickup": len(_pickup_candidates), "movable": len(_movable_candidates), "receptacle": len(_receptacle_candidates), "scene": len(_scene_candidates)} if candidate_lens["goal"] == 0: # print("Goal over-constrained; skipping") continue if np.all([0 in [candidate_lens[v] for v in goal_to_required_variables[g]] for g in _goal_candidates]): continue # Ensure some combination of the remaining constraints is not in failures and is not already populated # by the target number of repeats. 
failure_ensured = True full_ensured = True for g in _goal_candidates: pickup_iter = _pickup_candidates if "pickup" in goal_to_required_variables[g] else ["None"] for p in pickup_iter: movable_iter = _movable_candidates if "movable" in goal_to_required_variables[g] else ["None"] for m in movable_iter: receptacle_iter = _receptacle_candidates if "receptacle" in goal_to_required_variables[g] \ else ["None"] for r in receptacle_iter: scene_iter = _scene_candidates if "scene" in goal_to_required_variables[g] else ["None"] for s in scene_iter: if (g, p, m, r, s) not in fail_traj: failure_ensured = False if (g, p, m, r, s) not in full_traj: full_ensured = False if not failure_ensured and not full_ensured: break if not failure_ensured and not full_ensured: break if not failure_ensured and not full_ensured: break if not failure_ensured and not full_ensured: break if not failure_ensured and not full_ensured: break if failure_ensured: continue if full_ensured: continue if candidate_lens["goal"] > 1 or np.any([np.any([candidate_lens[v] > 1 for v in goal_to_required_variables[g]]) for g in _goal_candidates]): task_sampler = sample_task_params(succ_traj, full_traj, fail_traj, _goal_candidates, _pickup_candidates, _movable_candidates, _receptacle_candidates, _scene_candidates) sampled_task = next(task_sampler) if sampled_task is None: continue else: g = _goal_candidates[0] p = _pickup_candidates[0] if "pickup" in goal_to_required_variables[g] else "None" m = _movable_candidates[0] if "movable" in goal_to_required_variables[g] else "None" r = _receptacle_candidates[0] if "receptacle" in goal_to_required_variables[g] else "None" s = _scene_candidates[0] if "scene" in goal_to_required_variables[g] else "None" sampled_task = (g, p, m, r, int(s)) yield sampled_task yield None # Discovered that there are no valid assignments remaining. def print_successes(succ_traj): print("###################################\n") print("Successes: ") print(succ_traj) print("\n##################################") def main(args, thread_num=0): print(thread_num) # settings alfred_dataset_path = '../data/json_2.1.0/train' constants.DATA_SAVE_PATH = args.save_path print("Force Unsave Data: %s" % str(args.force_unsave)) # Set up data structure to track dataset balance and use for selecting next parameters. # In actively gathering data, we will try to maximize entropy for each (e.g., uniform spread of goals, # uniform spread over patient objects, uniform recipient objects, and uniform scenes). succ_traj = pd.DataFrame(columns=["goal", "pickup", "movable", "receptacle", "scene"]) # objects-to-scene and scene-to-objects database for scene_type, ids in constants.SCENE_TYPE.items(): for id in ids: obj_json_file = os.path.join('layouts', 'FloorPlan%d-objects.json' % id) with open(obj_json_file, 'r') as of: scene_objs = json.load(of) id_str = str(id) scene_id_to_objs[id_str] = scene_objs for obj in scene_objs: if obj not in obj_to_scene_ids: obj_to_scene_ids[obj] = set() obj_to_scene_ids[obj].add(id_str) # scene-goal database for g in constants.GOALS: for st in constants.GOALS_VALID[g]: scenes_for_goal[g].extend([str(s) for s in constants.SCENE_TYPE[st]]) scenes_for_goal[g] = set(scenes_for_goal[g]) # scene-type database for st in constants.SCENE_TYPE: for s in constants.SCENE_TYPE[st]: scene_to_type[str(s)] = st # pre-populate counts in this structure using saved trajectories path. 
    succ_traj, full_traj = load_successes_from_disk(args.save_path, succ_traj, args.just_examine, args.repeats_per_cond)
    if args.just_examine:
        print_successes(succ_traj)
        return
    print(succ_traj.groupby('goal').count())

    # pre-populate failed trajectories.
    fail_traj = load_fails_from_disk(args.save_path)
    print("Loaded %d known failed tuples" % len(fail_traj))

    # create env and agent
    env = ThorEnv(x_display='0.%d' % (thread_num % 2))
    game_state = TaskGameStateFullKnowledge(env)
    agent = DeterministicPlannerAgent(thread_id=0, game_state=game_state)

    errors = {}  # map from error strings to counts, to be shown after every failure.
    goal_candidates = constants.GOALS[:]
    pickup_candidates = list(set().union(*[constants.VAL_RECEPTACLE_OBJECTS[obj]  # Union objects that can be placed.
                                           for obj in constants.VAL_RECEPTACLE_OBJECTS]))
    pickup_candidates = [p for p in pickup_candidates if constants.OBJ_PARENTS[p] in obj_to_scene_ids]
    movable_candidates = list(set(constants.MOVABLE_RECEPTACLES).intersection(obj_to_scene_ids.keys()))
    receptacle_candidates = [obj for obj in constants.VAL_RECEPTACLE_OBJECTS
                             if obj not in constants.MOVABLE_RECEPTACLES and obj in obj_to_scene_ids] + \
                            [obj for obj in constants.VAL_ACTION_OBJECTS["Toggleable"]
                             if obj in obj_to_scene_ids]

    # toaster isn't interesting in terms of producing linguistic diversity
    receptacle_candidates.remove('Toaster')
    receptacle_candidates.sort()
    scene_candidates = list(scene_id_to_objs.keys())

    n_until_load_successes = args.async_load_every_n_samples
    print_successes(succ_traj)
    task_sampler = sample_task_params(succ_traj, full_traj, fail_traj,
                                      goal_candidates, pickup_candidates, movable_candidates,
                                      receptacle_candidates, scene_candidates)

    # main generation loop
    # keeps trying out new task tuples as trajectories either fail or succeed
    while True:
        # for _ in range(20):
        for ii, json_path in enumerate(glob.iglob(os.path.join(alfred_dataset_path, "**", "traj_data.json"), recursive=True)):
            # if ii % args.num_threads == thread_num:
            # if ii == 5:
            sampled_task = json_path.split('/')[-3].split('-')
            # sampled_task = next(task_sampler)
            # print("===============")
            # print(ii, json_path)
            print(sampled_task)  # DEBUG
            # print("===============")
            if sampled_task is None:
                sys.exit("No valid tuples left to sample (all are known to fail or already have %d trajectories)"
                         % args.repeats_per_cond)
            gtype, pickup_obj, movable_obj, receptacle_obj, sampled_scene = sampled_task
            sampled_scene = int(sampled_scene)
            print("sampled tuple: " + str((gtype, pickup_obj, movable_obj, receptacle_obj, sampled_scene)))

            tries_remaining = args.trials_before_fail
            # only try to get the number of trajectories left to make this tuple full.
            target_remaining = args.repeats_per_cond - len(succ_traj.loc[(succ_traj['goal'] == gtype) &
                                                                         (succ_traj['pickup'] == pickup_obj) &
                                                                         (succ_traj['movable'] == movable_obj) &
                                                                         (succ_traj['receptacle'] == receptacle_obj) &
                                                                         (succ_traj['scene'] == str(sampled_scene))])
            num_place_fails = 0  # count of errors related to placement failure for no valid positions.
            # continue until we're (out of tries + have never succeeded) or (have gathered the target number of instances)
            while num_place_fails > args.trials_before_fail or target_remaining > 0:

                # environment setup
                constants.pddl_goal_type = gtype
                print("PDDLGoalType: " + constants.pddl_goal_type)
                task_id = create_dirs(gtype, pickup_obj, movable_obj, receptacle_obj, sampled_scene)

                # setup data dictionary
                setup_data_dict()
                constants.data_dict['task_id'] = task_id
                constants.data_dict['task_type'] = constants.pddl_goal_type
                constants.data_dict['dataset_params']['video_frame_rate'] = constants.VIDEO_FRAME_RATE

                # plan & execute
                try:
                # if True:
                    # Agent reset to new scene.
                    constraint_objs = {'repeat': [(constants.OBJ_PARENTS[pickup_obj],  # Generate multiple parent objs.
                                                   np.random.randint(2 if gtype == "pick_two_obj_and_place" else 1,
                                                                     constants.PICKUP_REPEAT_MAX + 1))],
                                       'sparse': [(receptacle_obj.replace('Basin', ''),
                                                   num_place_fails * constants.RECEPTACLE_SPARSE_POINTS)]}
                    if movable_obj != "None":
                        constraint_objs['repeat'].append((movable_obj,
                                                          np.random.randint(1, constants.PICKUP_REPEAT_MAX + 1)))
                    for obj_type in scene_id_to_objs[str(sampled_scene)]:
                        if (obj_type in pickup_candidates and
                                obj_type != constants.OBJ_PARENTS[pickup_obj] and obj_type != movable_obj):
                            constraint_objs['repeat'].append((obj_type,
                                                              np.random.randint(1, constants.MAX_NUM_OF_OBJ_INSTANCES + 1)))
                    if gtype in goal_to_invalid_receptacle:
                        constraint_objs['empty'] = [(r.replace('Basin', ''), num_place_fails * constants.RECEPTACLE_EMPTY_POINTS)
                                                    for r in goal_to_invalid_receptacle[gtype]]
                    constraint_objs['seton'] = []
                    if gtype == 'look_at_obj_in_light':
                        constraint_objs['seton'].append((receptacle_obj, False))
                    if num_place_fails > 0:
                        print("Failed %d placements in the past; increased free point constraints: " % num_place_fails
                              + str(constraint_objs))
                    scene_info = {'scene_num': sampled_scene, 'random_seed': random.randint(0, 2 ** 32)}
                    info = agent.reset(scene=scene_info, objs=constraint_objs)

                    # Problem initialization with given constraints.
                    task_objs = {'pickup': pickup_obj}
                    if movable_obj != "None":
                        task_objs['mrecep'] = movable_obj
                    if gtype == "look_at_obj_in_light":
                        task_objs['toggle'] = receptacle_obj
                    else:
                        task_objs['receptacle'] = receptacle_obj
                    agent.setup_problem({'info': info}, scene=scene_info, objs=task_objs)

                    # Now that objects are in their initial places, record them.
                    object_poses = [{'objectName': obj['name'].split('(Clone)')[0],
                                     'position': obj['position'],
                                     'rotation': obj['rotation']}
                                    for obj in env.last_event.metadata['objects'] if obj['pickupable']]
                    dirty_and_empty = gtype == 'pick_clean_then_place_in_recep'
                    object_toggles = [{'objectType': o, 'stateChange': 'toggleable', 'isToggled': v}
                                      for o, v in constraint_objs['seton']]
                    constants.data_dict['scene']['object_poses'] = object_poses
                    constants.data_dict['scene']['dirty_and_empty'] = dirty_and_empty
                    constants.data_dict['scene']['object_toggles'] = object_toggles

                    # Pre-restore the scene to cause objects to "jitter" like they will when the episode is replayed
                    # based on stored object and toggle info. This should put objects closer to the final positions they'll
                    # be in at inference time (e.g., mugs fallen and broken, knives fallen over, etc.).
print("Performing reset via thor_env API") env.reset(sampled_scene) print("Performing restore via thor_env API") env.restore_scene(object_poses, object_toggles, dirty_and_empty) event = env.step(dict(constants.data_dict['scene']['init_action'])) terminal = False while not terminal and agent.current_frame_count <= constants.MAX_EPISODE_LENGTH: action_dict = agent.get_action(None) agent.step(action_dict) reward, terminal = agent.get_reward() dump_data_dict() save_video() # else: except Exception as e: import traceback traceback.print_exc() print("Error: " + repr(e)) print("Invalid Task: skipping...") if args.debug: print(traceback.format_exc()) deleted = delete_save(args.in_parallel) if not deleted: # another thread is filling this task successfully, so leave it alone. target_remaining = 0 # stop trying to do this task. else: if str(e) == "API Action Failed: No valid positions to place object found": # Try increasing the space available on sparse and empty flagged objects. num_place_fails += 1 tries_remaining -= 1 else: # generic error tries_remaining -= 1 estr = str(e) if len(estr) > 120: estr = estr[:120] if estr not in errors: errors[estr] = 0 errors[estr] += 1 print("%%%%%%%%%%") es = sum([errors[er] for er in errors]) print("\terrors (%d):" % es) for er, v in sorted(errors.items(), key=lambda kv: kv[1], reverse=True): if v / es < 0.01: # stop showing below 1% of errors. break print("\t(%.2f) (%d)\t%s" % (v / es, v, er)) print("%%%%%%%%%%") continue if args.force_unsave: delete_save(args.in_parallel) # add to save structure. succ_traj = succ_traj.append({ "goal": gtype, "movable": movable_obj, "pickup": pickup_obj, "receptacle": receptacle_obj, "scene": str(sampled_scene)}, ignore_index=True) target_remaining -= 1 tries_remaining += args.trials_before_fail # on success, add more tries for future successes # if this combination resulted in a certain number of failures with no successes, flag it as not possible. if tries_remaining == 0 and target_remaining == args.repeats_per_cond: new_fails = [(gtype, pickup_obj, movable_obj, receptacle_obj, str(sampled_scene))] fail_traj = load_fails_from_disk(args.save_path, to_write=new_fails) print("%%%%%%%%%%") print("failures (%d)" % len(fail_traj)) # print("\t" + "\n\t".join([str(ft) for ft in fail_traj])) print("%%%%%%%%%%") # if this combination gave us the repeats we wanted, note it as filled. if target_remaining == 0: full_traj.add((gtype, pickup_obj, movable_obj, receptacle_obj, sampled_scene)) # if we're sharing with other processes, reload successes from disk to update local copy with others' additions. if args.in_parallel: if n_until_load_successes > 0: n_until_load_successes -= 1 else: print("Reloading trajectories from disk because of parallel processes...") succ_traj = pd.DataFrame(columns=succ_traj.columns) # Drop all rows. succ_traj, full_traj = load_successes_from_disk(args.save_path, succ_traj, False, args.repeats_per_cond) print("... Loaded %d trajectories" % len(succ_traj.index)) n_until_load_successes = args.async_load_every_n_samples print_successes(succ_traj) task_sampler = sample_task_params(succ_traj, full_traj, fail_traj, goal_candidates, pickup_candidates, movable_candidates, receptacle_candidates, scene_candidates) print("... 
Created fresh instance of sample_task_params generator")


def create_dirs(gtype, pickup_obj, movable_obj, receptacle_obj, scene_num):
    task_id = 'trial_T' + datetime.now().strftime("%Y%m%d_%H%M%S_%f")
    save_name = '%s-%s-%s-%s-%d' % (gtype, pickup_obj, movable_obj, receptacle_obj, scene_num) + '/' + task_id

    constants.save_path = os.path.join(constants.DATA_SAVE_PATH, save_name, RAW_IMAGES_FOLDER)
    constants.save_depth_path = os.path.join(constants.DATA_SAVE_PATH, save_name, DEPTH_IMAGES_FOLDER)
    if not os.path.exists(constants.save_path):
        os.makedirs(constants.save_path)
    if not os.path.exists(constants.save_depth_path):
        os.makedirs(constants.save_depth_path)

    print("Saving images to: " + constants.save_path)
    return task_id


def save_video():
    images_path = constants.save_path + '*.png'
    video_path = os.path.join(constants.save_path.replace(RAW_IMAGES_FOLDER, ''), 'video.mp4')
    video_saver.save(images_path, video_path)


def setup_data_dict():
    constants.data_dict = OrderedDict()
    constants.data_dict['task_id'] = ""
    constants.data_dict['task_type'] = ""
    constants.data_dict['scene'] = {'floor_plan': "", 'random_seed': -1, 'scene_num': -1, 'init_action': [],
                                    'object_poses': [], 'dirty_and_empty': None, 'object_toggles': []}
    constants.data_dict['plan'] = {'high_pddl': [], 'low_actions': []}
    constants.data_dict['images'] = []
    constants.data_dict['template'] = {'task_desc': "", 'high_descs': []}
    constants.data_dict['pddl_params'] = {'object_target': -1, 'object_sliced': -1,
                                          'parent_target': -1, 'toggle_target': -1,
                                          'mrecep_target': -1}
    constants.data_dict['dataset_params'] = {'video_frame_rate': -1}
    constants.data_dict['pddl_state'] = []


def dump_data_dict():
    data_save_path = constants.save_path.replace(RAW_IMAGES_FOLDER, '')
    with open(os.path.join(data_save_path, DATA_JSON_FILENAME), 'w') as fp:
        json.dump(constants.data_dict, fp, sort_keys=True, indent=4)


def delete_save(in_parallel):
    save_folder = constants.save_path.replace(RAW_IMAGES_FOLDER, '')
    if os.path.exists(save_folder):
        try:
            shutil.rmtree(save_folder)
        except OSError as e:
            if in_parallel:  # another thread succeeded at this task while this one failed.
                return False
            else:
                raise e  # if we're not running in parallel, this is an actual error.
return True def parallel_main(args): procs = [mp.Process(target=main, args=(args,thread_num)) for thread_num in range(args.num_threads)] try: for proc in procs: proc.start() time.sleep(0.1) finally: for proc in procs: proc.join() if __name__ == "__main__": parser = argparse.ArgumentParser() # settings parser.add_argument('--force_unsave', action='store_true', help="don't save any data (for debugging purposes)") parser.add_argument('--debug', action='store_true') parser.add_argument('--save_path', type=str, default="dataset/new_trajectories_valid_seen", help="where to save the generated data") parser.add_argument('--x_display', type=str, required=False, default=constants.X_DISPLAY, help="x_display id") parser.add_argument("--just_examine", action='store_true', help="just examine what data is gathered; don't gather more") parser.add_argument("--in_parallel", action='store_true', help="this collection will run in parallel with others, so load from disk on every new sample") parser.add_argument("-n", "--num_threads", type=int, default=1, help="number of processes for parallel mode") parser.add_argument('--json_file', type=str, default="", help="path to json file with trajectory dump") # params parser.add_argument("--repeats_per_cond", type=int, default=3) parser.add_argument("--trials_before_fail", type=int, default=5) parser.add_argument("--async_load_every_n_samples", type=int, default=10) parser.add_argument('--gpu_id', type=int, default=0) parse_args = parser.parse_args() # if parse_args.in_parallel and parse_args.num_threads > 1: # parallel_main(parse_args) # else: main(parse_args)
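
# Example invocation (a sketch; the flags mirror the parser above and the save
# path is hypothetical):
#
#   python generate_trajectories.py --save_path dataset/new_trajectories \
#       --repeats_per_cond 3 --trials_before_fail 5 --num_threads 1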
models.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from scheduled_job_client.notification import (
    notify_job_start, notify_job_status)
from scheduled_job_client.job import start_background_job
from django.db import models
from django.utils.timezone import localtime, now
import threading


# Models support local scheduled job instance
class ScheduledJob(models.Model):
    """ Represents provisioning commands.
    """
    job_id = models.CharField(max_length=36, unique=True)
    job_label = models.CharField(max_length=128)
    pid = models.SmallIntegerField(null=True)
    start_date = models.DateTimeField(auto_now_add=True)
    end_date = models.DateTimeField(null=True)
    progress = models.SmallIntegerField(null=True)
    exit_status = models.SmallIntegerField(null=True)
    exit_output = models.CharField(max_length=512, null=True)
    # BUG should job table include logged data? pointer/reference to log file?

    def launch(self):
        if self.pid is None:
            thread = threading.Thread(
                target=start_background_job, args=(self,))
            thread.daemon = True
            thread.start()
            self.start_date = now()
            self.save()

    def save(self, *args, **kwargs):
        notify_job_status({'jobs': {self.job_id: self.json_data()}})
        super(ScheduledJob, self).save(*args, **kwargs)

    def json_data(self):
        return {
            'job_id': self.job_id,
            'job_label': self.job_label,
            'start_date': localtime(self.start_date).isoformat() if (
                self.start_date is not None) else None,
            'end_date': localtime(self.end_date).isoformat() if (
                self.end_date is not None) else None,
            'progress': self.progress,
            'exit_status': self.exit_status,
            'exit_output': self.exit_output
        }
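
# Example usage (a sketch; assumes a configured Django project with this app
# installed, and the job_id and label below are hypothetical):
#
#   job = ScheduledJob.objects.create(job_id='nightly-sync-001', job_label='nightly sync')
#   job.launch()  # spawns a daemon thread running start_background_job(job),
#                 # stamps start_date, and re-saves the row, which also pushes
#                 # a notify_job_status update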
Thread3.py
from threading import Thread
import time

"""
Using a global variable from multiple threads
"""


# Note: unlike processes, threads share global variables with one another
g_num = 100


def work1():
    global g_num
    for i in range(3):
        g_num += 1
    print('--- in work1, g_num is %d ---' % g_num)


def work2():
    global g_num
    print('--- in work2, g_num is %d ---' % g_num)


print('--- before the threads are created, g_num is %d ---' % g_num)

t1 = Thread(target=work1)
t1.start()

time.sleep(1)

t2 = Thread(target=work2)
t2.start()
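
# The workers above only demonstrate that g_num is shared; work1's increments
# are read-modify-write operations, so with more iterations and more threads
# the updates could interleave and lose counts. A race-free variant (a sketch,
# not part of the original example) guards the update with a Lock:
from threading import Lock

g_lock = Lock()


def work3():
    global g_num
    for i in range(3):
        with g_lock:  # serialize the increment across threads
            g_num += 1
    print('--- in work3, g_num is %d ---' % g_num)


t3 = Thread(target=work3)
t3.start()
t3.join()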
config_reader.py
import copy
import multiprocessing as mp


def process_configs(target, arg_parser):
    args, _ = arg_parser.parse_known_args()
    print('args in process_configs:', args)
    ctx = mp.get_context('spawn')

    for run_args, _run_config, _run_repeat in _yield_configs(arg_parser, args):
        p = ctx.Process(target=target, args=(run_args,))
        p.start()
        p.join()


def _read_config(path):
    with open(path) as f:
        lines = f.readlines()

    runs = []
    run = [1, dict()]
    for line in lines:
        stripped_line = line.strip()

        # continue in case of comment
        if stripped_line.startswith('#'):
            continue

        if not stripped_line:
            if run[1]:
                runs.append(run)
                run = [1, dict()]
            continue

        if stripped_line.startswith('[') and stripped_line.endswith(']'):
            repeat = int(stripped_line[1:-1])
            run[0] = repeat
        else:
            key, value = stripped_line.split('=')
            key, value = (key.strip(), value.strip())
            run[1][key] = value

    if run[1]:
        runs.append(run)

    return runs


def _convert_config(config):
    config_list = []
    for k, v in config.items():
        if v.lower() == 'true':
            config_list.append('--' + k)
        elif v.lower() != 'false':
            config_list.extend(['--' + k] + v.split(' '))
    return config_list


def _yield_configs(arg_parser, args, verbose=True):
    _print = (lambda x: print(x)) if verbose else lambda x: x

    if args.config:
        config = _read_config(args.config)

        for run_repeat, run_config in config:
            print("-" * 50)
            print("Config:")
            print(run_config)

            args_copy = copy.deepcopy(args)
            config_list = _convert_config(run_config)
            run_args = arg_parser.parse_args(config_list, namespace=args_copy)
            run_args_dict = vars(run_args)

            # set boolean values
            for k, v in run_config.items():
                if v.lower() == 'false':
                    run_args_dict[k] = False

            print("Repeat %s times" % run_repeat)
            print("-" * 50)

            for iteration in range(run_repeat):
                _print("Iteration %s" % iteration)
                _print("-" * 50)
                yield run_args, run_config, run_repeat
    else:
        yield args, None, None
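
# Example of the config file format that _read_config expects (a sketch; the
# keys below are hypothetical and must match the target arg_parser):
#
#   # lines starting with '#' are comments
#   [3]
#   model = bert-base
#   lr = 5e-5
#   freeze_encoder = true
#
#   model = roberta-base
#   lr = 1e-5
#
# '[3]' repeats the following run 3 times; a blank line separates runs.
# _convert_config turns 'true' values into bare flags ('--freeze_encoder') and
# everything else into '--key value' arguments for arg_parser.parse_args.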
test_remote_datatypes.py
import itertools
import time
from threading import Thread

from assemblyline.common.uid import get_random_id


# noinspection PyShadowingNames
def test_hash(redis_connection):
    if redis_connection:
        from assemblyline.remote.datatypes.hash import Hash
        with Hash('test-hashmap') as h:
            assert h.add("key", "value") == 1
            assert h.exists("key") == 1
            assert h.get("key") == "value"
            assert h.set("key", "new-value") == 0
            assert h.keys() == ["key"]
            assert h.length() == 1
            assert h.items() == {"key": "new-value"}
            assert h.pop("key") == "new-value"
            assert h.length() == 0
            assert h.add("key", "value") == 1
            assert h.conditional_remove("key", "value1") is False
            assert h.conditional_remove("key", "value") is True
            assert h.length() == 0

            # Make sure we can limit the size of a hash table
            assert h.limited_add("a", 1, 2) == 1
            assert h.limited_add("a", 1, 2) == 0
            assert h.length() == 1
            assert h.limited_add("b", 10, 2) == 1
            assert h.length() == 2
            assert h.limited_add("c", 1, 2) is None
            assert h.length() == 2
            assert h.pop("a")

            # Can we increment integer values in the hash
            assert h.increment("a") == 1
            assert h.increment("a") == 2
            assert h.increment("a", 10) == 12
            assert h.increment("a", -22) == -10
            h.delete()

            # Load a bunch of items and test iteration
            data_before = [''.join(_x) for _x in itertools.product('abcde', repeat=5)]
            data_before = {_x: _x + _x for _x in data_before}
            h.multi_set(data_before)

            data_after = {}
            for key, value in h:
                data_after[key] = value

            assert data_before == data_after


# noinspection PyShadowingNames
def test_expiring_hash(redis_connection):
    if redis_connection:
        from assemblyline.remote.datatypes.hash import ExpiringHash
        with ExpiringHash('test-expiring-hashmap', ttl=1) as eh:
            assert eh.add("key", "value") == 1
            assert eh.length() == 1
            time.sleep(1.1)
            assert eh.length() == 0


# noinspection PyShadowingNames
def test_basic_counters(redis_connection):
    if redis_connection:
        from assemblyline.remote.datatypes.counters import Counters
        with Counters('test-counter') as ct:
            ct.delete()

            for x in range(10):
                ct.inc('t1')
            for x in range(20):
                ct.inc('t2', value=2)
            ct.dec('t1')
            ct.dec('t2')
            assert sorted(ct.get_queues()) == ['test-counter-t1', 'test-counter-t2']
            assert ct.get_queues_sizes() == {'test-counter-t1': 9, 'test-counter-t2': 39}
            ct.reset_queues()
            assert ct.get_queues_sizes() == {'test-counter-t1': 0, 'test-counter-t2': 0}


# noinspection PyShadowingNames
def test_tracked_counters(redis_connection):
    if redis_connection:
        from assemblyline.remote.datatypes.counters import Counters
        with Counters('tracked-test-counter', track_counters=True) as ct:
            ct.delete()

            for x in range(10):
                ct.inc('t1')
            for x in range(20):
                ct.inc('t2', value=2)
            assert ct.tracker.keys() == ['t1', 't2']
            ct.dec('t1')
            ct.dec('t2')
            assert ct.tracker.keys() == []
            assert sorted(ct.get_queues()) == ['tracked-test-counter-t1', 'tracked-test-counter-t2']
            assert ct.get_queues_sizes() == {'tracked-test-counter-t1': 9, 'tracked-test-counter-t2': 39}
            ct.reset_queues()
            assert ct.get_queues_sizes() == {'tracked-test-counter-t1': 0, 'tracked-test-counter-t2': 0}


# noinspection PyShadowingNames
def test_sets(redis_connection):
    if redis_connection:
        from assemblyline.remote.datatypes.set import Set
        with Set('test-set') as s:
            s.delete()

            values = ['a', 'b', 1, 2]
            assert s.add(*values) == 4
            assert s.length() == 4
            for x in s.members():
                assert x in values
            assert s.random() in values
            assert s.exist(values[2])
            s.remove(values[2])
            assert not s.exist(values[2])
            pop_val = s.pop()
            assert pop_val in values
            assert not s.exist(pop_val)
            assert s.length() == 2

            assert s.limited_add('dog', 3)
            assert not s.limited_add('cat', 3)
            assert s.exist('dog')
            assert not s.exist('cat')
            assert s.length() == 3


# noinspection PyShadowingNames
def test_expiring_sets(redis_connection):
    if redis_connection:
        from assemblyline.remote.datatypes.set import ExpiringSet
        with ExpiringSet('test-expiring-set', ttl=1) as es:
            es.delete()

            values = ['a', 'b', 1, 2]
            assert es.add(*values) == 4
            assert es.length() == 4
            assert es.exist(values[2])
            for x in es.members():
                assert x in values
            time.sleep(1.1)
            assert es.length() == 0
            assert not es.exist(values[2])


# noinspection PyShadowingNames
def test_lock(redis_connection):
    if redis_connection:
        from assemblyline.remote.datatypes.lock import Lock

        def locked_execution(next_thread=None):
            with Lock('test', 10):
                if next_thread:
                    next_thread.start()
                time.sleep(2)

        t2 = Thread(target=locked_execution)
        t1 = Thread(target=locked_execution, args=(t2,))
        t1.start()

        time.sleep(1)
        assert t1.is_alive()
        assert t2.is_alive()
        time.sleep(2)
        assert not t1.is_alive()
        assert t2.is_alive()
        time.sleep(2)
        assert not t1.is_alive()
        assert not t2.is_alive()


# noinspection PyShadowingNames,PyUnusedLocal
def test_priority_queue(redis_connection):
    from assemblyline.remote.datatypes.queues.priority import PriorityQueue, length, select
    with PriorityQueue('test-priority-queue') as pq:
        pq.delete()

        for x in range(10):
            pq.push(100, x)

        a_key = pq.push(101, 'a')
        z_key = pq.push(99, 'z')

        assert pq.rank(a_key) == 0
        assert pq.rank(z_key) == pq.length() - 1

        assert pq.pop() == 'a'
        assert pq.unpush() == 'z'
        assert pq.count(100, 100) == 10
        assert pq.pop() == 0
        assert pq.unpush() == 9
        assert pq.length() == 8
        assert pq.pop(4) == [1, 2, 3, 4]
        assert pq.unpush(3) == [6, 7, 8]
        assert pq.length() == 1

        # Should be [<100, 5>] at this point
        for x in range(5):
            pq.push(100 + x, x)

        assert pq.length() == 6
        assert pq.dequeue_range(lower_limit=106) == []
        assert pq.length() == 6
        assert pq.dequeue_range(lower_limit=103) == [4]  # 3 and 4 are both options, 4 has higher score
        assert pq.dequeue_range(lower_limit=102, skip=1) == [2]  # 2 and 3 are both options, 3 has higher score, skip it
        assert pq.dequeue_range(upper_limit=100, num=10) == [5, 0]  # Take some off the other end
        assert pq.length() == 2

        with PriorityQueue('second-priority-queue') as other:
            other.push(100, 'a')
            assert length(other, pq) == [1, 2]
            select(other, pq)
            select(other, pq)
            select(other, pq)
            assert length(other, pq) == [0, 0]

        pq.push(50, 'first')
        pq.push(-50, 'second')

        assert pq.dequeue_range(0, 100) == ['first']
        assert pq.dequeue_range(-100, 0) == ['second']


# noinspection PyShadowingNames,PyUnusedLocal
def test_unique_priority_queue(redis_connection):
    from assemblyline.remote.datatypes.queues.priority import UniquePriorityQueue
    with UniquePriorityQueue('test-priority-queue') as pq:
        pq.delete()

        for x in range(10):
            pq.push(100, x)
        assert pq.length() == 10

        # Values should be unique, this should have no effect on the length
        for x in range(10):
            pq.push(100, x)
        assert pq.length() == 10

        pq.push(101, 'a')
        pq.push(99, 'z')

        assert pq.pop() == 'a'
        assert pq.unpush() == 'z'
        assert pq.count(100, 100) == 10
        assert pq.pop() == 0
        assert pq.unpush() == 9
        assert pq.length() == 8
        assert pq.pop(4) == [1, 2, 3, 4]
        assert pq.unpush(3) == [6, 7, 8]
        assert pq.length() == 1

        # Should be [<100, 5>] at this point
        for x in range(5):
            pq.push(100 + x, x)

        assert pq.length() == 6
        assert pq.dequeue_range(lower_limit=106) == []
        assert pq.length() == 6
        assert pq.dequeue_range(lower_limit=103) == [4]  # 3 and 4 are both options, 4 has higher score
        assert pq.dequeue_range(lower_limit=102, skip=1) == [2]  # 2 and 3 are both options, 3 has higher score, skip it
        assert sorted(pq.dequeue_range(upper_limit=100, num=10)) == [0, 5]  # Take some off the other end
        assert pq.length() == 2
        pq.pop(2)

        pq.push(50, 'first')
        pq.push(-50, 'second')

        assert pq.dequeue_range(0, 100) == ['first']
        assert pq.dequeue_range(-100, 0) == ['second']


# noinspection PyShadowingNames
def test_named_queue(redis_connection):
    if redis_connection:
        from assemblyline.remote.datatypes.queues.named import NamedQueue, select
        with NamedQueue('test-named-queue') as nq:
            nq.delete()

            for x in range(5):
                nq.push(x)
            assert nq.length() == 5

            nq.push(*list(range(5)))
            assert nq.length() == 10

            assert nq.peek_next() == nq.pop()
            assert nq.peek_next() == 1
            v = nq.pop()
            assert v == 1
            assert nq.peek_next() == 2
            nq.unpop(v)
            assert nq.peek_next() == 1

            assert select(nq) == ('test-named-queue', 1)

        with NamedQueue('test-named-queue-1') as nq1:
            nq1.delete()

            with NamedQueue('test-named-queue-2') as nq2:
                nq2.delete()

                nq1.push(1)
                nq2.push(2)

                assert select(nq1, nq2) == ('test-named-queue-1', 1)
                assert select(nq1, nq2) == ('test-named-queue-2', 2)


# noinspection PyShadowingNames
def test_multi_queue(redis_connection):
    if redis_connection:
        from assemblyline.remote.datatypes.queues.multi import MultiQueue
        mq = MultiQueue()
        mq.delete('test-multi-q1')
        mq.delete('test-multi-q2')

        for x in range(5):
            mq.push('test-multi-q1', x + 1)
            mq.push('test-multi-q2', x + 6)

        assert mq.length('test-multi-q1') == 5
        assert mq.length('test-multi-q2') == 5

        assert mq.pop('test-multi-q1') == 1
        assert mq.pop('test-multi-q2') == 6

        assert mq.length('test-multi-q1') == 4
        assert mq.length('test-multi-q2') == 4

        mq.delete('test-multi-q1')
        mq.delete('test-multi-q2')

        assert mq.length('test-multi-q1') == 0
        assert mq.length('test-multi-q2') == 0


# noinspection PyShadowingNames
def test_comms_queue(redis_connection):
    if redis_connection:
        from assemblyline.remote.datatypes.queues.comms import CommsQueue

        def publish_messages(message_list):
            time.sleep(0.1)
            with CommsQueue('test-comms-queue') as cq_p:
                for message in message_list:
                    cq_p.publish(message)

        msg_list = ["bob", 1, {"bob": 1}, [1, 2, 3], None, "Nice!", "stop"]
        t = Thread(target=publish_messages, args=(msg_list,))
        t.start()

        with CommsQueue('test-comms-queue') as cq:
            x = 0
            for msg in cq.listen():
                if msg == "stop":
                    break

                assert msg == msg_list[x]
                x += 1

            t.join()
            assert not t.is_alive()


# noinspection PyShadowingNames
def test_user_quota_tracker(redis_connection):
    if redis_connection:
        from assemblyline.remote.datatypes.user_quota_tracker import UserQuotaTracker

        max_quota = 3
        timeout = 2
        name = get_random_id()
        uqt = UserQuotaTracker('test-quota', timeout=timeout)

        # The first max_quota calls should succeed
        for _ in range(max_quota):
            assert uqt.begin(name, max_quota) is True

        # All further calls should fail until items time out
        for _ in range(max_quota):
            assert uqt.begin(name, max_quota) is False

        # If you remove an item, exactly one new item can go in
        uqt.end(name)
        assert uqt.begin(name, max_quota) is True
        assert uqt.begin(name, max_quota) is False

        # After waiting out the timeout, all items can go in again
        time.sleep(timeout + 1)
        for _ in range(max_quota):
            assert uqt.begin(name, max_quota) is True
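

# The tests above all request a `redis_connection` pytest fixture that is not
# defined in this file. Below is a minimal sketch of what such a fixture could
# look like; it assumes a Redis server on localhost:6379, and both the
# connection details and the skip behaviour are assumptions, not the project's
# actual conftest. The datatypes under test open their own connections, so the
# fixture only acts as a gate that skips the suite when Redis is unreachable.
import pytest
import redis


@pytest.fixture(scope='session')
def redis_connection():
    client = redis.Redis(host='localhost', port=6379)  # assumed local server
    try:
        client.ping()  # raises ConnectionError when no server is reachable
    except redis.exceptions.ConnectionError:
        pytest.skip("Redis server is not available")
    return client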
flight_manager.py
"""Reusable controller for starting flight processes""" import argparse import logging from multiprocessing import Process, Queue from multiprocessing.managers import BaseManager from logger import init_logger, worker_configurer from communication import Communication from flight.flight import flight from flight.state_settings import StateSettings class FlightManager: def __init__(self, state_settings: StateSettings = None): if state_settings is None: state_settings = StateSettings() self.state_settings = state_settings def main(self) -> None: parser: argparse.ArgumentParser = argparse.ArgumentParser() parser.add_argument( "-s", "--simulation", help="using the simulator", action="store_true" ) args = parser.parse_args() logging.debug( "Simulation flag %s", "enabled" if args.simulation else "disabled" ) self.run_threads(args.simulation) def init_flight(self, flight_args) -> Process: return Process(target=flight, name="flight", args=flight_args) def run_threads(self, sim: bool) -> None: # Register Communication object to Base Manager BaseManager.register("Communication", Communication) # Create manager object manager: BaseManager = BaseManager() # Start manager manager.start() # Create Communication object from manager comm_obj = manager.Communication() log_queue: Queue = Queue(-1) logging_process = init_logger(log_queue) logging_process.start() worker_configurer(log_queue) # Create new processes logging.info("Spawning Processes") flight_args = (comm_obj, sim, log_queue, worker_configurer, self.state_settings) flight_process: Process = self.init_flight(flight_args) # Start flight function flight_process.start() logging.debug("Flight process with id %d started", flight_process.pid) logging.debug(f"Title: {self.state_settings.run_title}") logging.debug(f"Description: {self.state_settings.run_description}") try: while comm_obj.get_state() != "final": # If the process is no longer alive, # (i.e. error has been raised in this case) # then create a new instance and start the new process # (i.e. restart the process) if flight_process.is_alive() is not True: logging.error("Flight process terminated, restarting") flight_process: Process = self.init_flight(flight_args) flight_process.start() except KeyboardInterrupt: # Ctrl-C was pressed # TODO send a message to the flight process to land instead of # basically overwriting the process logging.info("Ctrl-C Pressed, forcing drone to land") comm_obj.set_state("land") flight_process: Process = self.init_flight(flight_args) flight_process.start() # Join flight process before exiting function flight_process.join() logging.info("All processes ended, Goodbye!") logging_process.stop()
ctp_gateway.py
""" """ import sys import traceback import json from datetime import datetime, timedelta from time import time from copy import copy, deepcopy from functools import lru_cache from typing import List import pandas as pd from vnpy.api.ctp import ( MdApi, TdApi, THOST_FTDC_OAS_Submitted, THOST_FTDC_OAS_Accepted, THOST_FTDC_OAS_Rejected, THOST_FTDC_OST_NoTradeQueueing, THOST_FTDC_OST_PartTradedQueueing, THOST_FTDC_OST_AllTraded, THOST_FTDC_OST_Canceled, THOST_FTDC_D_Buy, THOST_FTDC_D_Sell, THOST_FTDC_PD_Long, THOST_FTDC_PD_Short, THOST_FTDC_OPT_LimitPrice, THOST_FTDC_OPT_AnyPrice, THOST_FTDC_OF_Open, THOST_FTDC_OFEN_Close, THOST_FTDC_OFEN_CloseYesterday, THOST_FTDC_OFEN_CloseToday, THOST_FTDC_PC_Futures, THOST_FTDC_PC_Options, THOST_FTDC_PC_SpotOption, THOST_FTDC_PC_Combination, THOST_FTDC_CP_CallOptions, THOST_FTDC_CP_PutOptions, THOST_FTDC_HF_Speculation, THOST_FTDC_CC_Immediately, THOST_FTDC_FCC_NotForceClose, THOST_FTDC_TC_GFD, THOST_FTDC_VC_AV, THOST_FTDC_TC_IOC, THOST_FTDC_VC_CV, THOST_FTDC_AF_Delete ) from vnpy.trader.constant import ( Direction, Offset, Exchange, OrderType, Product, Status, OptionType, Interval ) from vnpy.trader.gateway import BaseGateway, TickCombiner, IndexGenerator from vnpy.trader.object import ( TickData, BarData, OrderData, TradeData, PositionData, AccountData, ContractData, OrderRequest, CancelRequest, SubscribeRequest, HistoryRequest ) from vnpy.trader.utility import ( extract_vt_symbol, get_folder_path, get_trading_date, get_underlying_symbol, round_to, BarGenerator, print_dict ) from vnpy.trader.event import EVENT_TIMER from vnpy.api.websocket import WebsocketClient # 增加通达信指数接口行情 from time import sleep from threading import Thread from pytdx.exhq import TdxExHq_API from vnpy.amqp.consumer import subscriber from vnpy.data.tdx.tdx_common import ( TDX_FUTURE_HOSTS, get_future_contracts, save_future_contracts, get_cache_json, save_cache_json, TDX_FUTURE_CONFIG) from vnpy.component.base import ( MARKET_DAY_ONLY, NIGHT_MARKET_23, NIGHT_MARKET_SQ2 ) STATUS_CTP2VT = { THOST_FTDC_OAS_Submitted: Status.SUBMITTING, THOST_FTDC_OAS_Accepted: Status.SUBMITTING, THOST_FTDC_OAS_Rejected: Status.REJECTED, THOST_FTDC_OST_NoTradeQueueing: Status.NOTTRADED, THOST_FTDC_OST_PartTradedQueueing: Status.PARTTRADED, THOST_FTDC_OST_AllTraded: Status.ALLTRADED, THOST_FTDC_OST_Canceled: Status.CANCELLED } DIRECTION_VT2CTP = { Direction.LONG: THOST_FTDC_D_Buy, Direction.SHORT: THOST_FTDC_D_Sell } DIRECTION_CTP2VT = {v: k for k, v in DIRECTION_VT2CTP.items()} DIRECTION_CTP2VT[THOST_FTDC_PD_Long] = Direction.LONG DIRECTION_CTP2VT[THOST_FTDC_PD_Short] = Direction.SHORT ORDERTYPE_VT2CTP = { OrderType.LIMIT: THOST_FTDC_OPT_LimitPrice, OrderType.MARKET: THOST_FTDC_OPT_AnyPrice } ORDERTYPE_CTP2VT = {v: k for k, v in ORDERTYPE_VT2CTP.items()} OFFSET_VT2CTP = { Offset.OPEN: THOST_FTDC_OF_Open, Offset.CLOSE: THOST_FTDC_OFEN_Close, Offset.CLOSETODAY: THOST_FTDC_OFEN_CloseToday, Offset.CLOSEYESTERDAY: THOST_FTDC_OFEN_CloseYesterday, } OFFSET_CTP2VT = {v: k for k, v in OFFSET_VT2CTP.items()} EXCHANGE_CTP2VT = { "CFFEX": Exchange.CFFEX, "SHFE": Exchange.SHFE, "CZCE": Exchange.CZCE, "DCE": Exchange.DCE, "INE": Exchange.INE, "SPD": Exchange.SPD } PRODUCT_CTP2VT = { THOST_FTDC_PC_Futures: Product.FUTURES, THOST_FTDC_PC_Options: Product.OPTION, THOST_FTDC_PC_SpotOption: Product.OPTION, THOST_FTDC_PC_Combination: Product.SPREAD } OPTIONTYPE_CTP2VT = { THOST_FTDC_CP_CallOptions: OptionType.CALL, THOST_FTDC_CP_PutOptions: OptionType.PUT } MAX_FLOAT = sys.float_info.max symbol_exchange_map = {} 
option_name_map = {} symbol_name_map = {} symbol_size_map = {} index_contracts = {} # tdx 期货配置本地缓存 future_contracts = get_future_contracts() # 时间戳对齐 TIME_GAP = 8 * 60 * 60 * 1000000000 INTERVAL_VT2TQ = { Interval.MINUTE: 60, Interval.HOUR: 60 * 60, Interval.DAILY: 60 * 60 * 24, } TQ2VT_TYPE = { "FUTURE_OPTION": Product.OPTION, "INDEX": Product.INDEX, "FUTURE_COMBINE": Product.SPREAD, "SPOT": Product.SPOT, "FUTURE_CONT": Product.INDEX, "FUTURE": Product.FUTURES, "FUTURE_INDEX": Product.INDEX, "OPTION": Product.OPTION, } @lru_cache(maxsize=9999) def vt_to_tq_symbol(symbol: str, exchange: Exchange) -> str: """ TQSdk exchange first """ for count, word in enumerate(symbol): if word.isdigit(): break fix_symbol = symbol if exchange in [Exchange.INE, Exchange.SHFE, Exchange.DCE]: fix_symbol = symbol.lower() # Check for index symbol time_str = symbol[count:] if time_str in ["88"]: return f"KQ.m@{exchange.value}.{fix_symbol[:count]}" if time_str in ["99"]: return f"KQ.i@{exchange.value}.{fix_symbol[:count]}" return f"{exchange.value}.{fix_symbol}" @lru_cache(maxsize=9999) def tq_to_vt_symbol(tq_symbol: str) -> str: """""" if "KQ.m" in tq_symbol: ins_type, instrument = tq_symbol.split("@") exchange, symbol = instrument.split(".") return f"{symbol}88.{exchange}" elif "KQ.i" in tq_symbol: ins_type, instrument = tq_symbol.split("@") exchange, symbol = instrument.split(".") return f"{symbol}99.{exchange}" else: exchange, symbol = tq_symbol.split(".") return f"{symbol}.{exchange}" class CtpGateway(BaseGateway): """ VN Trader Gateway for CTP . """ default_setting = { "用户名": "", "密码": "", "经纪商代码": "", "交易服务器": "", "行情服务器": "", "产品名称": "", "授权编码": "", "产品信息": "" } # 注 # 如果采用rabbit_mq拓展tdx指数行情,default_setting中,需要增加: # "rabbit": # { # "host": "192.168.1.211", # "exchange": "x_fanout_idx_tick" # } exchanges = list(EXCHANGE_CTP2VT.values()) def __init__(self, event_engine, gateway_name="CTP"): """Constructor""" super().__init__(event_engine, gateway_name) self.td_api = None self.md_api = None self.l2_md_api = None self.tdx_api = None self.rabbit_api = None self.tq_api = None self.subscribed_symbols = set() # 已订阅合约代码 self.combiner_conf_dict = {} # 保存合成器配置 # 自定义价差/加比的tick合成器 self.combiners = {} self.tick_combiner_map = {} # 本地指数行情合成器{ 'rb2110':x, 'rb2201':x',,} self.index_generators = {} # 已经创建得指数合成器symbol列表 ['RB99','J99',,,] self.subscribed_index_symbols = [] def connect(self, setting: dict): """ 连接交易服务器、行情服务器 行情服务器包括:ctp普通行情、ctp5档行情(上海、能源所)、tdx指数行情、rabbitMQ指数行情、天勤指数行情 :param setting: :return: """ userid = setting["用户名"] password = setting["密码"] brokerid = setting["经纪商代码"] td_address = setting["交易服务器"] md_address = setting["行情服务器"] md_address_level2 = setting.get("行情服务器_五档", None) appid = setting["产品名称"] auth_code = setting["授权编码"] product_info = setting["产品信息"] rabbit_dict = setting.get('rabbit', None) tq_dict = setting.get('tq', None) tdx_dict = setting.get('tdx', None) if ( (not td_address.startswith("tcp://")) and (not td_address.startswith("ssl://")) ): td_address = "tcp://" + td_address if ( (not md_address.startswith("tcp://")) and (not md_address.startswith("ssl://")) ): md_address = "tcp://" + md_address if md_address_level2: if ( (not md_address_level2.startswith("tcp://")) and (not md_address_level2.startswith("ssl://")) ): md_address_level2 = "tcp://" + md_address_level2 # 获取自定义价差/价比合约的配置 try: from vnpy.trader.engine import CustomContract c = CustomContract() self.combiner_conf_dict = c.get_config() if len(self.combiner_conf_dict) > 0: 
self.write_log(u'加载的自定义价差/价比配置:{}'.format(self.combiner_conf_dict)) contract_dict = c.get_contracts() for vt_symbol, contract in contract_dict.items(): contract.gateway_name = self.gateway_name symbol_exchange_map[contract.symbol] = contract.exchange self.on_contract(contract) except Exception as ex: # noqa pass if not self.td_api: self.td_api = CtpTdApi(self) self.td_api.connect(td_address, userid, password, brokerid, auth_code, appid, product_info) if not self.md_api: self.md_api = CtpMdApi(self) self.md_api.connect(md_address, userid, password, brokerid) if not self.l2_md_api and md_address_level2: self.write_log(f'激活五档行情配置:{md_address_level2}') self.l2_md_api = CtpMdApi(gateway=self, level2=True) self.l2_md_api.connect(md_address_level2, userid, password, brokerid) if rabbit_dict: self.write_log(f'激活RabbitMQ行情接口') self.rabbit_api = SubMdApi(gateway=self) self.rabbit_api.connect(rabbit_dict) elif tq_dict is not None: self.write_log(f'激活天勤行情接口') self.tq_api = TqMdApi(gateway=self) self.tq_api.connect(tq_dict) elif tdx_dict is not None: self.write_log(f'激活通达信行情接口') self.tdx_api = TdxMdApi(gateway=self) self.tdx_api.connect() self.init_query() for (vt_symbol, is_bar) in list(self.subscribed_symbols): symbol, exchange = extract_vt_symbol(vt_symbol) # 获取合约的缩写号 underlying_symbol = get_underlying_symbol(vt_symbol) dt = datetime.now() # 若为中金所等的合约,白天才提交订阅请求 if underlying_symbol in MARKET_DAY_ONLY and not (8 < dt.hour < 16): continue req = SubscribeRequest( symbol=symbol, exchange=exchange, is_bar=is_bar ) # 指数合约,从tdx行情、天勤订阅 if req.symbol[-2:] in ['99']: req.symbol = req.symbol.upper() if self.tdx_api is not None: self.write_log(u'有指数订阅,连接通达信行情服务器') self.tdx_api.connect() self.tdx_api.subscribe(req) elif self.rabbit_api is not None: # 使用rabbitmq获取 self.rabbit_api.subscribe(req) elif self.tq_api: # 使用天勤行情获取 self.tq_api.subscribe(req) else: # 上期所、上能源支持五档行情,使用天勤接口 if self.tq_api and req.exchange in [Exchange.SHFE, Exchange.INE]: self.write_log(f'使用天勤接口订阅') self.tq_api.subscribe(req) else: self.md_api.subscribe(req) def check_status(self): """检查状态""" # 检查交易接口、行情接口的连接状态 if self.td_api.connect_status and self.md_api.connect_status: self.status.update({'con': True}) # 检查通达信行情接口(直接连通达信) if self.tdx_api: self.tdx_api.check_status() # 检查天勤行情接口 if self.tq_api: self.tq_api.check_status() if not self.td_api.connect_status or self.md_api.connect_status: return False return True def subscribe(self, req: SubscribeRequest): """ 订阅合约行情 普通合约 => ctp行情、5档行情 指数合约 => 通达信、rabbitMQ,天勤 套利合约 => 合约合并器 :param req: :return: """ try: if self.md_api: # 如果是自定义的套利合约符号 if req.symbol in self.combiner_conf_dict: self.write_log(u'订阅自定义套利合约:{}'.format(req.symbol)) # 创建合成器 if req.symbol not in self.combiners: setting = self.combiner_conf_dict.get(req.symbol) setting.update({"symbol": req.symbol}) combiner = TickCombiner(self, setting) # 更新合成器 self.write_log(u'添加{}与合成器映射'.format(req.symbol)) self.combiners.update({setting.get('symbol'): combiner}) # 增加映射( leg1 对应的合成器列表映射) leg1_symbol = setting.get('leg1_symbol') leg1_exchange = Exchange(setting.get('leg1_exchange')) combiner_list = self.tick_combiner_map.get(leg1_symbol, []) if combiner not in combiner_list: self.write_log(u'添加Leg1:{}与合成器得映射'.format(leg1_symbol)) combiner_list.append(combiner) self.tick_combiner_map.update({leg1_symbol: combiner_list}) # 增加映射( leg2 对应的合成器列表映射) leg2_symbol = setting.get('leg2_symbol') leg2_exchange = Exchange(setting.get('leg2_exchange')) combiner_list = self.tick_combiner_map.get(leg2_symbol, []) if combiner not in combiner_list: 
self.write_log(u'添加Leg2:{}与合成器得映射'.format(leg2_symbol)) combiner_list.append(combiner) self.tick_combiner_map.update({leg2_symbol: combiner_list}) self.write_log(u'订阅leg1:{}'.format(leg1_symbol)) leg1_req = SubscribeRequest( symbol=leg1_symbol, exchange=leg1_exchange ) self.subscribe(leg1_req) self.write_log(u'订阅leg2:{}'.format(leg2_symbol)) leg2_req = SubscribeRequest( symbol=leg2_symbol, exchange=leg2_exchange ) self.subscribe(leg2_req) self.subscribed_symbols.add((req.vt_symbol, req.is_bar)) else: self.write_log(u'{}合成器已经在存在'.format(req.symbol)) return elif req.exchange == Exchange.SPD: self.write_error(u'自定义合约{}不在CTP设置中'.format(req.symbol)) return # 指数合约,从tdx行情订阅 if req.symbol[-2:] in ['99']: req.symbol = req.symbol.upper() if self.tdx_api: self.write_log(f'使用通达信接口订阅{req.symbol}') self.tdx_api.subscribe(req) elif self.rabbit_api: self.write_log(f'使用RabbitMQ接口订阅{req.symbol}') self.rabbit_api.subscribe(req) elif self.tq_api: self.write_log(f'使用天勤接口订阅{req.symbol}') self.tq_api.subscribe(req) else: if req.symbol not in self.subscribed_index_symbols: self.write_log(f'使用本地指数生成器进行订阅') self.subscribe_local_index(req) else: # 上期所、上能源支持五档行情,使用天勤接口 if self.tq_api and req.exchange in [Exchange.SHFE, Exchange.INE]: self.write_log(f'使用天勤接口订阅{req.symbol}') self.tq_api.subscribe(req) if self.l2_md_api and req.exchange in [Exchange.SHFE, Exchange.INE]: self.write_log(f'使用五档行情接口订阅:{req.symbol}') self.l2_md_api.subscribe(req) else: #self.write_log(f'使用CTP接口订阅{req.symbol}') self.md_api.subscribe(req) # Allow the strategies to start before the connection self.subscribed_symbols.add((req.vt_symbol, req.is_bar)) if req.is_bar: self.subscribe_bar(req) except Exception as ex: self.write_error(u'订阅合约异常:{},{}'.format(str(ex), traceback.format_exc())) def subscribe_bar(self, req: SubscribeRequest): """订阅1分钟行情""" vt_symbol = req.vt_symbol if vt_symbol in self.klines: return # 创建1分钟bar产生器 self.write_log(u'创建:{}的一分钟行情产生器'.format(vt_symbol)) bg = BarGenerator(on_bar=self.on_bar) self.klines.update({vt_symbol: bg}) def subscribe_local_index(self, req): """ 订阅本地合约 :param req: :return: """ underlying_symbol = get_underlying_symbol(req.symbol) symbol_info = future_contracts.get(underlying_symbol,None) if symbol_info: generator = IndexGenerator(gateway=self, setting=symbol_info) # 登记订阅真实合约 <=>合成器 关系 for vn_symbol in generator.symbols: self.index_generators[vn_symbol] = generator # 登记指数合约到本地已订阅信息 self.subscribed_index_symbols.append(req.symbol) else: self.write_error(f'{underlying_symbol}信息没有在vnpy/data/tdx/future_contracts.json文件中,不能创建指数订阅') def send_order(self, req: OrderRequest): """""" return self.td_api.send_order(req) def cancel_order(self, req: CancelRequest): """""" self.td_api.cancel_order(req) return True def query_account(self): """""" self.td_api.query_account() def query_position(self): """""" self.td_api.query_position() def query_history(self, req: HistoryRequest) -> List[BarData]: """查询K线历史""" if self.tq_api: return self.tq_api.query_history(req) else: return [] def close(self): """""" if self.md_api: self.write_log('断开行情API',on_log=True) tmp1 = self.md_api self.md_api = None tmp1.close() if self.l2_md_api: self.write_log('断开五档行情API',on_log=True) tmp1 = self.l2_md_api self.l2_md_api = None tmp1.close() if self.td_api: self.write_log('断开交易API',on_log=True) tmp2 = self.td_api self.td_api = None tmp2.close() if self.tdx_api: self.write_log(u'断开tdx指数行情API',on_log=True) tmp3 = self.tdx_api self.tdx_api = None tmp3.close() if self.rabbit_api: self.write_log(u'断开rabbit MQ tdx指数行情API',on_log=True) tmp4 = 
self.rabbit_api self.rabbit_api = None tmp4.close() if self.tq_api: self.write_log(u'断开天勤行情API',on_log=True) tmp5 = self.tq_api self.tq_api = None tmp5.close() def process_timer_event(self, event): """""" self.count += 1 if self.count < 2: return self.count = 0 func = self.query_functions.pop(0) func() self.query_functions.append(func) def init_query(self): """""" self.count = 0 self.query_functions = [self.query_account, self.query_position] self.event_engine.register(EVENT_TIMER, self.process_timer_event) def on_custom_tick(self, tick): """推送自定义合约行情""" # 自定义合约行情 for combiner in self.tick_combiner_map.get(tick.symbol, []): tick = copy(tick) combiner.on_tick(tick) # 推送至指数生成器 if tick.symbol in self.index_generators: tick = copy(tick) # 推送on_tick()方法 self.index_generators[tick.symbol].on_tick(tick) class CtpMdApi(MdApi): """""" def __init__(self, gateway, level2=False): """Constructor""" super(CtpMdApi, self).__init__() self.gateway = gateway self.gateway_name = gateway.gateway_name self.level2 = level2 # 5档行情 if self.level2: self.name = "L2_" else: self.name = "" self.reqid = 0 self.connect_status = False self.login_status = False self.subscribed = set() self.userid = "" self.password = "" self.brokerid = "" # 缓存tick的交易日和当前累计volume self.last_ticks_info = {} # {symbol: {'trading_dat':'xxx', volume:xxx}} def onFrontConnected(self): """ Callback when front server is connected. """ self.gateway.write_log(f"{self.name}行情服务器连接成功",on_log=True) self.connect_status = True self.login() self.gateway.status.update( {f'{self.name}md_con': True, f'{self.name}md_con_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')}) def onFrontDisconnected(self, reason: int): """ Callback when front server is disconnected. """ self.login_status = False self.connect_status = False self.gateway.write_log(f"{self.name}行情服务器连接断开,原因{reason}",on_log=True) self.gateway.status.update( {f'{self.name}md_con': False, f'{self.name}md_dis_con_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')}) def onRspUserLogin(self, data: dict, error: dict, reqid: int, last: bool): """ Callback when user is logged in. """ if not error["ErrorID"]: self.login_status = True self.gateway.write_log(f"{self.name}行情服务器登录成功",on_log=True) for symbol in self.subscribed: self.subscribeMarketData(symbol) else: self.gateway.write_error(f"{self.name}行情服务器登录失败", error) def onRspError(self, error: dict, reqid: int, last: bool): """ Callback when error occured. """ self.gateway.write_error(f"{self.name}行情接口报错", error) def onRspSubMarketData(self, data: dict, error: dict, reqid: int, last: bool): """""" if not error or not error["ErrorID"]: return self.gateway.write_error(f"{self.name}行情订阅失败", error) def onRtnDepthMarketData(self, data: dict): """ Callback of tick data update. 
""" symbol = data["InstrumentID"] exchange = symbol_exchange_map.get(symbol, "") if not exchange: return # 取当前时间 dt = datetime.now() s_date = dt.strftime('%Y-%m-%d') timestamp = f"{s_date} {data['UpdateTime']}.{int(data['UpdateMillisec'] / 100)}" dt = datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S.%f") # 不处理开盘前的tick数据 if dt.hour in [7, 8, 18, 19, 20] and dt.minute <= 59: return if exchange is Exchange.CFFEX and dt.hour == 9 and dt.minute <= 29: return today_volume = data["Volume"] last_tick_info = self.last_ticks_info.get(symbol, {}) trading_day = get_trading_date(dt) last_trading_day = last_tick_info.get('trading_day', None) if last_trading_day == trading_day: volume_changed = max(0, today_volume - last_tick_info.get('volume', 0)) else: volume_changed = today_volume self.last_ticks_info.update({symbol: {'trading_day': trading_day, 'volume': today_volume}}) tick = TickData( symbol=symbol, exchange=exchange, datetime=dt, date=s_date, time=dt.strftime('%H:%M:%S.%f'), trading_day=trading_day, name=symbol_name_map.get(symbol,symbol), volume=today_volume, last_volume=volume_changed, open_interest=data["OpenInterest"], last_price=data["LastPrice"], limit_up=data["UpperLimitPrice"], limit_down=data["LowerLimitPrice"], open_price=adjust_price(data["OpenPrice"]), high_price=adjust_price(data["HighestPrice"]), low_price=adjust_price(data["LowestPrice"]), pre_close=adjust_price(data["PreClosePrice"]), bid_price_1=adjust_price(data["BidPrice1"]), ask_price_1=adjust_price(data["AskPrice1"]), bid_volume_1=data["BidVolume1"], ask_volume_1=data["AskVolume1"], gateway_name=self.gateway_name ) # 处理一下标准套利合约的last_price if '&' in symbol: tick.last_price = (tick.ask_price_1 + tick.bid_price_1) / 2 if data["BidVolume2"] or data["AskVolume2"]: tick.bid_price_2 = adjust_price(data["BidPrice2"]) tick.bid_price_3 = adjust_price(data["BidPrice3"]) tick.bid_price_4 = adjust_price(data["BidPrice4"]) tick.bid_price_5 = adjust_price(data["BidPrice5"]) tick.ask_price_2 = adjust_price(data["AskPrice2"]) tick.ask_price_3 = adjust_price(data["AskPrice3"]) tick.ask_price_4 = adjust_price(data["AskPrice4"]) tick.ask_price_5 = adjust_price(data["AskPrice5"]) tick.bid_volume_2 = adjust_price(data["BidVolume2"]) tick.bid_volume_3 = adjust_price(data["BidVolume3"]) tick.bid_volume_4 = adjust_price(data["BidVolume4"]) tick.bid_volume_5 = adjust_price(data["BidVolume5"]) tick.ask_volume_2 = adjust_price(data["AskVolume2"]) tick.ask_volume_3 = adjust_price(data["AskVolume3"]) tick.ask_volume_4 = adjust_price(data["AskVolume4"]) tick.ask_volume_5 = adjust_price(data["AskVolume5"]) self.gateway.on_tick(tick) self.gateway.on_custom_tick(tick) def connect(self, address: str, userid: str, password: str, brokerid: int): """ Start connection to server. """ self.userid = userid self.password = password self.brokerid = brokerid # If not connected, then start connection first. if not self.connect_status: path = get_folder_path(self.gateway_name.lower()) self.createFtdcMdApi(str(path) + "\\Md") self.registerFront(address) self.init() self.connect_status = True # If already connected, then login immediately. elif not self.login_status: self.login() def login(self): """ Login onto server. """ self.gateway.write_log(f'{self.name}向行情服务器发出登录请求') req = { "UserID": self.userid, "Password": self.password, "BrokerID": self.brokerid } self.reqid += 1 self.reqUserLogin(req, self.reqid) def subscribe(self, req: SubscribeRequest): """ Subscribe to tick data update. 
""" if req.symbol not in self.subscribed: self.gateway.write_log(f'{self.name}订阅:{req.exchange} {req.symbol}') if self.login_status: self.subscribeMarketData(req.symbol) self.subscribed.add(req.symbol) def close(self): """ Close the connection. """ if self.connect_status: self.exit() class CtpTdApi(TdApi): """""" def __init__(self, gateway): """Constructor""" super(CtpTdApi, self).__init__() self.gateway = gateway self.gateway_name = gateway.gateway_name self.reqid = 0 self.order_ref = 0 self.connect_status = False self.login_status = False self.auth_staus = False self.login_failed = False self.userid = "" self.password = "" self.brokerid = "" self.auth_code = "" self.appid = "" self.product_info = "" self.frontid = 0 self.sessionid = 0 self.order_data = [] self.trade_data = [] self.positions = {} self.sysid_orderid_map = {} self.future_contract_changed = False self.accountid = self.userid self.long_option_cost = None # 多头期权动态市值 self.short_option_cost = None # 空头期权动态市值 def onFrontConnected(self): """""" self.gateway.write_log("交易服务器连接成功",on_log=True) self.connect_status = True if self.auth_code: self.gateway.write_log("向交易服务器提交授权码验证") self.authenticate() else: self.gateway.write_log("向交易服务器进行帐号登录") self.login() def onFrontDisconnected(self, reason: int): """""" self.login_status = False self.gateway.write_log(f"交易服务器连接断开,原因{reason}",on_log=True) self.gateway.status.update({'td_con': False, 'td_dis_con_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')}) def onRspAuthenticate(self, data: dict, error: dict, reqid: int, last: bool): """""" if not error['ErrorID']: self.auth_staus = True self.gateway.write_log("交易服务器授权验证成功") self.gateway.status.update({"td_auth": True}) self.login() else: self.gateway.write_error("交易服务器授权验证失败", error) self.gateway.status.update({"td_auth":False}) def onRspUserLogin(self, data: dict, error: dict, reqid: int, last: bool): """""" if not error["ErrorID"]: self.frontid = data["FrontID"] self.sessionid = data["SessionID"] self.login_status = True self.gateway.status.update({'td_con': True, 'td_con_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')}) self.gateway.write_log("交易帐号登录完成",on_log=True) # Confirm settlement req = { "BrokerID": self.brokerid, "InvestorID": self.userid } self.reqid += 1 self.reqSettlementInfoConfirm(req, self.reqid) else: self.login_failed = True self.gateway.status.update({'td_con': False,'td_login_fail_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')}) self.gateway.write_error("交易服务器登录失败", error) def onRspOrderInsert(self, data: dict, error: dict, reqid: int, last: bool): """""" order_ref = data["OrderRef"] orderid = f"{self.frontid}_{self.sessionid}_{order_ref}" symbol = data["InstrumentID"] exchange = symbol_exchange_map[symbol] order_type = OrderType.LIMIT if data["OrderPriceType"] == THOST_FTDC_OPT_LimitPrice and data["TimeCondition"] == THOST_FTDC_TC_IOC: if data["VolumeCondition"] == THOST_FTDC_VC_AV: order_type = OrderType.FAK elif data["VolumeCondition"] == THOST_FTDC_VC_CV: order_type = OrderType.FOK if data["OrderPriceType"] == THOST_FTDC_OPT_AnyPrice: order_type = OrderType.MARKET order = OrderData( symbol=symbol, exchange=exchange, accountid=self.accountid, orderid=orderid, type=order_type, direction=DIRECTION_CTP2VT[data["Direction"]], offset=OFFSET_CTP2VT.get(data["CombOffsetFlag"], Offset.NONE), price=data["LimitPrice"], volume=data["VolumeTotalOriginal"], status=Status.REJECTED, gateway_name=self.gateway_name ) self.gateway.on_order(order) # self.gateway.write_error("交易委托失败", error) def onRspOrderAction(self, data: 
dict, error: dict, reqid: int, last: bool): """""" self.gateway.write_error("交易撤单失败", error) def onRspQueryMaxOrderVolume(self, data: dict, error: dict, reqid: int, last: bool): """""" pass def onRspSettlementInfoConfirm(self, data: dict, error: dict, reqid: int, last: bool): """ Callback of settlment info confimation. """ self.gateway.write_log("结算信息确认成功",on_log=True) while True: self.reqid += 1 n = self.reqQryInstrument({}, self.reqid) if not n: break else: sleep(1) def onRspQryInvestorPosition(self, data: dict, error: dict, reqid: int, last: bool): """""" if not data: return # Check if contract data received if data["InstrumentID"] in symbol_exchange_map: # Get buffered position object key = f"{data['InstrumentID'], data['PosiDirection']}" position = self.positions.get(key, None) if not position: position = PositionData( accountid=self.accountid, symbol=data["InstrumentID"], exchange=symbol_exchange_map[data["InstrumentID"]], direction=DIRECTION_CTP2VT[data["PosiDirection"]], gateway_name=self.gateway_name ) self.positions[key] = position # For SHFE and INE position data update if position.exchange in [Exchange.SHFE, Exchange.INE]: if data["YdPosition"] and not data["TodayPosition"]: position.yd_volume = data["Position"] # For other exchange position data update else: position.yd_volume = data["Position"] - data["TodayPosition"] # Get contract size (spread contract has no size value) size = symbol_size_map.get(position.symbol, 0) # Calculate previous position cost cost = position.price * position.volume * size # Update new position volume position.volume += data["Position"] if data["PositionProfit"] == 0 and position.symbol in option_name_map: position.pnl += data["PositionCost"] - data["OpenCost"] else: position.pnl += data["PositionProfit"] # Calculate average position price if position.volume and size: cost += data["PositionCost"] position.price = cost / (position.volume * size) # Get frozen volume if position.direction == Direction.LONG: position.frozen += data["ShortFrozen"] else: position.frozen += data["LongFrozen"] position.cur_price = self.gateway.prices.get(position.vt_symbol, None) if position.cur_price is None: position.cur_price = position.price # 交易所有时候会给一些奇怪得套利合约,并且主动腿和被动腿是相同得,排除掉这些垃圾合约 if position.symbol.startswith('SP') and '&' in position.symbol: act_symbol, pas_symbol = position.symbol.split(' ')[-1].split('&') if act_symbol != pas_symbol: self.gateway.subscribe(SubscribeRequest(symbol=position.symbol, exchange=position.exchange)) else: # 获取合约的缩写号 underlying_symbol = get_underlying_symbol(position.symbol) dt = datetime.now() # 若为中金所等的合约,白天才提交订阅请求 if not (underlying_symbol in MARKET_DAY_ONLY and not (8 < dt.hour < 16)): self.gateway.subscribe(SubscribeRequest(symbol=position.symbol, exchange=position.exchange)) if last: self.long_option_cost = None self.short_option_cost = None for position in self.positions.values(): if position.symbol in option_name_map: # 重新累计多头期权动态权益 if position.direction == Direction.LONG: if self.long_option_cost is None: self.long_option_cost = position.cur_price * position.volume * symbol_size_map.get( position.symbol, 0) else: self.long_option_cost += position.cur_price * position.volume * symbol_size_map.get( position.symbol, 0) # 重新累计空头期权动态权益 if position.direction == Direction.SHORT: if self.short_option_cost is None: self.short_option_cost = position.cur_price * position.volume * symbol_size_map.get( position.symbol, 0) else: self.short_option_cost += position.cur_price * position.volume * symbol_size_map.get( position.symbol, 0) 
self.gateway.on_position(position) self.positions.clear() def onRspQryTradingAccount(self, data: dict, error: dict, reqid: int, last: bool): """""" if "AccountID" not in data: return if len(self.accountid) == 0: self.accountid = data['AccountID'] balance = float(data["Balance"]) if self.long_option_cost is not None: balance += self.long_option_cost if self.short_option_cost is not None: balance -= self.short_option_cost account = AccountData( accountid=data["AccountID"], pre_balance=round(float(data['PreBalance']), 7), balance=round(balance, 7), frozen=round(data["FrozenMargin"] + data["FrozenCash"] + data["FrozenCommission"], 7), gateway_name=self.gateway_name ) account.available = round(float(data["Available"]), 7) account.commission = round(float(data['Commission']), 7) account.margin = round(float(data['CurrMargin']), 7) account.close_profit = round(float(data['CloseProfit']), 7) # + round( # float(data.get("SpecProductCloseProfit", 0)), 7) account.holding_profit = round(float(data['PositionProfit']), 7) # + round( # float(data.get("SpecProductPositionProfit", 0)), 7) + round( # float(data.get("SpecProductPositionProfitByAlg", 0)), 7) account.trading_day = str(data['TradingDay']) if '-' not in account.trading_day and len(account.trading_day) == 8: account.trading_day = '-'.join( [ account.trading_day[0:4], account.trading_day[4:6], account.trading_day[6:8] ] ) self.gateway.on_account(account) def onRspQryInstrument(self, data: dict, error: dict, reqid: int, last: bool): """ Callback of instrument query. """ product = PRODUCT_CTP2VT.get(data["ProductClass"], None) if product: contract = ContractData( symbol=data["InstrumentID"], exchange=EXCHANGE_CTP2VT[data["ExchangeID"]], name=data["InstrumentName"], product=product, size=data["VolumeMultiple"], pricetick=data["PriceTick"], gateway_name=self.gateway_name ) # if 'SA' in contract.symbol: # self.gateway.write_log(print_dict(data)) # 保证金费率(期权合约的保证金比例数值可能不对,所以设置个0.2的最大值) contract.margin_rate = min(0.2, max(data.get('LongMarginRatio', 0), data.get('ShortMarginRatio', 0))) if contract.margin_rate == 0: contract.margin_rate = 0.1 # For option only if contract.product == Product.OPTION: # Remove C/P suffix of CZCE option product name if contract.exchange == Exchange.CZCE: contract.option_portfolio = data["ProductID"][:-1] else: contract.option_portfolio = data["ProductID"] contract.option_underlying = data["UnderlyingInstrID"] contract.option_type = OPTIONTYPE_CTP2VT.get(data["OptionsType"], None) contract.option_strike = data["StrikePrice"] contract.option_index = str(data["StrikePrice"]) contract.option_expiry = datetime.strptime(data["ExpireDate"], "%Y%m%d") option_name_map[contract.symbol] = contract.name self.gateway.on_contract(contract) symbol_exchange_map[contract.symbol] = contract.exchange symbol_name_map[contract.symbol] = contract.name symbol_size_map[contract.symbol] = contract.size if contract.product == Product.FUTURES: # 生成指数合约信息 underlying_symbol = data["ProductID"] # 短合约名称 underlying_symbol = underlying_symbol.upper() # 只推送普通合约的指数 if len(underlying_symbol) <= 2: idx_contract = index_contracts.get(underlying_symbol, None) if idx_contract is None: idx_contract = deepcopy(contract) idx_contract.symbol = '{}99'.format(underlying_symbol) idx_contract.name = u'{}指数'.format(underlying_symbol) idx_contract.vt_symbol = f'{idx_contract.symbol}.{idx_contract.exchange.value}' self.gateway.on_contract(idx_contract) # 获取data/tdx/future_contracts.json中的合约记录 future_contract = future_contracts.get(underlying_symbol, {}) mi_contract_symbol = 
future_contract.get('mi_symbol', '') margin_rate = float(future_contract.get('margin_rate', 0)) mi_margin_rate = round(idx_contract.margin_rate, 4) if mi_contract_symbol == contract.symbol: if margin_rate != mi_margin_rate: self.gateway.write_log( f"{underlying_symbol}合约主力{mi_contract_symbol} 保证金{margin_rate}=>{mi_margin_rate}") future_contract.update({'margin_rate': mi_margin_rate}) future_contract.update({'symbol_size': idx_contract.size}) future_contract.update({'price_tick': idx_contract.pricetick}) if 'exchange' not in future_contract: future_contract.update({'exchange': contract.exchange.value}) future_contracts.update({underlying_symbol: future_contract}) self.future_contract_changed = True index_contracts.update({underlying_symbol: idx_contract}) if last: self.gateway.write_log("合约信息查询成功") if self.future_contract_changed: self.gateway.write_log('更新vnpy/data/tdx/future_contracts.json') save_future_contracts(future_contracts) for data in self.order_data: self.onRtnOrder(data) self.order_data.clear() for data in self.trade_data: self.onRtnTrade(data) self.trade_data.clear() def onRtnOrder(self, data: dict): """ Callback of order status update. """ symbol = data["InstrumentID"] exchange = symbol_exchange_map.get(symbol, "") if not exchange: self.order_data.append(data) return frontid = data["FrontID"] sessionid = data["SessionID"] order_ref = data["OrderRef"] orderid = f"{frontid}_{sessionid}_{order_ref}" order_type = OrderType.LIMIT if data["OrderPriceType"] == THOST_FTDC_OPT_LimitPrice and data["TimeCondition"] == THOST_FTDC_TC_IOC: if data["VolumeCondition"] == THOST_FTDC_VC_AV: order_type = OrderType.FAK elif data["VolumeCondition"] == THOST_FTDC_VC_CV: order_type = OrderType.FOK if data["OrderPriceType"] == THOST_FTDC_OPT_AnyPrice: order_type = OrderType.MARKET order = OrderData( accountid=self.accountid, symbol=symbol, exchange=exchange, orderid=orderid, sys_orderid=data.get('OrderSysID', orderid), type=order_type, direction=DIRECTION_CTP2VT[data["Direction"]], offset=OFFSET_CTP2VT[data["CombOffsetFlag"]], price=data["LimitPrice"], volume=data["VolumeTotalOriginal"], traded=data["VolumeTraded"], status=STATUS_CTP2VT[data["OrderStatus"]], time=data["InsertTime"], cancel_time=data["CancelTime"], gateway_name=self.gateway_name ) self.gateway.on_order(order) self.sysid_orderid_map[data["OrderSysID"]] = orderid def onRtnTrade(self, data: dict): """ Callback of trade status update. 
""" symbol = data["InstrumentID"] exchange = symbol_exchange_map.get(symbol, "") if not exchange: self.trade_data.append(data) return orderid = self.sysid_orderid_map[data["OrderSysID"]] trade_date = data['TradeDate'] if '-' not in trade_date and len(trade_date) == 8: trade_date = trade_date[0:4] + '-' + trade_date[4:6] + '-' + trade_date[6:8] trade_time = data['TradeTime'] trade_datetime = datetime.strptime(f'{trade_date} {trade_time}', '%Y-%m-%d %H:%M:%S') # print(f'raw_data:{print_dict(data)}') # 修正 郑商所、大商所的TradeDate错误 if exchange in [Exchange.DCE, Exchange.CZCE]: dt_now = datetime.now() # 交易发生在夜盘 if trade_datetime.hour >= 21: # 系统时间在夜盘,使用系统时间 if dt_now.hour >= 21: trade_date = dt_now.strftime('%Y-%m-%d') trade_datetime = datetime.strptime(f'{trade_date} {trade_time}', '%Y-%m-%d %H:%M:%S') # 系统时间在日盘 else: # 星期一 =》 星期五 if dt_now.isoweekday() == 1: trade_datetime -= timedelta(days=3) # print(f'trade time =>{trade_datetime}') # 星期二~星期五 =》上一天 else: trade_datetime -= timedelta(days=1) # print(f'trade time =>{trade_datetime}') tradeid = data["TradeID"] trade = TradeData( accountid=self.accountid, symbol=symbol, exchange=exchange, orderid=orderid, sys_orderid=data.get("OrderSysID", orderid), tradeid=tradeid.replace(' ', ''), direction=DIRECTION_CTP2VT[data["Direction"]], offset=OFFSET_CTP2VT[data["OffsetFlag"]], price=data["Price"], volume=data["Volume"], time=data["TradeTime"], datetime=trade_datetime, gateway_name=self.gateway_name ) self.gateway.on_trade(trade) def connect( self, address: str, userid: str, password: str, brokerid: int, auth_code: str, appid: str, product_info ): """ Start connection to server. """ self.userid = userid self.password = password self.brokerid = brokerid self.auth_code = auth_code self.appid = appid self.product_info = product_info if not self.connect_status: path = get_folder_path(self.gateway_name.lower()) self.createFtdcTraderApi(str(path) + "\\Td") self.subscribePrivateTopic(0) self.subscribePublicTopic(0) self.registerFront(address) self.init() self.gateway.write_log(f'交易前端连接成功') self.connect_status = True else: self.authenticate() def authenticate(self): """ Authenticate with auth_code and appid. """ req = { "UserID": self.userid, "BrokerID": self.brokerid, "AuthCode": self.auth_code, "AppID": self.appid } if self.product_info: req["UserProductInfo"] = self.product_info self.reqid += 1 self.reqAuthenticate(req, self.reqid) def login(self): """ Login onto server. """ if self.login_failed: return req = { "UserID": self.userid, "Password": self.password, "BrokerID": self.brokerid, "AppID": self.appid } self.accountid = copy(self.userid) if self.product_info: req["UserProductInfo"] = self.product_info self.reqid += 1 self.reqUserLogin(req, self.reqid) def send_order(self, req: OrderRequest): """ Send new order. 
""" if req.offset not in OFFSET_VT2CTP: self.gateway.write_log("请选择开平方向") return "" self.order_ref += 1 ctp_req = { "InstrumentID": req.symbol, "ExchangeID": req.exchange.value, "LimitPrice": req.price, "VolumeTotalOriginal": int(req.volume), "OrderPriceType": ORDERTYPE_VT2CTP.get(req.type, ""), "Direction": DIRECTION_VT2CTP.get(req.direction, ""), "CombOffsetFlag": OFFSET_VT2CTP.get(req.offset, ""), "OrderRef": str(self.order_ref), "InvestorID": self.userid, "UserID": self.userid, "BrokerID": self.brokerid, "CombHedgeFlag": THOST_FTDC_HF_Speculation, "ContingentCondition": THOST_FTDC_CC_Immediately, "ForceCloseReason": THOST_FTDC_FCC_NotForceClose, "IsAutoSuspend": 0, "TimeCondition": THOST_FTDC_TC_GFD, "VolumeCondition": THOST_FTDC_VC_AV, "MinVolume": 1 } if req.type == OrderType.FAK: ctp_req["OrderPriceType"] = THOST_FTDC_OPT_LimitPrice ctp_req["TimeCondition"] = THOST_FTDC_TC_IOC ctp_req["VolumeCondition"] = THOST_FTDC_VC_AV elif req.type == OrderType.FOK: ctp_req["OrderPriceType"] = THOST_FTDC_OPT_LimitPrice ctp_req["TimeCondition"] = THOST_FTDC_TC_IOC ctp_req["VolumeCondition"] = THOST_FTDC_VC_CV self.reqid += 1 self.reqOrderInsert(ctp_req, self.reqid) orderid = f"{self.frontid}_{self.sessionid}_{self.order_ref}" order = req.create_order_data(orderid, self.gateway_name) order.accountid = self.accountid order.vt_accountid = f"{self.gateway_name}.{self.accountid}" self.gateway.on_order(order) return order.vt_orderid def cancel_order(self, req: CancelRequest): """ Cancel existing order. """ frontid, sessionid, order_ref = req.orderid.split("_") ctp_req = { "InstrumentID": req.symbol, "ExchangeID": req.exchange.value, "OrderRef": order_ref, "FrontID": int(frontid), "SessionID": int(sessionid), "ActionFlag": THOST_FTDC_AF_Delete, "BrokerID": self.brokerid, "InvestorID": self.userid } self.reqid += 1 self.reqOrderAction(ctp_req, self.reqid) def query_account(self): """ Query account balance data. """ self.reqid += 1 self.reqQryTradingAccount({}, self.reqid) def query_position(self): """ Query position holding data. 
""" if not symbol_exchange_map: return req = { "BrokerID": self.brokerid, "InvestorID": self.userid } self.reqid += 1 self.reqQryInvestorPosition(req, self.reqid) def close(self): """""" if self.connect_status: self.exit() def adjust_price(price: float) -> float: """""" if price == MAX_FLOAT: price = 0 return price class TdxMdApi(): """ 通达信数据行情API实现 订阅的指数行情,更新合约的数据 """ def __init__(self, gateway): self.gateway = gateway # gateway对象 self.gateway_name = gateway.gateway_name # gateway对象名称 self.req_interval = 0.5 # 操作请求间隔500毫秒 self.req_id = 0 # 操作请求编号 self.connection_status = False # 连接状态 self.symbol_exchange_dict = {} # tdx合约与vn交易所的字典 self.symbol_market_dict = {} # tdx合约与tdx市场的字典 self.symbol_vn_dict = {} # tdx合约与vt_symbol的对应 self.symbol_tick_dict = {} # tdx合约与最后一个Tick得字典 self.registered_symbol_set = set() self.thread = None # 查询线程 self.ip_list = TDX_FUTURE_HOSTS # 调出 self.best_ip = {} # 最佳IP地址和端口 self.api = None # API 的连接会话对象 self.last_tick_dt = datetime.now() # 记录该会话对象的最后一个tick时间 self.instrument_count = 50000 self.has_qry_instrument = False # ---------------------------------------------------------------------- def ping(self, ip, port=7709): """ ping行情服务器 :param ip: :param port: :param type_: :return: """ apix = TdxExHq_API() __time1 = datetime.now() try: with apix.connect(ip, port): if apix.get_instrument_count() > 10000: _timestamp = (datetime.now() - __time1).total_seconds() * 1000 self.gateway.write_log('服务器{}:{},耗时:{}ms'.format(ip, port, _timestamp)) return _timestamp else: self.gateway.write_log(u'该服务器IP {}无响应.'.format(ip)) return timedelta(seconds=10).total_seconds() * 1000 except Exception as ex: self.gateway.write_log(u'tdx ping服务器{},异常的响应{}'.format(ip, str(ex))) return timedelta(seconds=10).total_seconds() * 1000 def sort_ip_speed(self): """ 对所有服务器进行速度排序 :return: """ speed_result = [] for x in self.ip_list: speed = self.ping(x['ip'], x['port']) x.update({'speed': speed}) speed_result.append(copy(x)) # 更新服务器,按照速度排序 speed_result = sorted(speed_result, key=lambda s: s['speed']) self.gateway.write_log(u'服务器访问速度排序:{}'.format(speed_result)) return speed_result # ---------------------------------------------------------------------- def select_best_ip(self, exclude_ip: str = None): """ 选择行情服务器 :param: exclude_ip, 排除的ip地址 :return: """ self.gateway.write_log(u'选择通达信行情服务器') ip_list = self.sort_ip_speed() valid_ip_list = [x for x in ip_list if x.get('speed', 10000) < 10000 and x.get('ip') != exclude_ip] if len(valid_ip_list) == 0: self.gateway.write_error(u'未能找到合适速度得行情服务器') return None best_future_ip = valid_ip_list[0] save_cache_json(best_future_ip, TDX_FUTURE_CONFIG) return best_future_ip def connect(self, is_reconnect=False): """ 连接通达讯行情服务器 :param is_reconnect:是否重连 :return: """ # 创建api连接对象实例 try: if self.api is None or not self.connection_status: self.gateway.write_log(u'开始连接通达信行情服务器',on_log=True) self.api = TdxExHq_API(heartbeat=True, auto_retry=True, raise_exception=True) # 选取最佳服务器 if is_reconnect or len(self.best_ip) == 0: self.best_ip = get_cache_json(TDX_FUTURE_CONFIG) if len(self.best_ip) == 0: self.best_ip = self.select_best_ip() self.api.connect(self.best_ip['ip'], self.best_ip['port']) # 尝试获取市场合约统计 c = self.api.get_instrument_count() if c < 10: err_msg = u'该服务器IP {}/{}无响应'.format(self.best_ip['ip'], self.best_ip['port']) self.gateway.write_error(err_msg) else: self.gateway.write_log(u'创建tdx连接, IP: {}/{}'.format(self.best_ip['ip'], self.best_ip['port'])) self.connection_status = True self.gateway.status.update( {'tdx_con': True, 'tdx_con_time': datetime.now().strftime('%Y-%m-%d 
%H:%M%S')}) self.thread = Thread(target=self.run) self.thread.start() except Exception as ex: self.gateway.write_log(u'连接服务器tdx异常:{},{}'.format(str(ex), traceback.format_exc()),on_log=True) return def close(self): """退出API""" self.gateway.write_log(u'退出tdx API',on_log=True) self.connection_status = False if self.thread: self.thread.join() # ---------------------------------------------------------------------- def subscribe(self, subscribeReq): """订阅合约""" # 这里的设计是,如果尚未登录就调用了订阅方法 # 则先保存订阅请求,登录完成后会自动订阅 vn_symbol = str(subscribeReq.symbol) vn_symbol = vn_symbol.upper() self.gateway.write_log(u'通达信行情订阅 {}'.format(str(vn_symbol))) if vn_symbol[-2:] != '99': self.gateway.write_log(u'{}不是指数合约,不能订阅'.format(vn_symbol)) return tdx_symbol = vn_symbol[0:-2] + 'L9' tdx_symbol = tdx_symbol.upper() self.gateway.write_log(u'{}=>{}'.format(vn_symbol, tdx_symbol)) self.symbol_vn_dict[tdx_symbol] = vn_symbol if tdx_symbol not in self.registered_symbol_set: self.registered_symbol_set.add(tdx_symbol) self.check_status() def check_status(self): """ 检查通达信直连状态 :return: """ # self.write_log(u'检查tdx接口状态') if len(self.registered_symbol_set) == 0: return # 若还没有启动连接,就启动连接 over_time = (datetime.now() - self.last_tick_dt).total_seconds() > 60 if not self.connection_status or self.api is None or over_time: self.gateway.write_log(u'tdx还没有启动连接,就启动连接') self.close() self.thread = None self.connect(is_reconnect=True) def qry_instrument(self): """ 查询/更新合约信息 :return: """ if not self.connection_status: self.gateway.write_error(u'tdx连接状态为断开,不能查询和更新合约信息') return if self.has_qry_instrument: self.gateway.write_error(u'已经查询过一次合约信息,不再查询') return # 取得所有的合约信息 num = self.api.get_instrument_count() if not isinstance(num, int): return all_contacts = sum( [self.api.get_instrument_info((int(num / 500) - i) * 500, 500) for i in range(int(num / 500) + 1)], []) # [{"category":category,"market": int,"code":sting,"name":string,"desc":string},{}] # 对所有合约处理,更新字典 指数合约-tdx市场,指数合约-交易所 for tdx_contract in all_contacts: tdx_symbol = tdx_contract.get('code', None) if tdx_symbol is None or tdx_symbol[-2:] not in ['L9']: continue tdx_market_id = tdx_contract.get('market') self.symbol_market_dict[tdx_symbol] = tdx_market_id if tdx_market_id == 47: # 中金所 self.symbol_exchange_dict[tdx_symbol] = Exchange.CFFEX elif tdx_market_id == 28: # 郑商所 self.symbol_exchange_dict[tdx_symbol] = Exchange.CZCE elif tdx_market_id == 29: # 大商所 self.symbol_exchange_dict[tdx_symbol] = Exchange.DCE elif tdx_market_id == 30: # 上期所+能源 self.symbol_exchange_dict[tdx_symbol] = Exchange.SHFE elif tdx_market_id == 60: # 主力合约 self.gateway.write_log(u'主力合约:{}'.format(tdx_contract)) self.has_qry_instrument = True def run(self): # 直接查询板块 try: last_dt = datetime.now() self.gateway.write_log(u'开始运行tdx查询指数行情线程,{}'.format(last_dt)) while self.connection_status: if len(self.registered_symbol_set) > 0: try: self.process_index_req() except BrokenPipeError as bex: self.gateway.write_error(u'BrokenPipeError{},重试重连tdx[{}]'.format(str(bex), 0)) self.connect(is_reconnect=True) sleep(5) break except Exception as ex: self.gateway.write_error(u'tdx exception:{},{}'.format(str(ex), traceback.format_exc())) self.gateway.write_error(u'重试重连tdx') self.connect(is_reconnect=True) sleep(self.req_interval) dt = datetime.now() if last_dt.minute != dt.minute: self.gateway.write_log( 'tdx check point. 
{}, process symbols:{}'.format(dt, self.registered_symbol_set))
                    last_dt = dt
            except Exception as ex:
                self.gateway.write_error(u'tdx thread.run exception:{},{}'.format(str(ex), traceback.format_exc()))

        self.gateway.write_error(u'tdx query thread exiting at {}'.format(datetime.now()))

    def process_index_req(self):
        """Build index ticks from the TDX sector quote list"""
        # Fetch all quotes for the TDX index sector
        rt_list = self.api.get_instrument_quote_list(42, 3, 0, 100)

        if rt_list is None or len(rt_list) == 0:
            self.gateway.write_log(u'tdx: rt_list is empty')
            return
        # Record the last update time seen on this interface
        self.last_tick_dt = datetime.now()

        for d in list(rt_list):
            tdx_symbol = d.get('code', None)
            if tdx_symbol not in self.registered_symbol_set and tdx_symbol is not None:
                continue
            # tdx_symbol => vn_symbol
            vn_symbol = self.symbol_vn_dict.get(tdx_symbol, None)
            if vn_symbol is None:
                self.gateway.write_error(u'self.symbol_vn_dict has no mapping for: {}'.format(tdx_symbol))
                continue
            # vn_symbol => exchange
            exchange = self.symbol_exchange_dict.get(tdx_symbol, None)
            underlying_symbol = get_underlying_symbol(vn_symbol)
            if exchange is None:
                symbol_info = future_contracts.get(underlying_symbol, None)
                if not symbol_info:
                    continue
                exchange_value = symbol_info.get('exchange', None)
                exchange = Exchange(exchange_value)
                if exchange is None:
                    continue
                self.symbol_exchange_dict.update({tdx_symbol: exchange})

            tick_datetime = datetime.now()
            # Fix up the milliseconds
            last_tick = self.symbol_tick_dict.get(vn_symbol, None)
            if (last_tick is not None) and tick_datetime.replace(microsecond=0) == last_tick.datetime:
                # Same time as the previous tick (microseconds dropped): bump to 500ms
                tick_datetime = tick_datetime.replace(microsecond=500)
            else:
                tick_datetime = tick_datetime.replace(microsecond=0)

            tick = TickData(gateway_name=self.gateway_name,
                            symbol=vn_symbol,
                            exchange=exchange,
                            datetime=tick_datetime)
            tick.pre_close = float(d.get('ZuoJie', 0.0))
            tick.high_price = float(d.get('ZuiGao', 0.0))
            tick.open_price = float(d.get('JinKai', 0.0))
            tick.low_price = float(d.get('ZuiDi', 0.0))
            tick.last_price = float(d.get('MaiChu', 0.0))
            tick.volume = int(d.get('XianLiang', 0))
            tick.last_volume = tick.volume
            tick.open_interest = d.get('ChiCangLiang')
            tick.time = tick.datetime.strftime('%H:%M:%S.%f')[0:12]
            tick.date = tick.datetime.strftime('%Y-%m-%d')
            tick.trading_day = get_trading_date(tick.datetime)
            # Indexes have no price limits, so use +/-10% of the previous close
            tick.limit_up = tick.pre_close * 1.1
            tick.limit_down = tick.pre_close * 0.9
            # Like CTP, only one level of depth
            tick.bid_price_1 = float(d.get('MaiRuJia', 0.0))
            tick.bid_volume_1 = int(d.get('MaiRuLiang', 0))
            tick.ask_price_1 = float(d.get('MaiChuJia', 0.0))
            tick.ask_volume_1 = int(d.get('MaiChuLiang', 0))

            # Drop ticks outside trading hours
            if tick.exchange is Exchange.CFFEX:
                if tick.datetime.hour not in [9, 10, 11, 13, 14, 15]:
                    continue
                if tick.datetime.hour == 9 and tick.datetime.minute < 15:
                    continue
                # Drop the morning break 11:30~12:00
                if tick.datetime.hour == 11 and tick.datetime.minute >= 30:
                    continue
                if tick.datetime.hour == 15 and tick.datetime.minute >= 15 and underlying_symbol in ['T', 'TF', 'TS']:
                    continue
                if tick.datetime.hour == 15 and underlying_symbol in ['IH', 'IF', 'IC']:
                    continue
            else:
                # DCE/CZCE, SHFE, INE
                # Drop hours when the market is closed
                if tick.datetime.hour in [3, 4, 5, 6, 7, 8, 12, 15, 16, 17, 18, 19, 20]:
                    continue
                # Drop the morning break 10:15~10:30
                if tick.datetime.hour == 10 and 15 <= tick.datetime.minute < 30:
                    continue
                # Drop the morning break 11:30~12:00
                if tick.datetime.hour == 11 and tick.datetime.minute >= 30:
                    continue
                # Drop the midday break 13:00~13:30
                if tick.datetime.hour == 13 and tick.datetime.minute < 30:
                    continue
                # Drop the small hours 2:30~3:00
                if tick.datetime.hour == 2 and tick.datetime.minute >= 30:
                    continue

            # Drop DCE/CZCE/SHFE night-session data for contracts whose night session closes at 23:00
            if underlying_symbol in NIGHT_MARKET_23:
                if tick.datetime.hour in [23, 0, 1, 2]:
                    continue
            # Drop SHFE night-session data for contracts whose night session closes at 1:00
            if underlying_symbol in NIGHT_MARKET_SQ2:
                if tick.datetime.hour in [1, 2]:
                    continue
            # Drop night-session data for day-session-only contracts
            if underlying_symbol in MARKET_DAY_ONLY and (tick.datetime.hour < 9 or tick.datetime.hour > 16):
                # self.write_log(u'dropping night-session data for day-session contract {}'.format(short_symbol))
                continue

            # self.gateway.write_log(f'{tick.__dict__}')
            pre_tick = self.symbol_tick_dict.get(tick.symbol, None)
            self.symbol_tick_dict[tick.symbol] = tick
            # Drop anomalous index data (some tdx servers misbehave and return
            # prices deviating more than 20% from the previous tick)
            if pre_tick:
                if tick.last_price > pre_tick.last_price * 1.2 or tick.last_price < pre_tick.last_price * 0.8:
                    continue
            self.gateway.on_tick(tick)
            self.gateway.on_custom_tick(tick)


class SubMdApi():
    """
    RabbitMQ subscriber: market data receiving API
    """

    def __init__(self, gateway):
        self.gateway = gateway
        self.gateway_name = gateway.gateway_name
        self.symbol_tick_dict = {}  # symbol => last tick
        self.registed_symbol_set = set()  # set of subscribed symbols
        self.last_tick_dt = None
        self.sub = None
        self.setting = {}
        self.connect_status = False
        self.thread = None

    def connect(self, setting={}):
        """Connect"""
        self.setting = setting
        try:
            self.sub = subscriber(
                host=self.setting.get('host', 'localhost'),
                port=self.setting.get('port', 5672),
                user=self.setting.get('user', 'admin'),
                password=self.setting.get('password', 'admin'),
                exchange=self.setting.get('exchange', 'x_fanout_idx_tick'))
            self.sub.set_callback(self.on_message)
            self.thread = Thread(target=self.sub.start)
            self.thread.start()
            self.connect_status = True
            self.gateway.status.update({'sub_con': True, 'sub_con_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')})
        except Exception as ex:
            self.gateway.write_error(u'RabbitMQ connect {} exception:{}'.format(self.setting, str(ex)))
            self.gateway.write_error(traceback.format_exc())
            self.connect_status = False

    def check_status(self):
        """Health check for this interface"""
        self.gateway.write_log("checking the sub interface status")
        # subscribed symbols
        d = {'sub_symbols': sorted(self.symbol_tick_dict.keys())}
        # time of the last tick
        if self.last_tick_dt:
            d.update({"sub_tick_time": self.last_tick_dt.strftime('%Y-%m-%d %H:%M:%S')})

        if len(self.symbol_tick_dict) > 0:
            dt_now = datetime.now()
            hh_mm = dt_now.hour * 100 + dt_now.minute
            # inside futures trading hours
            if 900 <= hh_mm <= 1130 or 1300 <= hh_mm <= 1500 or hh_mm < 230 or hh_mm >= 2100:
                # no data has arrived yet
                if self.last_tick_dt is None:
                    d.update({"sub_status": False, "sub_error": u"no market data arrived over rabbitmq"})
                else:
                    # data has arrived; stale for more than 15 minutes?
                    if (dt_now - self.last_tick_dt).total_seconds() > 60 * 15:
                        d.update({"sub_status": False, "sub_error": u"{} rabbitmq market data stale for more than 15 minutes".format(hh_mm)})
                    else:
                        d.update({"sub_status": True})
                        self.gateway.status.pop("sub_error", None)
            # outside trading hours
            else:
                self.gateway.status.pop("sub_status", None)
                self.gateway.status.pop("sub_error", None)
        # push the result into the gateway status
        self.gateway.status.update(d)

    def on_message(self, chan, method_frame, _header_frame, body, userdata=None):
        # print(" [x] %r" % body)
        try:
            str_tick = body.decode('utf-8')
            d = json.loads(str_tick)
            d.pop('rawData', None)
            d = self.conver_update(d)
            symbol = d.pop('symbol', None)
            str_datetime = d.pop('datetime', None)
            if symbol not in self.registed_symbol_set or str_datetime is None:
                return
            if '.' in str_datetime:
                dt = datetime.strptime(str_datetime, '%Y-%m-%d %H:%M:%S.%f')
            else:
                dt = datetime.strptime(str_datetime, '%Y-%m-%d %H:%M:%S')
            tick = TickData(gateway_name=self.gateway_name,
                            exchange=Exchange(d.get('exchange')),
                            symbol=symbol,
                            datetime=dt)
            d.pop('exchange', None)
            d.pop('symbol', None)
            tick.__dict__.update(d)
            if len(tick.trading_day) == 0:
                tick.trading_day = get_trading_date(dt)

            pre_tick = self.symbol_tick_dict.get(symbol, None)
            self.symbol_tick_dict[symbol] = tick
            # Drop anomalous index data (some tdx servers misbehave and return
            # prices deviating more than 20% from the previous tick)
            if pre_tick:
                if tick.last_price > pre_tick.last_price * 1.2 or tick.last_price < pre_tick.last_price * 0.8:
                    return
            self.last_tick_dt = tick.datetime
            self.gateway.on_tick(tick)
            self.gateway.on_custom_tick(tick)

        except Exception as ex:
            self.gateway.write_error(u'RabbitMQ on_message exception:{}'.format(str(ex)))
            self.gateway.write_error(traceback.format_exc())

    def conver_update(self, d):
        """Convert dict: vnpy1 tick dict => vnpy2 tick dict"""
        if 'vtSymbol' not in d:
            return d
        symbol = d.get('symbol')
        exchange = d.get('exchange')
        vtSymbol = d.pop('vtSymbol', symbol)
        if '.' not in symbol:
            d.update({'vt_symbol': f'{symbol}.{exchange}'})
        else:
            d.update({'vt_symbol': f'{symbol}.{Exchange.LOCAL.value}'})

        # trade data
        d.update({'last_price': d.pop('lastPrice', 0.0)})  # last traded price
        d.update({'last_volume': d.pop('lastVolume', 0)})  # last traded volume
        d.update({'open_interest': d.pop('openInterest', 0)})  # open interest
        d.update({'trading_day': d.pop('tradingDay', get_trading_date())})

        # regular quote fields
        d.update({'open_price': d.pop('openPrice', 0)})  # today's open
        d.update({'high_price': d.pop('highPrice', 0)})  # today's high
        d.update({'low_price': d.pop('lowPrice', 0)})  # today's low
        d.update({'pre_close': d.pop('preClosePrice', 0)})  # previous close
        d.update({'limit_up': d.pop('upperLimit', 0)})  # limit-up price
        d.update({'limit_down': d.pop('lowerLimit', 0)})  # limit-down price

        # five levels of depth
        d.update({'bid_price_1': d.pop('bidPrice1', 0.0)})
        d.update({'bid_price_2': d.pop('bidPrice2', 0.0)})
        d.update({'bid_price_3': d.pop('bidPrice3', 0.0)})
        d.update({'bid_price_4': d.pop('bidPrice4', 0.0)})
        d.update({'bid_price_5': d.pop('bidPrice5', 0.0)})
        d.update({'ask_price_1': d.pop('askPrice1', 0.0)})
        d.update({'ask_price_2': d.pop('askPrice2', 0.0)})
        d.update({'ask_price_3': d.pop('askPrice3', 0.0)})
        d.update({'ask_price_4': d.pop('askPrice4', 0.0)})
        d.update({'ask_price_5': d.pop('askPrice5', 0.0)})

        d.update({'bid_volume_1': d.pop('bidVolume1', 0.0)})
        d.update({'bid_volume_2': d.pop('bidVolume2', 0.0)})
        d.update({'bid_volume_3': d.pop('bidVolume3', 0.0)})
        d.update({'bid_volume_4': d.pop('bidVolume4', 0.0)})
        d.update({'bid_volume_5': d.pop('bidVolume5', 0.0)})
        d.update({'ask_volume_1': d.pop('askVolume1', 0.0)})
        d.update({'ask_volume_2': d.pop('askVolume2', 0.0)})
        d.update({'ask_volume_3': d.pop('askVolume3', 0.0)})
        d.update({'ask_volume_4': d.pop('askVolume4', 0.0)})
        d.update({'ask_volume_5': d.pop('askVolume5', 0.0)})
        return d

    def close(self):
        """Shut down the API"""
        self.gateway.write_log(u'closing the rabbit market data subscription API')
        self.connect_status = False

        try:
            if self.sub:
                self.gateway.write_log(u'closing the subscriber')
                self.sub.close()
            if self.thread is not None:
                self.gateway.write_log(u'closing the subscriber receive thread')
                self.thread.join()
        except Exception as ex:
            self.gateway.write_error(u'exception while closing the rabbitMQ market data api:{}'.format(str(ex)))

    # ----------------------------------------------------------------------
    def subscribe(self, subscribeReq):
        """Subscribe to a contract"""
        # By design, if subscribe() is called before login the request is
        # saved here and replayed automatically once login completes.
        vn_symbol = str(subscribeReq.symbol)
        vn_symbol = vn_symbol.upper()
        if vn_symbol not in self.registed_symbol_set:
            self.registed_symbol_set.add(vn_symbol)
            self.gateway.write_log(u'RabbitMQ market data subscription {}'.format(str(vn_symbol)))


class TqMdApi():
    """TianQin (tqsdk) market data API"""

    def __init__(self, gateway):
        """"""
        super().__init__()
        self.gateway = gateway
        self.gateway_name = gateway.gateway_name
        self.api = None
        self.is_connected = False
        self.subscribe_array = []
        # list of quote objects
        self.quote_objs = []
        # data update thread
        self.update_thread = None
        # all contracts
        self.all_instruments = []

        self.ticks = {}
        self.last_ticks_info = {}

    def connect(self, setting):
        """"""
        try:
            from tqsdk import TqApi
            self.api = TqApi(url="wss://u.shinnytech.com/t/md/front/mobile")
        except Exception as e:
            self.gateway.write_log(f'TqApi market data API connect exception: {e}', on_log=True)
        if self.api:
            self.is_connected = True
            self.gateway.write_log('TqApi market data API connected')
            self.update_thread = Thread(target=self.update)
            self.update_thread.start()

    def check_status(self):
        """Check interface status"""
        pass

    def generate_tick_from_quote(self, vt_symbol, quote) -> TickData:
        """
        Build a TickData from a tqsdk quote
        """
        # scrub NaN values
        quote = {k: 0 if v != v else v for k, v in quote.items()}
        symbol, exchange = extract_vt_symbol(vt_symbol)
        tick_dt = datetime.strptime(quote["datetime"], "%Y-%m-%d %H:%M:%S.%f")
        today_volume = quote["volume"]
        last_tick_info = self.last_ticks_info.get(symbol, {})
        trading_day = get_trading_date(tick_dt)
        last_trading_day = last_tick_info.get('trading_day', None)
        if last_trading_day == trading_day:
            volume_changed = max(0, today_volume - last_tick_info.get('volume', 0))
        else:
            volume_changed = today_volume
        self.last_ticks_info.update({symbol: {'trading_day': trading_day, 'volume': today_volume}})

        tick = TickData(
            symbol=symbol,
            exchange=exchange,
            datetime=tick_dt,
            trading_day=trading_day,
            name=symbol,
            volume=today_volume,
            last_volume=volume_changed,
            open_interest=quote["open_interest"],
            last_price=quote["last_price"],
            limit_up=quote["upper_limit"],
            limit_down=quote["lower_limit"],
            open_price=quote["open"],
            high_price=quote["highest"],
            low_price=quote["lowest"],
            pre_close=quote["pre_close"],
            bid_price_1=quote["bid_price1"],
            bid_price_2=quote["bid_price2"],
            bid_price_3=quote["bid_price3"],
            bid_price_4=quote["bid_price4"],
            bid_price_5=quote["bid_price5"],
            ask_price_1=quote["ask_price1"],
            ask_price_2=quote["ask_price2"],
            ask_price_3=quote["ask_price3"],
            ask_price_4=quote["ask_price4"],
            ask_price_5=quote["ask_price5"],
            bid_volume_1=quote["bid_volume1"],
            bid_volume_2=quote["bid_volume2"],
            bid_volume_3=quote["bid_volume3"],
            bid_volume_4=quote["bid_volume4"],
            bid_volume_5=quote["bid_volume5"],
            ask_volume_1=quote["ask_volume1"],
            ask_volume_2=quote["ask_volume2"],
            ask_volume_3=quote["ask_volume3"],
            ask_volume_4=quote["ask_volume4"],
            ask_volume_5=quote["ask_volume5"],
            gateway_name=self.gateway_name
        )
        if symbol.endswith('99') and tick.ask_price_1 == 0.0 and tick.bid_price_1 == 0.0:
            price_tick = quote['price_tick']
            if isinstance(price_tick, float) or isinstance(price_tick, int):
                tick.ask_price_1 = tick.last_price + price_tick
                tick.ask_volume_1 = 1
                tick.bid_price_1 = tick.last_price - price_tick
                tick.bid_volume_1 = 1
        return tick

    def update(self) -> None:
        """
        Update quotes/orders/accounts/positions
        """
        while self.is_connected:
            deadline = time() + 5
            self.api.wait_update(deadline=deadline)
            # refresh quote data
            for vt_symbol, quote in self.quote_objs:
                if self.api.is_changing(quote):
                    tick = self.generate_tick_from_quote(vt_symbol, quote)
                    if tick:
                        self.gateway.on_tick(tick)
                        self.gateway.on_custom_tick(tick)

    def subscribe(self, req: SubscribeRequest) -> None:
        """
        Subscribe to market data
        """
        if req.vt_symbol not in self.subscribe_array:
            symbol, exchange = extract_vt_symbol(req.vt_symbol)
            try:
                quote = self.api.get_quote(vt_to_tq_symbol(symbol, exchange))
                self.quote_objs.append((req.vt_symbol, quote))
                self.subscribe_array.append(req.vt_symbol)
            except Exception as ex:
                self.gateway.write_log('TqApi subscribe exception: {}'.format(str(ex)), on_log=True)

    def query_contracts(self) -> None:
        """"""
        self.all_instruments = [
            v for k, v in self.api._data["quotes"].items() if v["expired"] == False
        ]
        for contract in self.all_instruments:
            if (
                    "SSWE" in contract["instrument_id"]
                    or "CSI" in contract["instrument_id"]
            ):
                # vnpy does not know these two exchanges; patch vnpy yourself if you need them
                continue
            vt_symbol = tq_to_vt_symbol(contract["instrument_id"])
            symbol, exchange = extract_vt_symbol(vt_symbol)
            if TQ2VT_TYPE[contract["ins_class"]] == Product.OPTION:
                contract_data = ContractData(
                    symbol=symbol,
                    exchange=exchange,
                    name=symbol,
                    product=TQ2VT_TYPE[contract["ins_class"]],
                    size=contract["volume_multiple"],
                    pricetick=contract["price_tick"],
                    history_data=True,
                    option_strike=contract["strike_price"],
                    option_underlying=tq_to_vt_symbol(contract["underlying_symbol"]),
                    option_type=OptionType[contract["option_class"]],
                    option_expiry=datetime.fromtimestamp(contract["expire_datetime"]),
                    option_index=tq_to_vt_symbol(contract["underlying_symbol"]),
                    gateway_name=self.gateway_name,
                )
            else:
                contract_data = ContractData(
                    symbol=symbol,
                    exchange=exchange,
                    name=symbol,
                    product=TQ2VT_TYPE[contract["ins_class"]],
                    size=contract["volume_multiple"],
                    pricetick=contract["price_tick"],
                    history_data=True,
                    gateway_name=self.gateway_name,
                )
            self.gateway.on_contract(contract_data)

    def query_history(self, req: HistoryRequest) -> List[BarData]:
        """
        Fetch historical bars
        """
        symbol = req.symbol
        exchange = req.exchange
        interval = req.interval
        start = req.start
        end = req.end
        # data in the form TianQin expects
        tq_symbol = vt_to_tq_symbol(symbol, exchange)
        tq_interval = INTERVAL_VT2TQ.get(interval)
        end += timedelta(1)
        total_days = end - start
        # at most 8964 bars can be downloaded per request
        min_length = min(8964, total_days.days * 500)
        df = self.api.get_kline_serial(tq_symbol, tq_interval, min_length).sort_values(
            by=["datetime"]
        )
        # align timestamps
        df["datetime"] = pd.to_datetime(df["datetime"] + TIME_GAP)
        # filter by start/end time
        df = df[(df["datetime"] >= start - timedelta(days=1)) & (df["datetime"] < end)]
        data: List[BarData] = []
        if df is not None:
            for ix, row in df.iterrows():
                bar = BarData(
                    symbol=symbol,
                    exchange=exchange,
                    interval=interval,
                    datetime=row["datetime"].to_pydatetime(),
                    open_price=row["open"],
                    high_price=row["high"],
                    low_price=row["low"],
                    close_price=row["close"],
                    volume=row["volume"],
                    open_interest=row.get("close_oi", 0),
                    gateway_name=self.gateway_name,
                )
                data.append(bar)
        return data

    def close(self) -> None:
        """"""
        try:
            if self.api:
                self.api.close()
                self.is_connected = False
            if self.update_thread:
                self.update_thread.join()
        except Exception as e:
            self.gateway.write_log('exception while closing the TqApi market data api: {}'.format(str(e)), on_log=True)
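
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the gateway above): the cumulative-to-
# per-tick volume bookkeeping used by TqMdApi.generate_tick_from_quote,
# isolated for clarity. Exchanges report *cumulative* session volume, so the
# gateway keeps the last (trading_day, volume) per symbol and derives the
# increment, resetting the baseline on a new trading day. Names here are
# hypothetical.
_last_volume_info = {}  # symbol -> {'trading_day': str, 'volume': int}


def _per_tick_volume(symbol, trading_day, cumulative_volume):
    prev = _last_volume_info.get(symbol, {})
    if prev.get('trading_day') == trading_day:
        # same session: diff against the last snapshot, floored at zero to
        # absorb out-of-order or corrected snapshots
        delta = max(0, cumulative_volume - prev.get('volume', 0))
    else:
        # new session: the first cumulative reading is the whole increment
        delta = cumulative_volume
    _last_volume_info[symbol] = {'trading_day': trading_day, 'volume': cumulative_volume}
    return delta

# Example: _per_tick_volume('rb2110', '20210601', 100) -> 100,
# then _per_tick_volume('rb2110', '20210601', 130) -> 30,
# then _per_tick_volume('rb2110', '20210602', 20) -> 20 (new session).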
ServiceController.py
from honeygrove import log
from honeygrove.config import Config
from honeygrove.services import ServiceBaseModel
# from honeygrove.tests.testresources import serviceControllerTestPkg # Actually used

import threading

from twisted.internet import reactor


class ServiceController():
    def __init__(self):
        """
        Instantiates all subclasses of ServiceBaseModel and keeps track of them in a dict.
        """
        threading.Thread(target=reactor.run, args=(False,)).start()

        self.serviceList = []
        for service in ServiceBaseModel.ServiceBaseModel.__subclasses__():
            self.serviceList.append(service())

        self.serviceDict = dict([(service._name, service) for service in self.serviceList])
        self.listen = self.serviceDict[Config.listen.name]
        self.runningServicesDict = dict([])

    def startService(self, name):
        """
        Starts the given service and adds it to runningServicesDict
        :param name: Name of the service (str)
        """
        service = self.serviceDict[name]
        address = service._address
        if service._port:
            address += ":{}".format(service._port)
        log.info("{}: Starting on {}".format(name, address))
        if name not in self.runningServicesDict:
            if name not in Config.multiple_port_services:
                self.listen.stopOnPort(service._port)
            service.startService()
            self.runningServicesDict[name] = service
            return True
        else:
            return False

    def stopService(self, name):
        """
        Stops the given service and removes it from runningServicesDict
        :param name: Name of the service (str)
        """
        log.info("Stop Service: " + name)
        if name in self.runningServicesDict:
            self.serviceDict[name].stopService()
            self.runningServicesDict.pop(name)
            if name not in Config.noPortSpecificService:
                self.listen.startOnPort(self.serviceDict[name]._port)
            reactor.callFromThread(reactor.stop)
            return True
        else:
            reactor.callFromThread(reactor.stop)
            return False
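
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the module above). The service name is
# hypothetical: startService()/stopService() expect whatever the
# ServiceBaseModel subclasses register under their _name attribute.
if __name__ == '__main__':
    controller = ServiceController()
    # startService() returns False when the service is already running
    if controller.startService('SSH'):
        log.info('service started')
    # stopService() hands the port back to the global listener
    controller.stopService('SSH')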
test_websocket_integration.py
# -*- coding: utf-8 -*- # This code is part of Qiskit. # # (C) Copyright IBM 2018, 2019. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Test for the Websocket client integration.""" from unittest import mock from threading import Thread from queue import Queue from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister from qiskit.test import slow_test from qiskit.compiler import assemble, transpile from qiskit.providers import JobTimeoutError from qiskit.providers.ibmq.api.clients.websocket import ( WebsocketClient, WebsocketAuthenticationMessage) from qiskit.providers.ibmq.api.clients import AccountClient from qiskit.providers.jobstatus import JobStatus from ...ibmqtestcase import IBMQTestCase from ...decorators import requires_provider, requires_device class TestWebsocketIntegration(IBMQTestCase): """Websocket integration tests.""" @requires_provider def setUp(self, provider): # pylint: disable=arguments-differ self.provider = provider self.sim_backend = self.provider.get_backend(simulator=True) # Create a circuit qr = QuantumRegister(1) cr = ClassicalRegister(1) self.qc1 = QuantumCircuit(qr, cr, name='qc1') self.qc1.measure(qr[0], cr[0]) # Create a default Qobj using the simulator. self.circuit = transpile(self.qc1, backend=self.sim_backend) self.qobj = assemble(self.circuit, backend=self.sim_backend, shots=1) def test_websockets_simulator(self): """Test checking status of a job via websockets for a simulator.""" job = self.sim_backend.run(self.qobj) # Manually disable the non-websocket polling. job._api._job_final_status_polling = None result = job.result() self.assertEqual(result.status, 'COMPLETED') @slow_test @requires_device def test_websockets_device(self, backend): """Test checking status of a job via websockets for a device.""" qc = transpile(self.qc1, backend=backend) qobj = assemble(qc, backend=backend) job = backend.run(qobj) # Manually disable the non-websocket polling. job._api._job_final_status_polling = None job.wait_for_final_state(wait=300, callback=self.simple_job_callback) result = job.result() self.assertTrue(result.success) def test_websockets_job_final_state(self): """Test checking status of a job in a final state via websockets.""" job = self.sim_backend.run(self.qobj) job._wait_for_completion() # Manually disable the non-websocket polling. job._api._job_final_status_polling = None # Pretend we haven't seen the final status job._status = JobStatus.RUNNING job._wait_for_completion() self.assertIs(job._status, JobStatus.DONE) def test_websockets_retry_bad_url(self): """Test http retry after websocket error due to an invalid URL.""" job = self.sim_backend.run(self.qobj) saved_websocket_url = job._api.client_ws.websocket_url try: # Use fake websocket address. job._api.client_ws.websocket_url = 'wss://wss.localhost' # _wait_for_completion() should retry with http successfully # after getting websockets error. 
job._wait_for_completion() finally: job._api.client_ws.websocket_url = saved_websocket_url self.assertIs(job._status, JobStatus.DONE) @mock.patch.object(WebsocketClient, '_authentication_message', return_value=WebsocketAuthenticationMessage( type_='authentication', data='phantom_token')) def test_websockets_retry_bad_auth(self, _): """Test http retry after websocket error due to a failed authentication.""" job = self.sim_backend.run(self.qobj) with mock.patch.object(AccountClient, 'job_status', side_effect=job._api.job_status) as mocked_wait: job._wait_for_completion() self.assertIs(job._status, JobStatus.DONE) mocked_wait.assert_called_with(job.job_id()) def test_websockets_retry_connection_closed(self): """Test http retry after websocket error due to closed connection.""" def _job_status_side_effect(*args, **kwargs): """Side effect function to restore job ID""" # pylint: disable=unused-argument job._job_id = saved_job_id return saved_job_status(saved_job_id) job = self.sim_backend.run(self.qobj) # Save the originals. saved_job_id = job._job_id saved_job_status = job._api.job_status # Use bad job ID to fail the status retrieval. job._job_id = '12345' # job.result() should retry with http successfully after getting websockets error. with mock.patch.object(AccountClient, 'job_status', side_effect=_job_status_side_effect): job._wait_for_completion() self.assertIs(job._status, JobStatus.DONE) def test_websockets_timeout(self): """Test timeout checking status of a job via websockets.""" qc = transpile(self.qc1, backend=self.sim_backend) qobj = assemble(qc, backend=self.sim_backend, shots=2048) job = self.sim_backend.run(qobj) with self.assertRaises(JobTimeoutError): job.result(timeout=0.1) def test_websockets_multi_job(self): """Test checking status of multiple jobs in parallel via websockets.""" def _run_job_get_result(q): job = self.sim_backend.run(self.qobj) # Manually disable the non-websocket polling. job._api._job_final_status_polling = None job._wait_for_completion() if job._status is not JobStatus.DONE: q.put(False) max_threads = 2 result_q = Queue() job_threads = [] for i in range(max_threads): job_thread = Thread(target=_run_job_get_result, args=(result_q,), name="job_result_{}".format(i), daemon=True) job_thread.start() job_threads.append(job_thread) for job_thread in job_threads: job_thread.join() self.assertTrue(result_q.empty())
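
# ---------------------------------------------------------------------------
# Standalone sketch (not part of the test suite above; the helpers are
# hypothetical). The behavior these retry tests exercise is "try the
# websocket first, degrade to HTTP polling on any websocket-level failure",
# which is why the tests can break the websocket (bad URL, bad auth, closed
# connection) and still expect the job to reach JobStatus.DONE.

def _wait_with_fallback(ws_wait, http_poll):
    """ws_wait/http_poll are callables returning the final job status."""
    try:
        return ws_wait()
    except Exception:
        # any websocket failure degrades gracefully to polling
        return http_poll()

# Example (not executed at import time):
#   def broken_ws():
#       raise ConnectionError('wss unreachable')
#   assert _wait_with_fallback(broken_ws, lambda: 'DONE') == 'DONE'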
TCP_echo_server.py
#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import argparse import selectors import signal import socket import sys import time import traceback from threading import Condition, Thread from typing import Union from system_test import Logger from system_test import TIMEOUT class ClientRecord: """ Object to register with the selector 'data' field for incoming user connections. This is *not* used for the listening socket. This object holds the socketId in the address and the inbound and outbound data list buffers for this socket's payload. """ def __init__(self, address): self.addr = address self.inb = b'' self.outb = b'' def __repr__(self): return str(self.addr) + " len(in)=" + str(len(self.inb)) + " len(out)=" + str(len(self.outb)) def __str__(self): return self.__repr__() class GracefulExitSignaler: kill_now = False def __init__(self): signal.signal(signal.SIGINT, self.exit_gracefully) signal.signal(signal.SIGTERM, self.exit_gracefully) def exit_gracefully(self, signum, frame): self.kill_now = True def split_chunk_for_display(raw_bytes): """ Given some raw bytes, return a display string Only show the beginning and end of largish (2x CONTENT_CHUNK_SIZE) arrays. :param raw_bytes: :return: display string """ CONTENT_CHUNK_SIZE = 50 # Content repeats after chunks this big - used by echo client, too if len(raw_bytes) > 2 * CONTENT_CHUNK_SIZE: result = repr(raw_bytes[:CONTENT_CHUNK_SIZE]) + " ... " + repr(raw_bytes[-CONTENT_CHUNK_SIZE:]) else: result = repr(raw_bytes) return result class TcpEchoServer: def __init__(self, prefix="ECHO_SERVER", port: Union[str, int] = "0", echo_count=0, timeout=0.0, logger=None, conn_stall=0.0, close_on_conn=False, close_on_data=False) -> None: """ Start echo server in separate thread :param prefix: log prefix :param port: port to listen on :param echo_count: exit after echoing this many bytes :param timeout: exit after this many seconds :param logger: Logger() object """ self.sock: socket.socket self.prefix = prefix self.port = int(port) self.echo_count = echo_count self.timeout = timeout self.logger = logger self.conn_stall = conn_stall self.close_on_conn = close_on_conn self.close_on_data = close_on_data self.keep_running = True self.HOST = '127.0.0.1' self._cv = Condition() self._is_running = None self.exit_status = None self.error = None self._thread = Thread(target=self.run) self._thread.daemon = True self._thread.start() @property def is_running(self): with self._cv: self._cv.wait_for(lambda: self._is_running is not None, timeout=10) return self._is_running @is_running.setter def is_running(self, value): with self._cv: self._is_running = value self._cv.notify_all() def get_listening_port(self) -> int: address, port, *_ = self.sock.getsockname() return port def run(self): """ Run server in daemon thread. 
        A single thread runs multiple sockets through selectors. Note that
        timeouts and such are done in line and processing stops for all
        sockets when one socket is timing out. For the intended
        one-at-a-time test cases this works, but it is not a general
        solution for all cases.
        :return:
        """
        try:
            # set up spontaneous exit settings
            start_time = time.time()
            total_echoed = 0

            # set up listening socket
            try:
                self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.sock.bind((self.HOST, self.port))
                self.sock.listen()
                if self.port == 0:
                    self.port = self.get_listening_port()
                self.sock.setblocking(False)
                self.logger.log('%s Listening on host:%s, port:%s' % (self.prefix, self.HOST, self.port))
            except Exception:
                self.error = ('%s Opening listen socket %s:%s exception: %s' %
                              (self.prefix, self.HOST, self.port, traceback.format_exc()))
                self.logger.log(self.error)
                return 1

            # notify whoever is waiting on the condition variable for this
            self.is_running = True

            # set up selector
            sel = selectors.DefaultSelector()
            sel.register(self.sock, selectors.EVENT_READ, data=None)

            # event loop
            while True:
                if not self.keep_running:
                    self.exit_status = "INFO: command shutdown:"
                    break
                if self.timeout > 0.0:
                    elapsed = time.time() - start_time
                    if elapsed > self.timeout:
                        self.exit_status = "Exiting due to timeout. Total echoed = %d" % total_echoed
                        break
                if self.echo_count > 0:
                    if total_echoed >= self.echo_count:
                        self.exit_status = "Exiting due to echo byte count. Total echoed = %d" % total_echoed
                        break
                events = sel.select(timeout=0.1)
                if events:
                    for key, mask in events:
                        if key.data is None:
                            if key.fileobj is self.sock:
                                self.do_accept(key.fileobj, sel, self.logger, self.conn_stall, self.close_on_conn)
                            else:
                                pass  # Only listener 'sock' has None in opaque data field
                        else:
                            n_echoed = self.do_service(key, mask, sel, self.logger, self.close_on_data)
                            total_echoed += n_echoed if n_echoed > 0 else 0
                else:
                    pass  # select timeout. probably.
sel.unregister(self.sock) self.sock.close() except Exception: self.error = "ERROR: exception : '%s'" % traceback.format_exc() self.is_running = False def do_accept(self, sock, sel, logger, conn_stall, close_on_conn): conn, addr = sock.accept() logger.log('%s Accepted connection from %s:%d' % (self.prefix, addr[0], addr[1])) if conn_stall > 0.0: logger.log('%s Connection from %s:%d stall start' % (self.prefix, addr[0], addr[1])) time.sleep(conn_stall) logger.log('%s Connection from %s:%d stall end' % (self.prefix, addr[0], addr[1])) if close_on_conn: logger.log('%s Connection from %s:%d closing due to close_on_conn' % (self.prefix, addr[0], addr[1])) conn.close() return conn.setblocking(False) events = selectors.EVENT_READ | selectors.EVENT_WRITE sel.register(conn, events, data=ClientRecord(addr)) def do_service(self, key, mask, sel, logger, close_on_data): retval = 0 sock = key.fileobj data = key.data if mask & selectors.EVENT_READ: try: recv_data = sock.recv(1024) except IOError: logger.log('%s Connection to %s:%d IOError: %s' % (self.prefix, data.addr[0], data.addr[1], traceback.format_exc())) sel.unregister(sock) sock.close() return 0 except Exception: self.error = ('%s Connection to %s:%d exception: %s' % (self.prefix, data.addr[0], data.addr[1], traceback.format_exc())) logger.log(self.error) sel.unregister(sock) sock.close() return 1 if recv_data: data.outb += recv_data if close_on_data: logger.log('%s Connection to %s:%d closed due to close_on_data' % (self.prefix, data.addr[0], data.addr[1])) sel.unregister(sock) sock.close() return 0 logger.log('%s read from: %s:%d len:%d: %s' % (self.prefix, data.addr[0], data.addr[1], len(recv_data), split_chunk_for_display(recv_data))) sel.modify(sock, selectors.EVENT_READ | selectors.EVENT_WRITE, data=data) else: while data.outb: logger.log('%s Client closed: flush client input to %s:%d' % (self.prefix, data.addr[0], data.addr[1])) try: sent = sock.send(data.outb) data.outb = data.outb[sent:] except IOError: logger.log('%s Connection to %s:%d IOError: %s' % (self.prefix, data.addr[0], data.addr[1], traceback.format_exc())) sel.unregister(sock) sock.close() return 0 except Exception: self.error = ('%s Connection to %s:%d exception: %s' % (self.prefix, data.addr[0], data.addr[1], traceback.format_exc())) logger.log(self.error) sel.unregister(sock) sock.close() return 1 logger.log('%s Client closed: closing connection to %s:%d' % (self.prefix, data.addr[0], data.addr[1])) sel.unregister(sock) sock.close() return 0 if mask & selectors.EVENT_WRITE: if data.outb: try: sent = sock.send(data.outb) except IOError: logger.log('%s Connection to %s:%d IOError: %s' % (self.prefix, data.addr[0], data.addr[1], traceback.format_exc())) sel.unregister(sock) sock.close() return 0 except Exception: self.error = ('%s Connection to %s:%d exception: %s' % (self.prefix, data.addr[0], data.addr[1], traceback.format_exc())) logger.log(self.error) sel.unregister(sock) sock.close() return 1 retval += sent if sent > 0: logger.log('%s write to : %s:%d len:%d: %s' % (self.prefix, data.addr[0], data.addr[1], sent, split_chunk_for_display(data.outb[:sent]))) else: logger.log('%s write to : %s:%d len:0' % (self.prefix, data.addr[0], data.addr[1])) data.outb = data.outb[sent:] else: sel.modify(sock, selectors.EVENT_READ, data=data) return retval def wait(self, timeout=TIMEOUT): self.logger.log("%s Server is shutting down" % self.prefix) self.keep_running = False self._thread.join(timeout) def main(argv): retval = 0 logger = None # parse args p = argparse.ArgumentParser() 
    p.add_argument('--port', '-p',
                   help='Required listening port number')
    p.add_argument('--name',
                   help='Optional logger prefix')
    p.add_argument('--echo', '-e', type=int, default=0, const=1, nargs="?",
                   help='Exit after echoing this many bytes. Default value "0" disables exiting on byte count.')
    p.add_argument('--timeout', '-t', type=float, default=0.0, const=1, nargs="?",
                   help='Timeout in seconds. Default value "0.0" disables timeouts')
    p.add_argument('--log', '-l', action='store_true',
                   help='Write activity log to console')
    # Add controlled server misbehavior for testing conditions seen in the field
    # Stall required to trigger Q2 testing for DISPATCH-1947 and improving test DISPATCH-1981
    p.add_argument('--connect-stall', type=float, default=0.0, const=1, nargs="?",
                   help='Accept connections but wait this many seconds before reading from socket. Default value "0.0" disables stall')
    # Close on connect - exercises control paths scrutinized under DISPATCH-1968
    p.add_argument('--close-on-connect', action='store_true',
                   help='Close client connection without reading from socket when listener connects. If stall is specified then stall before closing.')
    # Close on data - exercises control paths scrutinized under DISPATCH-1968
    p.add_argument('--close-on-data', action='store_true',
                   help='Close client connection as soon as data arrives.')
    del argv[0]
    args = p.parse_args(argv)

    # port
    if args.port is None:
        raise Exception("User must specify a port number")
    port = args.port

    # name / prefix
    prefix = args.name if args.name is not None else "ECHO_SERVER (%s)" % (str(port))

    # echo
    if args.echo < 0:
        raise Exception("Echo count must be greater than or equal to zero")

    # timeout
    if args.timeout < 0.0:
        raise Exception("Timeout must be greater than or equal to zero")

    # connect_stall
    if args.connect_stall < 0.0:
        raise Exception("Connect-stall must be greater than or equal to zero")

    signaller = GracefulExitSignaler()
    server = None

    try:
        # logging
        logger = Logger(title="%s port %s" % (prefix, port),
                        print_to_console=args.log,
                        save_for_dump=False)
        server = TcpEchoServer(prefix, port, args.echo, args.timeout, logger,
                               args.connect_stall, args.close_on_connect, args.close_on_data)

        keep_running = True
        while keep_running:
            time.sleep(0.1)
            if server.error is not None:
                logger.log("%s Server stopped with error: %s" % (prefix, server.error))
                keep_running = False
                retval = 1
            if server.exit_status is not None:
                logger.log("%s Server stopped with status: %s" % (prefix, server.exit_status))
                keep_running = False
            if signaller.kill_now:
                logger.log("%s Process killed with signal" % prefix)
                keep_running = False
            if keep_running and not server.is_running:
                logger.log("%s Server stopped with no error or status" % prefix)
                keep_running = False
    except Exception:
        if logger is not None:
            logger.log("%s Exception: %s" % (prefix, traceback.format_exc()))
        retval = 1

    if server is not None and server.sock is not None:
        server.sock.close()

    return retval


if __name__ == "__main__":
    sys.exit(main(sys.argv))
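
# ---------------------------------------------------------------------------
# Standalone smoke-test sketch (not part of the module above). It assumes
# system_test.Logger can be replaced by any object exposing a log(msg)
# method; call it manually from a Python shell.
def _echo_smoke_test():
    class _StubLogger:
        def log(self, msg):
            print(msg)

    # port "0" lets the OS pick an ephemeral port
    server = TcpEchoServer(prefix="DEMO", port="0", logger=_StubLogger())
    assert server.is_running
    with socket.create_connection(('127.0.0.1', server.get_listening_port())) as client:
        client.sendall(b'hello')
        assert client.recv(1024) == b'hello'  # the payload comes straight back
    server.wait()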
via_app_data.py
"""Bootstrap""" from __future__ import absolute_import, unicode_literals import logging from contextlib import contextmanager from threading import Lock, Thread from virtualenv.info import fs_supports_symlink from virtualenv.seed.embed.base_embed import BaseEmbed from virtualenv.seed.embed.wheels.acquire import get_wheels from virtualenv.util.path import safe_delete from .pip_install.copy import CopyPipInstall from .pip_install.symlink import SymlinkPipInstall class FromAppData(BaseEmbed): def __init__(self, options): super(FromAppData, self).__init__(options) self.symlinks = options.symlink_app_data self.base_cache = self.app_data / "seed-app-data" / "v1.0.1" @classmethod def add_parser_arguments(cls, parser, interpreter, app_data): super(FromAppData, cls).add_parser_arguments(parser, interpreter, app_data) can_symlink = app_data.transient is False and fs_supports_symlink() parser.add_argument( "--symlink-app-data", dest="symlink_app_data", action="store_true" if can_symlink else "store_false", help="{} symlink the python packages from the app-data folder (requires seed pip>=19.3)".format( "" if can_symlink else "not supported - " ), default=False, ) def run(self, creator): if not self.enabled: return base_cache = self.base_cache / creator.interpreter.version_release_str with self._get_seed_wheels(creator, base_cache) as name_to_whl: pip_version = name_to_whl["pip"].stem.split("-")[1] if "pip" in name_to_whl else None installer_class = self.installer_class(pip_version) def _install(name, wheel): logging.debug("install %s from wheel %s via %s", name, wheel, installer_class.__name__) image_folder = base_cache.path / "image" / installer_class.__name__ / wheel.stem installer = installer_class(wheel, creator, image_folder) if not installer.has_image(): installer.build_image() installer.install(creator.interpreter.version_info) threads = list(Thread(target=_install, args=(n, w)) for n, w in name_to_whl.items()) for thread in threads: thread.start() for thread in threads: thread.join() @contextmanager def _get_seed_wheels(self, creator, base_cache): with base_cache.lock_for_key("wheels"): wheels_to = base_cache.path / "wheels" if wheels_to.exists(): safe_delete(wheels_to) wheels_to.mkdir(parents=True, exist_ok=True) name_to_whl, lock = {}, Lock() def _get(package, version): result = get_wheels( creator.interpreter.version_release_str, wheels_to, self.extra_search_dir, self.download, {package: version}, self.app_data, ) with lock: name_to_whl.update(result) threads = list(Thread(target=_get, args=(pkg, v)) for pkg, v in self.package_version().items()) for thread in threads: thread.start() for thread in threads: thread.join() yield name_to_whl def installer_class(self, pip_version): if self.symlinks and pip_version: # symlink support requires pip 19.3+ pip_version_int = tuple(int(i) for i in pip_version.split(".")[0:2]) if pip_version_int >= (19, 3): return SymlinkPipInstall return CopyPipInstall def __unicode__(self): base = super(FromAppData, self).__unicode__() msg = ", via={}, app_data_dir={}".format("symlink" if self.symlinks else "copy", self.base_cache.path) return base[:-1] + msg + base[-1]
close_poet.py
# Re-implementation of POET with ARS instead of ES and with A1 robot on
# procedurally generated environments
import numpy as np
import multiprocessing as mp
from multiprocessing import Process, Pipe
import argparse
from scenes.poet_env import create_poet_env
from copy import deepcopy
import random
import time
from environment import env_loader


class Hp():
    def __init__(self):
        self.nb_steps = 5
        self.episode_length = 420
        self.learning_rate = 0.001
        self.nb_directions = 32
        self.nb_best_directions = 16
        assert self.nb_best_directions <= self.nb_directions
        self.noise = 0.005
        self.seed = 187
        #self.env_name = 'HalfCheetahBulletEnv-v0'


# Multiprocess Exploring the policy on one specific direction and over one episode
_RESET = 1
_CLOSE = 2
_EXPLORE = 3
_CHANGE = 4
_EVALUATE = 5


def ExploreWorker(rank, childPipe):
    env = env_loader.load("DIRECT", "control_velocity")
    nb_inputs = env.get_state_space()
    policy = None  # set by the first _EXPLORE/_EVALUATE message
    n = 0
    while True:
        n += 1
        try:
            # Only block for short times to have keyboard exceptions be raised.
            if not childPipe.poll(0.001):
                continue
            message, payload = childPipe.recv()
        except (EOFError, KeyboardInterrupt):
            break
        if message == _RESET:
            observation_n = env.soft_reset()[0]
            if policy is not None:
                policy.reset()
            childPipe.send(["reset ok"])
            continue
        if message == _CHANGE:
            E = payload[0]
            env.hard_reset(E)
            childPipe.send(["change ok"])
            continue
        if message == _EVALUATE:
            E = payload[0]
            policy = payload[1]
            hp = payload[2]
            state = env.hard_reset(E)[0]
            policy.reset()
            done = False
            num_plays = 0.
            sum_rewards = 0
            while not done and num_plays < hp.episode_length:
                policy.observe(state)
                state = policy.normalize(state)
                # evaluation runs the unperturbed policy (no delta/direction)
                action = policy.evaluate(state, None, None, hp)
                state, reward, done = env.step(action)
                sum_rewards += reward
                num_plays += 1
            childPipe.send([sum_rewards])
            continue
        if message == _EXPLORE:
            policy = payload[0]
            hp = payload[1]
            direction = payload[2]
            delta = payload[3]
            state = env.soft_reset()[0]
            policy.reset()
            done = False
            num_plays = 0.
            sum_rewards = 0
            while not done and num_plays < hp.episode_length:
                policy.observe(state)
                state = policy.normalize(state)
                action = policy.evaluate(state, delta, direction, hp)
                state, reward, done = env.step(action)
                #reward += reward#max(min(reward, 1), -1)
                sum_rewards += reward
                num_plays += 1
            childPipe.send([sum_rewards])
            continue
        if message == _CLOSE:
            childPipe.send(["close ok"])
            break
    childPipe.close()


# Normalizing the states
class Normalizer():
    def __init__(self, nb_inputs):
        self.n = np.zeros(nb_inputs)
        self.mean = np.zeros(nb_inputs)
        self.mean_diff = np.zeros(nb_inputs)
        self.var = np.zeros(nb_inputs)

    def observe(self, x):
        # print(x.shape, self.mean.shape, self.n.shape)
        self.n += 1.
last_mean = self.mean.copy() self.mean += (x - self.mean) / self.n self.mean_diff += (x - last_mean) * (x - self.mean) self.var = (self.mean_diff / self.n).clip(min=1e-2) def normalize(self, inputs): obs_mean = self.mean obs_std = np.sqrt(self.var) return (inputs - obs_mean) / obs_std def set_n_mu_diff(self, n, mu, diff): self.n = n self.mean = mu self.mean_diff = diff def save_n_mu_diff(self): self._n = self.n self._mean = self.mean self._mean_diff = self.mean_diff def get_saved_n_mu_diff(self): return self._n, self._mean, self._mean_diff def sigmoid(x): return 1/(1+np.exp(-x)) class Policy(): def __init__(self, input_size, hidden_size, gru_input, gru_hidden, output_size, args): self.gru_input = gru_input self.gru_hidden = gru_hidden self.input_size = input_size self.normalizer = Normalizer(input_size) try: # MLP # self.theta1 = np.load(args.policy[0]) # self.theta2 = np.load(args.policy[1]) # GRU self.theta1 = np.load(args.policy[0]) self.theta2 = np.load(args.policy[1]) self.theta3 = np.load(args.policy[2]) self.x2h = np.load(args.policy[3]) self.h2h = np.load(args.policy[4]) self.x2r = np.load(args.policy[5]) self.h2r = np.load(args.policy[6]) self.x2z = np.load(args.policy[7]) self.h2z = np.load(args.policy[8]) except: # MLP # self.theta1 = np.zeros((hidden_size, input_size)) # self.theta2 = np.zeros((output_size, hidden_size)) # GRU self.theta1 = np.zeros((hidden_size, input_size+gru_hidden)) self.theta2 = np.zeros((hidden_size, hidden_size)) self.theta3 = np.zeros((output_size, hidden_size)) self.x2h = np.zeros((gru_hidden, gru_input)) self.h2h = np.zeros((gru_hidden, gru_hidden)) self.x2r = np.zeros((gru_hidden, gru_input)) self.h2r = np.zeros((gru_hidden, gru_hidden)) self.x2z = np.zeros((gru_hidden, gru_input)) self.h2z = np.zeros((gru_hidden, gru_hidden)) self.hx = np.zeros((gru_hidden,)) print("Starting policy theta=", self.theta1, self.theta2, self.theta3) parameters = self.theta1.size+self.theta2.size+self.theta3.size+3*self.x2h.size+3*self.h2h.size print(f"{parameters = }") def load_policy(self, policy_file): self.policy_file = policy_file _policy = np.load(policy_file) # MLP self.theta1 = _policy['arr_0'] self.theta2 = _policy['arr_1'] self.theta3 = _policy['arr_2'] # GRU self.x2r = _policy['arr_3'] self.h2r = _policy['arr_4'] self.x2z = _policy['arr_5'] self.h2z = _policy['arr_6'] self.x2h = _policy['arr_7'] self.h2h = _policy['arr_8'] self.normalizer.set_n_mu_diff(_policy['arr_9'], _policy['arr_10'], _policy['arr_11']) def reset(self): self.hx = np.zeros((self.gru_hidden,)) # pass def evaluate(self, input, delta, direction, hp): if direction is None: # WITH GRU r = sigmoid(self.x2r.dot(input[:self.gru_input])+self.h2r.dot(self.hx)) z = sigmoid(self.x2z.dot(input[:self.gru_input])+self.h2z.dot(self.hx)) h_tilde = np.tanh(self.x2h.dot(input[:self.gru_input])+r*(self.h2h.dot(self.hx))) self.hx = self.hx*(1-z)+z*h_tilde return np.tanh(self.theta3.dot (np.tanh(self.theta2.dot (np.tanh(self.theta1.dot( np.concatenate((input,self.hx),axis=0))))))) # return np.tanh(self.theta3.dot(np.maximum( # self.theta2.dot(np.maximum(self.theta1.dot( # np.concatenate((input,self.hx),axis=0)),0)),0))) # NO GRU # return np.tanh(self.theta2.dot(np.maximum(self.theta1.dot( # input),0))) elif direction == "positive": # WITH GRU r = sigmoid((self.x2r+hp.noise*delta[3]).dot(input[:self.gru_input])+ (self.h2r+hp.noise*delta[4]).dot(self.hx)) z = sigmoid((self.x2z+hp.noise*delta[5]).dot(input[:self.gru_input])+ (self.h2z+hp.noise*delta[6]).dot(self.hx)) h_tilde = 
np.tanh((self.x2h+hp.noise*delta[7]).dot(input[:self.gru_input])+ r*((self.h2h+hp.noise*delta[8]).dot(self.hx))) self.hx = self.hx*(1-z)+z*h_tilde return np.tanh((self.theta3 + hp.noise * delta[2]).dot( np.tanh((self.theta2 + hp.noise * delta[1]).dot( np.tanh((self.theta1 + hp.noise * delta[0]).dot( np.concatenate((input,self.hx),axis=0))))))) # return np.tanh((self.theta3 + hp.noise * delta[2]).dot( # np.maximum((self.theta2 + hp.noise * delta[1]).dot( # np.maximum((self.theta1 + hp.noise * delta[0]).dot( # np.concatenate((input,self.hx),axis=0)),0)),0))) # NO GRU # return np.tanh((self.theta2 + hp.noise * delta[1]).dot( # np.maximum((self.theta1 + hp.noise * delta[0]).dot(input),0))) else: # WITH GRU r = sigmoid((self.x2r-hp.noise*delta[3]).dot(input[:self.gru_input])+ (self.h2r-hp.noise*delta[4]).dot(self.hx)) z = sigmoid((self.x2z-hp.noise*delta[5]).dot(input[:self.gru_input])+ (self.h2z-hp.noise*delta[6]).dot(self.hx)) h_tilde = np.tanh((self.x2h-hp.noise*delta[7]).dot(input[:self.gru_input])+ r*((self.h2h-hp.noise*delta[8]).dot(self.hx))) self.hx = self.hx*(1-z)+z*h_tilde return np.tanh((self.theta3 - hp.noise * delta[2]).dot( np.tanh((self.theta2 - hp.noise * delta[1]).dot( np.tanh((self.theta1 - hp.noise * delta[0]).dot( np.concatenate((input,self.hx),axis=0))))))) # return np.tanh((self.theta3 - hp.noise * delta[2]).dot( # np.maximum((self.theta2 - hp.noise * delta[1]).dot( # np.maximum((self.theta1 - hp.noise * delta[0]).dot( # np.concatenate((input,self.hx),axis=0)),0)),0))) # NO GRU # return np.tanh((self.theta2 - hp.noise * delta[1]).dot( # np.maximum((self.theta1 - hp.noise * delta[0]).dot( # input),0))) def sample_deltas(self): # WITH GRU return [(np.random.randn(*self.theta1.shape), np.random.randn(*self.theta2.shape), np.random.randn(*self.theta3.shape), np.random.randn(*self.x2r.shape), np.random.randn(*self.h2r.shape), np.random.randn(*self.x2z.shape), np.random.randn(*self.h2z.shape), np.random.randn(*self.x2h.shape), np.random.randn(*self.h2h.shape)) for _ in range( hp.nb_directions)] # NO GRU # return [(np.random.randn(*self.theta1.shape), # np.random.randn(*self.theta2.shape)) for _ in range( # hp.nb_directions)] def update(self, rollouts, sigma_r): step1 = np.zeros(self.theta1.shape) step2 = np.zeros(self.theta2.shape) step3 = np.zeros(self.theta3.shape) step4 = np.zeros(self.x2r.shape) step5 = np.zeros(self.h2r.shape) step6 = np.zeros(self.x2z.shape) step7 = np.zeros(self.h2z.shape) step8 = np.zeros(self.x2h.shape) step9 = np.zeros(self.h2h.shape) for r_pos, r_neg, d in rollouts: step1 += (r_pos - r_neg) * d[0] step2 += (r_pos - r_neg) * d[1] step3 += (r_pos - r_neg) * d[2] step4 += (r_pos - r_neg) * d[3] step5 += (r_pos - r_neg) * d[4] step6 += (r_pos - r_neg) * d[5] step7 += (r_pos - r_neg) * d[6] step8 += (r_pos - r_neg) * d[7] step9 += (r_pos - r_neg) * d[8] self.theta1 += hp.learning_rate/(hp.nb_best_directions * sigma_r)*step1 self.theta2 += hp.learning_rate/(hp.nb_best_directions * sigma_r)*step2 self.theta3 += hp.learning_rate/(hp.nb_best_directions * sigma_r)*step3 self.x2r += hp.learning_rate/(hp.nb_best_directions * sigma_r)*step4 self.h2r += hp.learning_rate/(hp.nb_best_directions * sigma_r)*step5 self.x2z += hp.learning_rate/(hp.nb_best_directions * sigma_r)*step6 self.h2z += hp.learning_rate/(hp.nb_best_directions * sigma_r)*step7 self.x2h += hp.learning_rate/(hp.nb_best_directions * sigma_r)*step8 self.h2h += hp.learning_rate/(hp.nb_best_directions * sigma_r)*step9 def save(self, name): print(f"Save {name}") self.normalizer.save_n_mu_diff() n, mu, 
diff = self.normalizer.get_saved_n_mu_diff() # print(mu) # WITH GRU np.savez(args.logdir + name, self.theta1, self.theta2, self.theta3, self.x2r, self.h2r, self.x2z, self.h2z, self.x2h, self.h2h, n, mu, diff, allow_pickle=True) # NO GRU # np.savez(args.logdir + name, self.theta1, self.theta2, # n, mu, diff, allow_pickle=True) def observe(self, x): self.normalizer.observe(x) def normalize(self, inputs): return self.normalizer.normalize(inputs) # Exploring the policy on one specific direction and over one episode def explore_new_env(env, heightfield, policy, direction, delta, hp): state = env.hard_reset(heightfield)[0] policy.reset() done = False num_plays = 0. sum_rewards = 0 while not done and num_plays < hp.episode_length: policy.observe(state) state = policy.normalize(state) action = policy.evaluate(state, delta, direction, hp) state, reward, done = env.step(action) sum_rewards += reward num_plays += 1 return sum_rewards def explore(env, policy, direction, delta, hp): state = env.soft_reset()[0] policy.reset() done = False num_plays = 0. sum_rewards = 0 while not done and num_plays < hp.episode_length: policy.observe(state) state = policy.normalize(state) action = policy.evaluate(state, delta, direction, hp) state, reward, done = env.step(action) sum_rewards += reward num_plays += 1 return sum_rewards def train(env, heightfield, policy, hp, parentPipes): # Change environment for k in range(hp.nb_directions): parentPipe = parentPipes[k] parentPipe.send([_CHANGE, [heightfield]]) for k in range(hp.nb_directions): parentPipes[k].recv() for step in range(hp.nb_steps): # print(f"{step = }") # Initializing the perturbations deltas and the positive/negative rewards deltas = policy.sample_deltas() positive_rewards = [0] * hp.nb_directions negative_rewards = [0] * hp.nb_directions if parentPipes: for k in range(hp.nb_directions): parentPipe = parentPipes[k] parentPipe.send([_EXPLORE, [policy, hp, "positive", deltas[k]]]) for k in range(hp.nb_directions): positive_rewards[k] = parentPipes[k].recv()[0] for k in range(hp.nb_directions): parentPipe = parentPipes[k] parentPipe.send([_EXPLORE, [policy, hp, "negative", deltas[k]]]) for k in range(hp.nb_directions): negative_rewards[k] = parentPipes[k].recv()[0] # Gathering all the positive/negative rewards to compute the standard deviation of these rewards all_rewards = np.array(positive_rewards + negative_rewards) sigma_r = all_rewards.std() # Sorting the rollouts by the max(r_pos, r_neg) and selecting the best directions scores = { k: max(r_pos, r_neg) for k, (r_pos, r_neg) in enumerate(zip(positive_rewards, negative_rewards)) } order = sorted(scores.keys(), key=lambda x: -scores[x])[:hp.nb_best_directions] rollouts = [(positive_rewards[k], negative_rewards[k], deltas[k]) for k in order] # Updating our policy policy.update(rollouts, sigma_r) # print(f"{step = }") return policy, explore_new_env(env, heightfield, policy, None, None, hp) def evaluate_agents(E, policies, hp, parentPipes): reward_list = [] # Change environment E heightfield = create_poet_env(E) for k in range(len(policies)): parentPipe = parentPipes[k] parentPipe.send([_EVALUATE, [heightfield, policies[k], hp]]) for k in range(len(policies)): reward_list.append(parentPipes[k].recv()[0]) best_reward = max(reward_list) return deepcopy(policies[reward_list.index(best_reward)]), best_reward def full_test(policies, hp, parentPipes): E = np.array([ [0.8, 0.8, 0, 0, 0, 0, 0, 0], [0, 0, 0.9, 0.2, 0, 0, 0, 0], [0, 0, 0, 0, 0.7, 0.80, 0, 0], [0, 0, 0, 0, 0, 0, 0.8, 0.30], [0.70, 0.7, 0.2, 0.2, 0.2, 
0.2, 0.2, 0.20], [0.20, 0.2, 0.8, 0.2, 0.2, 0.2, 0.2, 0.20], [0.20, 0.2, 0.2, 0.2, 0.6, 0.7, 0.2, 0.20], [0.20, 0.2, 0.2, 0.2, 0.2, 0.2, 0.7, 0.30] ]) total_reward_count = 0 for i in range(len(E)): random.seed(1) np.random.seed(1) heightfield = create_poet_env(E[i]) reward_list = [] for k in range(len(policies)): parentPipe = parentPipes[k] parentPipe.send([_EVALUATE, [heightfield, policies[k], hp]]) for k in range(len(policies)): reward_list.append(parentPipes[k].recv()[0]) total_reward_count += sum(reward_list) return total_reward_count if __name__ == "__main__": mp.freeze_support() parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--seed', help='RNG seed', type=int, default=1) parser.add_argument('--steps', help='Number of steps per training', type=int, default=10) parser.add_argument('--iterations', help='Number of iterations', type=int, default=100) parser.add_argument('--policy', help='Starting policy file (npy)', type=str, default='') parser.add_argument( '--logdir', help='Directory root to log policy files (npy)', type=str, default='.') parser.add_argument('--max_num_envs', type=int, default=32) args = parser.parse_args() hp = Hp() # hp.env_name = args.env hp.seed = int(time.time()*1000)%100000 hp.nb_steps = args.steps print("seed = ", hp.seed) np.random.seed(hp.seed) parentPipes = None num_processes = hp.nb_directions processes = [] childPipes = [] parentPipes = [] for pr in range(num_processes): parentPipe, childPipe = Pipe() parentPipes.append(parentPipe) childPipes.append(childPipe) for rank in range(num_processes): p = mp.Process(target=ExploreWorker, args=(rank, childPipes[rank])) p.start() processes.append(p) env = env_loader.load("DIRECT", "control_velocity") nb_inputs = env.get_state_space() nb_outputs = env.get_action_space() E = np.array([ [0.00, 0.00, 0, 0, 0, 0, 0, 0], [0.40, 0.20, 0, 0, 0, 0, 0, 0], [0.20, 0.40, 0, 0, 0, 0, 0, 0], [0.40, 0.40, 0, 0, 0, 0, 0, 0], [0.60, 0.40, 0, 0, 0, 0, 0, 0], [0.40, 0.60, 0, 0, 0, 0, 0, 0], [0.70, 0.70, 0, 0, 0, 0, 0, 0], [0.90, 0.90, 0, 0, 0, 0, 0, 0], [0, 0, 0.20, 0.90, 0, 0, 0, 0], [0, 0, 0.30, 0.80, 0, 0, 0, 0], [0, 0, 0.40, 0.70, 0, 0, 0, 0], [0, 0, 0.50, 0.60, 0, 0, 0, 0], [0, 0, 0.60, 0.50, 0, 0, 0, 0], [0, 0, 0.70, 0.40, 0, 0, 0, 0], [0, 0, 0.80, 0.30, 0, 0, 0, 0], [0, 0, 0.90, 0.20, 0, 0, 0, 0], [0, 0, 0, 0, 0.20, 0.10, 0, 0], [0, 0, 0, 0, 0.25, 0.20, 0, 0], [0, 0, 0, 0, 0.35, 0.30, 0, 0], [0, 0, 0, 0, 0.45, 0.40, 0, 0], [0, 0, 0, 0, 0.55, 0.50, 0, 0], [0, 0, 0, 0, 0.65, 0.60, 0, 0], [0, 0, 0, 0, 0.75, 0.70, 0, 0], [0, 0, 0, 0, 0.90, 0.80, 0, 0], [0, 0, 0, 0, 0, 0, 0.20, 0.90], [0, 0, 0, 0, 0, 0, 0.30, 0.80], [0, 0, 0, 0, 0, 0, 0.40, 0.70], [0, 0, 0, 0, 0, 0, 0.50, 0.60], [0, 0, 0, 0, 0, 0, 0.60, 0.50], [0, 0, 0, 0, 0, 0, 0.70, 0.40], [0, 0, 0, 0, 0, 0, 0.80, 0.30], [0, 0, 0, 0, 0, 0, 0.90, 0.20], ]) compare = [] EAR_list = [] print("Initial loading") for i in range(32): A = Policy(nb_inputs, 64, 29, 8, nb_outputs, args) A.load_policy('pre_trained.npz') R = explore_new_env(env, create_poet_env(E[i]),A, None, None, hp) EAR_list.append((E[i],A,R)) # Environment - Policy Pair List N_mutate = 1# mutation interval/freqeuncy N_transfer = 1# mutation transfer/frequency #---------- CLOSE-ENDED POET ----------# for i in range(args.iterations): print(f"\nIteration {i}") num_pairs = len(EAR_list) #----- GENERATE NEW ENVIRONMENTS -----# #----- OPTIMIZE POLICY IN EACH ENV -----# print("TRAINING") for j in range(num_pairs): E, A, _ = EAR_list[j] A, R = train(env, 
create_poet_env(E), A, hp, parentPipes)
            EAR_list[j] = (E, A, R)

        #----- ATTEMPT TRANSFER AND SWITCHING -----#
        if num_pairs > 1 and i % N_transfer == 0:
            for j in range(num_pairs):
                print(f"Testing all policies on env {j}")
                # evaluate the policies from all environments in this current env
                current_env, current_agent, current_best_reward = EAR_list[j]
                all_agents = [EA[1] for EA in EAR_list]  # get all agents from the EA_pair_list
                best_agent, best_reward = evaluate_agents(current_env, all_agents, hp, parentPipes)
                if best_reward > current_best_reward:
                    del current_agent
                    EAR_list[j] = (current_env, best_agent, best_reward)
                    print("Transfer successful")

        if ((i+1) % 10) == 0:
            compare.append(full_test(all_agents, hp, parentPipes))
            print(compare[-1])
            encoding_list = []
            reward_list = []
            # Saving everything
            for count, EAR_pair in enumerate(EAR_list):
                encoding_list.append(EAR_pair[0])
                reward_list.append(EAR_pair[2])
                EAR_pair[1].save('/'+str(count)+'.npz')
            print(f" {encoding_list}")
            print(f" {reward_list}")
            encoding_array = np.asarray(encoding_list)
            reward_array = np.asarray(reward_list)
            comparison = np.asarray(compare)
            print(comparison)
            # np.savez(args.logdir + '/archive', encoding_array, allow_pickle=True)
            np.savez(args.logdir + '/full_test', comparison, allow_pickle=True)
            np.savez(args.logdir + '/encoding', encoding_array, allow_pickle=True)
            np.savez(args.logdir + '/reward', reward_array, allow_pickle=True)

    for parentPipe in parentPipes:
        parentPipe.send([_CLOSE, "pay2"])

    for p in processes:
        p.join()
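
# ---------------------------------------------------------------------------
# Illustrative sketch (separate from the code above) of the core ARS update
# applied to every weight matrix: rank perturbation directions by
# max(r+, r-), keep the best n_best, and step along the reward-weighted
# deltas, scaled by the std of all 2*N rollout rewards (matching how train()
# computes sigma_r). Names are hypothetical.
def _ars_update(theta, deltas, r_pos, r_neg, lr, n_best):
    sigma_r = np.std(np.array(r_pos + r_neg))  # std over all rollout rewards
    order = sorted(range(len(deltas)), key=lambda k: -max(r_pos[k], r_neg[k]))[:n_best]
    step = sum((r_pos[k] - r_neg[k]) * deltas[k] for k in order)
    return theta + lr / (n_best * sigma_r) * step

# Example (kept as a comment so importing this module stays side-effect free):
#   _ars_update(np.zeros(2), [np.ones(2)] * 3, [1.0, 3.0, 2.0], [0.5, 1.0, 1.5],
#               lr=0.01, n_best=2)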
skycam.py
#!/usr/bin/env python3

import os
import time
import zwoasi as asi

from threading import Thread
from queue import Queue
from PIL import Image
from glob import glob


class SkyCam:
    """ SkyCam is an abstraction layer for zwoasi Python bindings """

    @staticmethod
    def initialize(_library=None):
        """ Initialize ZWOASI SDK library

        The official SDK library can be obtained from this link:
        https://astronomy-imaging-camera.com/tets1/

        Args:
            _library (str): override default location for the library
        """
        if _library is None:
            _library = os.path.dirname(os.path.realpath(__file__))\
                + '/asi.so'
        asi.init(_library)

    @staticmethod
    def cameras():
        """ List of connected cameras

        Returns:
            list: List of camera names as strings
        """
        return asi.list_cameras()

    def __init__(self, _camera_id, _bandwidth=80):
        """ Initializes a SkyCam camera object

        This function automatically sets camera parameters to default
        settings. To change them use configure()

        Args:
            _camera_id (int): Camera ID in the cameras() list or its name
        """
        self.camera = asi.Camera(_camera_id)
        self.camera_info = self.camera.get_camera_property()
        self.camera.set_control_value(asi.ASI_BANDWIDTHOVERLOAD, _bandwidth)
        self.camera.stop_video_capture()
        self.camera.stop_exposure()
        # default capture mode matches the start_video_capture() call below
        self.mode = 'video'
        self.configure()
        self.frame_buffer = Queue()
        self.frame_counter = 0
        self.recorder = self.Recorder(self)
        self.camera.set_control_value(asi.ASI_GAIN, 150)
        self.camera.set_control_value(asi.ASI_EXPOSURE, 1000000)
        self.camera.set_control_value(asi.ASI_WB_B, 99)
        self.camera.set_control_value(asi.ASI_WB_R, 75)
        self.camera.set_control_value(asi.ASI_GAMMA, 60)
        self.camera.set_control_value(asi.ASI_BRIGHTNESS, 50)
        self.camera.set_control_value(asi.ASI_FLIP, 0)
        self.camera.start_video_capture()
        self.camera.set_image_type(asi.ASI_IMG_RAW8)

    def configure(self, _gain=None, _exposure=None, _wb_b=None,
                  _wb_r=None, _gamma=None, _brightness=None, _flip=None,
                  _bin=None, _roi=None, _drange=None,
                  _color=None, _mode=None):
        """ Used to change camera parameters

        Args:
            _gain (int): Camera gain
            _exposure (int): Camera exposure in microseconds
            _wb_b (int): Camera whitebalance
            _wb_r (int): Camera whitebalance
            _gamma (int): Camera gamma
            _brightness (int): Camera brightness
            _flip (int): Picture flip, values can be 0 or 1
            _bin (int): Picture binning, values can be 1 or 2
            _roi (tuple): Region of interest, formatted as
                a tuple (x, y, width, height)
            _drange (int): Dynamic range, value can be 8 or 16 bits
            _color (bool): Camera color mode
            _mode (str): Capturing mode, value can be 'video' or 'picture'
                If set to 'picture', capturing is a lot slower.
        """
        self.camera.stop_exposure()
        if _mode == 'video':
            self.camera.start_video_capture()
        elif _mode == 'picture':
            self.camera.stop_video_capture()
        if _mode is not None:
            self.mode = _mode
        if _exposure is not None:
            self.camera.set_control_value(asi.ASI_EXPOSURE, _exposure)
        if _gain is not None:
            self.camera.set_control_value(asi.ASI_GAIN, _gain)
        if _wb_b is not None:
            self.camera.set_control_value(asi.ASI_WB_B, _wb_b)
        if _wb_r is not None:
            self.camera.set_control_value(asi.ASI_WB_R, _wb_r)
        if _gamma is not None:
            self.camera.set_control_value(asi.ASI_GAMMA, _gamma)
        if _brightness is not None:
            self.camera.set_control_value(asi.ASI_BRIGHTNESS, _brightness)
        if _flip is not None:
            self.camera.set_control_value(asi.ASI_FLIP, _flip)
        if _bin is None:
            _bin = 1
        if _roi is None:
            _roi = (
                0,
                0,
                int(self.camera_info['MaxWidth'] / _bin),
                int(self.camera_info['MaxHeight'] / _bin)
            )
        self.camera.set_roi(start_x=_roi[0], start_y=_roi[1],
                            width=_roi[2], height=_roi[3], bins=_bin)
        if _color is True:
            self.camera.set_image_type(asi.ASI_IMG_RGB24)
        else:
            # `is` comparison against int literals replaced with `==`
            if _drange == 8:
                self.camera.set_image_type(asi.ASI_IMG_RAW8)
            elif _drange == 16:
                self.camera.set_image_type(asi.ASI_IMG_RAW16)

    def capture(self, _directory=None, _file=None, _format='.jpg'):
        """ Frame capturing function

        Undeclared parameters fall back to default values. The captured
        frame is always returned as an array; it is additionally saved to
        disk when a destination can be resolved from _directory/_file.

        Args:
            _directory (str): Path for saving captured photos
            _file (str): File name, strftime formatting is enabled
                Formatting instructions: http://strftime.org/
            _format (str): Indicates picture format, default is JPEG

        Returns:
            numpy array: the captured frame
        """
        if _file is None and _directory is not None:
            _file = self.camera_info['Name'].replace(' ', '-') \
                + '-%Y-%m-%d-%H-%M-%S-%Z-' +\
                str(self.frame_counter) + _format
            self.frame_counter += 1
        if _directory is None and _file is not None:
            if not os.path.isdir('/tmp/skycam/'):
                os.makedirs('/tmp/skycam', 0o755)
            _directory = '/tmp/skycam/'
        _filename = None
        if _file is not None and _directory is not None:
            _file = time.strftime(_file)
            _filename = _directory + '/' + _file
        # Capture a single frame; zwoasi writes it to _filename when one is
        # given and returns the image data either way (the original code
        # captured a second frame just to produce the return value).
        if self.mode == 'picture':
            return self.camera.capture(filename=_filename)
        elif self.mode == 'video':
            return self.camera.capture_video_frame(filename=_filename)

    class Recorder:
        """ Recorder is used to record continuous frames automatically

        Note that Recorder class gets instanced as SkyCam.recorder object.
        """

        def __init__(self, _owner):
            """ Sets all variables to default state """
            self.owner = _owner
            self.buffer = Queue()
            self.recording = False
            self.delay = 0
            self.directory = None
            self.file = None
            self.format = None
            self.keep = True
            self.save = False

        def configure(self, _delay=None, _keep=None, _save=None,
                      _directory=None, _file=None, _format=None):
            """ Configure SkyCam recorder

            Args:
                _delay (int): Delay between frames in milliseconds
                _keep (bool): Indicates whether to keep frames in RAM buffer
                _save (bool): Indicates whether to save frames to a storage device
                _directory (str): Path for saving captured photos
                _file (str): File name, strftime formatting is enabled
                    Formatting instructions: http://strftime.org/
                _format (str): Indicates picture format, default is JPEG
            """
            if _delay is not None:
                self.delay = _delay
            if _directory is not None:
                self.directory = _directory
            if _file is not None:
                self.file = _file
            if _format is not None:
                self.format = _format
            if _keep is not None:
                self.keep = _keep
            if _save is not None:
                self.save = _save

        def record(self):
            """ Recorder background thread

            Do not call this method directly. Use start() instead.
            """
            while self.recording:
                if self.save:
                    _frame = self.owner.capture(_directory=self.directory,
                                                _file=self.file, _format=self.format)
                else:
                    _frame = self.owner.capture()
                if self.keep:
                    self.buffer.put((_frame, time.time()))
                time.sleep(self.delay / 1000)

        def start(self):
            """ Starts background thread for recording """
            self.recorder = Thread(target=self.record, args=())
            self.recording = True
            self.recorder.start()

        def stop(self):
            """ Stops background thread for recording """
            self.recording = False
            self.recorder.join()

        def buffer_is_empty(self):
            """ Check if buffer is empty """
            return self.buffer.empty()

        def buffer_next(self):
            """ Returns oldest frame in the buffer

            Calling this method requires _keep to be True.
            Returned frame gets removed from the buffer.
            """
            return self.buffer.get_nowait()

        def buffer_all(self):
            """ Returns all frames stored in the buffer

            Calling this method requires _keep to be True.
            """
            return list(self.buffer.queue)

        def buffer_clear(self):
            """ Clears the buffer """
            # queue.Queue has no clear(); empty the underlying deque while
            # holding its lock instead
            with self.buffer.mutex:
                self.buffer.queue.clear()

        def buffer_load(self):
            """ Loads all frames stored in _directory into the buffer

            Calling this method requires _directory to be defined to an
            existing directory. Frames are loaded as PIL Images paired
            with each file's modification time.
            """
            _files = sorted(glob(self.directory + '/*'))
            for _f in _files:
                self.buffer.put((Image.open(_f), os.path.getmtime(_f)))
rpc.py
import time
import logging
import threading
import select

from msgpackio.client import Client
from msgpackio.future import Future


class LostFuture(Exception):
    pass


# msgpack-RPC message type tags
REQUEST = 0
RESPONSE = 1
NOTIFY = 2


def _seq():
    """Yields message ids, wrapping around well before the msgpack int limit."""
    value = 0
    while True:
        yield value
        value += 1
        if value > (1 << 30):
            value = 0


log = logging.getLogger(__name__)


class RPCClient:
    def __init__(self, client: Client):
        self.client = client
        self.client.connect()
        self.generator = _seq()
        self._pending_results = dict()
        self.sockets = [self.client.sock]
        self.keep_promises = True
        self.lock = threading.Lock()
        self.promise_keeper = threading.Thread(target=self._fetch_results)
        self.promise_keeper.daemon = True
        self.promise_keeper.start()

    def _fetch_results(self):
        while self.keep_promises:
            readable, _, _ = select.select(self.sockets, [], [], 0.01)
            for _ in readable:
                value = self.client.recv(0)
                self._set_future(value)

    def call(self, method, *args):
        return self.send_request(method, args).get()

    def call_async(self, method, *args):
        return self.send_request(method, args)

    def send_request(self, method, args):
        msgid = next(self.generator)
        future = Future(self, msgid)
        with self.lock:
            self._pending_results[msgid] = future
        self.client.send([REQUEST, msgid, method, args])
        return future

    def notify(self, method, *args):
        self.client.send([NOTIFY, method, args])

    def close(self):
        self.keep_promises = False
        self.client.close()

    def __enter__(self):
        return self

    def __exit__(self, a, b, c):
        self.close()

    def _set_future(self, value):
        _, msgid, error, result = value
        with self.lock:
            future = self._pending_results.pop(msgid, None)
        if future is None:
            # Without this early return, the assignments below would raise
            # AttributeError on None
            log.error("Server replied to an unknown future")
            return None
        future.error = error
        future.result = result
        return msgid

    def _wait_future(self, timeout, target):
        # Track an absolute deadline; the original subtracted the cumulative
        # elapsed time on every iteration, draining the timeout far too fast
        deadline = time.time() + timeout if timeout else None
        while not target.ready():
            time.sleep(0)
            if deadline is not None and time.time() > deadline:
                raise TimeoutError()
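
# A minimal usage sketch, assuming a msgpack-RPC server is reachable on
# localhost:18800 and exposes a "sum" method; the Client("localhost", 18800)
# signature is an assumption about msgpackio's constructor.
if __name__ == '__main__':
    with RPCClient(Client("localhost", 18800)) as rpc:
        print(rpc.call("sum", 1, 2))           # blocking request/response
        future = rpc.call_async("sum", 3, 4)   # returns a Future immediately
        print(future.get())                    # resolved by the background thread
        rpc.notify("log", "done")              # fire-and-forget, no reply expected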
tpu_estimator.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =================================================================== """TPUEstimator class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import os import signal import sys import threading import time import numpy as np import six from six.moves import queue as Queue # pylint: disable=redefined-builtin from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.contrib.tpu.proto import compilation_result_pb2 as tpu_compilation_result from tensorflow.contrib.tpu.python.tpu import tensor_tracer from tensorflow.contrib.tpu.python.ops import tpu_ops from tensorflow.contrib.tpu.python.tpu import error_handling from tensorflow.contrib.tpu.python.tpu import session_support from tensorflow.contrib.tpu.python.tpu import tpu from tensorflow.contrib.tpu.python.tpu import tpu_config from tensorflow.contrib.tpu.python.tpu import tpu_context from tensorflow.contrib.tpu.python.tpu import tpu_feed from tensorflow.contrib.tpu.python.tpu import training_loop from tensorflow.contrib.tpu.python.tpu import util as util_lib from tensorflow.contrib.training.python.training import hparam from tensorflow.core.framework import variable_pb2 from tensorflow.core.framework.summary_pb2 import Summary from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session as tf_session from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.util import nest as data_nest from tensorflow.python.estimator import estimator as estimator_lib from tensorflow.python.estimator import model_fn as model_fn_lib from tensorflow.python.estimator.export import export_output as export_output_lib from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import summary_ops_v2 as contrib_summary from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import tag_constants from tensorflow.python.summary import summary from tensorflow.python.training import basic_session_run_hooks from tensorflow.python.training import evaluation from tensorflow.python.training import session_run_hook from tensorflow.python.training import training from tensorflow.python.training import training_util from tensorflow.python.util import function_utils from tensorflow.python.util import nest from 
tensorflow.python.util import tf_inspect _INITIAL_LOSS = 1e7 _ZERO_LOSS = 0. _TPU_ESTIMATOR = 'tpu_estimator' _ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop' _BATCH_SIZE_KEY = 'batch_size' _CTX_KEY = 'context' _USE_TPU_KEY = 'use_tpu' _CROSS_REPLICA_SUM_OP = 'CrossReplicaSum' _ONE_GIGABYTE = 1024 * 1024 * 1024 _TPU_ENQUEUE_OPS = '_tpu_enqueue_ops' _TPU_TRAIN_OP = '_tpu_train_op' _REWRITE_FOR_INFERENCE_MODE = '_rewrite_for_inference' # Ideally _USE_TPU_KEY should be reserved as well. However there are already # models that make use of this key, thus it can not be reserved now to prevent # breakage. In the long run, we would like to mitigate this by migrating models # off of using _USE_TPU_KEY. _RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY] # TODO(b/65703635): Flip the value and remove all dead code. Currently, this is # only used for per-core based deployments. For per-host based pipelines, if a # user returns a Dataset instance it will be automatically wrapped in a # tf.while_loop (This can be disabled by returning features and labels # explicitly). _WRAP_INPUT_FN_INTO_WHILE_LOOP = False ops.register_proto_function( '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR), proto_type=variable_pb2.VariableDef, to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access def _is_iterable(obj): """A Python 2 and 3 compatible util to check whether `obj` is iterable.""" try: iter(obj) return True except TypeError: return False def _create_global_step(graph): graph = graph or ops.get_default_graph() if training.get_global_step(graph) is not None: raise ValueError('"global_step" already exists.') # Create in proper graph and base name_scope. with graph.as_default() as g, g.name_scope(None): return variable_scope.get_variable( ops.GraphKeys.GLOBAL_STEP, shape=[], dtype=dtypes.int64, initializer=init_ops.zeros_initializer(), trainable=False, use_resource=True, collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP]) def _create_or_get_iterations_per_loop(): """Creates or gets the iterations_per_loop variable. In TPUEstimator, the user provided computation, the model_fn, is wrapped inside a tf.while_loop for peak performance. The iterations of the loop are specified by this variable, which adjusts its value on the CPU after each TPU program execution and before the next TPU execution. The purpose of using a variable, rather then a constant, is to allow TPUEstimator adapt the TPU training iterations according to the final steps specified by users. For example, if the user sets the iterations_per_loop as 4 in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop variable will have the following value before each TPU training. - 1-th TPU execution: iterations_per_loop = 4 - 2-th TPU execution: iterations_per_loop = 4 - 3-th TPU execution: iterations_per_loop = 2 As model_fn increases the global step once per train_op invocation, the global step is 10 after all TPU executions, matching the steps=10 inputs passed in by users. Returns: A TF non-trainable resource variable. Raises: RuntimeError: If multi iterations_per_loop variables were found. 
""" graph = ops.get_default_graph() collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR) iter_vars = graph.get_collection(collection_name) if len(iter_vars) == 1: return iter_vars[0] elif len(iter_vars) > 1: raise RuntimeError('Multiple iterations_per_loop_var in collection.') with ops.colocate_with(training_util.get_global_step()): with variable_scope.variable_scope( _TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE): return variable_scope.get_variable( _ITERATIONS_PER_LOOP_VAR, initializer=init_ops.zeros_initializer(), shape=[], dtype=dtypes.int32, trainable=False, collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES], use_resource=True) def _sync_variables_ops(ctx): """Create varriables synchronization ops. Gets the variables back from TPU nodes. This means the variables updated by TPU will now be *synced* to host memory. In BROADCAST mode, we skip this sync since the variables are ususally too big to transmit via RPC. Args: ctx: A `_InternalTPUContext` instance with mode. Returns: A list of sync ops. """ if not ctx.is_input_broadcast_with_iterators(): return [ array_ops.check_numerics(v.read_value(), 'Gradient for %s is NaN' % v.name).op for v in variables.trainable_variables() ] else: return [control_flow_ops.no_op()] def _increase_eval_step_op(iterations_per_loop): """Returns an op to increase the eval step for TPU evaluation. Args: iterations_per_loop: Tensor. The number of eval steps running in TPU system before returning to CPU host for each `Session.run`. Returns: An operation """ eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access # Estimator evaluate increases 1 by default. So, we increase the difference. return state_ops.assign_add( eval_step, math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype), use_locking=True) def _extract_key_names(tensor_or_dict): if isinstance(tensor_or_dict, dict): return sorted(tensor_or_dict.keys()) return [] class _SIGNAL(object): """Signal used to control the thread of infeed/outfeed. All preserved signals must be negative numbers. Positive numbers are used to indicate the number of iterations for next training/evaluation loop. """ NEXT_BATCH = -1 STOP = -2 class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access """Ops and objects returned from a `model_fn` and passed to `TPUEstimator`. See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and `export_outputs`. For evaluation, `eval_metrics `is a tuple of `metric_fn` and `tensors`, where `metric_fn` runs on CPU to generate metrics and `tensors` represents the `Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`. To be precise, TPU evaluation expects a slightly different signature from the `tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`. The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The `tensors` usually specify the model logits, which are transferred back from TPU system to CPU host. All tensors must have be batch-major, i.e., the batch size is the first dimension. Once all tensors are available at CPU host from all shards, they are concatenated (on CPU) and passed as positional arguments to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is a dict. 
`metric_fn` takes the `tensors` and returns a dict from metric string name to the result of calling a metric function, namely a `(metric_tensor, update_op)` tuple. See `TPUEstimator` for MNIST example how to specify the `eval_metrics`. `scaffold_fn` is a function running on CPU to generate the `Scaffold`. This function should not capture any Tensors in `model_fn`. `host_call` is a tuple of a `function` and a list or dictionary of `tensors` to pass to that function and returns a list of Tensors. `host_call` currently works for train() and evaluate(). The Tensors returned by the function is executed on the CPU on every step, so there is communication overhead when sending tensors from TPU to CPU. To reduce the overhead, try reducing the size of the tensors. The `tensors` are concatenated along their major (batch) dimension, and so must be >= rank 1. The `host_call` is useful for writing summaries with `tf.contrib.summary.create_file_writer`. """ def __new__(cls, mode, predictions=None, loss=None, train_op=None, eval_metrics=None, export_outputs=None, scaffold_fn=None, host_call=None, training_hooks=None, evaluation_hooks=None, prediction_hooks=None): """Creates a validated `TPUEstimatorSpec` instance.""" host_calls = {} if eval_metrics is not None: host_calls['eval_metrics'] = eval_metrics if host_call is not None: host_calls['host_call'] = host_call _OutfeedHostCall.validate(host_calls) training_hooks = tuple(training_hooks or []) evaluation_hooks = tuple(evaluation_hooks or []) prediction_hooks = tuple(prediction_hooks or []) for hook in training_hooks + evaluation_hooks + prediction_hooks: if not isinstance(hook, session_run_hook.SessionRunHook): raise TypeError('All hooks must be SessionRunHook instances, given: {}' .format(hook)) return super(TPUEstimatorSpec, cls).__new__( cls, mode=mode, predictions=predictions, loss=loss, train_op=train_op, eval_metrics=eval_metrics, export_outputs=export_outputs, scaffold_fn=scaffold_fn, host_call=host_call, training_hooks=training_hooks, evaluation_hooks=evaluation_hooks, prediction_hooks=prediction_hooks) def as_estimator_spec(self): """Creates an equivalent `EstimatorSpec` used by CPU train/eval.""" host_calls = {} if self.eval_metrics is not None: host_calls['eval_metrics'] = self.eval_metrics if self.host_call is not None: host_calls['host_call'] = self.host_call host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls) eval_metric_ops = None if self.eval_metrics is not None: eval_metric_ops = host_call_ret['eval_metrics'] hooks = None if self.host_call is not None: hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])] if tensor_tracer.TensorTracer.is_enabled(): tt = tensor_tracer.TensorTracer() tracing_calls = tt.trace_cpu(ops.get_default_graph()) tracing_call_ret = _OutfeedHostCall.create_cpu_hostcall(tracing_calls) tracing_functions = tracing_call_ret.values() if tracing_functions: if hooks: hooks.extend([_OutfeedHostCallHook(tracing_functions)]) else: hooks = [_OutfeedHostCallHook(tracing_functions)] hooks = tuple(hooks or []) scaffold = self.scaffold_fn() if self.scaffold_fn else None return model_fn_lib.EstimatorSpec( mode=self.mode, predictions=self.predictions, loss=self.loss, train_op=self.train_op, eval_metric_ops=eval_metric_ops, export_outputs=self.export_outputs, scaffold=scaffold, training_hooks=self.training_hooks + hooks, evaluation_hooks=self.evaluation_hooks + hooks, prediction_hooks=self.prediction_hooks + hooks) class _OpQueueContext(object): """Manages work queue and thread for a infeed/outfeed thread.""" def 
__init__(self, name, target, args): self._name = name self._queue = Queue.Queue() args = (self,) + args self._thread = threading.Thread(name=name, target=target, args=args) self._thread.daemon = True self._thread.start() def stop(self): self._queue.put(_SIGNAL.STOP) def send_next_batch_signal(self, iterations): self._queue.put(iterations) def read_iteration_counts(self): while True: iterations = self._queue.get(block=True) logging.debug('%s read iterations %s', self._name, iterations) if iterations == _SIGNAL.STOP: logging.info('%s received shutdown signal, stopping.', self._name) return yield iterations def join(self): logging.info('Shutting down %s thread.', self._name) self.stop() self._thread.join() class _OpSignalOnceQueueContext(_OpQueueContext): """Manages work queue and thread for a infeed/outfeed thread. This subclass only signals once. """ def __init__(self, name, target, args): super(_OpSignalOnceQueueContext, self).__init__(name, target, args) self._has_signaled = False def send_next_batch_signal(self, iterations): if not self._has_signaled: self._queue.put(iterations) self._has_signaled = True class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook): """A Session hook setting up the TPU initialization, infeed, and outfeed. This hook does two major things: 1. initialize and shutdown TPU system. 2. launch and join the threads for infeed enqueue and (optional) outfeed dequeue. """ def __init__(self, ctx, enqueue_ops, dequeue_ops, tpu_compile_op, run_infeed_loop_on_coordinator=True, rendezvous=None, master=None, session_config=None): self._master_job = ctx.master_job self._enqueue_ops = enqueue_ops self._dequeue_ops = dequeue_ops self._rendezvous = rendezvous self._master = master self._session_config = session_config self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator self._initial_infeed_sleep_secs = ( ctx.config.tpu_config.initial_infeed_sleep_secs) self._feed_error = None self._finished = False self._should_initialize_tpu = True self._tpu_compile_op = tpu_compile_op def begin(self): logging.info('TPU job name %s', self._master_job) self._iterations_per_loop_var = _create_or_get_iterations_per_loop() self._init_ops = [] if self._should_initialize_tpu: self._finalize_ops = [tpu.shutdown_system(job=self._master_job)] else: self._finalize_ops = [] summary_writer_init_ops = contrib_summary.summary_writer_initializer_op() self._init_ops.extend(summary_writer_init_ops) # Get all the writer resources from the initializer, so we know what to # flush. 
for op in summary_writer_init_ops: self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0])) def _run_infeed(self, queue_ctx, session): logging.info('Starting infeed thread controller.') if self._initial_infeed_sleep_secs: logging.info('Infeed thread sleeping for %d seconds.', self._initial_infeed_sleep_secs) time.sleep(self._initial_infeed_sleep_secs) logging.info('Infeed thread starting after sleep') with self._rendezvous.catch_errors(source='infeed', session=session): if self._run_infeed_loop_on_coordinator: for count, steps in enumerate(queue_ctx.read_iteration_counts()): for i in xrange(steps): logging.debug('Infeed enqueue for iteration (%d, %d)', count, i) session.run(self._enqueue_ops) else: for _ in queue_ctx.read_iteration_counts(): session.run(self._enqueue_ops) logging.info('Infeed thread finished, shutting down.') def _run_outfeed(self, queue_ctx, session): logging.info('Starting outfeed thread controller.') with self._rendezvous.catch_errors(source='outfeed', session=session): for count, steps in enumerate(queue_ctx.read_iteration_counts()): for i in xrange(steps): logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i) session.run(self._dequeue_ops) logging.info('Outfeed thread finished, shutting down.') def _create_infeed_controller(self, name, target, args): return _OpQueueContext(name=name, target=target, args=args) def _assertCompilationSucceeded(self, result, coord): proto = tpu_compilation_result.CompilationResultProto() proto.ParseFromString(result) if proto.status_error_message: logging.error('Compilation failed: {}'.format(proto.status_error_message)) coord.request_stop() else: logging.info('Compilation succeeded') def after_create_session(self, session, coord): if self._should_initialize_tpu: logging.info('Init TPU system') start = time.time() with ops.Graph().as_default(): with tf_session.Session( self._master, config=self._session_config) as sess: sess.run(tpu.initialize_system(job=self._master_job)) logging.info('Initialized TPU in %d seconds', time.time() - start) session.run(self._init_ops, options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000)) if os.environ.get('TPU_SPLIT_COMPILE_AND_EXECUTE', '') == '1': logging.info('Compiling user program: this may take a while...') self._assertCompilationSucceeded(session.run(self._tpu_compile_op), coord) self._infeed_controller = self._create_infeed_controller( name='InfeedController', target=self._run_infeed, args=(session,)) self._outfeed_controller = _OpQueueContext( name='OutfeedController', target=self._run_outfeed, args=(session,)) # Enable the worker watchdog to terminate workers on coordinator exit. 
watchdog_timeout = int(os.environ.get('TF_TPU_WATCHDOG_TIMEOUT', '0')) if watchdog_timeout > 0: session_support.start_worker_watchdog(session, shutdown_timeout=watchdog_timeout) def before_run(self, run_context): self._feed_error = None iterations = run_context.session.run(self._iterations_per_loop_var) logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations) self._infeed_controller.send_next_batch_signal(iterations) logging.info('Dequeue next (%d) batch(es) of data from outfeed.', iterations) self._outfeed_controller.send_next_batch_signal(iterations) def end(self, session): self._finished = True logging.info('Stop infeed thread controller') self._infeed_controller.join() self._rendezvous.record_done('infeed') logging.info('Stop output thread controller') self._outfeed_controller.join() self._rendezvous.record_done('outfeed') logging.info('Shutdown TPU system.') session.run(self._finalize_ops) class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook): def __init__(self, ctx, enqueue_ops, dequeue_ops, tpu_compile_op, rendezvous=None, master=None, session_config=None): super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__( ctx, enqueue_ops, dequeue_ops, tpu_compile_op=tpu_compile_op, run_infeed_loop_on_coordinator=False, rendezvous=rendezvous, master=master, session_config=session_config) def _create_infeed_controller(self, name, target, args): return _OpSignalOnceQueueContext(name=name, target=target, args=args) class _TPUStopAtStepHook(session_run_hook.SessionRunHook): """Hook that requests stop at a specified step. This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with following differences for TPU training: 1. This hook sets the variable for iterations_per_loop, which is used by `TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed. As the hook execution order is not guaranteed, the variable update is handled in `after_create_session` and `after_run` as `TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`. 2. For each training loop (session.run), the global step could be increased multiple times on TPU. The global step tensor value will be explicitly read again in `after_run` to ensure the latest value is retrieved to avoid race condition. """ def __init__(self, iterations, num_steps=None, last_step=None): """Initializes a `StopAtStepHook`. Args: iterations: The number of iterations to run optimizer per training loop. num_steps: Number of steps to execute. last_step: Step after which to stop. Raises: ValueError: If one of the arguments is invalid. 
""" if num_steps is None and last_step is None: raise ValueError('One of num_steps or last_step must be specified.') if num_steps is not None and last_step is not None: raise ValueError('Only one of num_steps or last_step can be specified.') self._num_steps = num_steps self._last_step = last_step self._iterations = iterations def _next_iterations(self, global_step, last_step): gap = last_step - global_step return min(gap, self._iterations) def begin(self): self._global_step_tensor = training_util.get_global_step() if self._global_step_tensor is None: raise RuntimeError('Global step should be created.') self._iterations_per_loop_var = _create_or_get_iterations_per_loop() def after_create_session(self, session, coord): global_step = session.run(self._global_step_tensor) if self._last_step is None: self._last_step = global_step + self._num_steps iterations = self._next_iterations(global_step, self._last_step) self._iterations_per_loop_var.load(iterations, session=session) def after_run(self, run_context, run_values): # Global step cannot be retrieved via SessionRunArgs and before_run due to # race condition. global_step = run_context.session.run(self._global_step_tensor) if global_step >= self._last_step: run_context.request_stop() else: iterations = self._next_iterations(global_step, self._last_step) self._iterations_per_loop_var.load( iterations, session=run_context.session) class _SetEvalIterationsHook(session_run_hook.SessionRunHook): """Hook that requests stop at a specified step.""" def __init__(self, num_steps): """Initializes a `_SetEvalIterationsHook`. Args: num_steps: Number of steps to execute. """ self._num_steps = num_steps def begin(self): self._iterations_per_loop_var = _create_or_get_iterations_per_loop() def after_create_session(self, session, coord): self._iterations_per_loop_var.load(self._num_steps, session=session) class _StoppingPredictHook(session_run_hook.SessionRunHook): """Hook that requests stop according to the stopping signal in prediction.""" def __init__(self, scalar_stopping_signal): self._scalar_stopping_signal = scalar_stopping_signal def begin(self): self._iterations_per_loop_var = _create_or_get_iterations_per_loop() def after_create_session(self, session, coord): # This is not necessary as we do not run infeed enqueue and outfeed dequeue # in side threads for prediction model. But it makes the # TPUInfeedOutfeedSessionHook prints nice message. self._iterations_per_loop_var.load(1, session=session) def before_run(self, run_context): return session_run_hook.SessionRunArgs(self._scalar_stopping_signal) def after_run(self, run_context, run_values): _ = run_context scalar_stopping_signal = run_values.results if _StopSignals.should_stop(scalar_stopping_signal): # NOTE(xiejw): In prediction, stopping signals are inserted for each # batch. And we append one more batch to signal the system it should stop. # The data flow might look like # # batch 0: images, labels, stop = 0 (user provided) # batch 1: images, labels, stop = 0 (user provided) # ... # batch 99: images, labels, stop = 0 (user provided) # batch 100: images, labels, stop = 1 (TPUEstimator appended) # # where the final batch (id = 100) is appended by TPUEstimator, so we # should drop it before returning the predictions to user. # To achieve that, we throw the OutOfRangeError in after_run. 
Once # Monitored Session sees this error in SessionRunHook.after_run, the # "current" prediction, i.e., batch with id=100, will be discarded # immediately raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.') def generate_per_core_enqueue_ops_fn_for_host( ctx, input_fn, inputs_structure_recorder, host_device, host_id): """Generates infeed enqueue ops for per-core input_fn on a single host.""" captured_infeed_queue = _CapturedObject() tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id) def enqueue_ops_fn(): """A fn returns enqueue_ops.""" num_cores_per_host = ctx.num_of_cores_per_host per_host_sharded_inputs = [] for core_ordinal in range(num_cores_per_host): with ops.name_scope('ordinal_%d' % (core_ordinal)): user_context = tpu_context.TPUContext( internal_ctx=ctx, input_device=host_device, invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal) inputs = _Inputs.from_input_fn(input_fn(user_context)) if inputs.is_dataset: raise TypeError( '`input_fn` returning `Dataset` is not yet supported in ' 'per-Core input pipeline deployment yet. Please set ' 'TPUConfig.per_host_input_for_training to True or return ' '`features` and `labels` from `input_fn`') features, labels = inputs.features_and_labels() inputs_structure_recorder.validate_and_record_structure( features, labels) flattened_inputs = ( inputs_structure_recorder.flatten_features_and_labels( features, labels)) per_host_sharded_inputs.append(flattened_inputs) infeed_queue = tpu_feed.InfeedQueue( number_of_tuple_elements=len(per_host_sharded_inputs[0])) captured_infeed_queue.capture(infeed_queue) per_host_enqueue_ops = infeed_queue.generate_enqueue_ops( per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl) return per_host_enqueue_ops return enqueue_ops_fn, captured_infeed_queue def generate_per_host_enqueue_ops_fn_for_host( ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id): """Generates infeed enqueue ops for per-host input_fn on a single host.""" captured_infeed_queue = _CapturedObject() dataset_initializer = None with ops.device(device): user_context = tpu_context.TPUContext( internal_ctx=ctx, input_device=device, invocation_index=host_id) inputs = _Inputs.from_input_fn(input_fn(user_context)) is_dataset = inputs.is_dataset if ctx.mode == model_fn_lib.ModeKeys.PREDICT: if not is_dataset: raise TypeError( 'For mode PREDICT, `input_fn` must return `Dataset` instead of ' '`features` and `labels`.') if batch_axis is not None: raise TypeError('For mode PREDICT, batch_axis is not supported yet.') inputs = _InputsWithStoppingSignals( dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn, add_padding=True) if is_dataset: dataset_initializer = inputs.dataset_initializer() tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id) def enqueue_ops_fn(): """A Fn returning the TPU infeed enqueue ops. By providing as a Fn, it can be invoked inside the tf.while_loop such that the input pipeline for multiple iterations can be executed by one Session.run call. Returns: list of dict of ops. """ with ops.device(device): num_of_replicas_per_host = ctx.num_of_replicas_per_host # Convert user input to features and labels. 
If the user returns a # dataset, it is initialized and the features and labels extracted via # `dataset.iterator.get_next()` features, labels = inputs.features_and_labels() signals = inputs.signals() inputs_structure_recorder.validate_and_record_structure(features, labels) unsharded_tensor_list = ( inputs_structure_recorder.flatten_features_and_labels( features, labels, signals)) infeed_queue = tpu_feed.InfeedQueue( tuple_types=[t.dtype for t in unsharded_tensor_list], tuple_shapes=[t.shape for t in unsharded_tensor_list], shard_dimensions=batch_axis) captured_infeed_queue.capture(infeed_queue) infeed_queue.set_number_of_shards(num_of_replicas_per_host) per_host_enqueue_ops = ( infeed_queue.split_inputs_and_generate_enqueue_ops( unsharded_tensor_list, placement_function=lambda x: device, tpu_ordinal_function=tpu_ordinal_function_impl)) if signals is None: return per_host_enqueue_ops else: return { 'ops': per_host_enqueue_ops, 'signals': signals, } return enqueue_ops_fn, captured_infeed_queue, dataset_initializer def generate_per_host_v2_enqueue_ops_fn_for_host( ctx, input_fn, inputs_structure_recorder, device, host_id): """Generates infeed enqueue ops for per-host input_fn on a single host.""" captured_infeed_queue = _CapturedObject() dataset_initializer = None with ops.device(device): user_context = tpu_context.TPUContext( internal_ctx=ctx, input_device=device, invocation_index=host_id) inputs = _Inputs.from_input_fn(input_fn(user_context)) is_dataset = inputs.is_dataset if not is_dataset: raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 ' 'input pipeline configuration.') if ctx.mode == model_fn_lib.ModeKeys.PREDICT: inputs = _InputsWithStoppingSignals( dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn, add_padding=True, num_invocations_per_step=ctx.num_of_replicas_per_host) dataset_initializer = inputs.dataset_initializer() tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id) def enqueue_ops_fn(): """Generates the per_host enqueue ops.""" control_deps = [] per_host_sharded_inputs = [] num_replicas_per_host = ctx.num_of_replicas_per_host cached_signals = None with ops.device(device): if not inputs.is_dataset: raise TypeError('`input_fn` must return a `Dataset` for this mode.') for _ in range(num_replicas_per_host): # Use control dependencies to ensure a deterministic ordering. with ops.control_dependencies(control_deps): features, labels = inputs.features_and_labels() # Calls get_next() signals = inputs.signals() # All the replicas share the replica 0's stopping singal. # This avoids inconsistent state among different model replcias. 
if cached_signals: signals['stopping'] = cached_signals['stopping'] else: cached_signals = signals inputs_structure_recorder.validate_and_record_structure( features, labels) flattened_inputs = ( inputs_structure_recorder.flatten_features_and_labels( features, labels, signals)) control_deps.extend(flattened_inputs) per_host_sharded_inputs.append(flattened_inputs) if inputs_structure_recorder.flattened_input_dims: input_partition_dims = inputs_structure_recorder.flattened_input_dims if signals: input_partition_dims += [None] * len(signals) # pylint: disable=protected-access infeed_queue = tpu_feed._PartitionedInfeedQueue( number_of_tuple_elements=len(per_host_sharded_inputs[0]), host_id=host_id, input_partition_dims=input_partition_dims, device_assignment=ctx.device_assignment) per_host_enqueue_ops = infeed_queue.generate_enqueue_ops( per_host_sharded_inputs) else: infeed_queue = tpu_feed.InfeedQueue( number_of_tuple_elements=len(per_host_sharded_inputs[0])) per_host_enqueue_ops = infeed_queue.generate_enqueue_ops( per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl) captured_infeed_queue.capture(infeed_queue) if signals is None: return per_host_enqueue_ops else: return { 'ops': per_host_enqueue_ops, 'signals': signals, } return enqueue_ops_fn, captured_infeed_queue, dataset_initializer def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder, num_hosts): """Generates infeed enqueue ops for one input_fn on all the hosts.""" captured_infeed_queue = _CapturedObject() dataset_initializer = None device_0 = ctx.tpu_host_placement_function(host_id=0) with ops.device(device_0): user_context = tpu_context.TPUContext( internal_ctx=ctx, input_device=device_0, invocation_index=0) inputs = _Inputs.from_input_fn(input_fn(user_context)) is_dataset = inputs.is_dataset if ctx.mode == model_fn_lib.ModeKeys.PREDICT: if not is_dataset: raise TypeError( 'For mode PREDICT, `input_fn` must return `Dataset` instead of ' '`features` and `labels`.') inputs = _InputsWithStoppingSignals( dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn, add_padding=True) if is_dataset: dataset_initializer = inputs.dataset_initializer() num_replicas_per_host = ctx.num_of_replicas_per_host def tpu_ordinal_function_impl(replica_id): if ctx.device_assignment: return ctx.device_assignment.tpu_ordinal(replica=replica_id) else: return replica_id % num_replicas_per_host def device_function_impl(replica_id): return ctx.tpu_host_placement_function(replica_id=replica_id) def enqueue_ops_fn(): """Generates enqueue ops for all the hosts.""" broadcasted_inputs = [] flattened_inputs = None # Cache result from input_fn. signals = None for host_id in xrange(num_hosts): with ops.device(ctx.tpu_host_placement_function(host_id=host_id)): for _ in xrange(ctx.num_of_replicas_per_host): # Note: input_fn is only called once at host 0 for the first replica. # The features and labels returned from that invocation are # broadcasted to other replicas(including the replicas on other # hosts). 
if flattened_inputs is None: features, labels = inputs.features_and_labels() # Calls get_next() signals = inputs.signals() inputs_structure_recorder.validate_and_record_structure( features, labels) flattened_inputs = ( inputs_structure_recorder.flatten_features_and_labels( features, labels, signals)) broadcasted_inputs.append(flattened_inputs) infeed_queue = tpu_feed.InfeedQueue( number_of_tuple_elements=len(broadcasted_inputs[0])) captured_infeed_queue.capture(infeed_queue) enqueue_ops = infeed_queue.generate_enqueue_ops( broadcasted_inputs, tpu_ordinal_function=tpu_ordinal_function_impl, placement_function=device_function_impl) if signals is None: return enqueue_ops else: return { 'ops': enqueue_ops, 'signals': signals, } return enqueue_ops_fn, captured_infeed_queue, dataset_initializer class _InputPipeline(object): """`_InputPipeline` handles invoking `input_fn` and piping to infeed queue. `_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from call site. To be precise, based on the configuration in `_InternalTPUContext`, it invokes `input_fn` for all cores (usually multi-host TPU training) or for one host (usually for single-host TPU evaluation), and sends all `features` and `labels` returned by `input_fn` to TPU infeed. For per-core invocation, `features` and `labels` are piped to infeed directly, one tuple for each core. For per-host invocation, `features` and `labels` are split at host (with respect to `batch_axis`) and piped to all cores accordingly. In addition, flatten/unflatten are handled by `_InputPipeline` also. Model inputs returned by the `input_fn` can have one of the following forms: 1. features 2. (features, labels) 3. ((arbitrarily nested structure of features), labels) Internally, form 1 is reformed to `(features, None)` as features and labels are passed separately to underlying methods. For TPU training, TPUEstimator may expect multiple `features` and `labels` tuples one for each core. TPUEstimator allows various different structures for inputs (namely `features` and `labels`). Both `features` and `labels` can be any nested sturcture supported by TF nest (namely, dict, tuples, namedtuples or any nested structure of such of Tensors). `labels` could be `None` as well. These are flattened before they are passed to the infeed/outfeed library as that expectes flattend lists. """ class InputsStructureRecorder(object): """The recorder to record inputs structure.""" def __init__(self, input_partition_dims=None): # Holds the structure of inputs self._feature_structure = {} self._flattened_input_dims = None if input_partition_dims: # This should have been validated in TPUConfig. assert len(input_partition_dims) <= 2, 'must have 1 or 2 elements.' if len(input_partition_dims) == 2: self._feature_dims, self._label_dims = input_partition_dims else: self._feature_dims = input_partition_dims[0] self._label_dims = None assert self._feature_dims is not None, ('input_partition_dims[0] must ' 'not be None') else: self._feature_dims = None self._label_dims = None # Internal state. self._initialized = False @property def flattened_input_dims(self): assert self._initialized, 'InputsStructureRecorder is not initialized.' 
return self._flattened_input_dims def has_labels(self): return 'labels' in self._feature_structure def _flatten_input_dims(self, feature_dims, feature_dims_names, label_dims, label_dims_names, label_names, has_labels): """Flatten input dims with the same order as flattened input tensors.""" flattened_input_dims = [] if feature_dims_names: # We need a fixed ordering for matching the tensors in features. flattened_input_dims.extend( [feature_dims[name] for name in feature_dims_names]) else: flattened_input_dims.append(feature_dims) if label_dims_names: # We need a fixed ordering for matching the tensors in labels. flattened_input_dims.extend( [label_dims[name] for name in label_dims_names]) else: if label_names: num_tensors_in_label = len(label_names) else: num_tensors_in_label = int(has_labels) # Setting `None` in input_partition_dims[1] will apply `None` to # all the tensors in labels, regardless of internal structure. flattened_input_dims.extend([label_dims] * num_tensors_in_label) return flattened_input_dims def validate_and_record_structure(self, features, labels): """Validates and records the structure of `features` and `labels`.""" # Extract structure. has_labels = labels is not None feature_names = _extract_key_names(features) label_names = _extract_key_names(labels) if not self._initialized: # Record structure. self._initialized = True if self._feature_dims is not None: feature_dims_names = _extract_key_names(self._feature_dims) if feature_dims_names != feature_names: raise ValueError( 'TPUConfig.input_partition_dims[0] mismatched feature' ' keys. Expected {}, got {}'.format(feature_names, feature_dims_names)) label_dims_names = _extract_key_names(self._label_dims) if self._label_dims is not None and label_dims_names != label_names: raise ValueError( 'TPUConfig.input_partition_dims[1] mismatched label' ' keys. Expected {}, got {}'.format(label_names, label_dims_names)) self._flattened_input_dims = self._flatten_input_dims( self._feature_dims, feature_dims_names, self._label_dims, label_dims_names, label_names, has_labels) def flatten_features_and_labels(self, features, labels, signals=None): """Flattens the `features` and `labels` to a single tensor list.""" self._feature_structure['features'] = features if labels is not None: self._feature_structure['labels'] = labels if signals is not None: self._feature_structure['signals'] = signals return data_nest.flatten(self._feature_structure) def unflatten_features_and_labels(self, flattened_inputs): """Restores the flattened inputs to original features and labels form. Args: flattened_inputs: Flattened inputs for each shard. Returns: A tuple of (`features`, `labels`), where `labels` could be None. Each one, if present, should have identical structure (single tensor vs dict) as the one returned by input_fn. Raises: ValueError: If the number of expected tensors from `flattened_inputs` mismatches the recorded structure. """ unflattened_inputs = data_nest.pack_sequence_as(self._feature_structure, flattened_inputs) return _Inputs( unflattened_inputs['features'], unflattened_inputs.get('labels'), signals=unflattened_inputs.get('signals')) def __init__(self, input_fn, batch_axis, ctx): """Constructor. Args: input_fn: input fn for train or eval. batch_axis: A python tuple of int values describing how each tensor produced by the Estimator `input_fn` should be split across the TPU compute shards. ctx: A `_InternalTPUContext` instance with mode. Raises: ValueError: If both `sharded_features` and `num_cores` are `None`. 
""" self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder( ctx.input_partition_dims) self._sharded_per_core = ctx.is_input_sharded_per_core() self._input_fn = input_fn self._infeed_queue = None self._ctx = ctx self._batch_axis = batch_axis def generate_infeed_enqueue_ops_and_dequeue_fn(self): """Generates infeed enqueue ops and dequeue_fn.""" # While tf.while_loop is called, the body function, which invokes # `enqueue_fn` passed in, is called to construct the graph. So, input_fn # structure is recorded. enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = ( self._invoke_input_fn_and_record_structure()) self._validate_input_pipeline() def dequeue_fn(): """dequeue_fn is used by TPU to retrieve the tensors.""" # In the model-parallel case, both the host-side and device-side # computations must agree on the core on which infeed takes place. We # choose to perform infeed on logical core 0 of each replica. values = self._infeed_queue.generate_dequeue_op(tpu_device=0) # The unflatten process uses the structure information recorded above. return self._inputs_structure_recorder.unflatten_features_and_labels( values) return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator) def _invoke_input_fn_and_record_structure(self): """Deploys the input pipeline and record input structure.""" enqueue_ops = [] infeed_queues = [] all_dataset_initializers = [] num_hosts = self._ctx.num_hosts tpu_host_placement_fn = self._ctx.tpu_host_placement_function run_infeed_loop_on_coordinator = True if self._sharded_per_core: # Per-Core input pipeline deployment. # Invoke input pipeline for each core and placed on the corresponding # host. for host_id in range(num_hosts): host_device = tpu_host_placement_fn(host_id=host_id) with ops.device(host_device): with ops.name_scope('input_pipeline_task%d' % (host_id)): enqueue_ops_fn, captured_infeed_queue = ( generate_per_core_enqueue_ops_fn_for_host( self._ctx, self._input_fn, self._inputs_structure_recorder, host_device, host_id)) if _WRAP_INPUT_FN_INTO_WHILE_LOOP: run_infeed_loop_on_coordinator = False enqueue_ops.append( _wrap_computation_in_while_loop( device=host_device, op_fn=enqueue_ops_fn)) else: enqueue_ops.append(enqueue_ops_fn()) # Infeed_queue_getter must be called after enqueue_ops_fn is called. infeed_queues.append(captured_infeed_queue.get()) elif self._ctx.is_input_broadcast_with_iterators(): # Only calls input_fn in host 0. 
host_device = tpu_host_placement_fn(host_id=0) enqueue_ops_fn, captured_infeed_queue, dataset_initializer = ( generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn, self._inputs_structure_recorder, num_hosts)) if dataset_initializer: all_dataset_initializers.append(dataset_initializer) run_infeed_loop_on_coordinator = False wrap_fn = ( _wrap_computation_in_while_loop if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else _wrap_computation_in_while_loop_with_stopping_signals) enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn)) else: enqueue_ops.append(enqueue_ops_fn()) infeed_queues.append(captured_infeed_queue.get()) else: for host_id in range(num_hosts): host_device = tpu_host_placement_fn(host_id=host_id) with ops.device(host_device): with ops.name_scope('input_pipeline_task%d' % (host_id)): if self._ctx.is_input_per_host_with_iterators(): enqueue_ops_fn, captured_infeed_queue, dataset_initializer = ( generate_per_host_v2_enqueue_ops_fn_for_host( self._ctx, self._input_fn, self._inputs_structure_recorder, host_device, host_id)) else: enqueue_ops_fn, captured_infeed_queue, dataset_initializer = ( generate_per_host_enqueue_ops_fn_for_host( self._ctx, self._input_fn, self._inputs_structure_recorder, self._batch_axis, host_device, host_id)) # NOTE(xiejw): We dispatch here based on the return type of the # users `input_fn`. # # 1. If input_fn returns a Dataset instance, we initialize the # iterator outside of tf.while_loop, and call the iterator.get_next # inside tf.while_loop. This should be always safe. # # 2. If input_fn returns (features, labels), it is too late to wrap # them inside tf.while_loop, as resource initialization cannot be # handled in TF control flow properly. In this case, we will use # python loop to enqueue the data into TPU system. This may be # slow compared to the previous case. if dataset_initializer: all_dataset_initializers.append(dataset_initializer) run_infeed_loop_on_coordinator = False wrap_fn = ( _wrap_computation_in_while_loop if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else _wrap_computation_in_while_loop_with_stopping_signals) enqueue_ops.append( wrap_fn(device=host_device, op_fn=enqueue_ops_fn)) else: enqueue_ops.append(enqueue_ops_fn()) infeed_queues.append(captured_infeed_queue.get()) # infeed_queue is used to generate dequeue ops. The only thing it uses for # dequeue is dtypes and types. So, any one can be used. Here, grab the # first one. self._infeed_queue = infeed_queues[0] return enqueue_ops, [ util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers) ], run_infeed_loop_on_coordinator def _validate_input_pipeline(self): """Validates the input pipeline. Perform some sanity checks to log user friendly information. We should error out to give users better error message. But, if _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break user code, so, log a warning. Raises: RuntimeError: If the validation failed. """ if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS): err_msg = ('Input pipeline contains one or more QueueRunners. ' 'It could be slow and not scalable. Please consider ' 'converting your input pipeline to use `tf.data` instead (see ' 'https://www.tensorflow.org/guide/datasets for ' 'instructions.') if _WRAP_INPUT_FN_INTO_WHILE_LOOP: raise RuntimeError(err_msg) else: logging.warn(err_msg) class _ModelFnWrapper(object): """A `model_fn` wrapper. 
This makes calling model_fn on CPU and TPU easier and more consistent and performs necessary check and mutation required by TPU training and evaluation. In addition, this wrapper manages converting the `model_fn` to a single TPU train and eval step. """ def __init__(self, model_fn, config, params, ctx): self._model_fn = model_fn self._config = config self._params = params self._ctx = ctx def call_without_tpu(self, features, labels, is_export_mode): return self._call_model_fn(features, labels, is_export_mode=is_export_mode) def convert_to_single_tpu_train_step(self, dequeue_fn): """Converts user provided model_fn` as a single train step on TPU. The user provided `model_fn` takes input tuple (features, labels) and produces the EstimatorSpec with train_op and loss for train `mode`. This usually represents a single train computation on CPU. For TPU training, a train (computation) step is first wrapped in a tf.while_loop control flow to repeat for many times and then replicated to all TPU shards. Besides the input should be taken from TPU infeed rather than input pipeline (input_fn) directly. To fit TPU loop and replicate pattern, the original train computation should be reformed, which is the returned `train_step`. Args: dequeue_fn: The function to retrieve inputs, features and labels, from TPU infeed dequeue channel. Returns: A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn representing the train step for TPU. """ host_call = _OutfeedHostCall(self._ctx) captured_scaffold_fn = _CapturedObject() captured_training_hooks = _CapturedObject() def train_step(loss): """Training step function for use inside a while loop.""" del loss # unused; required in function signature. inputs = dequeue_fn() features, labels = inputs.features_and_labels() estimator_spec = self._verify_estimator_spec( self._call_model_fn(features, labels)) loss, train_op = estimator_spec.loss, estimator_spec.train_op if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access captured_scaffold_fn.capture(estimator_spec.scaffold_fn) else: captured_scaffold_fn.capture(None) captured_training_hooks.capture(estimator_spec.training_hooks) tracing_ops = [] if tensor_tracer.TensorTracer.is_enabled(): tt = tensor_tracer.TensorTracer() loss, tracing_ops = tt.trace_tpu(ops.get_default_graph(), loss, self._ctx.num_replicas) # We must run train_op to update the variables prior to running the # outfeed. with ops.control_dependencies([train_op]+tracing_ops): host_call_outfeed_ops = [] if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access and estimator_spec.host_call is not None): host_call.record({'host_call': estimator_spec.host_call}) host_call_outfeed_ops = host_call.create_enqueue_op() with ops.control_dependencies(host_call_outfeed_ops): return array_ops.identity(loss) return (train_step, host_call, captured_scaffold_fn, captured_training_hooks) def convert_to_single_tpu_eval_step(self, dequeue_fn): """Converts user provided model_fn` as a single eval step on TPU. Similar to training, the user provided `model_fn` takes input tuple (features, labels) and produces the TPUEstimatorSpec with eval_metrics for eval `mode`. This usually represents a single evaluation computation on CPU. For TPU evaluation, a eval (computation) step is first wrapped in a tf.while_loop control flow to repeat for many times and then replicated to all TPU shards. Besides the input and output are slightly different. 
Input, features and labels, should be taken from TPU infeed rather than input pipeline (input_fn) directly. Output is managed in two stages. First, the model outputs as the result of evaluation computation, usually model logits, should be transferred from TPU system to CPU. Then, all model outputs are concatenated first on CPU and sent to the metric_fn for metrics computation. To fit TPU evaluation pattern, the original eval computation should be reformed, which is the returned `eval_step`. Args: dequeue_fn: The function to retrieve inputs, features and labels, from TPU infeed dequeue channel. Returns: A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn representing the eval step for TPU. """ host_calls = _OutfeedHostCall(self._ctx) captured_scaffold_fn = _CapturedObject() captured_eval_hooks = _CapturedObject() def eval_step(total_loss): """Evaluation step function for use inside a while loop.""" inputs = dequeue_fn() features, labels = inputs.features_and_labels() tpu_estimator_spec = self._call_model_fn(features, labels) if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access raise RuntimeError( 'estimator_spec used by TPU evaluation must have type' '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec))) loss = tpu_estimator_spec.loss captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn) captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks) to_record = {} if tpu_estimator_spec.eval_metrics: to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics if tpu_estimator_spec.host_call is not None: # We assume that evaluate won't update global step, so we don't wrap # this host_call. to_record['host_call'] = tpu_estimator_spec.host_call host_calls.record(to_record) with ops.control_dependencies(host_calls.create_enqueue_op()): return math_ops.add(total_loss, loss) return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks def convert_to_single_tpu_predict_step(self, dequeue_fn): """Converts user provided model_fn` as a single predict step on TPU. Args: dequeue_fn: The function to retrieve inputs, features and labels, from TPU infeed dequeue channel. Returns: A tuple of predict_fn, host_calls, and captured scaffold_fn. The predict_fn representing the predict step for TPU. """ host_calls = _OutfeedHostCall(self._ctx) captured_scaffold_fn = _CapturedObject() captured_predict_hooks = _CapturedObject() def predict_step(unused_scalar_stopping_signal): """Evaluation step function for use inside a while loop.""" inputs = dequeue_fn() features, labels = inputs.features_and_labels() stopping_signals = inputs.signals() assert stopping_signals is not None, ( 'Internal Error: `signals` is missing.') tpu_estimator_spec = self._call_model_fn( features, labels, is_export_mode=False) if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access raise RuntimeError( 'estimator_spec used by TPU prediction must have type' '`TPUEstimatorSpec`. 
Got {}'.format(type(tpu_estimator_spec))) self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions) captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn) captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks) to_record = {} identity_fn = lambda **kwargs: kwargs to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions] to_record['signals'] = [identity_fn, stopping_signals] if tpu_estimator_spec.host_call is not None: to_record['host_call'] = tpu_estimator_spec.host_call host_calls.record(to_record) with ops.control_dependencies(host_calls.create_enqueue_op()): return _StopSignals.as_scalar_stopping_signal(stopping_signals) return (predict_step, host_calls, captured_scaffold_fn, captured_predict_hooks) def _verify_tpu_spec_predictions(self, predictions): """Validates TPUEstimatorSpec.predictions dict.""" # TODO(xiejw): Adds validation for prediction dictionrary. # TODO(xiejw): Adds support for single tensor as predictions. if not isinstance(predictions, dict): raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.') for (key, tensor) in predictions.items(): if tensor.shape.dims[0].value is None: raise ValueError( 'The tensor with key ({}) in TPUEstimatorSpec.predictions has ' 'dynamic shape (should be static). Tensor: {}'.format(key, tensor)) return predictions def _validate_model_features_and_labels(self, features, labels, is_export_mode): """Validates that the features and labels for the model function are valid. A valid features/labels object is the one with: - Type: A tensor or any nested structure of tensors supported by TF nest, namely nested dictionary, tuple, namedtuple, or sequence of tensors. - Static shape if is_export_mode is False. Args: features: the features that would be input to the model function. labels: the labels that would be input to the model function. is_export_mode: boolean value specifying if in export mode. Raises: TypeError: If features/labels are not of the correct type. ValueError: If features/labels have dynamic shape. """ def validate(obj, obj_name): """Helper validate function.""" if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode): return if isinstance(obj, ops.Tensor): if not obj.get_shape().is_fully_defined(): raise ValueError( 'The {} to the model returned by input_fn must have static shape.' ' Tensor: {}'.format(obj_name, obj)) else: for tensor in data_nest.flatten(obj): if not tensor.get_shape().is_fully_defined(): raise ValueError( ('The {} to the model returned by input_fn must have static ' 'shape. Tensor: {}').format(obj_name, tensor)) validate(features, 'features') if labels is not None: validate(labels, 'labels') def _call_model_fn(self, features, labels, is_export_mode=False): """Calls the model_fn with required parameters.""" self._validate_model_features_and_labels(features, labels, is_export_mode) model_fn_args = function_utils.fn_args(self._model_fn) kwargs = {} # Makes deep copy with `config` and params` in case user mutates them. 
config = copy.deepcopy(self._config) params = copy.deepcopy(self._params) if 'labels' in model_fn_args: kwargs['labels'] = labels elif labels is not None: raise ValueError( 'model_fn does not take labels, but input_fn returns labels.') if 'mode' in model_fn_args: kwargs['mode'] = self._ctx.mode if 'config' in model_fn_args: kwargs['config'] = config if 'params' in model_fn_args: kwargs['params'] = params if 'params' not in model_fn_args: raise ValueError('model_fn ({}) does not include params argument, ' 'required by TPUEstimator to pass batch size as ' 'params[\'batch_size\']'.format(self._model_fn)) if is_export_mode: batch_size_for_model_fn = None else: batch_size_for_model_fn = self._ctx.batch_size_for_model_fn if batch_size_for_model_fn is not None: _add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn) running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode) _add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu) if not running_on_cpu: user_context = tpu_context.TPUContext( internal_ctx=self._ctx, call_from_input_fn=False) _add_item_to_params(params, _CTX_KEY, user_context) estimator_spec = self._model_fn(features=features, **kwargs) if (running_on_cpu and isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access # The estimator_spec will be passed to `Estimator` directly, which expects # type `EstimatorSpec`. return estimator_spec.as_estimator_spec() else: return estimator_spec def _verify_estimator_spec(self, estimator_spec): """Validates the estimator_spec.""" if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access return estimator_spec err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.' if estimator_spec.training_chief_hooks: raise ValueError( err_msg.format('training_chief_hooks') + 'If you want' + ' to pass training hooks, please pass via training_hooks.') if estimator_spec.scaffold: logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. ' 'Please use TPUEstimatorSpec.') return estimator_spec class _OutfeedHostCall(object): """Support for `eval_metrics` and `host_call` in TPUEstimatorSpec.""" def __init__(self, ctx): self._ctx = ctx self._names = [] # All of these are dictionaries of lists keyed on the name. self._host_fns = {} self._tensor_keys = collections.defaultdict(list) self._tensors = collections.defaultdict(list) self._tensor_dtypes = collections.defaultdict(list) self._tensor_shapes = collections.defaultdict(list) @staticmethod def validate(host_calls): """Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`.""" for name, host_call in host_calls.items(): if not isinstance(host_call, (tuple, list)): raise ValueError('{} should be tuple or list'.format(name)) if len(host_call) != 2: raise ValueError('{} should have two elements.'.format(name)) if not callable(host_call[0]): raise TypeError('{}[0] should be callable.'.format(name)) if not isinstance(host_call[1], (tuple, list, dict)): raise ValueError('{}[1] should be tuple or list, or dict.'.format(name)) if isinstance(host_call[1], (tuple, list)): fullargspec = tf_inspect.getfullargspec(host_call[0]) fn_args = function_utils.fn_args(host_call[0]) # wrapped_hostcall_with_global_step uses varargs, so we allow that. 
        if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
          raise RuntimeError(
              'In TPUEstimatorSpec.{}, length of tensors {} does not match '
              'method args of the function, which takes {}.'.format(
                  name, len(host_call[1]), len(fn_args)))

  @staticmethod
  def create_cpu_hostcall(host_calls):
    """Runs the host_call on CPU instead of TPU when use_tpu=False."""

    _OutfeedHostCall.validate(host_calls)
    ret = {}
    for name, host_call in host_calls.items():
      host_fn, tensors = host_call
      if isinstance(tensors, (tuple, list)):
        ret[name] = host_fn(*tensors)
      else:
        # Must be dict.
        try:
          ret[name] = host_fn(**tensors)
        except TypeError as e:
          logging.warning(
              'Exception while calling %s: %s. It is likely the tensors '
              '(%s[1]) do not match the '
              'function\'s arguments', name, e, name)
          raise e
    return ret

  def record(self, host_calls):
    """Records the host_call structure."""
    for name, host_call in host_calls.items():
      host_fn, tensor_list_or_dict = host_call
      self._names.append(name)
      self._host_fns[name] = host_fn

      if isinstance(tensor_list_or_dict, dict):
        for (key, tensor) in six.iteritems(tensor_list_or_dict):
          self._tensor_keys[name].append(key)
          self._tensors[name].append(tensor)
          self._tensor_dtypes[name].append(tensor.dtype)
          self._tensor_shapes[name].append(tensor.shape)
      else:
        # List or tuple.
        self._tensor_keys[name] = None
        for tensor in tensor_list_or_dict:
          self._tensors[name].append(tensor)
          self._tensor_dtypes[name].append(tensor.dtype)
          self._tensor_shapes[name].append(tensor.shape)

  def create_enqueue_op(self):
    """Create the op to enqueue the recorded host_calls.

    Returns:
      A list of enqueue ops, which is empty if there are no host calls.
    """
    if not self._names:
      return []

    tensors = []
    # TODO(jhseu): Consider deduping tensors.
    for name in self._names:
      tensors.extend(self._tensors[name])

    with ops.device(tpu.core(0)):
      return [tpu_ops.outfeed_enqueue_tuple(tensors)]

  def create_tpu_hostcall(self):
    """Sends the tensors through outfeed and runs the host_fn on CPU.

    The tensors are concatenated along dimension 0 to form a global tensor
    across all shards. The concatenated tensors are passed to the host_fn,
    which is executed on the first host.

    Returns:
      A dictionary mapping name to the return type of the host_call by that
      name.

    Raises:
      RuntimeError: If outfeed tensor is scalar.
    """
    if not self._names:
      return {}

    ret = {}
    # For each i, dequeue_ops[i] is a list containing the tensors from all
    # shards. This list is concatenated later.
    dequeue_ops = []
    tensor_dtypes = []
    tensor_shapes = []
    for name in self._names:
      for _ in self._tensors[name]:
        dequeue_ops.append([])
      for dtype in self._tensor_dtypes[name]:
        tensor_dtypes.append(dtype)
      for shape in self._tensor_shapes[name]:
        tensor_shapes.append(shape)

    # Outfeed ops execute on each replica's first logical core. Note: we must
    # constrain it such that we have at most one outfeed dequeue and enqueue
    # per replica.
    for i in xrange(self._ctx.num_replicas):
      host_device, ordinal_id = self._ctx.device_for_replica(i)
      with ops.device(host_device):
        outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
            dtypes=tensor_dtypes,
            shapes=tensor_shapes,
            device_ordinal=ordinal_id)
        for j, item in enumerate(outfeed_tensors):
          dequeue_ops[j].append(item)

    # Deconstruct dequeue ops.
    dequeue_ops_by_name = {}
    pos = 0
    for name in self._names:
      dequeue_ops_by_name[name] = dequeue_ops[pos:pos +
                                              len(self._tensors[name])]
      pos += len(self._tensors[name])

    # It is assumed evaluation always happens on single host TPU system. So,
    # place all ops on tpu host if possible.
# # TODO(jhseu): Evaluate whether this is right for summaries. with ops.device(self._ctx.tpu_host_placement_function(replica_id=0)): for name in self._names: dequeue_ops = dequeue_ops_by_name[name] for i, item in enumerate(dequeue_ops): if dequeue_ops[i][0].shape.ndims == 0: raise RuntimeError( 'All tensors outfed from TPU should preserve batch size ' 'dimension, but got scalar {}'.format(dequeue_ops[i][0])) # TODO(xiejw): Allow users to specify the axis for batch size # dimension. dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0) if self._tensor_keys[name] is not None: # The user-provided eval_metrics[1] is a dict. dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops)) try: ret[name] = self._host_fns[name](**dequeue_ops) except TypeError as e: logging.warning( 'Exception while calling %s: %s. It is likely the tensors ' '(%s[1]) do not match the ' 'function\'s arguments', name, e, name) raise e else: ret[name] = self._host_fns[name](*dequeue_ops) return ret class _OutfeedHostCallHook(session_run_hook.SessionRunHook): """Hook to run host calls when use_tpu=False.""" def __init__(self, tensors): self._tensors = tensors def begin(self): # We duplicate this code from the TPUInfeedOutfeedSessionHook rather than # create a separate hook to guarantee execution order, because summaries # need to be initialized before the outfeed thread starts. # TODO(jhseu): Make a wrapper hook instead? self._init_ops = contrib_summary.summary_writer_initializer_op() # Get all the writer resources from the initializer, so we know what to # flush. self._finalize_ops = [] for op in self._init_ops: self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0])) def after_create_session(self, session, coord): session.run(self._init_ops) def before_run(self, run_context): return basic_session_run_hooks.SessionRunArgs(self._tensors) def end(self, session): session.run(self._finalize_ops) class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook): """Calculate and report global_step/sec and examples/sec during runtime.""" def __init__(self, batch_size, every_n_steps=100, every_n_secs=None, output_dir=None, summary_writer=None): self._batch_size = batch_size super(ExamplesPerSecondHook, self).__init__( every_n_steps=every_n_steps, every_n_secs=every_n_secs, output_dir=output_dir, summary_writer=summary_writer) def _log_and_record(self, elapsed_steps, elapsed_time, global_step): global_step_per_sec = elapsed_steps / elapsed_time examples_per_sec = self._batch_size * global_step_per_sec if self._summary_writer is not None: global_step_summary = Summary(value=[ Summary.Value(tag='global_step/sec', simple_value=global_step_per_sec) ]) example_summary = Summary(value=[ Summary.Value(tag='examples/sec', simple_value=examples_per_sec) ]) self._summary_writer.add_summary(global_step_summary, global_step) self._summary_writer.add_summary(example_summary, global_step) logging.info('global_step/sec: %g', global_step_per_sec) logging.info('examples/sec: %g', examples_per_sec) class InstallSignalHandlerHook(session_run_hook.SessionRunHook): """Change SIGINT (CTRL^C) handler to force quit the process. The default behavior often results in hanging processes. The original handler is restored after training/evaluation. 
""" def __init__(self): self._signal_fn = signal.getsignal(signal.SIGINT) def before_run(self, run_context): signal.signal(signal.SIGINT, signal.SIG_DFL) def end(self, session): signal.signal(signal.SIGINT, self._signal_fn) class TPUEstimator(estimator_lib.Estimator): """Estimator with TPU support. TPUEstimator also supports training on CPU and GPU. You don't need to define a separate `tf.estimator.Estimator`. TPUEstimator handles many of the details of running on TPU devices, such as replicating inputs and models for each core, and returning to host periodically to run hooks. TPUEstimator transforms a global batch size in params to a per-shard batch size when calling the `input_fn` and `model_fn`. Users should specify global batch size in constructor, and then get the batch size for each shard in `input_fn` and `model_fn` by `params['batch_size']`. - For training, `model_fn` gets per-core batch size; `input_fn` may get per-core or per-host batch size depending on `per_host_input_for_training` in `TPUConfig` (See docstring for TPUConfig for details). - For evaluation and prediction, `model_fn` gets per-core batch size and `input_fn` get per-host batch size. Evaluation ========== `model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics` for TPU evaluation. However, if eval_on_tpu is False, `model_fn` must return `EstimatorSpec` and the evaluation will execute on CPU or GPU; in this case the following discussion on TPU evaluation does not apply. `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where `tensors` could be a list of any nested structure of `Tensor`s (See `TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns a dict from metric string name to the result of calling a metric function, namely a `(metric_tensor, update_op)` tuple. One can set `use_tpu` to `False` for testing. All training, evaluation, and predict will be executed on CPU. `input_fn` and `model_fn` will receive `train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`. Current limitations: -------------------- 1. TPU evaluation only works on a single host (one TPU worker) except BROADCAST mode. 2. `input_fn` for evaluation should **NOT** raise an end-of-input exception (`OutOfRangeError` or `StopIteration`). And all evaluation steps and all batches should have the same size. Example (MNIST): ---------------- ``` # The metric Fn which runs on CPU. def metric_fn(labels, logits): predictions = tf.argmax(logits, 1) return { 'accuracy': tf.metrics.precision( labels=labels, predictions=predictions), } # Your model Fn which runs on TPU (eval_metrics is list in this example) def model_fn(features, labels, mode, config, params): ... logits = ... if mode = tf.estimator.ModeKeys.EVAL: return tpu_estimator.TPUEstimatorSpec( mode=mode, loss=loss, eval_metrics=(metric_fn, [labels, logits])) # or specify the eval_metrics tensors as dict. def model_fn(features, labels, mode, config, params): ... final_layer_output = ... if mode = tf.estimator.ModeKeys.EVAL: return tpu_estimator.TPUEstimatorSpec( mode=mode, loss=loss, eval_metrics=(metric_fn, { 'labels': labels, 'logits': final_layer_output, })) ``` Prediction ========== Prediction on TPU is an experimental feature to support large batch inference. It is not designed for latency-critical system. In addition, due to some usability issues, for prediction with small dataset, CPU `.predict`, i.e., creating a new `TPUEstimator` instance with `use_tpu=False`, might be more convenient. 
  Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
  *should* raise an end-of-input exception (`OutOfRangeError` or
  `StopIteration`), which serves as the stopping signal to `TPUEstimator`. To
  be precise, the ops created by `input_fn` produce one batch of the data. The
  `predict()` API processes one batch at a time. When reaching the end of the
  data source, an end-of-input exception should be raised by one of these
  operations. The user usually does not need to do this manually. As long as
  the dataset is not repeated forever, the `tf.data` API will raise an
  end-of-input exception automatically after the last batch has been produced.

  Note: Estimator.predict returns a Python generator. Please consume all the
  data from the generator so that TPUEstimator can shut down the TPU system
  properly for the user.

  Current limitations:
  --------------------

  1. TPU prediction only works on a single host (one TPU worker).

  2. `input_fn` must return a `Dataset` instance rather than `features`. In
     fact, .train() and .evaluate() also support Dataset as the return value.

  Example (MNIST):
  ----------------
  ```
  height = 32
  width = 32
  total_examples = 100

  def predict_input_fn(params):
    batch_size = params['batch_size']

    images = tf.random_uniform(
        [total_examples, height, width, 3], minval=-1, maxval=1)

    dataset = tf.data.Dataset.from_tensor_slices(images)
    dataset = dataset.map(lambda images: {'image': images})

    dataset = dataset.batch(batch_size)
    return dataset

  def model_fn(features, labels, params, mode):
    # Generate predictions, called 'output', from features['image']

    if mode == tf.estimator.ModeKeys.PREDICT:
      return tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          predictions={
              'predictions': output,
              'is_padding': features['is_padding']
          })

  tpu_est = TPUEstimator(
      model_fn=model_fn,
      ...,
      predict_batch_size=16)

  # Fully consume the generator so that TPUEstimator can shut down the TPU
  # system.
  for item in tpu_est.predict(input_fn=predict_input_fn):
    # Filter out item if the `is_padding` is 1.
    # Process the 'predictions'
  ```

  Exporting
  =========

  `export_savedmodel` exports two metagraphs, one with
  `tag_constants.SERVING`, and another with `tag_constants.SERVING` and
  `tag_constants.TPU`. At serving time, these tags are used to select the
  metagraph to load.

  Before running the graph on TPU, the TPU system needs to be initialized. If
  the TensorFlow Serving model-server is used, this is done automatically. If
  not, please call `session.run(tpu.initialize_system())`.

  `tpu.outside_compilation` can be used to wrap TPU incompatible ops in
  `model_fn`.

  Example:
  ----------------

  ```
  def model_fn(features, labels, mode, config, params):
    ...
    logits = ...
    export_outputs = {
        'logits': export_output_lib.PredictOutput(
            {'logits': logits})
    }

    def host_call(logits):
      class_ids = math_ops.argmax(logits)
      classes = string_ops.as_string(class_ids)
      export_outputs['classes'] = (
          export_output_lib.ClassificationOutput(classes=classes))

    tpu.outside_compilation(host_call, logits)

    ...
  ```

  """

  def __init__(self,
               model_fn=None,
               model_dir=None,
               config=None,
               params=None,
               use_tpu=True,
               train_batch_size=None,
               eval_batch_size=None,
               predict_batch_size=None,
               batch_axis=None,
               eval_on_tpu=True,
               export_to_tpu=True,
               warm_start_from=None):
    """Constructs a `TPUEstimator` instance.

    Args:
      model_fn: Model function as required by `Estimator` which returns
        EstimatorSpec or TPUEstimatorSpec. `training_hooks`,
        `evaluation_hooks`, and `prediction_hooks` must not capture any TPU
        Tensor inside the model_fn.
      model_dir: Directory to save model parameters, graph, etc.
This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. If `None`, the model_dir in `config` will be used if set. If both are set, they must be same. If both are `None`, a temporary directory will be used. config: An `tpu_config.RunConfig` configuration object. Cannot be `None`. params: An optional `dict` of hyper parameters that will be passed into `input_fn` and `model_fn`. Keys are names of parameters, values are basic python types. There are reserved keys for `TPUEstimator`, including 'batch_size'. use_tpu: A bool indicating whether TPU support is enabled. Currently, - TPU training and evaluation respect this bit, but eval_on_tpu can override execution of eval. See below. - Predict still happens on CPU. train_batch_size: An int representing the global training batch size. TPUEstimator transforms this global batch size to a per-shard batch size, as params['batch_size'], when calling `input_fn` and `model_fn`. Cannot be `None` if `use_tpu` is `True`. Must be divisible by total number of replicas. eval_batch_size: An int representing evaluation batch size. Must be divisible by total number of replicas. predict_batch_size: An int representing the prediction batch size. Must be divisible by total number of replicas. batch_axis: A python tuple of int values describing how each tensor produced by the Estimator `input_fn` should be split across the TPU compute shards. For example, if your input_fn produced (images, labels) where the images tensor is in `HWCN` format, your shard dimensions would be [3, 0], where 3 corresponds to the `N` dimension of your images Tensor, and 0 corresponds to the dimension along which to split the labels to match up with the corresponding images. If None is supplied, and per_host_input_for_training is True, batches will be sharded based on the major dimension. If tpu_config.per_host_input_for_training is False or `PER_HOST_V2`, batch_axis is ignored. eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`. export_to_tpu: If True, `export_savedmodel()` exports a metagraph for serving on TPU besides the one on CPU. warm_start_from: Optional string filepath to a checkpoint or SavedModel to warm-start from, or a `tf.estimator.WarmStartSettings` object to fully configure warm-starting. If the string filepath is provided instead of a `WarmStartSettings`, then all variables are warm-started, and it is assumed that vocabularies and Tensor names are unchanged. Raises: ValueError: `params` has reserved keys already. """ if config is None or not isinstance(config, tpu_config.RunConfig): raise ValueError( '`config` must be provided with type `tpu_config.RunConfig`') if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS): raise ValueError('{} are reserved keys but existed in params {}.'.format( _RESERVED_PARAMS_KEYS, params)) if use_tpu: # Perform some very basic validations. More validations will be found in # _InternalTPUContext. if train_batch_size is None: raise ValueError('`train_batch_size` cannot be `None`') util_lib.check_positive_integer(train_batch_size, 'train_batch_size') if (config.tpu_config.per_host_input_for_training is tpu_config.InputPipelineConfig.PER_SHARD_V1 and config.tpu_config.num_cores_per_replica): raise ValueError( 'Model parallelism only supports per host input for training. 
' 'Please adjust TPURunconfig.per_host_input_for_training.') if eval_batch_size is not None: util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size') if predict_batch_size is not None: util_lib.check_positive_integer(predict_batch_size, 'predict_batch_size') # Verifies the model_fn signature according to Estimator framework. estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access # We cannot store config and params in this constructor as parent # constructor might change them, such as assigning a temp dir for # config.model_dir. model_function = self._augment_model_fn(model_fn, batch_axis) # Overwrite log_step_count_steps to disable TensorLoggingHook and # StepCounterHook from being created in Estimator. TPUEstimator already # added equivalent hooks in _augment_model_fn above. self._log_every_n_steps = config.log_step_count_steps config = config.replace(log_step_count_steps=None) # Passing non-None params as wrapped model_fn has it. params = params or {} super(TPUEstimator, self).__init__( model_fn=model_function, model_dir=model_dir, config=config, params=params, warm_start_from=warm_start_from) self._iterations_per_training_loop = ( self._config.tpu_config.iterations_per_loop) # All properties passed to _InternalTPUContext are immutable. # pylint: disable=protected-access self._ctx = tpu_context._get_tpu_context( self._config, train_batch_size, eval_batch_size, predict_batch_size, use_tpu, eval_on_tpu) self._export_to_tpu = export_to_tpu self._is_input_fn_invoked = None self._rendezvous = {} def _add_meta_graph_for_mode(self, builder, input_receiver_fn_map, checkpoint_path, save_variables=True, mode=model_fn_lib.ModeKeys.PREDICT, export_tags=None, check_variables=True): if self._export_to_tpu and mode != model_fn_lib.ModeKeys.PREDICT: raise NotImplementedError( 'TPUEstimator only handles mode PREDICT for exporting ' 'when `export_to_tpu` is `True`; ' 'got {}.'.format(mode)) (super(TPUEstimator, self)._add_meta_graph_for_mode( builder, input_receiver_fn_map, checkpoint_path, save_variables, mode=mode, export_tags=export_tags, check_variables=check_variables)) if self._export_to_tpu: input_receiver_fn_map = { _REWRITE_FOR_INFERENCE_MODE: input_receiver_fn_map[mode] } export_tags = [tag_constants.SERVING, tag_constants.TPU] mode = _REWRITE_FOR_INFERENCE_MODE # See b/110052256 for why `check_variables` is `False`. (super(TPUEstimator, self)._add_meta_graph_for_mode( builder, input_receiver_fn_map, checkpoint_path, save_variables=False, mode=mode, export_tags=export_tags, check_variables=False)) def _call_model_fn(self, features, labels, mode, config): if mode == _REWRITE_FOR_INFERENCE_MODE: return self._call_model_fn_for_inference(features, labels, mode, config) else: return super(TPUEstimator, self)._call_model_fn(features, labels, mode, config) def _call_model_fn_for_inference(self, features, labels, mode, config): """Wraps `_call_model_fn` for `export_savedmodel`.""" if mode != _REWRITE_FOR_INFERENCE_MODE: raise ValueError('mode must be {}; ' 'got {}.'.format(_REWRITE_FOR_INFERENCE_MODE, mode)) capture = _CapturedObject() def computation(): """Compute tpu tensors used in export_outputs. Passed to rewrite_for_inference so that model_fn will be called under the rewriting contexts. Only tpu tensors are returned, but export_outputs and scaffold are captured. Returns: A list of Tensors used in export_outputs and not marked for outside_compilation. 
""" # We should only call model fn once and it should be inside `computation` # so that building the graph will happen under `rewrite_for_inference`. mode = model_fn_lib.ModeKeys.PREDICT estimator_spec = self._call_model_fn(features, labels, mode, config) # We pick the TPU tensors out from `export_output` and later return them # from `computation` for rewriting. tensors_dict = collections.OrderedDict( (k, _export_output_to_tensors(v)) for k, v in six.iteritems(estimator_spec.export_outputs)) tensors = nest.flatten(tensors_dict) tpu_tensors = [t for t in tensors if _is_tpu_tensor(t)] # We cannot return anything other than `tpu_tensors` here so we capture # the rest for later use. capture.capture((estimator_spec, tensors_dict, tensors)) return tpu_tensors tpu_tensors_on_cpu = tpu.rewrite_for_inference(computation) estimator_spec, tensors_dict, tensors = capture.get() # Reconstruct `tensors`, but with `tpu_tensors` replaced with # `tpu_tensors_on_cpu`. new_tensors = [] for t in tensors: if _is_tpu_tensor(t): new_tensors.append(tpu_tensors_on_cpu.pop(0)) elif t is None: new_tensors.append(None) else: # Only fetching `tpu_tensors_on_cpu` does not trigger # TPU computation and blocks, so we add the control dependency here. control_inputs = ( tpu_tensors_on_cpu if _is_iterable(tpu_tensors_on_cpu) else (tpu_tensors_on_cpu,)) with ops.control_dependencies(control_inputs): new_tensors.append(array_ops.identity(t)) # Reconstruct `tensors_dict`. new_tensors_dict = nest.pack_sequence_as(tensors_dict, new_tensors) # Reconstruct `export_outputs`. export_outputs = estimator_spec.export_outputs new_export_outputs = collections.OrderedDict( (k, _clone_export_output_with_tensors(export_outputs[k], v)) for k, v in six.iteritems(new_tensors_dict)) return estimator_spec._replace(export_outputs=new_export_outputs) def _create_global_step(self, graph): """Creates a global step suitable for TPUs. Args: graph: The graph in which to create the global step. Returns: A global step `Tensor`. Raises: ValueError: if the global step tensor is already defined. """ return _create_global_step(graph) def _convert_train_steps_to_hooks(self, steps, max_steps): with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx: if ctx.is_running_on_cpu(): return super(TPUEstimator, self)._convert_train_steps_to_hooks( steps, max_steps) # On TPU. if steps is None and max_steps is None: raise ValueError( 'For TPU training, one of `steps` or `max_steps` must be set. ' 'Cannot be both `None`.') # Estimator.train has explicit positiveness check. if steps is not None: util_lib.check_positive_integer(steps, 'Train steps') if max_steps is not None: util_lib.check_positive_integer(max_steps, 'Train max_steps') return [ _TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps) ] def _convert_eval_steps_to_hooks(self, steps): with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx: if ctx.is_running_on_cpu(): return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps) if steps is None: raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.') util_lib.check_positive_integer(steps, 'Eval steps') return [ evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access num_evals=steps), _SetEvalIterationsHook(steps) ] def _call_input_fn(self, input_fn, mode): """Calls the input function. Args: input_fn: The input function. mode: ModeKeys Returns: In TPU mode, returns an input_fn to be called later in model_fn. Otherwise, calls the input_fn and returns either fatures or (features, labels). 
Raises: ValueError: if input_fn takes invalid arguments or does not have `params`. """ input_fn_args = function_utils.fn_args(input_fn) config = self.config # a deep copy. kwargs = {} if 'params' in input_fn_args: kwargs['params'] = self.params # a deep copy. else: raise ValueError('input_fn ({}) does not include params argument, ' 'required by TPUEstimator to pass batch size as ' 'params["batch_size"]'.format(input_fn)) if 'config' in input_fn_args: kwargs['config'] = config if 'mode' in input_fn_args: kwargs['mode'] = mode # Records the fact input_fn has been invoked. self._is_input_fn_invoked = True with self._ctx.with_mode(mode) as ctx: # Setting the batch size in params first. This helps user to have same # input_fn for use_tpu=True/False. batch_size_for_input_fn = ctx.batch_size_for_input_fn if batch_size_for_input_fn is not None: _add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY, batch_size_for_input_fn) # For export_savedmodel, input_fn is never passed to Estimator. So, # `is_export_mode` must be False. if ctx.is_running_on_cpu(is_export_mode=False): with ops.device('/device:CPU:0'): return input_fn(**kwargs) # For TPU computation, input_fn should be invoked in a tf.while_loop for # performance. While constructing the tf.while_loop, the structure of # inputs returned by the `input_fn` needs to be recorded. The structure # includes whether features or labels is dict or single Tensor, dict keys, # tensor shapes, and dtypes. The recorded structure is used to create the # infeed dequeue ops, which must be wrapped and passed as a Fn, called # inside the TPU computation, as the TPU computation is wrapped inside a # tf.while_loop also. So, we either pass input_fn to model_fn or pass # dequeue_fn to model_fn. Here, `input_fn` is passed directly as # `features` in `model_fn` signature. def _input_fn(ctx): _add_item_to_params(kwargs['params'], _CTX_KEY, ctx) return input_fn(**kwargs) return _input_fn def _validate_features_in_predict_input(self, result): """Skip the validation. For TPUEstimator, we do not need to check the result type. `_InputPipeline` has stronger check. Parent class's check generates confusing warning msg. Args: result: `features` returned by input_fn. 
""" pass def train(self, input_fn, hooks=None, steps=None, max_steps=None, saving_listeners=None): rendezvous = error_handling.ErrorRendezvous(num_sources=3) self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous try: return super(TPUEstimator, self).train( input_fn=input_fn, hooks=hooks, steps=steps, max_steps=max_steps, saving_listeners=saving_listeners) except Exception: # pylint: disable=broad-except rendezvous.record_error('training_loop', sys.exc_info()) finally: rendezvous.record_done('training_loop') rendezvous.raise_errors() def evaluate(self, input_fn, steps=None, hooks=None, checkpoint_path=None, name=None): rendezvous = error_handling.ErrorRendezvous(num_sources=3) self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous try: return super(TPUEstimator, self).evaluate( input_fn, steps=steps, hooks=hooks, checkpoint_path=checkpoint_path, name=name) except Exception: # pylint: disable=broad-except rendezvous.record_error('evaluation_loop', sys.exc_info()) finally: rendezvous.record_done('evaluation_loop') rendezvous.raise_errors() def predict(self, input_fn, predict_keys=None, hooks=None, checkpoint_path=None, yield_single_examples=True): rendezvous = error_handling.ErrorRendezvous(num_sources=3) self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous try: for result in super(TPUEstimator, self).predict( input_fn=input_fn, predict_keys=predict_keys, hooks=hooks, checkpoint_path=checkpoint_path, yield_single_examples=yield_single_examples): yield result except Exception: # pylint: disable=broad-except rendezvous.record_error('prediction_loop', sys.exc_info()) finally: rendezvous.record_done('prediction_loop') rendezvous.raise_errors() rendezvous.record_done('prediction_loop') rendezvous.raise_errors() def _augment_model_fn(self, model_fn, batch_axis): """Returns a new model_fn, which wraps the TPU support.""" def _model_fn(features, labels, mode, config, params): """A Estimator `model_fn` for TPUEstimator.""" with self._ctx.with_mode(mode) as ctx: model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx) # `input_fn` is called in `train()`, `evaluate()`, and `predict()`, # but not in `export_savedmodel()`. if self._is_input_fn_invoked: is_export_mode = False else: is_export_mode = True # Clear the bit. self._is_input_fn_invoked = None # examples_hook is added to training_hooks for both CPU and TPU # execution. if self._log_every_n_steps is not None: examples_hook = ExamplesPerSecondHook( ctx.global_batch_size, output_dir=self.model_dir, every_n_steps=self._log_every_n_steps) if ctx.is_running_on_cpu(is_export_mode=is_export_mode): logging.info('Running %s on CPU', mode) estimator_spec = model_fn_wrapper.call_without_tpu( features, labels, is_export_mode=is_export_mode) if self._log_every_n_steps is not None: estimator_spec = estimator_spec._replace( training_hooks=estimator_spec.training_hooks + (examples_hook,)) return estimator_spec assert labels is None, '`labels` passed to `model_fn` must be `None`.' # TPUEstimator._call_input_fn passes `input_fn` as features to here. assert callable(features), '`input_fn` is not callable.' 
input_fn = features input_holders = _InputPipeline(input_fn, batch_axis, ctx) enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = ( input_holders.generate_infeed_enqueue_ops_and_dequeue_fn()) graph = ops.get_default_graph() for enqueue_op in enqueue_ops: if isinstance(enqueue_op, list): graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op) else: graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op) if mode == model_fn_lib.ModeKeys.TRAIN: compile_op, loss, host_call, scaffold, training_hooks = ( _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn)) host_ops = host_call.create_tpu_hostcall() if host_ops is None: host_ops = [] shutdown_hooks = [] shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE', 'shutdown_worker') if shutdown_mode: if shutdown_mode == 'shutdown_worker': finalizer_hooks = [ session_support.ShutdownLameWorkers(timeout_ms=60 * 1000), ] elif shutdown_mode == 'shutdown_computation': finalizer_hooks = [ session_support.RestartComputation(timeout_ms=60 * 1000), ] else: raise ValueError( 'Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' % shutdown_mode) shutdown_hooks.append( session_support.GracefulShutdownHook( checkpoint_prefix=self.model_dir + '/model.ckpt', on_shutdown_hooks=finalizer_hooks)) with ops.control_dependencies([loss]): global_step = array_ops.identity(training.get_global_step()) hooks = input_hooks + shutdown_hooks hooks.extend([ TPUInfeedOutfeedSessionHook( ctx, enqueue_ops, host_ops, tpu_compile_op=compile_op, run_infeed_loop_on_coordinator=( run_infeed_loop_on_coordinator), rendezvous=self._rendezvous[mode], master=self._config.master, session_config=self._session_config, ), InstallSignalHandlerHook() ]) if self._log_every_n_steps is not None: logging_hook_frequency = ( # Divide and round up (self._log_every_n_steps + self._config.tpu_config.iterations_per_loop - 1) // self._config.tpu_config.iterations_per_loop) hooks.append( training.LoggingTensorHook({ 'loss': array_ops.identity(loss), 'step': global_step, }, every_n_iter=logging_hook_frequency)) examples_hook._set_steps_per_run( # pylint: disable=protected-access self._config.tpu_config.iterations_per_loop) hooks.append(examples_hook) if training_hooks: hooks.extend(training_hooks) chief_hooks = [] if (self._config.save_checkpoints_secs or self._config.save_checkpoints_steps): checkpoint_hook = training.CheckpointSaverHook( self.model_dir, save_secs=self._config.save_checkpoints_secs, save_steps=self._config.save_checkpoints_steps, scaffold=scaffold) checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access self._config.tpu_config.iterations_per_loop) chief_hooks.append(checkpoint_hook) summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss) with ops.control_dependencies([loss]): update_ops = _sync_variables_ops(ctx) # Validate the TPU training graph to catch basic errors _validate_tpu_training_graph() train_op = control_flow_ops.group(*update_ops) graph.add_to_collection(_TPU_TRAIN_OP, train_op) return model_fn_lib.EstimatorSpec( mode, loss=loss, training_chief_hooks=chief_hooks, training_hooks=hooks, train_op=train_op, scaffold=scaffold) if mode == model_fn_lib.ModeKeys.EVAL: compile_op, total_loss, host_calls, scaffold, eval_hooks = ( _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn)) iterations_per_loop_var = _create_or_get_iterations_per_loop() mean_loss = math_ops.div( total_loss, math_ops.cast(iterations_per_loop_var, dtype=total_loss.dtype)) with ops.control_dependencies([mean_loss]): # After TPU evaluation computation is done (the mean_loss tensor), 
# reads all variables back from TPU and updates the eval step # counter properly internal_ops_to_run = _sync_variables_ops(ctx) internal_ops_to_run.append( _increase_eval_step_op(iterations_per_loop_var)) host_call_ret = host_calls.create_tpu_hostcall() eval_metric_ops = {} eval_update_ops = [] eval_metrics = host_call_ret.get('eval_metrics', {}) if eval_metrics: # Creates a dummy metric update_op for all metrics. Estimator # expects all metrics in `eval_metric_ops` have update_op and calls # them one by one. The real metric update_ops are invoked in a # separated thread. So, here give Estimator the dummy op for all # metrics. with ops.control_dependencies(internal_ops_to_run): dummy_update_op = control_flow_ops.no_op() for k, v in eval_metrics.items(): eval_metric_ops[k] = (v[0], dummy_update_op) eval_update_ops.append(v[1]) else: # If no eval metrics are passed, create an identity node for the # loss and add `internal_ops_to_run` to its dependencies. So # `internal_ops_to_run` can be executed. with ops.control_dependencies(internal_ops_to_run): mean_loss = array_ops.identity(mean_loss) if 'host_call' not in host_call_ret: host_ops = [] else: host_ops = host_call_ret['host_call'] hooks = [ TPUInfeedOutfeedSessionHook( ctx, enqueue_ops, eval_update_ops + host_ops, tpu_compile_op=compile_op, run_infeed_loop_on_coordinator=( run_infeed_loop_on_coordinator), rendezvous=self._rendezvous[mode], master=self._config.evaluation_master, session_config=self._session_config, )] + input_hooks if eval_hooks: hooks.extend(eval_hooks) return model_fn_lib.EstimatorSpec( mode, loss=mean_loss, evaluation_hooks=hooks, eval_metric_ops=eval_metric_ops, scaffold=scaffold) # Predict assert mode == model_fn_lib.ModeKeys.PREDICT (compile_op, dummy_predict_op, host_calls, scaffold, prediction_hooks) = _predict_on_tpu_system( ctx, model_fn_wrapper, dequeue_fn) with ops.control_dependencies([dummy_predict_op]): internal_ops_to_run = _sync_variables_ops(ctx) with ops.control_dependencies(internal_ops_to_run): dummy_predict_op = control_flow_ops.no_op() # In train and evaluation, the main TPU program is passed to monitored # training session to run. Infeed enqueue and outfeed dequeue are # executed in side threads. This is not the configuration for # prediction mode. # # For prediction, the Estimator executes the EstimatorSpec.predictions # directly and yield the element (via generator) to call site. So, the # outfeed based prediction must be passed to MonitoredSession directly. # Other parts of the TPU execution are organized as follows. # # 1. All outfeed based Tensors must be grouped with predictions Tensors # to form a single invocation. This avoid the issue we might trigger # multiple outfeeds incorrectly. To achieve this, `host_call` is # placed in control_dependencies of `stopping_signals`, and # `stopping_signals` is passed into _StoppingPredictHook, which sets # the `stopping_signals` as SessionRunArgs. MonitoredSession merges # all SessionRunArgs with the fetch in session.run together. # # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue) # are grouped together. They will be launched once and only once in # side threads and they quit naturally according to the SAME stopping # condition. 
        enqueue_ops.append(dummy_predict_op)

        host_call_ret = host_calls.create_tpu_hostcall()
        if 'host_call' not in host_call_ret:
          host_ops = []
        else:
          host_ops = host_call_ret['host_call']

        predictions = host_call_ret['predictions']
        _verify_cross_hosts_transfer_size(
            predictions,
            message=(
                'The estimated size for TPUEstimatorSpec.predictions is too '
                'large.'))
        signals = host_call_ret['signals']

        with ops.control_dependencies(host_ops):
          host_ops = []  # Empty, we do not need it anymore.
          scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
              signals)
          predictions = _PaddingSignals.slice_tensor_or_dict(
              predictions, signals)

        hooks = [
            _StoppingPredictHook(scalar_stopping_signal),
            TPUInfeedOutfeedSessionHookForPrediction(
                ctx,
                enqueue_ops,
                host_ops,
                rendezvous=self._rendezvous[mode],
                tpu_compile_op=compile_op,
                master=self._config.master,
                session_config=self._session_config),
        ] + input_hooks

        if prediction_hooks:
          hooks.extend(prediction_hooks)

        return model_fn_lib.EstimatorSpec(
            mode,
            prediction_hooks=hooks,
            predictions=predictions,
            scaffold=scaffold)

    return _model_fn


def _is_tpu_tensor(tensor):
  if not isinstance(tensor, ops.Tensor):
    return False
  try:
    tensor.op.get_attr(tpu._OUTSIDE_COMPILATION_ATTR)  # pylint: disable=protected-access
  except ValueError:
    return True
  else:
    return False


def _export_output_to_tensors(export_output):
  """Gets a list of `Tensors` used in `export_output`.

  Args:
    export_output: an `ExportOutput` object such as `ClassificationOutput`,
      `RegressionOutput`, or `PredictOutput`.

  Returns:
    a list of tensors used in export_output.

  Raises:
    ValueError: if `export_output` is not one of `ClassificationOutput`,
      `RegressionOutput`, or `PredictOutput`.
  """
  if isinstance(export_output, export_output_lib.ClassificationOutput):
    return [export_output.scores, export_output.classes]
  elif isinstance(export_output, export_output_lib.RegressionOutput):
    return [export_output.value]
  elif isinstance(export_output, export_output_lib.PredictOutput):
    return list(export_output.outputs.values())
  else:
    raise ValueError(
        '`export_output` must have type `ClassificationOutput`, '
        '`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))


def _clone_export_output_with_tensors(export_output, tensors):
  """Clones `export_output` but with new `tensors`.

  Args:
    export_output: an `ExportOutput` object such as `ClassificationOutput`,
      `RegressionOutput`, or `PredictOutput`.
    tensors: a list of `Tensors` used to construct a new `export_output`.

  Returns:
    A dict similar to `export_output` but with `tensors`.

  Raises:
    ValueError: if `export_output` is not one of `ClassificationOutput`,
      `RegressionOutput`, or `PredictOutput`.
""" if isinstance(export_output, export_output_lib.ClassificationOutput): if len(tensors) != 2: raise ValueError('tensors must be of length 2; ' 'got {}.'.format(len(tensors))) return export_output_lib.ClassificationOutput(*tensors) elif isinstance(export_output, export_output_lib.RegressionOutput): if len(tensors) != 1: raise ValueError('tensors must be of length 1; ' 'got {}'.format(len(tensors))) return export_output_lib.RegressionOutput(*tensors) elif isinstance(export_output, export_output_lib.PredictOutput): return export_output_lib.PredictOutput( dict(zip(export_output.outputs.keys(), tensors))) else: raise ValueError( '`export_output` must be have type `ClassificationOutput`, ' '`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output)) def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn): """Executes `model_fn_wrapper` multiple times on all TPU shards.""" iterations_per_loop_var = _create_or_get_iterations_per_loop() (single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks ) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn) def multi_tpu_eval_steps_on_single_shard(): return training_loop.repeat(iterations_per_loop_var, single_tpu_eval_step, [_ZERO_LOSS]) (compile_op, loss,) = tpu.split_compile_and_shard( multi_tpu_eval_steps_on_single_shard, inputs=[], num_shards=ctx.num_replicas, outputs_from_all_shards=False, device_assignment=ctx.device_assignment) loss = loss[0] scaffold = _get_scaffold(captured_scaffold_fn) return compile_op, loss, host_calls, scaffold, captured_eval_hooks.get() def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn): """Executes `model_fn_wrapper` multiple times on all TPU shards.""" iterations_per_loop_var = _create_or_get_iterations_per_loop() (single_tpu_train_step, host_call, captured_scaffold_fn, captured_training_hooks) = ( model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn)) def multi_tpu_train_steps_on_single_shard(): return training_loop.repeat(iterations_per_loop_var, single_tpu_train_step, [_INITIAL_LOSS]) (compile_op, loss,) = tpu.split_compile_and_shard( multi_tpu_train_steps_on_single_shard, inputs=[], num_shards=ctx.num_replicas, outputs_from_all_shards=False, device_assignment=ctx.device_assignment) loss = loss[0] scaffold = _get_scaffold(captured_scaffold_fn) return compile_op, loss, host_call, scaffold, captured_training_hooks.get() def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn): """Executes `model_fn_wrapper` multiple times on all TPU shards.""" (single_tpu_predict_step, host_calls, captured_scaffold_fn, captured_predict_hooks ) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn) def multi_tpu_predict_steps_on_single_shard(): def cond(scalar_stopping_signal): return math_ops.logical_not( _StopSignals.should_stop(scalar_stopping_signal)) inputs = [_StopSignals.NON_STOPPING_SIGNAL] outputs = training_loop.while_loop( cond, single_tpu_predict_step, inputs=inputs, name=b'loop') return outputs (compile_op, dummy_predict_op,) = tpu.split_compile_and_shard( multi_tpu_predict_steps_on_single_shard, inputs=[], num_shards=ctx.num_replicas, outputs_from_all_shards=False, device_assignment=ctx.device_assignment) dummy_predict_op = dummy_predict_op[0] scaffold = _get_scaffold(captured_scaffold_fn) return (compile_op, dummy_predict_op, host_calls, scaffold, captured_predict_hooks.get()) def _wrap_computation_in_while_loop(device, op_fn): """Wraps the ops generated by `op_fn` in tf.while_loop.""" def computation(i): with ops.control_dependencies(op_fn()): 
      return i + 1

  iterations_per_loop_var = _create_or_get_iterations_per_loop()
  # By setting parallel_iterations=1, the parallel execution in while_loop is
  # basically turned off.
  with ops.device(device):
    iterations = array_ops.identity(iterations_per_loop_var)
    return control_flow_ops.while_loop(
        lambda i: i < iterations,
        computation, [constant_op.constant(0)],
        parallel_iterations=1)


def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
  """Wraps the ops generated by `op_fn` in tf.while_loop."""

  def cond(scalar_stopping_signal):
    return math_ops.logical_not(
        _StopSignals.should_stop(scalar_stopping_signal))

  def computation(unused_scalar_stopping_signal):
    return_value = op_fn()
    execute_ops = return_value['ops']
    signals = return_value['signals']
    with ops.control_dependencies(execute_ops):
      return _StopSignals.as_scalar_stopping_signal(signals)

  # By setting parallel_iterations=1, the parallel execution in while_loop is
  # basically turned off.
  with ops.device(device):
    return control_flow_ops.while_loop(
        cond,
        computation, [_StopSignals.NON_STOPPING_SIGNAL],
        parallel_iterations=1)


def _validate_tpu_training_graph():
  """Validates the graph before running distributed training.

  Raises:
    ValueError: If the graph seems invalid for running on device.
  """
  operations = ops.get_default_graph().get_operations()

  # Check if there is at least one CrossReplicaSum operation in the graph.
  # This should be introduced by using the CrossShardOptimizer wrapper.
  cross_replica_sum_ops = [
      o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
  ]
  if not cross_replica_sum_ops:
    raise ValueError(
        'CrossShardOptimizer must be used for model training on TPUs.')


class _CapturedObject(object):
  """A placeholder to capture an object.

  This is useful when we need to capture a Python object in the TensorFlow
  control flow body function and use it outside the control flow.
  """

  def __init__(self):
    self._object = None
    self._captured = False

  def capture(self, o):
    if self._captured:
      raise RuntimeError(
          'InternalError: Object can only be captured once. Please file a bug.')

    self._captured = True
    self._object = o

  def get(self):
    if not self._captured:
      raise RuntimeError(
          'InternalError: Object is not captured properly before `get`. '
          'Please file a bug.')

    return self._object


def _get_scaffold(captured_scaffold_fn):
  """Retrieves the Scaffold from `captured_scaffold_fn`."""
  with _CapturingContext(message='Inside scaffold_fn'):
    scaffold_fn = captured_scaffold_fn.get()
    if scaffold_fn:
      scaffold = scaffold_fn()
      if scaffold is None:
        raise ValueError(
            'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
    else:
      scaffold = None

  if scaffold:
    wrapped_finalize = scaffold.finalize

    def _finalize():
      with _CapturingContext('Inside Scaffold.finalize'):
        wrapped_finalize()

    scaffold.finalize = _finalize
  return scaffold


class _CapturingContext(control_flow_ops.ControlFlowContext):
  """Tracks references to Tensors defined in TPU replication."""

  def __init__(self, message):
    control_flow_ops.ControlFlowContext.__init__(self)
    self._message = message

  def to_control_flow_context_def(self, context_def, export_scope=None):
    # pylint: disable=useless-super-delegation
    # NOTE(slebedev): the method is required by `ControlFlowContext`.
super(_CapturingContext, self).to_control_flow_context_def( context_def, export_scope) def AddOp(self, op): # pylint: disable=invalid-name for c in op.inputs: if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access raise ValueError('{}: Op {} depends on TPU computation {}, ' 'which is not allowed.'.format(self._message, op, c)) def __enter__(self): # pylint: disable=protected-access self._g = ops.get_default_graph() self._old = self._g._get_control_flow_context() self._g._set_control_flow_context(self) # pylint: enable=protected-access def __exit__(self, _, __, ___): # pylint: disable=invalid-name self._g._set_control_flow_context(self._old) # pylint: disable=protected-access class _Inputs(object): """A data structure representing the input_fn returned values. This also supports the returned value from input_fn as `Dataset`. """ def __init__(self, features=None, labels=None, dataset=None, signals=None): if dataset is not None and (features is not None or labels is not None or signals is not None): raise RuntimeError('Internal Error: Either (features and labels) or ' 'dataset should be provided, not both. Please file ' 'bug') self._features = features self._labels = labels self._signals = signals self._dataset = dataset self._iterator = None @staticmethod def from_input_fn(return_values): """Returns an `_Inputs` instance according to `input_fn` return value.""" if isinstance(return_values, dataset_ops.DatasetV2): dataset = return_values return _Inputs(dataset=dataset) features, labels = _Inputs._parse_inputs(return_values) return _Inputs(features, labels) @staticmethod def _parse_inputs(return_values): if isinstance(return_values, tuple): features, labels = return_values else: features, labels = return_values, None return features, labels @property def is_dataset(self): """Returns True if the return value from input_fn is Dataset.""" return self._dataset is not None def dataset_initializer(self): """Returns the dataset's initializer. The initializer must be run before calling `features_and_labels`. """ self._iterator = dataset_ops.make_initializable_iterator(self._dataset) return self._iterator.initializer def features_and_labels(self): """Gets `features` and `labels`.""" if self.is_dataset: if self._iterator is None: raise RuntimeError('Internal error: Must run dataset_initializer ' 'before calling features_and_labels(). Please file ' 'a bug!') return _Inputs._parse_inputs(self._iterator.get_next()) return (self._features, self._labels) def signals(self): return self._signals @property def dataset(self): return self._dataset class _InputsWithStoppingSignals(_Inputs): """Inputs with `_StopSignals` inserted into the dataset.""" def __init__(self, dataset, batch_size, add_padding=False, num_invocations_per_step=1): assert dataset is not None user_provided_dataset = dataset.map( _InputsWithStoppingSignals.insert_stopping_signal( stop=False, batch_size=batch_size, add_padding=add_padding)) if num_invocations_per_step == 1: final_batch_dataset = dataset.take(1).map( _InputsWithStoppingSignals.insert_stopping_signal( stop=True, batch_size=batch_size, add_padding=add_padding)) else: # We append (2 * num_invocations_per_step - 1) batches for exhausting the # user_provided_dataset and stop properly. # For example, if num_invocations_per_step is 2, we append 3 additional # padding batches: b1, b2, b3. # If user_provided_dataset contains two batches: a1, a2 # Step 1: [a1, a2] # Step 2: [b1, b2] -> STOP # If user_provided_dataset contains three batches: a1, a2, a3. 
# The training loops: # Step 1: [a1, a2] # Step 2: [a3, b1] # Step 3: [b2, b3] -> STOP. final_batch_dataset = dataset.take(1).map( _InputsWithStoppingSignals.insert_stopping_signal( stop=True, batch_size=batch_size, add_padding=add_padding)) final_batch_dataset = final_batch_dataset.repeat( 2 * num_invocations_per_step - 1) def _set_mask(data_dict): signals = data_dict['signals'] signals['padding_mask'] = array_ops.ones_like(signals['padding_mask']) data_dict['signals'] = signals return data_dict # Mask out the extra batch. final_batch_dataset = final_batch_dataset.map(_set_mask) dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2) super(_InputsWithStoppingSignals, self).__init__(dataset=dataset) self._current_inputs = None def features_and_labels(self): if self._current_inputs is not None: raise RuntimeError( 'Internal Error: The previous inputs have not been properly ' 'consumed. First call features_and_labels, then call signals.') inputs_with_signals = self._iterator.get_next() features = inputs_with_signals['features'] labels = inputs_with_signals.get('labels') self._current_inputs = inputs_with_signals return features, labels def signals(self): """Returns the `Signals` from `_Inputs`.""" if self._current_inputs is None: raise RuntimeError( 'Internal Error: The current inputs have not been properly ' 'generated. First call features_and_labels, then call signals.') signals = self._current_inputs['signals'] self._current_inputs = None return signals @staticmethod def insert_stopping_signal(stop, batch_size, add_padding=False): """Inserts stopping_signal into dataset via _map_fn. Here we change the data structure in the dataset, such that the return value is a dictionary now and `features`, `labels`, and `signals` are three distinguished keys in that dict. This provides a better structure, which eases the process to decompose the inputs (see `features_and_labels`). Args: stop: bool, state of current stopping signals. batch_size: int, batch size. add_padding: bool, whether to pad the tensor to full batch size. Returns: A map_fn passed to dataset.map API. """ def _map_fn(*args): """The map fn to insert signals.""" if len(args) == 1: # Unpack the single Tensor/dict argument as features. This is required # for the input_fn returns no labels. 
args = args[0] features, labels = _Inputs._parse_inputs(args) new_input_dict = {} if add_padding: padding_mask, features, labels = ( _PaddingSignals.pad_features_and_labels(features, labels, batch_size)) new_input_dict['features'] = features if labels is not None: new_input_dict['labels'] = labels else: new_input_dict['features'] = features if labels is not None: new_input_dict['labels'] = labels padding_mask = None new_input_dict['signals'] = _StopSignals( stop=stop, batch_size=batch_size, padding_mask=padding_mask).as_dict() return new_input_dict return _map_fn class _StopSignals(object): """Signals class holding all logic to handle TPU stopping condition.""" NON_STOPPING_SIGNAL = False STOPPING_SIGNAL = True def __init__(self, stop, batch_size, padding_mask=None): self._stop = stop self._batch_size = batch_size self._padding_mask = padding_mask def as_dict(self): """Returns the signals as Python dict.""" shape = [self._batch_size, 1] dtype = dtypes.bool if self._stop: stopping = array_ops.ones(shape=shape, dtype=dtype) else: stopping = array_ops.zeros(shape=shape, dtype=dtype) signals = {'stopping': stopping} if self._padding_mask is not None: signals['padding_mask'] = self._padding_mask return signals @staticmethod def as_scalar_stopping_signal(signals): return array_ops.identity(signals['stopping'][0][0]) @staticmethod def should_stop(scalar_stopping_signal): """Detects whether scalar_stopping_signal indicates stopping.""" if isinstance(scalar_stopping_signal, ops.Tensor): # STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF # way to express the bool check whether scalar_stopping_signal is True. return math_ops.logical_and(scalar_stopping_signal, _StopSignals.STOPPING_SIGNAL) else: # For non Tensor case, it is used in SessionRunHook. So, we cannot modify # the graph anymore. Here, we use pure Python. 
return bool(scalar_stopping_signal) class _PaddingSignals(object): """Signals class holding all logic to handle padding.""" @staticmethod def pad_features_and_labels(features, labels, batch_size): """Pads out the batch dimension of features and labels.""" real_batch_size = array_ops.shape( _PaddingSignals._find_any_tensor(features))[0] batch_size_tensor = constant_op.constant(batch_size, dtypes.int32) check_greater = check_ops.assert_greater_equal( batch_size_tensor, real_batch_size, data=(batch_size_tensor, real_batch_size), message='The real batch size should not be greater than batch_size.') with ops.control_dependencies([check_greater]): missing_count = batch_size_tensor - real_batch_size def pad_single_tensor(tensor): """Pads out the batch dimension of a tensor to the complete batch_size.""" rank = len(tensor.shape) assert rank > 0 padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1)) padded_shape = (batch_size,) + tuple(tensor.shape[1:]) padded_tensor = array_ops.pad(tensor, padding) padded_tensor.set_shape(padded_shape) return padded_tensor def nest_pad(tensor_or_dict): return nest.map_structure(pad_single_tensor, tensor_or_dict) features = nest_pad(features) if labels is not None: labels = nest_pad(labels) padding_mask = _PaddingSignals._padding_mask(real_batch_size, missing_count, batch_size) return padding_mask, features, labels @staticmethod def slice_tensor_or_dict(tensor_or_dict, signals): """Slice the real Tensors according to padding mask in signals.""" padding_mask = signals['padding_mask'] batch_size = array_ops.shape(padding_mask)[0] def verify_batch_size(tensor): check_batch_size = math_ops.equal(batch_size, tensor.shape[0]) with ops.control_dependencies([check_batch_size]): return array_ops.identity(tensor) def slice_single_tensor(tensor): rank = len(tensor.shape) assert rank > 0 real_batch_size = batch_size - math_ops.reduce_sum(padding_mask) return verify_batch_size(tensor)[0:real_batch_size] # As we split the Tensors to all TPU cores and concat them back, it is # important to ensure the real data is placed before padded ones, i.e., # order is preserved. By that, the sliced padding mask should have all 0's. # If this assertion failed, # the slice logic here would not hold. sliced_padding_mask = slice_single_tensor(padding_mask) assert_padding_mask = math_ops.equal( math_ops.reduce_sum(sliced_padding_mask), 0) with ops.control_dependencies([assert_padding_mask]): should_stop = _StopSignals.should_stop( _StopSignals.as_scalar_stopping_signal(signals)) is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0) def slice_fn(tensor): # If the current batch is full batch or part of stopping signals, we do # not need to slice to save performance. 
return control_flow_ops.cond( math_ops.logical_or(should_stop, is_full_batch), (lambda: verify_batch_size(tensor)), (lambda: slice_single_tensor(tensor))) return nest.map_structure(slice_fn, tensor_or_dict) @staticmethod def _find_any_tensor(batch_features): tensors = [ x for x in nest.flatten(batch_features) if isinstance(x, ops.Tensor) ] if not tensors: raise ValueError('Cannot find any Tensor in features dict.') return tensors[0] @staticmethod def _padding_mask(real_batch_size, missing_count, batch_size): padding_mask = array_ops.concat([ array_ops.zeros((real_batch_size,), dtype=dtypes.int32), array_ops.ones((missing_count,), dtype=dtypes.int32) ], axis=0) padding_mask.set_shape((batch_size,)) return padding_mask def _verify_cross_hosts_transfer_size(tensor_dict, message): total_size = 0 tensor_structure = {} for key, tensor in tensor_dict.items(): shape = tensor.shape size = np.product(shape) * tensor.dtype.size tensor_structure[key] = shape total_size += size if total_size >= _ONE_GIGABYTE: raise ValueError( '{} The transfer size is larger than the protobuf limit. Please ' 'consider to use Tensors with smaller shapes or reduce batch ' 'size. Given:\n' '{}'.format( message, '\n'.join([ ' -- Key: {}, Shape: {}'.format(k, v) for k, v in tensor_structure.items() ]))) def _add_item_to_params(params, key, value): """Adds a new item into `params`.""" if isinstance(params, hparam.HParams): # For HParams, we need to use special API. if key in params: params.set_hparam(key, value) else: params.add_hparam(key, value) else: # Now params is Python dict. params[key] = value def export_estimator_savedmodel(estimator, export_dir_base, serving_input_receiver_fn, assets_extra=None, as_text=False, checkpoint_path=None, strip_default_attrs=False): """Export `Estimator` trained model for TPU inference. Args: estimator: `Estimator` with which model has been trained. export_dir_base: A string containing a directory in which to create timestamped subdirectories containing exported SavedModels. serving_input_receiver_fn: A function that takes no argument and returns a `ServingInputReceiver` or `TensorServingInputReceiver`. assets_extra: A dict specifying how to populate the assets.extra directory within the exported SavedModel, or `None` if no extra assets are needed. as_text: whether to write the SavedModel proto in text format. checkpoint_path: The checkpoint path to export. If `None` (the default), the most recent checkpoint found within the model directory is chosen. strip_default_attrs: Boolean. If `True`, default-valued attributes will be removed from the NodeDefs. Returns: The string path to the exported directory. """ # `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use # `estimator.config`. config = tpu_config.RunConfig(model_dir=estimator.model_dir) est = TPUEstimator( estimator._model_fn, # pylint: disable=protected-access config=config, params=estimator.params, use_tpu=True, train_batch_size=2048, # Does not matter. eval_batch_size=2048, # Does not matter. ) return est.export_savedmodel(export_dir_base, serving_input_receiver_fn, assets_extra, as_text, checkpoint_path, strip_default_attrs)
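# Illustrative sketch (an addition, not library code): the padding contract
# implemented by _PaddingSignals above, restated in plain NumPy. A short final
# batch is padded up to `batch_size` rows and a 0/1 mask records which rows
# are real (0) and which are padding (1) so they can be sliced off later. The
# helper name is hypothetical.
def _example_pad_batch(features, batch_size):
    """Pad a batch-major np.ndarray to `batch_size` rows, mirroring the above."""
    import numpy as np
    real = features.shape[0]
    missing = batch_size - real
    assert missing >= 0, 'the real batch size must not exceed batch_size'
    pad_width = [(0, missing)] + [(0, 0)] * (features.ndim - 1)
    padded = np.pad(features, pad_width, mode='constant')
    # Matches _padding_mask: zeros for real rows, ones for padded rows.
    mask = np.concatenate(
        [np.zeros((real,), np.int32), np.ones((missing,), np.int32)])
    return padded, mask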
main.py
from flask import Flask, Blueprint, redirect, url_for, render_template, request, json, jsonify from werkzeug.utils import secure_filename from threading import Thread import os from configuration import CommonConfigurations, Configuration from piece import CommonPieces, Piece configurations_bp = Blueprint( 'configurations', __name__, static_folder='static', template_folder=os.path.join('web', 'templates', 'public', 'resources', 'configurations')) pieces_bp = Blueprint( 'pieces', __name__, static_folder='static', template_folder=os.path.join('web', 'templates', 'public', 'resources', 'pieces')) user_made_pieces_bp = Blueprint( 'user_made_pieces', __name__, static_folder='static', template_folder=os.path.join('web', 'templates', 'public', 'resources', 'pieces', 'user_made')) user_made_configurations_bp = Blueprint( 'user_made_configurations', __name__, static_folder='static', template_folder=os.path.join('web', 'templates', 'public', 'resources', 'configurations', 'user_made')) app = Flask(__name__, static_url_path='', static_folder=os.path.join('web', 'static'), template_folder=os.path.join('web', 'templates', 'public')) @app.route('/') def index(): return render_template('index.html') def get_file_names_only(folder): path = os.path.join(app.template_folder, 'resources', folder) return [os.path.splitext(f.name)[0] for f in os.scandir(path) if os.path.isfile(f.path)] @app.route('/configurations') def configurations(): return render_template('configurations.html', configurations=get_file_names_only('configurations'), user_made_configurations=list(set(get_file_names_only('configurations\\user_made')))) @configurations_bp.route('/configurations/<path:path>') def render_configuration_file(path): return render_template(os.path.join('{}.html'.format(path))) @app.route('/pieces') def pieces(): return render_template('pieces.html', pieces=get_file_names_only('pieces'), user_made_pieces=list(set(get_file_names_only('pieces\\user_made')))) # ignore json files @pieces_bp.route('/pieces/<path:path>') def render_piece_file(path): piece_template_name = os.path.join('{}.html'.format(path)) piece_path = os.path.join(app.root_path, pieces_bp.template_folder, piece_template_name) if os.path.exists(piece_path): return render_template(piece_template_name) return redirect(url_for('user_made_pieces.render_user_piece_file', path=path)) @user_made_pieces_bp.route('/pieces/user_made/<path:path>') def render_user_piece_file(path): piece_template_name = os.path.join('{}.html'.format(path)) return render_template(piece_template_name) def try_delete(files): for file in files: try: os.remove(file) except FileNotFoundError: pass except Exception as e: print(e) @app.route('/remove/pieces/<path:path>') def remove_user_made_piece_files(path): files_to_delete = list() files_to_delete.append( '{}\\{}\\resources\\pieces\\user_made\\{}.html'.format(app.root_path, app.template_folder, path)) files_to_delete.append( '{}\\{}\\resources\\pieces\\user_made\\{}.json'.format(app.root_path, app.template_folder, path)) try_delete(files_to_delete) return redirect(url_for('pieces')) @app.route('/add/pieces') def add_pieces(): return render_template('add_piece.html') @app.route('/add/piece', methods=['POST']) def add_piece(): print(request.form) size = [int(request.form['height']), int(request.form['width'])] filled_cells = [int(x) for x in request.form['selected_cells'].split(',')] fill_all = False if len(filled_cells) == size[0] * size[1]: fill_all = True filled_cells = None else: filled_cells = [(x // size[1], x % size[1]) for x in 
filled_cells]
    size = [size[0], size[1]]
    user_made = True
    name = request.form['name']
    print(filled_cells)
    piece = Piece(
        size=size,
        filled_cells=filled_cells,
        fill_all=fill_all,
        name=name,
        user_made=user_made)
    piece.serialize()
    # drawing the SVG using plotly takes a while, so run it in the background;
    # pass the method itself as the target rather than calling it inline
    Thread(target=piece.output_piece_plotly).start()
    return redirect(url_for('pieces'))


@app.route('/add/configurations')
def add_configurations():
    default_pieces = get_file_names_only('pieces')
    user_made_pieces = list(set(get_file_names_only('pieces\\user_made')))
    all_pieces_available = list()
    all_pieces_available.extend(default_pieces)
    all_pieces_available.extend(user_made_pieces)
    return render_template('add_configuration.html', pieces=all_pieces_available)


@app.route('/add/configuration', methods=['POST'])
def add_configuration():
    print(request.form)
    user_made = True
    name = request.form['name']
    pieces_names = request.form['selected_pieces'].split(',')
    height = request.form['height']  # not relevant
    configuration = Configuration(name=name, pieces=None, user_made=user_made, pieces_names=pieces_names)
    configuration.serialize()
    # same fix as above: hand the Thread the callable, do not invoke it here
    Thread(target=configuration.output_piece_plotly).start()
    return redirect(url_for('configurations'))


@app.route('/remove/configurations/<path:path>')
def remove_user_made_configurations_files(path):
    files_to_delete = list()
    files_to_delete.append(
        '{}\\{}\\resources\\configurations\\user_made\\{}.html'.format(app.root_path, app.template_folder, path))
    files_to_delete.append(
        '{}\\{}\\resources\\configurations\\user_made\\{}.json'.format(app.root_path, app.template_folder, path))
    try_delete(files_to_delete)
    return redirect(url_for('configurations'))


@app.route('/custom_configuration')
def custom_configuration():
    return render_template('custom_configuration.html')


@user_made_configurations_bp.route('/configurations/user_made/<path:path>')
def render_user_configuration_file(path):
    configuration_template_name = os.path.join('{}.html'.format(path))
    return render_template(configuration_template_name)


def allowed_file(filename):
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ['json']


@app.route('/custom_configuration/upload_result', methods=['POST'])
def configuration_upload():
    data = dict()
    file_path = ''
    file = request.files['upload_file']
    # if user does not select file, browser also
    # submit an empty part without filename
    if file.filename == '':
        data = {'success': False, 'supported': False, 'message': 'No selected file!'}
    elif file and allowed_file(file.filename):  # json only
        filename = secure_filename(file.filename)
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(file_path)
    else:
        data = {'success': False, 'supported': False, 'message': 'File type not allowed!'}
    supported = True
    if file_path != '':
        with open(file_path, 'r') as f:
            json_object = json.load(f)
            if 'matrix-3d' in json_object:
                matrix_3d = json_object['matrix-3d']
                for matrix in matrix_3d:
                    matrix_supported = False
                    for name, piece in CommonPieces.items():
                        if matrix == piece.matrix:
                            matrix_supported = True
                            break
                    if not matrix_supported:
                        """ TODO: check user made pieces?
""" supported = False break else: supported = False else: supported = False if len(data) == 0: data = {'success': True, 'supported': supported, 'message': ''} return jsonify(data) def init_components(): # make sure that all configurations are built for name, configuration in CommonConfigurations.items(): configuration.output_piece_plotly() print('Created file for configuration {}.'.format(configuration.plotly_filename)) user_made_configuration_folder = '{}\\{}\\resources\\configurations\\user_made'.format( app.root_path, app.template_folder) json_files = get_file_names_only(user_made_configuration_folder) for filename in json_files: full_path = '{}.json'.format(os.path.join(user_made_configuration_folder, filename)) with open(full_path) as f: data = json.load(f) user_made = True name = data['name'] pieces_names = data['pieces'] configuration = Configuration(name=name, pieces=None, user_made=user_made, pieces_names=pieces_names) configuration.output_piece_plotly() # make sure that all pieces are built for name, piece in CommonPieces.items(): piece.output_piece_plotly() print('Created file for piece {}.'.format(piece.plotly_filename)) user_made_pieces_folder = '{}\\{}\\resources\\pieces\\user_made'.format(app.root_path, app.template_folder) json_files = get_file_names_only(user_made_pieces_folder) for filename in json_files: full_path = '{}.json'.format(os.path.join(user_made_pieces_folder, filename)) with open(full_path) as f: data = json.load(f) size = None matrix = data['matrix'] user_made = True name = data['name'] piece = Piece( size=size, filled_cells=None, fill_all=False, name=name, user_made=user_made, matrix=matrix) piece.output_piece_plotly() if __name__ == '__main__': thread = Thread(target=init_components) thread.start() app.config['UPLOAD_FOLDER'] = os.path.join('web', 'uploads') app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # 16 -> raises RequestEntityTooLarge exception app.register_blueprint(pieces_bp) app.register_blueprint(configurations_bp) app.register_blueprint(user_made_pieces_bp) app.register_blueprint(user_made_configurations_bp) app.run(debug=True)
maya.py
from __future__ import absolute_import

import functools
import threading
import time

from maya import cmds

import mayatools.shelf
from mayatools.geocache import utils as geocache_utils
from uitools.threads import defer_to_main_thread, call_in_main_thread

from .core import check_paths


_check_lock = threading.Lock()


def start_background_check(*args):
    # print '# Starting publish background check...'
    defer_to_main_thread(_update_buttons, None)
    references = call_in_main_thread(cmds.file, q=True, reference=True)
    geocaches = call_in_main_thread(geocache_utils.get_existing_cache_mappings).keys()
    threading.Thread(target=_background_check, args=[references + geocaches]).start()


def _background_check(references):
    with _check_lock:
        statuses = check_paths(references, only_published=True)
        if not statuses:
            # print '# No publishes are referenced.'
            defer_to_main_thread(_update_buttons, True)
            return
        out_of_date = []
        good = 0
        for status in statuses:
            if status.is_latest:
                good += 1
            else:
                out_of_date.append(status)
        if not out_of_date:
            # print '# None of the %d publishes are out of date.' % good
            defer_to_main_thread(_update_buttons, True)
            return
        # print '# %d publishes are out of date.' % len(out_of_date)
        defer_to_main_thread(_update_buttons, False)
        defer_to_main_thread(cmds.warning, '%d publish(es) are out of date.' % len(out_of_date))


def _update_buttons(status):
    image = {
        None: 'publishes/check_deps_unknown.png',
        True: 'publishes/check_deps_ok.png',
        False: 'publishes/check_deps_bad.png'
    }[status]
    # print '# Setting button image to', image
    for button in mayatools.shelf.buttons_from_uuid('sgpublish.mayatools.update_references:run'):
        cmds.shelfButton(button['name'], edit=True, image=image)
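# Usage note (an addition): the check is normally bound to the shelf button
# whose UUID appears in _update_buttons(), but it can also be started by hand
# from Maya's script editor. The import path below is inferred from that UUID
# and may differ in a given install:
#
#   from sgpublish.mayatools import update_references
#   update_references.start_background_check()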
worker_thread.py
from petronia.util.rwlock import RWLock

import threading
import queue
import traceback

_STOP_THREAD_NOTICE = "STOP"
_RUNNING_THREAD_QUEUES = []


def stop_all_threads():
    # Create a copy of the queues, so we don't get
    # into a weird state mid-processing
    for q in list(_RUNNING_THREAD_QUEUES):
        q.put_nowait(_STOP_THREAD_NOTICE)


class WorkerThread(object):
    def __init__(self, cid, daemon=False):
        self.__thread = threading.Thread(
            target=lambda: self._run(),
            daemon=daemon
        )
        self.__thread.name = "Worker {0}".format(cid)
        self.__state = 0
        self.__q = queue.Queue()
        self.__state_lock = RWLock()
        self.__thread.start()

    def queue(self, obj):
        if isinstance(obj, dict) and 'op' in obj and callable(obj['op']):
            if 'vargs' not in obj:
                obj['vargs'] = []
            elif not (isinstance(obj['vargs'], list) or isinstance(obj['vargs'], tuple)):
                obj['vargs'] = []
            if 'kargs' not in obj:
                obj['kargs'] = {}
            elif not isinstance(obj['kargs'], dict):
                obj['kargs'] = {}
            self.__q.put_nowait(obj)
            return True
        return False

    def stop(self, timeout=None):
        if threading.current_thread() == self.__thread:
            self.close()
        else:
            return self.__thread.join(timeout)

    def close(self):
        self.__state_lock.acquire_read()
        try:
            if self.__state < 2:
                self.__state_lock.promote()
                self.__state = 2
                # Force the queue to wake up
                self.__q.put_nowait({'op': lambda: 0, 'vargs': [], 'kargs': {}})
        finally:
            self.__state_lock.release()

    def _run(self):
        total = 0
        _RUNNING_THREAD_QUEUES.append(self.__q)
        try:
            self.__state_lock.acquire_read()
            try:
                if self.__state != 0:
                    # TODO correct exception
                    raise Exception("Already started")
                self.__state_lock.promote()
                self.__state = 1
            finally:
                self.__state_lock.release()
            while True:
                # Check the state at the loop start.
                self.__state_lock.acquire_read()
                try:
                    if self.__state >= 2:
                        break
                finally:
                    self.__state_lock.release()
                action = self.__q.get()
                if action == _STOP_THREAD_NOTICE:
                    self.close()
                elif action is not None:
                    try:
                        total += 1
                        action['op'](*action['vargs'], **action['kargs'])
                    except BaseException as e:
                        # TODO log the error better
                        print("<<ERROR Worker Thread action failed: {0}>>".format(e))
                        traceback.print_exception(type(e), e, e.__traceback__)
        finally:
            # No lock on this, because it's not a read then write.
            self.__state = 3
            if self.__q in _RUNNING_THREAD_QUEUES:
                _RUNNING_THREAD_QUEUES.remove(self.__q)
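# Usage sketch (an addition): WorkerThread consumes dicts of the shape that
# queue() validates above -- a callable under 'op' plus optional
# 'vargs'/'kargs'. A minimal round trip, driven from some other thread:
def _example_worker_usage():
    worker = WorkerThread('example', daemon=True)
    worker.queue({'op': print, 'vargs': ['hello from the worker'], 'kargs': {}})
    worker.close()          # flips the state and enqueues a no-op wake-up
    worker.stop(timeout=1)  # joins the worker thread from the caller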
service.py
# -*- coding: utf-8 -*- """ Created on Thu Apr 9 17:20:43 2020 @author: niklas """ import re import os import sys import pwd import secrets import argparse import logging from logging.handlers import SysLogHandler from subprocess import Popen, PIPE from multiprocessing import Process, Pipe import pamela import tornado.ioloop import tornado.web import tornado.httpserver import tornado.httputil from tornado.web import url, MissingArgumentError from onelogin.saml2.auth import OneLogin_Saml2_Auth from onelogin.saml2.utils import OneLogin_Saml2_Utils session = {} class Application(tornado.web.Application): def __init__( self, cookie_secret, saml_path, logger, data_url, usage_url, debug=False, base_url="/shibboleth", https_reverse_proxy=True, remote_login="/jupyterhub", lowercase_uname=True, ): BASE_DIR = os.path.dirname(__file__) TEMPLATE_PATH = os.path.join(BASE_DIR, 'templates') logger.debug("TEMPLATE PATH: %s", TEMPLATE_PATH) login_url = r"/sso" handlers_tmp = [ (r"/", IndexHandler, "index"), (login_url, SSOHandler, "login_sso"), (r"/acs", ACSHandler, "acs"), (r"/formular", FormularHandler, "formular"), (r"/create", CreateHandler, "create"), (r"/metadata", MetadataHandler, "saml_metadata"), (r"/logout", LogoutHandler, "logout"), ] handlers = [ url(base_url+x0, x1, name=x2) for (x0,x1,x2) in handlers_tmp ] settings = { "template_path": TEMPLATE_PATH, "autorealod": True, "debug": debug, "xsrf_cookies": True, "login_url": base_url+login_url, "cookie_secret": cookie_secret, # our own settings come here "logger": logger, "saml_path": saml_path, "https_reverse_proxy": https_reverse_proxy, "remote_login": remote_login, "data_url": data_url, "usage_url": usage_url, "lowercase_uname": lowercase_uname } tornado.web.Application.__init__(self, handlers, **settings) logger.info("created Application") class BaseHandler(tornado.web.RequestHandler): def initialize(self): self.log = self.application.settings.get('logger') self.saml_path = self.application.settings.get('saml_path') self.https_reverse_proxy = self.application.settings.get('https_reverse_proxy') self.remote_login = self.application.settings.get('remote_login') self.data_url = self.application.settings.get('data_url') self.usage_url = self.application.settings.get('usage_url') self.lowercase_uname = self.application.settings.get('lowercase_uname') self.special_chars = "@!%*#?§+" self.pw_min = 8 self.pw_max = 20 self.reg = "^(?=.*[a-z])(?=.*[A-Z])(?=.*[0-9])(?=.*[{}])[A-Za-z0-9{}]{}$".format( self.special_chars, self.special_chars, "{"+str(self.pw_min)+","+str(self.pw_max)+"}" ) def get_current_user(self): return self.get_secure_cookie("uid", max_age_days=1) class SAMLHandler(BaseHandler): def prepare(self): request = self.request dataDict = {} for key in request.arguments: dataDict[key] = request.arguments[key][0].decode('utf-8') https = request == 'https' or self.https_reverse_proxy self.saml_req = { 'https': 'on' if https else 'off', 'http_host': tornado.httputil.split_host_and_port(request.host)[0], 'script_name': request.path, 'server_port': tornado.httputil.split_host_and_port(request.host)[1], 'get_data': dataDict, 'post_data': dataDict, 'query_string': request.query } def init_saml_auth(self): self.auth = OneLogin_Saml2_Auth( self.saml_req, custom_base_path=self.saml_path ) return self.auth class FormularHandler(BaseHandler): @tornado.web.authenticated def get(self): errors = None if "error" in self.request.arguments: errors = [ """Please follow the password rules and type in the same password twice. 
Do not forget to check the data protection declaration and terms of service.""" ] self.render( "formular.html", errors=errors, error_reason=None, special_chars=self.special_chars, min_chars=self.pw_min, max_chars=self.pw_max, data_url=self.data_url, usage_url=self.usage_url ) class CreateHandler(BaseHandler): @tornado.web.authenticated def post(self): error_args = False try: pw1 = self.get_argument("pw1") pw2 = self.get_argument("pw2") checked = self.get_argument("checks", "notcheck") == "checked" except MissingArgumentError: error_args = True # compiling regex pat = re.compile(self.reg) if pw1 != pw2 or not re.search(pat, pw1) or not checked or error_args: self.redirect(self.reverse_url("formular")+"?error") else: uname = self.get_secure_cookie("uid", max_age_days=1) # create new user self.log.debug(str(os.getuid())) proc = Popen( ["adduser", uname], stdout=PIPE, stderr=PIPE, preexec_fn=preexec_fn, encoding=sys.getdefaultencoding() ) out, err = proc.communicate() self.log.debug("out: %s", out) self.log.debug("err: %s", err) if err != "": self.log.error("adduser %s: %s", uname, err) self.set_status(500) self.write("Internal Server Error - Errorcode 500") else: self.log.info("adduser %s was succesfull", uname) parent_conn, child_conn = Pipe() p = Process(target=set_pwd, args=(uname, pw1, child_conn,)) p.start() retval = parent_conn.recv() self.log.debug("retval %s", retval) p.join() if retval == "": self.log.info("set pwd for %s was succesfull", uname) self.render( "success.html", remote_login=self.remote_login, errors=None ) else: self.log.error("set pwd for %s: %s", uname, retval) self.set_status(500) self.write("Internal Server Error - Errorcode 500") class IndexHandler(BaseHandler): def get(self): self.render( 'index.html', errors=None, error_reason=None, remote_login=self.remote_login ) class ACSHandler(SAMLHandler): # disable xsrf here... 
def check_xsrf_cookie(self): pass def post(self): auth = self.init_saml_auth() error_reason = None auth.process_response() errors = auth.get_errors() if len(errors) == 0: attributes = auth.get_attributes() self.log.debug("Attributes: %s", attributes) # test if user has the correct attributes if not "student" in attributes["urn:oid:1.3.6.1.4.1.5923.1.1.1.1"]: self.log.info("user is not a student.") self.render( "not_entitled.html", errors=None, error_reason=None, remote_login=self.remote_login ) return if not "Fb13" in attributes["urn:oid:1.3.6.1.4.1.8974.2.1.866"]: self.log.info("user is not a physicist.") self.render( "not_entitled.html", errors=None, error_reason=None, remote_login=self.remote_login ) return uname = attributes["urn:oid:0.9.2342.19200300.100.1.1"][0] if self.lowercase_uname: uname = uname.lower() user_exists = True try: entry = pwd.getpwnam(uname) except KeyError: user_exists = False if user_exists: self.log.info("user %s exists already.", uname) self.render( "user_exists.html", uname=uname, errors=None, error_reason=None, remote_login=self.remote_login ) return else: self.set_secure_cookie( "uid", uname, expires_days=1 ) self.log.info("user %s authenticated.", uname) self.redirect(self.reverse_url("formular")) return elif auth.get_settings().is_debug_active(): error_reason = auth.get_last_error_reason() self.render( 'index.html', errors=errors, error_reason=error_reason, remote_login=self.remote_login ) class SSOHandler(SAMLHandler): def get(self): self.init_saml_auth() return self.redirect(self.auth.login(self.reverse_url("formular"))) class LogoutHandler(BaseHandler): def get(self): self.clear_all_cookies() self.redirect(self.reverse_url("index")) class MetadataHandler(SAMLHandler): def get(self): auth = self.init_saml_auth() saml_settings = auth.get_settings() metadata = saml_settings.get_sp_metadata() errors = saml_settings.validate_metadata(metadata) if len(errors) == 0: # resp = HttpResponse(content=metadata, content_type='text/xml') self.set_header('Content-Type', 'text/xml') self.write(metadata) else: self.log.error(', '.join(errors)) self.set_status(500) self.write("Internal Server Error - Errorcode 500") def preexec_fn(): """Set the subprocess to root user""" os.setuid(0) os.setgid(0) def set_pwd(username, password, conn): """Change to root""" os.setuid(0) os.setgid(0) error = "" try: pamela.change_password( username, password, service="login", encoding='utf-8' ) except pamela.PAMError as e: error = str(e) conn.send(error) conn.close() def main(): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="""sso2linuxuser: create linux user after SSO""" ) parser.add_argument( "--saml_path", help="base", type=str, default="/opt/jupyterhub/etc/sso2linuxuser" ) parser.add_argument( "--base_url", help="base", type=str, default=r"/shibboleth" ) parser.add_argument( "--remote_login", help="base", type=str, default=r"/jupyterhub" ) parser.add_argument( "--data_url", help="base", type=str, default=r"/datenschutz" ) parser.add_argument( "--usage_url", help="base", type=str, default=r"/nutzungsbedingungen" ) parser.add_argument( "--port", help="base", type=int, default=8002 ) parser.add_argument( "--secret_nbytes", help="base", type=int, default=32 ) parser.add_argument( "--debug", help="base", action="store_true" ) parser.add_argument( "--lowercase_uname", help="base", action="store_true" ) parser.add_argument( "--https_reverse_proxy", help="base", action="store_true" ) parser.add_argument( "--syslog_address", type=str, 
default='/dev/log' ) args = parser.parse_args() port = args.port debug = args.debug loglevel = logging.DEBUG if debug else logging.INFO logger = logging.getLogger('sso2linuxuser') logger.setLevel(loglevel) h = SysLogHandler(address=args.syslog_address, facility="daemon") formatter = logging.Formatter( '[%(name)s-%(levelname)s %(lineno)d] %(message)s' ) h.setFormatter(formatter) h.setLevel(loglevel) logger.addHandler(h) h2 = SysLogHandler(address=args.syslog_address, facility="daemon") formatter = logging.Formatter( '[%(name)s-%(levelname)s tornado] %(message)s' ) h2.setFormatter(formatter) h2.setLevel(loglevel) logging.getLogger("tornado.access").addHandler(h2) logging.getLogger("tornado.application").addHandler(h2) logging.getLogger("tornado.general").addHandler(h2) # create cookie secret cookie_secret = secrets.token_hex(args.secret_nbytes) app = Application( cookie_secret=cookie_secret, saml_path=args.saml_path, logger=logger, debug=debug, base_url=args.base_url, https_reverse_proxy=args.https_reverse_proxy, remote_login=args.remote_login, data_url=args.data_url, usage_url=args.usage_url, lowercase_uname=args.lowercase_uname ) http_server = tornado.httpserver.HTTPServer(app) http_server.listen(port) logger.info("Listening on port %i", port) logger.debug("Debugging") tornado.ioloop.IOLoop.instance().start() if __name__ == "__main__": main()
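# Illustrative check (an addition, not part of the service): the password
# policy compiled in BaseHandler.initialize() -- at least one lowercase
# letter, one uppercase letter, one digit, and one special character, 8 to 20
# characters total. Rebuilt standalone here so it can be tried without Tornado.
def _example_password_policy(candidate, special_chars="@!%*#?§+",
                             pw_min=8, pw_max=20):
    pattern = "^(?=.*[a-z])(?=.*[A-Z])(?=.*[0-9])(?=.*[{}])[A-Za-z0-9{}]{}$".format(
        special_chars, special_chars, "{" + str(pw_min) + "," + str(pw_max) + "}"
    )
    return re.search(re.compile(pattern), candidate) is not None

# _example_password_policy('Abcdef1!')  -> True
# _example_password_policy('abcdef1!')  -> False (no uppercase letter)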
proxy.pyw
# -*- coding: utf-8 -*- # socket proxy to run shell commands from Pharo # mainly for Windows, because ProcessWrapper is just broken and I am tired from subprocess import Popen,PIPE,STARTUPINFO,STARTF_USESHOWWINDOW import os import sys import json import socket from base64 import b64encode, b64decode import threading import logging from datetime import datetime import tempfile def configure_logging(): logDir = sys.path[0] + '\logs' if not os.path.isdir(logDir): os.mkdir(logDir) #/if logging.basicConfig( filename=logDir + '\\' + datetime.now().strftime('%Y-%m-%d.log'), format='[%(asctime)s] %(message)s', level=logging.DEBUG ) #/def configure_logging() PORT_FILE = tempfile.gettempdir() + '\pharo-shell-proxy-port.txt' class ShellServer(object): def __init__(self, host, port): logging.info('Starting new server...') self.host = host self.port = port #/def def create_socket(self): self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.socket.bind((self.host, self.port)) if self.port == 0: self.port = self.socket.getsockname()[1] #/if with open(PORT_FILE, 'w') as f: f.write(str(self.port)) #/with logging.info('Started on %s', self.socket.getsockname()) #/def def run(self): self.create_socket() logging.info('Listening...') self.socket.listen(1) self.running = True while self.running: client, address = self.socket.accept() logging.info('Accepted client on %s', address) clientThread = threading.Thread(target=self.processClient,args=(client,address)) clientThread.start() #/while self.socket.close() logging.info('Stopping server...') #/def def processClient(self, client, address): logging.info('%s Processing client...', address) data = client.recv(1024).strip() logging.info('%s Received: %s', address, data) if data == 'terminate': logging.info('%s Received \'terminate\' command, closing.', address) client.close() self.terminate() return #/if if data == '': logging.info('%s Received no data, closing.', address) client.close() return #/if try: response = self.processCommand(data) client.sendall(response) except Exception: logging.exception('Client exception') #/try client.close() logging.info('%s Client processed.', address) #/def def terminate(self): self.running = False # Create an artificial connection to stop accept() from inside logging.info('Executing termination connection.') terminatingSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) terminatingSocket.connect((self.host, self.port,)) os.remove(PORT_FILE) #/def def processCommand(self, data): # data is B64(JSON(command)) logging.info('Processing command...') logging.debug('base64: %s', data) jsonString = b64decode(data) logging.debug('json: %s', jsonString) command = json.loads(jsonString) response = self.runCommand(command) response['stdout'] = b64encode(response['stdout']) response['stderr'] = b64encode(response['stderr']) logging.debug('Truncated response: %s', (response['exitCode'], response['stdout'][:100], response['stderr'][:100],)) return json.dumps(response) #/def def runCommand(self, args): # avoid popping up window info = STARTUPINFO() info.dwFlags |= STARTF_USESHOWWINDOW p = Popen(args, stdout=PIPE, stderr=PIPE, startupinfo=info) out, err = p.communicate() code = p.returncode return {'exitCode':code, 'stdout':out, 'stderr':err} #/def #/class if __name__ == '__main__': server = ShellServer('localhost', 0) server.run() logging.info('Bye.') #/if
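# Client-side sketch (an addition, assuming the same Python 2 runtime as this
# module): how Pharo -- or any other process -- can talk to the proxy. The
# request wire format is base64(json(argv)), as decoded in processCommand(),
# and the reply is JSON whose stdout/stderr fields are base64-encoded.
def _example_run_via_proxy(args=('cmd', '/c', 'echo', 'hi')):
    with open(PORT_FILE) as f:
        port = int(f.read())
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(('localhost', port))
    client.sendall(b64encode(json.dumps(list(args))))
    reply = json.loads(client.recv(65536))  # assumes the reply fits one recv
    client.close()
    return b64decode(reply['stdout']), b64decode(reply['stderr'])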
test_operator.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: skip-file from __future__ import print_function from __future__ import division import numpy as np import mxnet as mx import copy import math import random import itertools from distutils.version import LooseVersion from numpy.testing import assert_allclose, assert_array_equal from mxnet.test_utils import * from mxnet.operator import * from mxnet.base import py_str, MXNetError, _as_list from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, assertRaises from common import run_in_spawned_process from nose.tools import assert_raises, ok_ import unittest import os def check_rnn_consistency(cell1, cell2, T, N, I, H, grad_req, rtol=1e-2, atol=1e-4): dshape = (N, T, I) data = mx.sym.Variable('data') Y1, _ = cell1.unroll(T, data, layout='NTC', merge_outputs=True) mod1 = mx.mod.Module(Y1, label_names=None, context=default_context()) mod1.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req) Y2, _ = cell2.unroll(T, data, layout='NTC', merge_outputs=True) mod2 = mx.mod.Module(Y2, label_names=None, context=default_context()) mod2.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req) mod1.init_params() args, auxs = mod1.get_params() args = cell1.unpack_weights(args) args = cell2.pack_weights(args) mod2.set_params(args, auxs) x = mx.random.uniform(shape=dshape) batch=mx.io.DataBatch(data=[x]) # check inference mod1.forward(batch, is_train=False) mod2.forward(batch, is_train=False) assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol) # check training mod1.forward(batch, is_train=True) mod2.forward(batch, is_train=True) assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol) dy = mx.random.uniform(shape=mod1.get_outputs()[0].shape) mod1.backward(out_grads=[dy]) mod2.backward(out_grads=[dy]) if type(grad_req) is dict and grad_req['data'] == 'null' or grad_req == 'null': assert(mod1.get_input_grads()[0] == None) assert(mod2.get_input_grads()[0] == None) else: assert_allclose(mod1.get_input_grads()[0].asnumpy(), mod2.get_input_grads()[0].asnumpy(), rtol=rtol, atol=atol) @with_seed() @assert_raises_cudnn_not_satisfied(min_version='5.1.10') def test_rnn_with_new_param(): rnn_modes = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm'] ngates_ = [1, 1, 3, 4] num_layers, input_size, seq_len, batch_size, state_size = 3, 128, 5, 64, 8 for bidirectional in [False, True]: directions = 2 if bidirectional else 1 for mode, ngates in zip(rnn_modes, ngates_): first_layer_size = (input_size * state_size + state_size * state_size + state_size * 2) * ngates rest_layer_size = (state_size * directions * state_size + state_size * state_size + 
state_size * 2) \ * ngates * (num_layers - 1) param_size = (first_layer_size + rest_layer_size) * directions sym = mx.sym.RNN(mode=mode, num_layers=num_layers, bidirectional=bidirectional, state_outputs=False, state_size=state_size, name='rnn') bind_dict = { 'rnn_data': mx.ndarray.random.uniform(low=-1, high=1, shape=(seq_len, batch_size, input_size)), 'rnn_parameters': mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)), 'rnn_state': mx.ndarray.zeros(shape=(num_layers * directions, batch_size, state_size)) } if mode == 'lstm': bind_dict['rnn_state_cell'] = mx.ndarray.zeros( shape=(num_layers * directions, batch_size, state_size)) ex = sym.bind(default_context(), bind_dict) ex.forward(is_train=True) ex01 = ex.output_dict['rnn_output'].asnumpy() ex.forward(is_train=False) ex02 = ex.output_dict['rnn_output'].asnumpy() assert_allclose(ex01, ex02, rtol=1e-2, atol=1e-4) bind_dict['rnn_parameters'] = mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)) ex.copy_params_from(bind_dict) ex.forward(is_train=True) ex03 = ex.output_dict['rnn_output'].asnumpy() ex.forward(is_train=False) ex04 = ex.output_dict['rnn_output'].asnumpy() assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4) @with_seed() @assert_raises_cudnn_not_satisfied(min_version='5.1.10') def test_lstm_sym(): T, N, I, H = 5, 32, 800, 800 fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='lstm', get_next_state=True, prefix='') stack = mx.rnn.SequentialRNNCell() stack.add(mx.rnn.LSTMCell(H, prefix='l0_')) stack.add(mx.rnn.LSTMCell(H, prefix='l1_')) stack.add(mx.rnn.LSTMCell(H, prefix='l2_')) check_rnn_consistency(fused, stack, T, N, I, H, 'write') check_rnn_consistency(fused, stack, T, N, I, H, 'add') check_rnn_consistency(fused, stack, T, N, I, H, 'null') @with_seed() @assert_raises_cudnn_not_satisfied(min_version='5.1.10') def test_lstm_bidirectional(): T, N, I, H = 5, 20, 800, 800 fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='lstm', bidirectional=True, get_next_state=True, prefix='') stack = mx.rnn.SequentialRNNCell() stack.add(mx.rnn.BidirectionalCell( mx.rnn.LSTMCell(H, prefix='l0_'), mx.rnn.LSTMCell(H, prefix='r0_'), output_prefix='bi_lstm_0_')) stack.add(mx.rnn.BidirectionalCell( mx.rnn.LSTMCell(H, prefix='l1_'), mx.rnn.LSTMCell(H, prefix='r1_'), output_prefix='bi_lstm_1_')) check_rnn_consistency(fused, stack, T, N, I, H, 'write') check_rnn_consistency(fused, stack, T, N, I, H, 'add') check_rnn_consistency(fused, stack, T, N, I, H, 'null') check_rnn_consistency(fused, stack, T, N, I, H, {'data': 'add', 'parameters': 'null'}) @with_seed() @assert_raises_cudnn_not_satisfied(min_version='5.1.10') def test_gru_sym(): T, N, I, H = 5, 32, 800, 800 fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='gru', get_next_state=True, prefix='') stack = mx.rnn.SequentialRNNCell() stack.add(mx.rnn.GRUCell(H, prefix='l0_')) stack.add(mx.rnn.GRUCell(H, prefix='l1_')) stack.add(mx.rnn.GRUCell(H, prefix='l2_')) check_rnn_consistency(fused, stack, T, N, I, H, 'write') check_rnn_consistency(fused, stack, T, N, I, H, 'add') check_rnn_consistency(fused, stack, T, N, I, H, 'null') @with_seed() @assert_raises_cudnn_not_satisfied(min_version='5.1.10') def test_gru_bidirectional(): T, N, I, H = 5, 20, 800, 800 fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='gru', bidirectional=True, get_next_state=True, prefix='') stack = mx.rnn.SequentialRNNCell() stack.add(mx.rnn.BidirectionalCell( mx.rnn.GRUCell(H, prefix='l0_'), mx.rnn.GRUCell(H, prefix='r0_'), output_prefix='bi_gru_0_')) stack.add(mx.rnn.BidirectionalCell( mx.rnn.GRUCell(H, 
prefix='l1_'), mx.rnn.GRUCell(H, prefix='r1_'), output_prefix='bi_gru_1_')) check_rnn_consistency(fused, stack, T, N, I, H, 'write') check_rnn_consistency(fused, stack, T, N, I, H, 'add') check_rnn_consistency(fused, stack, T, N, I, H, 'null') @with_seed() @assert_raises_cudnn_not_satisfied(min_version='5.1.10') def test_rnntanh_sym(): T, N, I, H = 5, 32, 800, 800 fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_tanh', get_next_state=True, prefix='') stack = mx.rnn.SequentialRNNCell() stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l0_')) stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l1_')) stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l2_')) check_rnn_consistency(fused, stack, T, N, I, H, 'write') check_rnn_consistency(fused, stack, T, N, I, H, 'add') check_rnn_consistency(fused, stack, T, N, I, H, 'null') @with_seed() @assert_raises_cudnn_not_satisfied(min_version='5.1.10') def test_rnntanh_bidirectional(): T, N, I, H = 5, 20, 800, 800 fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_tanh', bidirectional=True, get_next_state=True, prefix='') stack = mx.rnn.SequentialRNNCell() stack.add(mx.rnn.BidirectionalCell( mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'), mx.rnn.RNNCell(H, activation='tanh', prefix='r0_'), output_prefix='bi_rnntanh_0_')) stack.add(mx.rnn.BidirectionalCell( mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'), mx.rnn.RNNCell(H, activation='tanh', prefix='r1_'), output_prefix='bi_rnntanh_1_')) check_rnn_consistency(fused, stack, T, N, I, H, 'write') check_rnn_consistency(fused, stack, T, N, I, H, 'add') check_rnn_consistency(fused, stack, T, N, I, H, 'null') @with_seed() @assert_raises_cudnn_not_satisfied(min_version='5.1.10') def test_rnnrelu_sym(): T, N, I, H = 5, 32, 200, 200 fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_relu', get_next_state=True, prefix='') stack = mx.rnn.SequentialRNNCell() stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l0_')) stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l1_')) stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l2_')) check_rnn_consistency(fused, stack, T, N, I, H, 'write') check_rnn_consistency(fused, stack, T, N, I, H, 'add') check_rnn_consistency(fused, stack, T, N, I, H, 'null') @with_seed() @assert_raises_cudnn_not_satisfied(min_version='5.1.10') def test_rnnrelu_bidirectional(): T, N, I, H = 5, 20, 200, 200 fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_relu', bidirectional=True, get_next_state=True, prefix='') stack = mx.rnn.SequentialRNNCell() stack.add(mx.rnn.BidirectionalCell( mx.rnn.RNNCell(H, activation='relu', prefix='l0_'), mx.rnn.RNNCell(H, activation='relu', prefix='r0_'), output_prefix='bi_rnnrelu_0_')) stack.add(mx.rnn.BidirectionalCell( mx.rnn.RNNCell(H, activation='relu', prefix='l1_'), mx.rnn.RNNCell(H, activation='relu', prefix='r1_'), output_prefix='bi_rnnrelu_1_')) check_rnn_consistency(fused, stack, T, N, I, H, 'write', rtol=1e-2, atol=1e-2) check_rnn_consistency(fused, stack, T, N, I, H, 'add', rtol=1e-2, atol=1e-2) check_rnn_consistency(fused, stack, T, N, I, H, 'null', rtol=1e-2, atol=1e-2) @with_seed() def test_lstm_dropout(): X = mx.sym.Variable('x') Params = mx.sym.Variable('params') HX = mx.sym.Variable('state') CX = mx.sym.Variable('state_cell') T, N, I, H = 300, 20, 800, 800 rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX, state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM') exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I)) out = exe.forward(is_train=True) 
out[0].wait_to_read() @with_seed() def test_gru_dropout(): X = mx.sym.Variable('x') Params = mx.sym.Variable('params') HX = mx.sym.Variable('state') T, N, I, H = 300, 20, 800, 800 rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU') exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I)) out = exe.forward(is_train=True) out[0].wait_to_read() @with_seed() def test_rnntanh_dropout(): X = mx.sym.Variable('x') Params = mx.sym.Variable('params') HX = mx.sym.Variable('state') T, N, I, H = 300, 20, 800, 800 rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH') exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I)) out = exe.forward(is_train=True) out[0].wait_to_read() @with_seed() def test_rnnrelu_dropout(): X = mx.sym.Variable('x') Params = mx.sym.Variable('params') HX = mx.sym.Variable('state') T, N, I, H = 300, 20, 800, 800 rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU') exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I)) out = exe.forward(is_train=True) out[0].wait_to_read() def test_RNN_float64(): if default_context().device_type == 'gpu': return sym = mx.sym.RNN( mx.sym.Variable('in'), mx.sym.Variable('par'), mx.sym.Variable('s'), state_size = (2), num_layers = 1, mode = 'rnn_tanh' ) dtype = 'float64' explicit_grad = { 'in': mx.nd.ones([2, 1, 2], dtype=dtype), 'par': mx.nd.ones([12], dtype=dtype), 's': mx.nd.ones([1, 1, 2], dtype=dtype) } args_grad = explicit_grad grad_req = 'write' ex = sym.bind(default_context(), { 'in': mx.nd.ones([2, 1, 2], dtype=dtype), 'par': mx.nd.ones([12], dtype=dtype), 's': mx.nd.ones([1, 1, 2], dtype=dtype) }, args_grad = args_grad, grad_req = grad_req ) ex.forward() ex.outputs[0].wait_to_read() def np_softmax(x, axis=-1, temperature=1.0): x = x - np.max(x, axis=axis, keepdims=True) x = np.exp(x/temperature) x /= np.sum(x, axis=axis, keepdims=True) return x def check_elementwise_sum_with_shape(shape, n): # forward inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)] out = mx.symbol.ElementWiseSum(*inputs, name='esum') arr = [mx.nd.empty(shape) for i in range(n)] arr_grad = [mx.nd.empty(shape) for i in range(n)] for i in range(n): arr[i][:] = np.random.uniform(-10, 10, shape) exec1 = out.bind(default_context(), args=arr, args_grad=arr_grad) exec1.forward(is_train=True) out1 = exec1.outputs[0] out = sum(a.asnumpy() for a in arr) assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5) out_grad = mx.nd.empty(shape) out_grad[:] = np.random.uniform(-10, 10, shape) # backward exec1.backward([out_grad]) for a in arr_grad: assert_almost_equal(a, out_grad, rtol=1e-5, atol=1e-5) @with_seed() def test_elementwise_sum(): nrepeat = 2 maxdim = 4 for repeat in range(nrepeat): for dim in range(1, maxdim): shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim)) check_elementwise_sum_with_shape(shape, np.random.randint(1, 8)) def check_concat_with_shape(shapes, dimension, skip_second): # if skip_second is True, second argument will not have gradient. 
# it is to test #1130 n = len(shapes) # forward target_dim = 0 for shape in shapes: target_dim += shape[dimension] inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)] out = mx.symbol.Concat(*inputs, name='conc',dim=dimension) arr = [mx.nd.empty(shape) for shape in shapes] for i in range(n): arr[i][:] = shapes[i][dimension] arr_np = [np.copy(narray.asnumpy()) for narray in arr] arr_grad = [mx.nd.empty(shape) for shape in shapes] dict_grad = {} arg_names = out.list_arguments() for name, g in zip(arg_names, arr_grad): if not skip_second or name != 'arg1': dict_grad[name] = g args = out.list_arguments() arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes))) out_grad = mx.nd.empty(out_shapes[0]) exec1 = out.bind(default_context(), args=arr, args_grad=dict_grad) exec1.forward(is_train=True) out1 = exec1.outputs[0] ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension) assert_almost_equal(out1, ret) # backward out1.copyto(out_grad) out_grad[:] += 1 exec1.backward([out_grad]) for i, name in enumerate(arg_names): if not skip_second or name != 'arg1': grad = dict_grad[name] np_grad = arr_np[i] assert_almost_equal(grad, np_grad + 1) @with_seed() def test_concat(): for dimension in range(4): n = 2 merge = [2, 3, 4, 5, 6] a = 2 b = 3 c = 4 # test 2D if dimension<2: for dim in range(2, 6): shapes = [] for i in range(dim): if dimension == 0: shapes.append((merge[i], a)) elif dimension == 1: shapes.append((a, merge[i])) check_concat_with_shape(shapes,dimension,True) check_concat_with_shape(shapes,dimension,False) # Test negative dim check_concat_with_shape(shapes, dimension - 2, True) check_concat_with_shape(shapes, dimension - 2, False) #test 3D if dimension<3: for dim in range(2, 6): shapes = [] for i in range(dim): if dimension == 0: shapes.append((merge[i], a,b)) elif dimension ==1: shapes.append((a,merge[i],b)) elif dimension ==2: shapes.append((a,b,merge[i])) check_concat_with_shape(shapes,dimension,True) check_concat_with_shape(shapes,dimension,False) # Test negative dim check_concat_with_shape(shapes, dimension - 3, True) check_concat_with_shape(shapes, dimension - 3, False) # test 4D for dim in range(2, 6): shapes = [] for i in range(dim): if dimension == 0: shapes.append((merge[i],a,b,c)) elif dimension == 1: shapes.append((a,merge[i],b,c)) elif dimension ==2: shapes.append((a,b,merge[i],c)) elif dimension ==3: shapes.append((a,b,c,merge[i])) check_concat_with_shape(shapes,dimension,True) check_concat_with_shape(shapes,dimension,False) # Test negative dim check_concat_with_shape(shapes, dimension - 4, True) check_concat_with_shape(shapes, dimension - 4, False) @with_seed() def test_slice_channel(): def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis): ins = [] if squeeze_axis: shape = np.random.randint(2, 5, data_ndim).tolist() shape[axis] = num_outputs out_ele_shape = [ele for ele in shape] del out_ele_shape[axis] else: shape = np.random.randint(1, 5, data_ndim).tolist() shape[axis] *= num_outputs out_ele_shape = [ele for ele in shape] out_ele_shape[axis] //= num_outputs data_npy = np.random.normal(size=shape) out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)] data = mx.sym.Variable('data') sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis) exe = sym.simple_bind(ctx=default_context(), data=data_npy.shape) assert len(exe.outputs) == num_outputs outputs = exe.forward(is_train=True, data=data_npy) for i in range(num_outputs): gt = 
data_npy.take(np.arange(i * shape[axis]/num_outputs, (i+1) * shape[axis]/num_outputs).astype(np.int), axis=axis) if squeeze_axis: assert_almost_equal(outputs[i], gt.reshape(outputs[i].shape)) else: assert_almost_equal(outputs[i], gt) # test backward exe.backward(out_grads=[mx.nd.array(ele, ctx=default_context()) for ele in out_grads_npy]) if squeeze_axis: assert_almost_equal(exe.grad_arrays[0], np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy], axis=axis)) else: assert_almost_equal(exe.grad_arrays[0], np.concatenate(out_grads_npy, axis=axis)) check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True) check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False) check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False) check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True) @with_seed() def test_regression(): ''' test regression operator ''' def check_regression(symbol, forward, backward, shape, stype='default', densities=[0, 0.5, 1]): # init executor data = mx.symbol.Variable('data') label = mx.symbol.Variable('label', stype=stype) out = symbol(data, label) grad_req = {'data': 'write', 'label': 'null'} out_exec = out.simple_bind(default_context(), grad_req=grad_req, data=shape, label=shape) arg_map = dict(zip(out.list_arguments(), out_exec.arg_arrays)) grad_map = dict(zip(out.list_arguments(), out_exec.grad_arrays)) # init data arr_data = mx.random.uniform(-1, 1, shape) arg_map["data"][:] = arr_data # init label based on density arr_label = arg_map["label"] atol = 1e-5 for density in densities: arr_label[:] = rand_ndarray(shape, stype, density=density) out_exec.forward(is_train=True) out_exec.backward() np_out = forward(arr_data.asnumpy()) out_grad = backward(np_out, arr_label.asnumpy().reshape(np_out.shape)) / shape[1] assert_almost_equal(out_exec.outputs[0], np_out, atol=atol) assert_almost_equal(grad_map["data"], out_grad, atol=atol) shape = (50, 30) check_regression(mx.symbol.LogisticRegressionOutput, lambda x: 1.0 / (1.0 + np.exp(-x)), lambda x, y : x - y, shape) check_regression(mx.symbol.LinearRegressionOutput, lambda x: x, lambda x, y : x - y, shape) check_regression(mx.symbol.MAERegressionOutput, lambda x: x, lambda x, y : np.where(x > y, np.ones(x.shape), -np.ones(x.shape)), shape) check_regression(mx.symbol.LogisticRegressionOutput, lambda x: 1.0 / (1.0 + np.exp(-x)), lambda x, y : x - y, shape, stype='csr') check_regression(mx.symbol.LinearRegressionOutput, lambda x: x, lambda x, y : x - y, shape, stype='csr') def check_softmax_grad(xpu): x = mx.sym.Variable('x') label = mx.sym.Variable('label') x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu) grad_x = mx.nd.zeros((1,4), ctx=xpu) label_nd = mx.nd.array([1], ctx=xpu) sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False) ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x}) ex.forward(is_train=True) softmax_out = ex.outputs[0].asnumpy() expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]] assert np.isclose(softmax_out, expected_softmax_out).all() ex.backward(is_train=True) grad_out = ex.grad_arrays[0].asnumpy() k = int(label_nd[0].asscalar()) expected_grad_out = np.zeros((1,4)) expected_grad_out[0, k] = -1 assert np.isclose(grad_out - softmax_out, expected_grad_out).all() def check_smoothed_softmax_grad(xpu): alpha = 0.2 x = mx.sym.Variable('x') label = mx.sym.Variable('label') x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu) grad_x = mx.nd.zeros((1,4), ctx=xpu) 
label_nd = mx.nd.array([1], ctx=xpu) sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False, smooth_alpha=alpha) ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x}) ex.forward(is_train=True) softmax_out = ex.outputs[0].asnumpy() expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]] assert np.isclose(softmax_out, expected_softmax_out).all() ex.backward(is_train=True) grad_out = ex.grad_arrays[0].asnumpy() k = int(label_nd[0].asscalar()) expected_grad_out = np.full((1,4), fill_value=-alpha/float(4-1)) expected_grad_out[0, k] = - (1 - alpha) assert np.isclose(grad_out - softmax_out, expected_grad_out).all() def check_softmax_with_ignore_label(xpu): X = mx.symbol.Variable('X') L = mx.symbol.Variable('L') Y = mx.symbol.SoftmaxOutput(data=X, label=L, ignore_label=0, use_ignore=True) shape = (20, 10) x = mx.nd.empty(shape, ctx = xpu) l = mx.nd.empty((shape[0],), ctx = xpu) x_np = np.random.rand(*shape) l_np = np.random.randint(0, shape[1]-1, (shape[0],)) x[:] = x_np l[:] = l_np grad = mx.nd.empty(shape, ctx = xpu) exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad}) exec1.forward(is_train=True) exec1.backward() grad0 = grad.asnumpy() for i in range(int(shape[0]/2)): l_np[i] = 0 l[:] = l_np exec1.forward(is_train=True) exec1.backward() grad1 = grad.asnumpy() assert abs(np.sum(grad1[:int(shape[0]/2)])) < 1e-5 assert_almost_equal(grad0[int(shape[0]/2):], grad1[int(shape[0]/2):]) def check_softmax_with_shape(shape, xpu, preserve_shape=False): # bind with label X = mx.symbol.Variable('X') L = mx.symbol.Variable('L') Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape) x = mx.random.uniform(-1, 1, shape, ctx=xpu) l = mx.random.uniform(-1, 1, shape, ctx=xpu) l[:] = np_softmax(l.asnumpy()) grad = mx.nd.empty(shape, ctx = xpu) exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad}) exec1.forward(is_train=True) out = exec1.outputs[0].asnumpy() # Non-zero atol required by test_softmax with seed 781663739 rtol = 1e-4 atol = 1e-6 assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=rtol, atol=atol) exec1.backward() assert_almost_equal(grad, np_softmax(x.asnumpy()) - l.asnumpy(), rtol=rtol, atol=atol) def test_python_op(): X = mx.symbol.Variable('X') op = mx.operator.NumpyOp() s = op.get_symbol(X, name='numpy_op') x = mx.ndarray.ones((10))*10 dx = mx.ndarray.zeros((10)) dy = mx.ndarray.ones((10)) exec1 = s.bind(default_context(), args=[x], args_grad = {'X': dx}) exec1.forward(is_train=True) assert_almost_equal(x, exec1.outputs[0]) exec1.backward(dy) assert_almost_equal(dy, dx) def test_swapaxes(): data = mx.symbol.Variable('data') shape = (2, 3, 4) data_tmp = np.ones(shape) data_tmp[0] = 1 data_tmp[1] = 2 arr_data = mx.nd.array(data_tmp) swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2) swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2) exe_c = swap.bind(default_context(), args=[arr_data]) exe_c.forward(is_train=True) out = exe_c.outputs[0] swap0_ = np.swapaxes(data_tmp, 0, 2) swap_ = np.swapaxes(swap0_, 1, 2) assert_almost_equal(out, swap_) @with_seed() def test_scalarop(): data = mx.symbol.Variable('data') shape = (3, 4) data_tmp = np.ones(shape)*5 arr_data = mx.nd.array(data_tmp) arr_grad = mx.nd.empty(shape) arr_grad[:]=3 test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0)) npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0)) npout = 2/npout_1 check_symbolic_forward(test, [data_tmp], [npout]) npout_grad = 2.*2/5 npout_grad = 2*npout_grad /(npout_1 *npout_1 ) check_symbolic_backward(test, 
[data_tmp], [np.ones(shape)*2], [npout_grad]) @with_seed() def test_scalar_pow(): data = mx.symbol.Variable('data') shape = (1, 1) data_tmp = np.ones(shape) test = data ** 2 check_numeric_gradient(test, [data_tmp]) check_symbolic_forward(test, [data_tmp], [data_tmp ** 2]) check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp]) @with_seed() def test_symbol_pow(): shape = (1, 1) data = mx.symbol.Variable('data') data_tmp = np.ones(shape)*2 exp = mx.symbol.Variable('exp') exp_tmp = np.ones(shape)*3 test = data**exp check_numeric_gradient(test, [data_tmp, exp_tmp]) check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp]) data_dir = data_tmp**(exp_tmp - 1) * exp_tmp exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp) check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir]) @with_seed() def test_fully_connected(): data = mx.sym.var("data") fc_weight = mx.sym.var("weight") fc_bias = mx.sym.var("bias") fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc') data = mx.nd.random.uniform(shape=(5, 5, 5, 13), dtype=np.float32) fc_weight = mx.nd.random.uniform(shape=(10, 325), dtype=np.float32) fc_bias = mx.nd.random.uniform(shape=(10), dtype=np.float32) fc_bias2 = mx.nd.random.uniform(shape=(10, 1), dtype=np.float32) data_np = data.asnumpy().reshape(5, 325) fc_weight_np = np.transpose(fc_weight.asnumpy()) fc_bias_np = fc_bias.asnumpy() res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy() check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res}) check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, numeric_eps=1e-2, rtol=1e-4, atol=1e-2) # TODO: Fix Bug #15032 when bias has ndim > 1 #check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res}) @with_seed() def test_pow_fn(): shape = (3, 4) exp = mx.symbol.Variable("exp") x = np.ones(shape)*3 for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]: check_numeric_gradient(y, [x], numeric_eps=1E-3) check_symbolic_forward(y, [x], [2**x]) check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x]) @with_seed() def test_relu(): def frelu(x): return np.maximum(x, 0.0) def frelu_grad(x): return 1.0 * (x > 0.0) shape = (3, 4) x = mx.symbol.Variable("x") y = mx.sym.relu(x) xa = np.random.uniform(low=-1.0,high=1.0,size=shape) eps = 1e-4 # Avoid finite difference method inaccuracies due to discontinuous gradient at the origin. # Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195. xa[abs(xa) < eps] = 1.0 ya = frelu(xa) ga = frelu_grad(xa) check_numeric_gradient(y, [xa], numeric_eps=eps) check_symbolic_forward(y, [xa], [ya]) check_symbolic_backward(y, [xa], [np.ones(shape)], [ga]) # NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues, # the analytical checks are still performed on each and every data type to verify the correctness. 
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_leaky_relu():
    def fleaky_relu(x, act_type, slope=0.25):
        neg_indices = x < 0
        out = x.copy()
        if act_type == 'elu':
            out[neg_indices] = slope * np.expm1(out[neg_indices])
        elif act_type == 'leaky':
            out[neg_indices] = slope * out[neg_indices]
        return out

    def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
        neg_indices = x < 0
        out = np.ones(x.shape)
        if act_type == 'elu':
            out[neg_indices] = y[neg_indices] + slope
        elif act_type == 'leaky':
            out[neg_indices] = slope
        return out * grad

    for ndim in range(1, 4):
        shape = rand_shape_nd(ndim)
        x = mx.symbol.Variable("x")
        slp = 0.25
        for dtype in [np.float16, np.float32, np.float64]:
            xa = np.random.uniform(low=-1.0, high=1.0, size=shape).astype(dtype)
            eps = 1e-4
            rtol = 1e-2
            atol = 1e-3
            xa[abs(xa) < eps] = 1.0
            for act_type in ['elu', 'leaky']:
                y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
                ya = fleaky_relu(xa, slope=slp, act_type=act_type)
                ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
                # Skip numeric check for float16 type to get rid of flaky behavior
                if dtype is not np.float16:
                    check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)


# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_prelu():
    def fprelu(x, gamma):
        pos_indices = x > 0
        out = x.copy()
        if len(x.shape) == 4:
            out = out.transpose(2, 3, 0, 1)
            out = np.multiply(out, gamma)
            out = out.transpose(2, 3, 0, 1)
        else:
            out = np.multiply(out, gamma)
        out[pos_indices] = x[pos_indices]
        return out

    def fprelu_grad(x, y, gamma):
        pos_indices = x > 0
        if len(x.shape) == 4:
            grad_x = np.multiply(np.ones(x.shape).transpose(2, 3, 0, 1), gamma)
            grad_x = grad_x.transpose(2, 3, 0, 1)
        else:
            grad_x = np.multiply(np.ones(x.shape), gamma)
        grad_gam = np.zeros(gamma.shape)
        copy_x = x.copy()
        copy_x[pos_indices] = 0.0
        grad_x[pos_indices] = 1.0
        if len(gamma.shape) > 1 and len(x.shape) != 4:
            grad_gam = copy_x
        elif len(gamma.shape) > 1 and len(x.shape) == 4:
            grad_gam = np.sum(copy_x, axis=(2, 3))
        elif gamma.shape[0] == 1:
            grad_gam = np.sum(np.sum(copy_x))
        elif gamma.shape[0] > 1 and len(x.shape) != 4:
            grad_gam = np.sum(copy_x, axis=0)
        elif gamma.shape[0] > 1 and len(x.shape) == 4:
            grad_gam = np.sum(copy_x, axis=(0, 2, 3))
        return (grad_x, grad_gam)

    x = mx.symbol.Variable("x")
    gamma = mx.symbol.Variable("gamma")
    for shape in [(3, 4), (3, 4, 4, 5)]:
        for dtype in [np.float16, np.float32, np.float64]:
            for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
                gam_full = np.array([gam, gam, gam])
                xa = np.random.uniform(low=-1.0, high=1.0, size=shape).astype(dtype)
                rtol = 1e-2
                atol = 1e-3
                eps = 1e-4
                xa[abs(xa) < eps] = 1.0
                y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
                ya = fprelu(xa, gam)
                ya_full = fprelu(xa, gam_full)
                g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
                g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
                # Skip numeric check for float16 type to get rid of flaky behavior
                if dtype is not np.float16:
                    check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
                    check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_backward(y, [xa, gam], [np.ones(shape), np.ones(gam.shape)],
                                        [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_backward(y, [xa, gam_full], [np.ones(shape), np.ones(gam_full.shape)],
                                        [g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
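
# Side note on fprelu above: for 4-d inputs it broadcasts the per-channel gamma
# via a transpose round-trip. A hypothetical numpy-only sketch (not part of the
# test suite) showing that this is equivalent to the more common
# reshape(1, C, 1, 1) broadcast:
def _prelu_gamma_broadcast_sketch():
    x = np.random.uniform(-1.0, 1.0, size=(2, 4, 3, 3))
    gamma = np.array([0.1, 0.2, 0.3, 0.4])
    # (N,C,H,W) -> (H,W,N,C), broadcast gamma along C, then transpose back
    via_transpose = np.multiply(x.transpose(2, 3, 0, 1), gamma).transpose(2, 3, 0, 1)
    via_reshape = x * gamma.reshape(1, -1, 1, 1)
    assert np.allclose(via_transpose, via_reshape)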
@with_seed()
def test_selu():
    alpha = 1.6732632423543772848170429916717
    lamb = 1.0507009873554804934193349852946

    def fselu(x):
        neg_indices = x < 0
        out = x.copy()
        out[neg_indices] = alpha * np.expm1(out[neg_indices])
        return out * lamb

    def fselu_grad(grad, x, y):
        neg_indices = x < 0
        out = np.ones(x.shape).astype(x.dtype)
        out[neg_indices] = y[neg_indices] + alpha
        return out * lamb

    shape = (3, 4)
    x = mx.sym.Variable("x")
    y = mx.sym.LeakyReLU(data=x, act_type="selu")
    for dtype in [np.float16, np.float32, np.float64]:
        xa = np.random.uniform(low=-0.1, high=0.1, size=shape).astype(dtype)
        eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
        if dtype is np.float16:
            xa /= 10.0
        xa[abs(xa) < eps] = 0.01
        ya = fselu(xa)
        ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
        check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)


@with_seed()
def test_gelu():
    CUBE_CONSTANT = 0.044715
    ROOT_TWO_OVER_PI = 0.7978845608028654

    def g(x):
        return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))

    def g_grad(x):
        return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))

    def f(x):
        return 1.0 + np.tanh(g(x))

    def f_grad(x):
        return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)

    def fgelu(x):
        return 0.5 * x * f(x)

    def fgelu_grad(grad, x, y):
        return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))

    shape = (3, 4)
    x = mx.sym.Variable("x")
    y = mx.sym.LeakyReLU(data=x, act_type="gelu")
    for dtype in [np.float16, np.float32, np.float64]:
        xa = np.random.uniform(low=-0.1, high=0.1, size=shape).astype(dtype)
        eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
        if dtype is np.float16:
            xa /= 10.0
        xa[abs(xa) < eps] = 0.01
        ya = fgelu(xa)
        ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
        check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
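
# Side note on test_gelu above: fgelu is the tanh approximation of GELU; the
# exact definition is 0.5 * x * (1 + erf(x / sqrt(2))). A hypothetical sketch
# (not part of the test suite) comparing the two; the 1e-3 tolerance is an
# assumption based on the approximation's typical accuracy on this range:
def _gelu_tanh_vs_erf_sketch():
    import math
    xs = np.linspace(-3.0, 3.0, 13)
    tanh_form = 0.5 * xs * (1.0 + np.tanh(0.7978845608028654 * (xs + 0.044715 * xs**3)))
    erf_form = np.array([0.5 * v * (1.0 + math.erf(v / math.sqrt(2.0))) for v in xs])
    assert np.allclose(tanh_form, erf_form, atol=1e-3)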
@with_seed()
def test_sigmoid():
    def fsigmoid(a):
        return np.divide(1.0, (1.0 + np.exp(-a)))
    shape = (3, 4)
    x = mx.symbol.Variable("x")
    y = mx.sym.sigmoid(x)
    xa = np.random.uniform(low=-1.0, high=1.0, size=shape)
    ya = fsigmoid(xa)
    check_numeric_gradient(y, [xa], numeric_eps=1E-3)
    check_symbolic_forward(y, [xa], [ya])
    check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])


@with_seed()
def test_shape_array():
    for i in range(1, 6):
        shape = rand_shape_nd(i)
        x = mx.sym.var('x')
        y = mx.sym.shape_array(x)
        xa = mx.nd.array(np.random.ranf(shape))
        xg = mx.nd.empty(xa.shape)
        ya = np.shape(xa)
        yg = mx.nd.ones(ya)
        exe = y.bind(ctx=default_context(), args={'x': xa}, args_grad={'x': xg})
        exe.forward(is_train=True)
        exe.backward([yg])
        yo = exe.outputs[0].asnumpy()
        assert same(yo, ya)
        assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))


@with_seed()
def test_size_array():
    for i in range(1, 6):
        shape = rand_shape_nd(i)
        x = mx.sym.var('x')
        y = mx.sym.size_array(x)
        xa = mx.nd.array(np.random.ranf(shape))
        xg = mx.nd.empty(xa.shape)
        ya = np.size(xa)
        yg = mx.nd.ones(ya)
        exe = y.bind(ctx=default_context(), args={'x': xa}, args_grad={'x': xg})
        exe.forward(is_train=True)
        exe.backward([yg])
        yo = exe.outputs[0].asnumpy()
        assert same(yo, ya)
        assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))


@with_seed()
def test_hard_sigmoid():
    def fhardsigmoid(a, alpha=0.2, beta=0.5):
        return np.maximum(np.zeros(a.shape, dtype=a.dtype),
                          np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))

    def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
        orig_out = fhardsigmoid(a, alpha, beta)
        res = out_grad * alpha
        res[orig_out <= 0.0] = 0.0
        res[orig_out >= 1.0] = 0.0
        return res

    shape = (3, 4)
    x = mx.symbol.Variable("x")
    y = mx.sym.hard_sigmoid(x)
    for dtype in [np.float16, np.float32, np.float64]:
        if dtype is np.float16:
            rtol = 1e-2
        else:
            rtol = 1e-3
        atol = 1e-3
        eps = 1e-3
        xa = np.random.uniform(low=-3.0, high=3.0, size=shape).astype(dtype)
        # function not differentiable at x=2.5 and -2.5
        xa[abs(xa-2.5) < eps] -= 2 * eps
        xa[abs(xa+2.5) < eps] += 2 * eps
        ya = fhardsigmoid(xa)
        grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
        if dtype is not np.float16:
            check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)


@with_seed()
def test_softsign():
    def fsoftsign(a):
        return np.divide(a, (1.0 + np.abs(a)))

    def fsoftsign_grad(a):
        return np.divide(1.0, np.square((1.0 + np.abs(a))))

    shape = (3, 4)
    x = mx.symbol.Variable("x")
    y = mx.sym.softsign(x)
    xa = np.random.uniform(low=-1.0, high=1.0, size=shape)
    ya = fsoftsign(xa)
    ya_grad = fsoftsign_grad(xa)
    check_numeric_gradient(y, [xa], numeric_eps=1E-3)
    check_symbolic_forward(y, [xa], [ya])
    check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
@with_seed()
def test_binary_logic():
    def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
        x = mx.symbol.Variable("x")
        y = mx.symbol.Variable("y")
        z = logic_sym(x, y)
        x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
        y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
        exe = z.simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
        mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0]
        assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
        exe.backward()
        if test_scalar:
            z_lscalar = logic_sym(1, y)
            z_rscalar = logic_sym(x, 1)
            exe_lscalar = z_lscalar.simple_bind(ctx=default_context(), y=y_shape)
            exe_rscalar = z_rscalar.simple_bind(ctx=default_context(), x=x_shape)
            mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0]
            mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0]
            assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
            assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
            exe_lscalar.backward()
            exe_rscalar.backward()

    # Test the no-broadcasting binary logic ops + scalar logic ops
    _inner_test(forward_gt=lambda x, y: x == y,
                logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
    _inner_test(forward_gt=lambda x, y: x > y,
                logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
    _inner_test(forward_gt=lambda x, y: x >= y,
                logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
    _inner_test(forward_gt=lambda x, y: x < y,
                logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
    _inner_test(forward_gt=lambda x, y: x <= y,
                logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
    _inner_test(forward_gt=lambda x, y: x != y,
                logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
    # Test the broadcasting binary logic ops
    _inner_test(forward_gt=lambda x, y: x == y,
                logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
                x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
    _inner_test(forward_gt=lambda x, y: x > y,
                logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
                x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
    _inner_test(forward_gt=lambda x, y: x >= y,
                logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
                x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
    _inner_test(forward_gt=lambda x, y: x < y,
                logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
                x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
    _inner_test(forward_gt=lambda x, y: x <= y,
                logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
                x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
    _inner_test(forward_gt=lambda x, y: x != y,
                logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
                x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)


@with_seed()
def test_unary_logic():
    def reference(a, dtype):
        return np.logical_not(a).astype(dtype)
    shape = (3, 4)
    xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
    mx_xa = mx.nd.array(xa)
    mx_out = mx.nd.logical_not(mx_xa)
    assert_almost_equal(mx_out, reference(xa, dtype=xa.dtype))
    x = mx.sym.Variable('x')
    y = mx.sym.logical_not(data=x)
    exe = y.simple_bind(ctx=default_context(), x=shape)
    sym_out = exe.forward(is_train=True, x=mx_xa)[0]
    assert_almost_equal(sym_out, reference(xa, dtype=xa.dtype))


@with_seed()
def test_embedding():
    in_dim = 10
    out_dim = 4
    batch = 24

    data = mx.sym.Variable("data")
    embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
    exe_test = embed.simple_bind(default_context(),
                                 grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
    arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
    grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
    np_data = np.random.randint(low=0, high=in_dim, size=batch)
    np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
    np_onehot = np.zeros((batch, in_dim))
    np_onehot[np.arange(batch), np_data] = 1.0
    # forward
    arg_map["data"][:] = np_data
    arg_map["embed_weight"][:] = np_weight
    exe_test.forward(is_train=True)
    # Non-zero atol required, as exposed by seed 781663739
    rtol = 1e-5
    atol = 1e-5
    assert_almost_equal(exe_test.outputs[0], np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
    # backward
    np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
    grad = mx.nd.zeros(np_grad.shape)
    grad[:] = np_grad
    exe_test.backward([grad])
    assert_almost_equal(grad_map["embed_weight"], np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
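
# Side note on test_embedding above: the one-hot matmul reference is equivalent
# to a plain row gather from the weight matrix. A hypothetical numpy-only sketch
# (not part of the test suite):
def _embedding_gather_sketch():
    in_dim, out_dim, batch = 10, 4, 24
    idx = np.random.randint(0, in_dim, size=batch)
    weight = np.random.uniform(-0.01, 0.01, (in_dim, out_dim))
    onehot = np.zeros((batch, in_dim))
    onehot[np.arange(batch), idx] = 1.0
    # one-hot matmul and row indexing select the same embedding rows
    assert np.allclose(np.dot(onehot, weight), weight[idx])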
# check ops handle duplicate input correctly.
@with_seed()
def test_binary_op_duplicate_input():
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:] = 5
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:] = 3
    out_grad = mx.nd.empty(shape)
    out_grad[:] = 1
    square = data * data
    exe_square = square.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
    exe_square.forward(is_train=True)
    assert_almost_equal(exe_square.outputs[0], data_tmp * data_tmp)
    exe_square.backward(out_grad)
    assert_almost_equal(arr_grad, 2.0 * data_tmp)


@with_seed()
def test_sign():
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:] = 5
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:] = 3

    test = mx.sym.sign(data)
    exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0]
    npout = np.sign(data_tmp)
    assert_almost_equal(out, npout)

    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    # The gradient of sign() is zero everywhere it is defined.
    npout_grad = np.zeros(shape)
    exe_test.backward(out_grad)
    assert_almost_equal(arr_grad, npout_grad)


@with_seed()
def test_round_ceil_floor():
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:] = 5.543
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:] = 2

    test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
    exe_test = test.bind(default_context(), args=[arr_data])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0]
    npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
    assert_almost_equal(out, npout)


@with_seed()
def test_trunc():
    data_tmp = np.random.rand(3, 4) * 10 - 5
    arr_data = mx.nd.array(data_tmp)
    data = mx.symbol.Variable('data')
    test = mx.sym.trunc(data)

    exe_test = test.bind(default_context(), args=[arr_data])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0]
    # 'trunc' is sensitive to the precision of the calculation.  Force numpy to match mxnet's float32.
    # Repro issue with seed 1660190454.
    npout = np.trunc(np.float32(data_tmp))
    assert_almost_equal(out, npout)
@with_seed()
def test_rsqrt_cos_sin():
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:] = 5
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:] = 3

    test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
    exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0]
    npout = 1 / np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
    assert_almost_equal(out, npout)

    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    npout_grad = out_grad.asnumpy()
    npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) \
                 + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
    exe_test.backward(out_grad)
    assert_almost_equal(arr_grad, npout_grad)


@with_seed()
def test_maximum_minimum():
    data1 = mx.symbol.Variable('data1')
    data2 = mx.symbol.Variable('data2')
    shape = (3, 4)
    data_tmp1 = np.random.rand(3, 4)
    data_tmp2 = np.random.rand(3, 4)
    data_tmp1[:] = 2
    data_tmp2[:] = 3

    arr_data1 = mx.nd.array(data_tmp1)
    arr_data2 = mx.nd.array(data_tmp2)

    arr_grad1 = mx.nd.empty(shape)
    arr_grad2 = mx.nd.empty(shape)

    test = mx.sym.maximum(data1, data2) + mx.sym.minimum(data1, data2)
    exe_test = test.bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0]
    npout = np.maximum(data_tmp1, data_tmp2) + np.minimum(data_tmp1, data_tmp2)
    assert_almost_equal(out, npout)

    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    exe_test.backward(out_grad)

    npout_grad = np.ones(shape)
    npout_grad[:] = 2
    mask1 = (data_tmp1 > data_tmp2).astype('float')
    mask2 = (data_tmp1 < data_tmp2).astype('float')
    npout_grad1 = npout_grad * mask1 + npout_grad * mask2
    npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)

    assert_almost_equal(arr_grad1, npout_grad1)
    assert_almost_equal(arr_grad2, npout_grad2)


@with_seed()
def test_maximum_minimum_scalar():
    data1 = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp1 = np.random.rand(3, 4)
    data_tmp1[:] = 2

    arr_data1 = mx.nd.array(data_tmp1)
    arr_grad1 = mx.nd.empty(shape)

    test = mx.sym.maximum(data1, 3) + mx.sym.maximum(9, data1) \
           + mx.sym.minimum(5, data1) + mx.sym.minimum(data1, 4)
    exe_test = test.bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0]
    npout = np.maximum(data_tmp1, 3) + np.maximum(9, data_tmp1) \
            + np.minimum(5, data_tmp1) + np.minimum(data_tmp1, 4)
    assert_almost_equal(out, npout)

    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    exe_test.backward(out_grad)

    npout_grad = np.ones(shape)
    npout_grad[:] = 2
    mask1 = (data_tmp1 > 3).astype('float')
    mask2 = (9 > data_tmp1).astype('float')
    mask3 = (5 < data_tmp1).astype('float')
    mask4 = (data_tmp1 < 4).astype('float')
    npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) \
                  + (npout_grad - npout_grad * mask3) + npout_grad * mask4

    assert_almost_equal(arr_grad1, npout_grad1)
@with_seed()
def test_abs():
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:] = 5
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:] = 3

    test = mx.sym.abs(data)
    exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0]
    npout = abs(data_tmp)
    assert_almost_equal(out, npout)

    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    npout_grad = out_grad.asnumpy()
    npout_grad = npout_grad * np.sign(data_tmp)
    exe_test.backward(out_grad)
    assert_almost_equal(arr_grad, npout_grad)


def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
    """Configure A: input --> conv --> deconv --> output.
       The convolution and deconvolution parameters are chosen so that the output
       shape matches the input shape, and conv and deconv share the same weights;
       if forward() and backward() are fed the same values, their outputs should
       be the same as well.
    """
    assert input_shape[1] == num_filter
    data = mx.sym.Variable(name="data")
    conv = mx.sym.Convolution(
        data=data, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias="true", name="conv")
    deconv = mx.sym.Deconvolution(
        data=conv, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias="true", name="deconv")

    arg_names = deconv.list_arguments()
    arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
    input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
    out_grad = input_data
    args = {}
    args["data"] = input_data
    args['conv_weight'] = args['deconv_weight'] = mx.random.normal(
        0, 1, (num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
    args_grad = [mx.nd.empty(s) for s in arg_shapes]

    exe = deconv.bind(default_context(), args=args, args_grad=args_grad)
    exe.forward(is_train=True)
    out = exe.outputs[0]
    exe.backward(out_grad)
    assert_almost_equal(out, args_grad[0], rtol=1E-3, atol=1e-3)

    args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
    args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
    exe = deconv.bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
    exe.forward(is_train=True)
    out = exe.outputs[0].asnumpy()
    exe.backward(out_grad)
    assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
    """Configure A: input --> conv --> output.
       Configure B: input --> deconv --> output.
       The convolution and deconvolution parameters are chosen so that the input
       shape matches the output shape.  During backward(), if the input of A
       equals the output of B and the output of A equals the input of B, then
       the weight gradients should be the same.
    """
    ndim = len(pad)
    stride = (1,) * ndim
    kernel = tuple(2 * np.array(pad) + 1)
    data_conv = mx.sym.Variable(name="data_conv")
    conv = mx.sym.Convolution(
        data=data_conv, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias="true", name="conv")
    data_deconv = mx.sym.Variable(name="data_deconv")
    deconv = mx.sym.Deconvolution(
        data=data_deconv, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias="true", name="deconv")

    conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
    conv_args = {}
    conv_args["data_conv"] = conv_data
    conv_args['conv_weight'] = \
        mx.random.normal(0, 1, (num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
    conv_args_grad = [mx.nd.zeros(conv_data.shape),
                      mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
    exe_conv = conv.bind(default_context(), args=conv_args, args_grad=conv_args_grad)
    exe_conv.forward(is_train=True)
    conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
    exe_conv.backward(conv_out_grad)

    deconv_data = conv_out_grad
    deconv_args = {}
    deconv_args['data_deconv'] = deconv_data
    deconv_args['deconv_weight'] = conv_args['conv_weight']
    deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
                        mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
    deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
                                  np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
    deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
                              mx.nd.array(deconv_addto_args_grad_npy[1])]
    exe_deconv = deconv.bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
    exe_deconv.forward(is_train=True)
    deconv_out_grad = conv_data[:]
    exe_deconv.backward(deconv_out_grad)
    assert_almost_equal(conv_args_grad[1], deconv_args_grad[1], rtol=1e-3, atol=1e-2)
    # Test AddTo
    exe_deconv_addto = deconv.bind(default_context(), args=deconv_args,
                                   args_grad=deconv_addto_args_grad, grad_req="add")
    exe_deconv_addto.forward(is_train=True)
    deconv_out_grad = conv_data[:]
    exe_deconv_addto.backward(deconv_out_grad)
    assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
                        deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)


def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
    data = mx.sym.Variable(name="data")
    if target_shape:
        deconv = mx.sym.Deconvolution(
            data=data, kernel=kernel, stride=stride, pad=pad, adj=adj,
            num_filter=5, target_shape=target_shape)
    else:
        deconv = mx.sym.Deconvolution(
            data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
    arg_names = deconv.list_arguments()
    arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
    default_target_size = 8
    if target_shape is None:
        target_shape = (default_target_size,) * len(kernel)
    assert out_shapes[0] == (input_shape[0], 5) + target_shape
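
# Side note on check_deconvolution_target_shape above: when no target_shape is
# given, Deconvolution's spatial output follows the usual transposed-convolution
# rule out = (in - 1) * stride - 2 * pad + kernel + adj. A hypothetical sketch
# (not part of the test suite) checking it against default_target_size:
def _deconv_out_shape_sketch():
    in_size, kernel, stride, pad, adj = 4, 3, 2, 1, 1
    out = (in_size - 1) * stride - 2 * pad + kernel + adj
    assert out == 8  # matches default_target_size in check_deconvolution_target_shape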
@with_seed()
def test_deconvolution():
    # 2D
    check_deconvolution_target_shape(input_shape=(2, 3, 4, 4), kernel=(3, 3), stride=(2, 2),
                                     target_shape=(8, 8),
                                     pad=(99, 99),    # will be ignored
                                     adj=(101, 101))  # will be ignored
    check_deconvolution_target_shape(input_shape=(2, 3, 4, 4), kernel=(3, 3), stride=(2, 2),
                                     pad=(1, 1), adj=(1, 1))
    check_deconvolution_forward_backward(input_shape=(1, 1, 5, 5), num_filter=1,
                                         kernel=(3, 3), stride=(1, 1), pad=(1, 1))
    check_deconvolution_forward_backward(input_shape=(32, 3, 28, 28), num_filter=3,
                                         kernel=(3, 3), stride=(1, 1), pad=(1, 1))
    check_deconvolution_forward_backward(input_shape=(10, 3, 403, 403), num_filter=3,
                                         kernel=(7, 7), stride=(5, 5), pad=(2, 2))
    check_deconvolution_gradient(input_shape=(1, 3, 5, 5), num_filter=3, pad=(1, 1))
    check_deconvolution_gradient(input_shape=(5, 3, 100, 100), num_filter=3, pad=(3, 3))
    # 1D
    check_deconvolution_target_shape(input_shape=(2, 3, 4), kernel=(3,), stride=(2,),
                                     target_shape=(8,),
                                     pad=(99,),    # will be ignored
                                     adj=(101,))   # will be ignored
    check_deconvolution_target_shape(input_shape=(2, 3, 4), kernel=(3,), stride=(2,),
                                     pad=(1,), adj=(1,))
    check_deconvolution_forward_backward(input_shape=(1, 1, 5), num_filter=1,
                                         kernel=(3,), stride=(1,), pad=(1,))
    check_deconvolution_forward_backward(input_shape=(32, 3, 28), num_filter=3,
                                         kernel=(3,), stride=(1,), pad=(1,))
    check_deconvolution_forward_backward(input_shape=(10, 3, 403), num_filter=3,
                                         kernel=(7,), stride=(5,), pad=(2,))
    check_deconvolution_gradient(input_shape=(1, 3, 5), num_filter=3, pad=(1,))
    check_deconvolution_gradient(input_shape=(5, 3, 100), num_filter=3, pad=(3,))


@with_seed()
def test_deconvolution_forward_with_bias():
    """Check if deconvolution forward can work well with bias=True"""
    def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32,
                                              num_group=1, kernel=(3, 3), pad=(1, 1)):
        x = mx.sym.Variable('x')
        w = mx.sym.Variable('w')
        input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu())
        y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter,
                                 num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
        exe = y.simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')

        exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape)
        exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)

        exe.forward(is_train=False)
        o = exe.outputs[0]
        t = o.asnumpy()
    check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,))
    check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,))
    check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
    check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))


def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
    arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context())
           for i, shape in zip(range(len(shapes)), shapes)}
    arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}

    up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))],
                           sample_type='nearest', scale=root_scale)
    exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
    exe.forward(is_train=True)
    exe.backward(exe.outputs)
    for k in range(len(shapes)):
        name = 'arg_%d'%k
        assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k),
                        arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
    def _init_bilinear(arr, f):
        weight = np.zeros(np.prod(arr.shape), dtype='float32')
        shape = arr.shape
        c = (2 * f - 1 - f % 2) / (2. * f)
        for i in range(np.prod(shape)):
            x = i % shape[3]
            y = (i // shape[3]) % shape[2]
            weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
        arr[:] = weight.reshape(shape)
        return arr

    up = mx.sym.UpSampling(mx.sym.Variable("data"), mx.sym.Variable('weight'),
                           sample_type='bilinear', scale=root_scale,
                           num_filter=num_filter, num_args=2)
    arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
    arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
           'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}

    arr_grad = [mx.nd.empty(s) for s in arg_shapes]
    exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
    exe.forward(is_train=True)
    out = exe.outputs[0].asnumpy()
    exe.backward(exe.outputs)
    target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
    assert out.shape == data_shape[:2] + target_shape


@with_seed()
def test_nearest_upsampling():
    for root_scale in [1, 2, 3]:
        for scale in [1, 2, 3]:
            for num_shape in [1, 2, 3]:
                for base in [1, 2, 3]:
                    shapes = [(1, 3, base*root_scale*scale**(num_shape-1-i),
                               base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
                    check_nearest_upsampling_with_shape(shapes, scale, root_scale)


@with_seed()
def test_bilinear_upsampling():
    rootscale = [2, 3]
    scales = [1, 2, 3]
    filters = [1, 2, 3]
    bases = [1, 2, 3]
    for params in itertools.product(rootscale, scales, filters, bases):
        root_scale, scale, num_filter, base = params
        # bilinear upsampling takes only 1 data and 1 weight
        # multi input mode is not applicable
        dimension = base*root_scale*scale
        kernel = 2 * root_scale - root_scale % 2
        data_shape = (1, num_filter, dimension, dimension)
        weight_shape = (1, num_filter, kernel, kernel)
        check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
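
# Side note on _init_bilinear above: for an upsampling factor f it builds a
# separable triangular kernel of size 2*f - f%2. A hypothetical numpy-only
# sketch (not part of the test suite) of the 1-d weight profile for f=2:
def _bilinear_weights_1d_sketch(f=2):
    size = 2 * f - f % 2                 # 4 when f == 2
    c = (2 * f - 1 - f % 2) / (2. * f)   # 0.75 when f == 2
    w = np.array([1 - abs(x / f - c) for x in range(size)])
    assert np.allclose(w, [0.25, 0.75, 0.75, 0.25])
    return w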
@with_seed()
def test_batchnorm_training():
    def check_batchnorm_training(stype):
        for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]:
            data_tmp = np.random.normal(-0.1, 0.1, size=shape)
            s = (shape[1],)  # per-channel parameter shape
            gamma = np.ones(s)
            beta = np.ones(s)
            gamma[1] = 3
            beta[0] = 3

            rolling_mean = np.random.uniform(size=s)
            rolling_std = np.random.uniform(size=s)

            data = mx.symbol.Variable('data', stype=stype)
            in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
                           mx.nd.array(beta).tostype(stype)]
            mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]

            test = mx.symbol.BatchNorm_v1(data, fix_gamma=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)

            test = mx.symbol.BatchNorm(data, fix_gamma=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)

            test = mx.symbol.BatchNorm_v1(data, fix_gamma=True, use_global_stats=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)

            test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)

            test = mx.symbol.BatchNorm_v1(data, fix_gamma=False)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)

            test = mx.symbol.BatchNorm(data, fix_gamma=False)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)

            test = mx.symbol.BatchNorm_v1(data, fix_gamma=False, use_global_stats=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)

            test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)

            # Test varying channel axis
            dim = len(shape)
            for chaxis in range(-dim, dim):
                chaxis_true = chaxis
                if chaxis < 0:
                    chaxis_true = dim + chaxis

                shapex = shape

                channel_count = shapex[chaxis_true]
                data_tmp = np.random.normal(-0.1, 0.1, size=shapex)

                gamma = np.ones(channel_count)
                beta = np.ones(channel_count)
                if channel_count > 1:
                    gamma[1] = 3
                beta[0] = 3

                in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
                               mx.nd.array(beta).tostype(stype)]

                xrolling_mean = np.random.uniform(size=channel_count)
                xrolling_std = np.random.uniform(size=channel_count)
                xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
                             mx.nd.array(xrolling_std).tostype(stype)]

                test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
                check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)

                test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
                check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)

                test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
                check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)

                test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
                check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)

    check_batchnorm_training('default')
@with_seed()
def test_batchnorm():
    momentum = 0.9
    epsilon = 1e-5

    def _test_batchnorm_impl(op, shape, axis, cudnn_off, output_mean_var):
        print(str((op, shape, axis, cudnn_off)))
        kwargs = dict(output_mean_var=output_mean_var)
        if op == mx.nd.contrib.SyncBatchNorm:
            if axis != 1:
                return
            key = str(op) + str(shape) + str(axis)
            kwargs.update(dict(key=key))
            if cudnn_off:
                return
        else:
            kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
        nch = shape[axis]

        bn_gamma = mx.nd.random.uniform(shape=(nch,))
        bn_gamma.attach_grad()

        bn_beta = mx.nd.random.uniform(shape=(nch,))
        bn_beta.attach_grad()

        bn_running_mean = mx.nd.zeros(nch)
        bn_running_var = mx.nd.ones(nch)

        running_mean = mx.nd.zeros(nch)
        running_var = mx.nd.ones(nch)
        num_iters = 10
        expand_shape = [1] * len(shape)
        expand_shape[axis] = shape[axis]
        for _ in range(num_iters):
            data = mx.nd.random.uniform(shape=shape)
            data.attach_grad()
            ograd = mx.nd.random.uniform(shape=shape)
            with mx.autograd.record():
                output = op(data, bn_gamma, bn_beta,
                            bn_running_mean, bn_running_var,
                            momentum=momentum, eps=epsilon,
                            fix_gamma=False, **kwargs)
                if output_mean_var:
                    output, output_mean, output_std = output
                output.backward(ograd)
            mx.nd.waitall()

            data_mean = data.mean(axis=axis, exclude=True, keepdims=True)
            data_var = (data - data_mean).square().mean(axis=axis, exclude=True, keepdims=True)

            target_output = (data - data_mean) / \
                (data_var + epsilon).sqrt() * \
                bn_gamma.reshape(expand_shape) + \
                bn_beta.reshape(expand_shape)

            # squeeze data_mean and data_var
            data_mean_flat = data_mean.squeeze()
            data_var_flat = data_var.squeeze()

            running_mean = running_mean * momentum + \
                data_mean_flat * (1 - momentum)
            running_var = running_var * momentum + \
                data_var_flat * (1 - momentum)

            W = bn_gamma.reshape(expand_shape)
            dnx = ograd * W
            xsm = data - data_mean
            nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
            nx = xsm * nd
            m = np.prod(shape) / shape[axis]
            dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
                                   exclude=True) * (-0.5) * mx.nd.power(nd, 3)
            dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
                dvar * xsm.mean(axis=axis, keepdims=True, exclude=True) * 2.0
            dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
            dW = (ograd * nx).sum(axis=axis, exclude=True)
            db = ograd.sum(axis=axis, exclude=True)

            atol = 1e-2
            rtol = 1e-2
            if output_mean_var:
                assert_almost_equal(output_mean.asnumpy(),
                                    data_mean_flat.asnumpy(),
                                    atol=atol, rtol=rtol)
                if op != mx.nd.contrib.SyncBatchNorm:
                    assert_almost_equal(output_std.asnumpy(),
                                        (1.0 / (data_var_flat + epsilon).sqrt()).asnumpy(),
                                        atol=atol, rtol=rtol)
                else:
                    assert_almost_equal(output_std.asnumpy(),
                                        data_var_flat.asnumpy(), atol=atol, rtol=rtol)
            assert_almost_equal(output.asnumpy(), target_output.asnumpy(), atol=atol, rtol=rtol)
            assert_almost_equal(bn_running_mean.asnumpy(), running_mean.asnumpy(), atol=atol, rtol=rtol)
            assert_almost_equal(bn_running_var.asnumpy(), running_var.asnumpy(), atol=atol, rtol=rtol)
            assert_almost_equal(data.grad.asnumpy(), dX.asnumpy(), atol=atol, rtol=rtol)
            assert_almost_equal(bn_gamma.grad.asnumpy(), dW.asnumpy(), atol=atol, rtol=rtol)
            assert_almost_equal(bn_beta.grad.asnumpy(), db.asnumpy(), atol=atol, rtol=rtol)

    for op in [mx.nd.BatchNorm, mx.nd.contrib.SyncBatchNorm]:
        for shape in [(24, 2), (24, 3, 4), (24, 4, 4, 4), (24, 8, 4, 4), (24, 5, 6, 4, 4)]:
            for axis in range(len(shape)):
                for cudnn_off in [False, True]:
                    for output_mean_var in [False, True]:
                        _test_batchnorm_impl(op, shape, axis, cudnn_off, output_mean_var)
@with_seed()
def test_groupnorm():
    acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}

    def x_hat_helper(x, num_groups, eps):
        dtype = x.dtype
        dshape = x.shape
        assert len(dshape) == 4
        acc_type = acc_types[str(dtype)]
        new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
        new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
        data = x.reshape(new_shape)
        mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
        std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
        x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
        return x_hat, mean, std

    def np_groupnorm(data, gamma, beta, num_groups, eps):
        new_param_shape = (1, num_groups, 1, 1, 1)
        x_hat, mean, std = x_hat_helper(data, num_groups, eps)
        out = x_hat * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
        return out.reshape(dshape), mean, std

    def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
        x_hat, mean, std = x_hat_helper(data, num_groups, eps)
        new_shape = x_hat.shape
        dshape = data.shape
        dtype = data.dtype
        new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
        new_param_shape = (1, num_groups, 1, 1, 1)
        acc_type = acc_types[str(dtype)]
        ograd = ograd.reshape(new_shape)
        data = data.reshape(new_shape)
        gamma = gamma.reshape(new_param_shape)
        beta = beta.reshape(new_param_shape)
        mean = mean.reshape(new_moments_shape)
        std = std.reshape(new_moments_shape)
        beta_grad = np.sum(ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
        gamma_grad = np.sum(x_hat * ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
        x_hat_grad = ograd * gamma
        ograd_mult = x_hat_grad / std
        red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
        data_grad = ograd_mult - red_out
        red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
        data_grad = data_grad - x_hat * red_out
        return data_grad.reshape(dshape), gamma_grad, beta_grad

    batch_size = random.randint(1, 8)
    num_groups = random.randint(2, 3)
    num_channels = random.randint(2, 3) * num_groups
    height = random.randint(1, 5)
    width = random.randint(1, 5)
    dshape = (batch_size, num_channels, height, width)
    param_shape = (num_groups,)
    temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
    np_data = np.random.uniform(0.2, 1.0, dshape)
    np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
    np_beta = np.random.uniform(-1.0, 1.0, param_shape)
    data_sym = mx.sym.Variable("data")
    gamma_sym = mx.sym.Variable("gamma")
    beta_sym = mx.sym.Variable("beta")
    for dtype in [np.float16, np.float32, np.float64]:
        eps = 1e-2 if dtype == np.float16 else 1e-5
        mx_data = mx.nd.array(np_data, dtype=dtype)
        mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
        mx_beta = mx.nd.array(np_beta, dtype=dtype)
        np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
                                               np_gamma.astype(dtype),
                                               np_beta.astype(dtype),
                                               num_groups=num_groups,
                                               eps=eps)
        mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
                                  num_groups=num_groups, eps=eps, output_mean_var=True)
        check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
                               rtol=1e-2 if dtype == np.float16 else 1e-3,
                               atol=5e-3 if dtype == np.float16 else 1e-4, dtype=dtype)
        mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
                                  num_groups=num_groups, eps=eps, output_mean_var=False)
        np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
        np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
                                                                      np_data.astype(dtype),
                                                                      np_gamma.astype(dtype),
                                                                      np_beta.astype(dtype),
                                                                      np_mean, np_std,
                                                                      num_groups, eps)
        check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd)],
                                [np_data_grad, np_gamma_grad, np_beta_grad],
                                rtol=1e-2 if dtype == np.float16 else 1e-3,
                                atol=5e-2 if dtype == np.float16 else 1e-4, dtype=dtype)
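
# Side note on test_groupnorm above: the moments are taken over (C/G, H, W)
# within each group, so num_groups == 1 reduces to LayerNorm over (C, H, W) and
# num_groups == C to InstanceNorm. A hypothetical numpy-only shape sketch
# (not part of the test suite):
def _groupnorm_mean_sketch():
    n, c, h, w, g = 2, 6, 3, 3, 2
    x = np.random.uniform(size=(n, c, h, w))
    grouped = x.reshape(n, g, c // g, h, w)
    mean = grouped.mean(axis=(2, 3, 4))  # one mean per (sample, group)
    assert mean.shape == (n, g)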
@with_seed()
def test_convolution_grouping():
    for dim in [1, 2, 3]:
        num_filter = 4
        for num_group in [1, 2]:
            kernel = (3,) * dim
            shape = (1, 4) + (9,) * dim

            x = mx.sym.Variable('x')
            w = mx.sym.Variable('w')
            b = mx.sym.Variable('b')
            y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter,
                                    num_group=num_group, kernel=kernel)
            xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
            wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
            bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
            y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
                                                    num_filter=num_filter//num_group, kernel=kernel)
                                 for i in range(num_group)])

            exe1 = y1.simple_bind(default_context(), x=shape)
            exe2 = y2.simple_bind(default_context(), x=shape,
                                  w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
            for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
                arr1[:] = np.float32(np.random.normal(size=arr1.shape))
                arr2[:] = arr1
            exe1.forward(is_train=True)
            exe1.backward(exe1.outputs[0])
            exe2.forward(is_train=True)
            exe2.backward(exe2.outputs[0])

            for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
                np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)


@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
@with_seed()
def test_depthwise_convolution():
    for dim in [1, 2]:
        for num_base in [1, 4, 16, 32, 64]:
            for kernel_x in [3, 5]:
                for stride_x in [1, 2]:
                    for pad_x in [0, 1]:
                        for in_size in [7, 32]:
                            kernel = (kernel_x,) * dim
                            stride = (stride_x,) * dim
                            pad = (pad_x,) * dim
                            num_filter = num_base
                            num_group = num_base
                            shape = (2, num_base) + (in_size,) * dim

                            x = mx.sym.Variable('x')
                            w = mx.sym.Variable('w')
                            b = mx.sym.Variable('b')
                            y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter,
                                                    num_group=num_group, kernel=kernel,
                                                    stride=stride, pad=pad)
                            xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
                            wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
                            bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
                            y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i],
                                                                    bias=bslice[i],
                                                                    num_filter=num_filter//num_group,
                                                                    kernel=kernel, stride=stride, pad=pad)
                                                 for i in range(num_group)])
                            dev = default_context()
                            exe1 = y1.simple_bind(dev, x=shape)
                            exe2 = y2.simple_bind(dev, x=shape,
                                                  w=(num_filter, shape[1]//num_group)+kernel,
                                                  b=(num_filter,))
                            for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
                                arr1[:] = np.random.normal(size=arr1.shape)
                                arr2[:] = arr1
                            exe1.forward(is_train=True)
                            exe1.backward(exe1.outputs[0])
                            exe2.forward(is_train=True)
                            exe2.backward(exe2.outputs[0])

                            for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays,
                                                  exe2.outputs + exe2.grad_arrays):
                                assert_allclose(arr1, arr2, rtol=1e-3, atol=1e-3)
"b" and no_bias: continue if grad_req2[var_name] == "null": exe2_var_grad = grad2[var_name].asnumpy() np.testing.assert_allclose(exe2_var_grad, np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol) if grad_req2[var_name] != grad_req1[var_name]: continue np.testing.assert_allclose(args1[var_name].asnumpy(), args2[var_name].asnumpy(), rtol=rtol, atol=atol) np.testing.assert_allclose(grad1[var_name].asnumpy(), grad2[var_name].asnumpy(), rtol=rtol, atol=atol) def gen_broadcast_data(idx): # Manually set test cases binary_op_data_shape = np.array( [[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]], [[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]], [[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]], [[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]], [[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]], [[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]], [[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]], [[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]], [[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]], [[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]], [[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]], [[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]], [[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]], [[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]], [[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]], [[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]], [[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]], [[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]], [[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]], [[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]], [[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]], [[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]], [[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]], [[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]], [[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]], [[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]], [[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]], [[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]], [[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]], [[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]], [[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]]) if idx < binary_op_data_shape.shape[0]: l_shape = binary_op_data_shape[idx][0] r_shape = binary_op_data_shape[idx][1] else: # Generate random data that has ndim between 1-7 and all the shape dims between 1-5 ndim = np.random.randint(1, 6) shape = np.random.randint(1, 6, size=(ndim,)) l_same_dim = np.random.randint(0, 5) r_same_dim = np.random.randint(0, 5) l_axis_flags = np.random.randint(0, 2, size=ndim) r_axis_flags = np.random.randint(0, 2, size=ndim) if l_same_dim == 4: l_axis_flags = np.ones(ndim) if r_same_dim == 4: r_axis_flags = np.ones(ndim) l_shape = shape.copy() r_shape = shape.copy() l_shape[np.where(l_axis_flags == 0)] = 1 r_shape[np.where(r_axis_flags == 0)] = 1 return [np.random.random(l_shape), np.random.random(r_shape)] def gen_broadcast_data_int(idx): d = gen_broadcast_data(idx); return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)] def gen_binary_data(dummy): ndim = np.random.randint(1, 6) shape = np.random.randint(1, 6, size=(ndim,)) #print("gen shape {}".format(shape)) return [np.random.random(shape), np.random.random(shape)] def gen_binary_data_int(dummy): d = gen_binary_data(dummy); return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)] def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None): sample_num = 200 for i in range(sample_num): d = gen_data(i) y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])}) y.forward(is_train=True) y = y.outputs[0].asnumpy() x = baseline(d[0], d[1]).astype(y.dtype) #np.set_printoptions(precision=20) a = d[0] b = d[1] #print("a: {} {}".format(a.dtype, a)) #print("a: {} {}".format(b.dtype, b)) #print("x: {} {}".format(x.dtype, x)) #print("y: {} 
{}".format(y.dtype, y)) if mx_nd_func is not None: d0 = mx.nd.array(d[0], dtype=d[0].dtype) d1 = mx.nd.array(d[1], dtype=d[1].dtype) assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol) idx = np.abs(x-y) > atol+rtol*np.abs(x) if idx.any(): import binascii np.set_printoptions(precision=20) logging.error('found precision problem:') d[0] = np.broadcast_to(d[0], x.shape) d[1] = np.broadcast_to(d[1], x.shape) logging.error('input a: {}'.format(d[0][idx])) logging.error('input b: {}'.format(d[1][idx])) logging.error("output x: {} {}".format(x.dtype, x)) logging.error("output y: {} {}".format(y.dtype, y)) def ftohex(xs): import struct return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten())) logging.error('output x in baseline(a, b): {}'.format(x[idx])) logging.error('output y in symbol(a, b): {}'.format(y[idx])) logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx]))) logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx]))) logging.error('input a hex: {}'.format(ftohex(d[0][idx]))) logging.error('input a hex: {}'.format(ftohex(d[1][idx]))) logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx])) assert_allclose(y, x, rtol=rtol, atol=atol) def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5): sample_num = 200 for i in range(sample_num): d = gen_data(i) out = np.random.random((d[0] + d[1]).shape) def reduce_op(shape, x): if shape == x.shape: return x keepdims_shape = list(x.shape) for i in range(len(shape)): if x.shape[i] != shape[i]: keepdims_shape[i] = 1 x = np.sum(x, axis=i).reshape(keepdims_shape) return x baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1]) x_1 = reduce_op(d[0].shape, baseline_grad1) x_2 = reduce_op(d[1].shape, baseline_grad2) y_1 = mx.nd.empty(d[0].shape) y_2 = mx.nd.empty(d[1].shape) y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])}, args_grad=[y_1, y_2]) y.forward(is_train=True) y.backward([mx.nd.array(out)]) assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol) assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol) @with_seed() def test_binary_op(): a = mx.sym.Variable('a') b = mx.sym.Variable('b') def test_bplus(a, b): c = a + b check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data) check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data) def test_bminus(a, b): c = a - b check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data) check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data) def test_bmul(a, b): c = a * b check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data) check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data) def test_bdiv(a, b): c = a / b check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data) check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data) def test_bmod(a, b): # Python and numpy operate only in double so to avoid numerical errors we have to use # doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044 #c = a % b c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64') # '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32. 
@with_seed()
def test_binary_op():
    a = mx.sym.Variable('a')
    b = mx.sym.Variable('b')

    def test_bplus(a, b):
        c = a + b
        check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)

    def test_bminus(a, b):
        c = a - b
        check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out, -g_out), gen_binary_data)

    def test_bmul(a, b):
        c = a * b
        check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)

    def test_bdiv(a, b):
        c = a / b
        check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, -g_out * a / (b * b)), gen_binary_data)

    def test_bmod(a, b):
        # Python and numpy operate only in double so to avoid numerical errors we have to use
        # doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
        #c = a % b
        c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
        # '%' is sensitive to the precision of the calculation.  Force numpy to match mxnet's float32.
        check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b),
                                gen_binary_data, rtol=0, atol=0)
        check_binary_op_backward(c,
            lambda g_out, a, b: (g_out, -g_out * (np.float32(a) // np.float32(b))), gen_binary_data)

    def test_bmod_int(a, b):
        c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
        check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
        check_binary_op_backward(c,
            lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)

    def test_bpow(a, b):
        c = a ** b
        check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
        check_binary_op_backward(c,
            lambda g_out, a, b: (g_out * a ** (b - 1) * b, g_out * a ** b * np.log(a)), gen_binary_data)

    def test_bneq(a, b):
        c = a != b
        # '!=' is sensitive to the precision of the comparison.  Force numpy to match mxnet's float32.
        # Issue exposed with seed 1644387363
        check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype),
                                gen_binary_data)
        check_binary_op_backward(c,
            lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)

    test_bplus(a, b)
    test_bminus(a, b)
    test_bmul(a, b)
    test_bdiv(a, b)
    test_bmod(a, b)
    test_bmod_int(a, b)
    test_bpow(a, b)
    test_bneq(a, b)
@with_seed()
def test_broadcast_binary_op():
    def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
        """This function ensures that checking the numerical gradient of
        broadcast_max/min is not crossing the boundary y=x where there
        is no gradient definition at those singularities."""
        x_max = np.max(x)
        y = x_max + 2 * delta + np.random.random(y.shape)
        check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)

        x_min = np.min(x)
        y = x_min - 2 * delta - np.random.random(y.shape)
        check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)

    a = mx.sym.Variable('a')
    b = mx.sym.Variable('b')

    def test_bplus(a, b):
        c = mx.sym.broadcast_plus(a, b)
        check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)

    def test_bminus(a, b):
        c = mx.sym.broadcast_minus(a, b)
        check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out, -g_out), gen_broadcast_data)

    def test_bmul(a, b):
        c = mx.sym.broadcast_mul(a, b)
        check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)

    def test_bdiv(a, b):
        c = mx.sym.broadcast_div(a, b)
        check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, -g_out * a / (b * b)), gen_broadcast_data)

    def test_bmod(a_, b_):
        # Python and numpy operate only in double so to avoid numerical errors we have to use
        # doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
        a = mx.sym.cast(a_, dtype='float64')
        b = mx.sym.cast(b_, dtype='float64')
        # '%' is sensitive to the precision of the calculation.  Force numpy to match mxnet's float32.
        c = mx.sym.broadcast_mod(a, b)
        check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
        check_binary_op_backward(c,
            lambda g_out, a, b: (g_out, -g_out * (np.float32(a) // np.float32(b))), gen_binary_data)

    def test_bmod_int(a, b):
        c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
        check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
        check_binary_op_backward(c,
            lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)

    def test_bpow(a, b):
        c = mx.sym.broadcast_power(a, b)
        check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
        check_binary_op_backward(c,
            lambda g_out, a, b: (g_out * a ** (b - 1) * b, g_out * a ** b * np.log(a)), gen_broadcast_data)

    def test_bequal(a, b):
        c = mx.sym.broadcast_equal(a, b)
        check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
                                mx_nd_func=mx.nd.equal)
        check_binary_op_backward(c,
            lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)

    def test_bmax(a, b):
        c = mx.sym.broadcast_maximum(a, b)
        check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)

    def test_bmin(a, b):
        c = mx.sym.broadcast_minimum(a, b)
        check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)

    def test_band(a, b):
        c = mx.sym.broadcast_logical_and(a, b)
        check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data,
                                mx_nd_func=mx.nd.logical_and)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)

    def test_bor(a, b):
        c = mx.sym.broadcast_logical_or(a, b)
        check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data,
                                mx_nd_func=mx.nd.logical_or)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)

    def test_bxor(a, b):
        c = mx.sym.broadcast_logical_xor(a, b)
        check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data,
                                mx_nd_func=mx.nd.logical_xor)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)

    test_bplus(a, b)
    test_bminus(a, b)
    test_bmul(a, b)
    test_bdiv(a, b)
    test_bmod(a, b)
    test_bmod_int(a, b)
    test_bpow(a, b)
    test_bequal(a, b)
    test_bmax(a, b)
    test_bmin(a, b)
    test_band(a, b)
    test_bor(a, b)
    test_bxor(a, b)
mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32) kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32) kernel = mx.symbol.Variable('kernel') in_img = mx.symbol.Variable('input') net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution') net.list_arguments() be = net.bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights}, args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } ) be.forward(True) out_o = be.outputs[0].asnumpy() ndo = be.outputs[0] out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32) out_grads[center] = 1.0 out_grad = mx.nd.array(out_grads) be.backward([out_grad]) vgrad = be.grad_arrays[0].asnumpy() out = out_o.reshape(out_o.shape[2:]) nz_loc = np.nonzero(out) assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5) assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5) # Now check whether the input gradient was computed correctly input_grad = mx.nd.array(vgrad) be = net.bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights}) be.forward(True) out_o = be.outputs[0].asnumpy() assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5) rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32) impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at [0,0,16,16] rnd_kernel = mx.nd.array(rnd_kernel_s) rnd_kernel2 = mx.nd.array(rnd_kernel_s) white_in = mx.nd.ones(shape=data_shape) white_in2 = mx.nd.ones(shape=data_shape) be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel}, args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } ) be.forward(True) be.backward([impulse_error]) out_orig = be.outputs[0].asnumpy() kernel_gradient = be.grad_arrays[1].asnumpy() dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient) be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel}) be.forward(True) out = be.outputs[0].asnumpy() # Now do a simple check of the kernel gradient assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001) @with_seed() def test_convolution_dilated_impulse_response(): # 1D for dil in [ (1,), (2,), (3,) ]: for ks in [ (1,), (2,), (3,), (4,)]: test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks) # 2D for dil in [ (1,1), (2,2), (3,3) ]: for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]: test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks) @with_seed() def test_reshape(): def test_reshape_new(src_shape, shape_args, reverse, dst_shape): net = mx.sym.Variable("data") net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse) js = net.tojson() net = mx.sym.load_json(js) _, output_shape, __ = net.infer_shape(data=src_shape) assert output_shape[0] == dst_shape, \ 'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \ 'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse), str(dst_shape), str(output_shape[0])) dat_npy = np.random.rand(*src_shape) grad_npy = np.random.rand(*dst_shape) exe = net.simple_bind(default_context(), data=src_shape) exe.arg_dict['data'][:] = dat_npy exe.forward(is_train=True) assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \ 'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\ %(str(src_shape), str(shape_args), 
str(reverse), str(dst_shape)) exe.backward(out_grads=mx.nd.array(grad_npy)) assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \ 'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\ %(str(src_shape), str(shape_args), str(reverse), str(dst_shape)) for i in range(len(src_shape)): holdout_src_shape = list(src_shape) holdout_src_shape[i] = 0 holdout_src_shape = tuple(holdout_src_shape) net = mx.sym.Variable('data') net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape)) input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape) assert output_shape[0] == dst_shape, \ 'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \ 'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse), str(dst_shape), str(output_shape[0])) assert input_shape[0] == src_shape, \ 'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \ 'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse), str(dst_shape), str(output_shape[0])) # Test new api (Using shape) test_cases = [ [(2, 3, 5, 5), (0, -1), False, (2, 75)], [(2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)], [(5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)], [(2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)], [(2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)], [(2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)], [(2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)], [(2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)], [(2, 3, 5, 6), (-3, -3), False, (6, 30)], [(2, 3, 5, 6), (-3, -1), False, (6, 30)], [(64,), (-4, 16, 4), False, (16, 4)], [(64,), (-4, 16, -1), False, (16, 4)], [(64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)], [(2, 3, 5, 5), (0, -1), True, (5, 30)], [(2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)], [(5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)], [(2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)], [(2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)], [(2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)], [(2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)], [(2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)], [(2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)], [(2, 3, 5, 6), (-3, -3), True, (6, 30)], [(64,), (16, 4, -4), True, (16, 4)], [(64,), (16, -1, -4), True, (16, 4)], [(1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16)]] for test_case in test_cases: test_reshape_new(*test_case) # Test old api net = mx.sym.Variable("data") net = mx.sym.Reshape(net, target_shape=(2, 0)) js = net.tojson() net = mx.sym.load_json(js) _, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5)) assert(output_shape[0] == (2, 75)) # Test for Flatten data = mx.sym.Variable("data") net = mx.sym.Flatten(data) exe = net.simple_bind(ctx=default_context(), data=(5, 4, 3, 7)) data_npy = np.random.normal(size=(5, 4, 3, 7)) out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7)) outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy() assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7))) exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())]) assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7))) @with_seed() def test_reshape_like(): def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape): lhs = mx.sym.Variable("lhs") rhs = mx.sym.Variable("rhs") net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend) js = net.tojson() net = mx.sym.load_json(js) _, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape) assert 
output_shape[0] == dst_shape, \ 'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\ %(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend)) lhs_npy = np.random.rand(*lhs_shape) rhs_npy = np.random.rand(*rhs_shape) grad_npy = np.random.rand(*dst_shape) exe = net.simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape) exe.arg_dict['lhs'][:] = lhs_npy exe.arg_dict['rhs'][:] = rhs_npy exe.forward(is_train=True) assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \ 'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\ %(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend)) exe.backward(out_grads=mx.nd.array(grad_npy)) assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \ 'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\ %(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend)) # Test new api (Using shape) test_cases = [ [(30,), (15,2,4), 0, None, 0, 2, (15,2)], [(30,), (15,2,4), None, 1, None, 2, (15,2)], [(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)], [(3,5), (1,15,4), 0, 2, 1, 2, (15,)], [(3,5), (1,15,4), 0, None, 1, -1, (15,)], [(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)], [(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)] ] # for test_case in test_cases: for test_case in test_cases: test_reshape_like_new(*test_case) # Test old api lhs = mx.sym.Variable("lhs") rhs = mx.sym.Variable("rhs") net = mx.sym.reshape_like(lhs, rhs) js = net.tojson() net = mx.sym.load_json(js) _, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2)) assert(output_shape[0] == (30,20,2)) @with_seed() def test_reduce(): sample_num = 500 def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0, test_exclude=True, test_none_axis=False): for i in range(sample_num): # Generate random data that has ndim between 1-7 and all the shape dims between 1-5 # Insert a NaN with probability equal to nan_prob ndim = np.random.randint(1, 6) shape = np.random.randint(1, 6, size=(ndim,)) axis_num = np.random.randint(0, ndim, size=1) axis_flags = np.random.randint(0, 2, size=ndim) if test_exclude: exclude = np.random.randint(0, 2) else: exclude = False axes = [] for (axis, flag) in enumerate(axis_flags): if flag: axes.append(axis) if 0 == len(axes): axes = None elif 1 == len(axes): axes = axes[0] else: axes = tuple(axes) keepdims = np.random.randint(0, 2) a = mx.symbol.Variable('a') if axes is None: if test_none_axis: b = mx_reduce_sym(a, keepdims=keepdims, axis=axes) else: b = mx_reduce_sym(a, keepdims=keepdims) elif exclude and isinstance(axes, tuple) and len(axes) < ndim: naxes = [i for i in range(ndim) if i not in axes] b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True) else: b = mx_reduce_sym(a, axis=axes, keepdims=keepdims) dat_npy = np.random.rand(*shape) # Test with both negative and positive values (randomly). Avoid having both in the same # test, which can be problematic for error checking due to near-zero values. 
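            # A worked float32 example of the near-zero hazard (illustrative
            # numbers only, not used by the test): np.float32(1e4) added to
            # np.float32(-1e4 + 1e-3) yields 2**-10 ~= 9.766e-4 instead of
            # 1e-3, a ~2% relative error from a single rounding step.  Summing
            # mixed-sign entries can land the reduced value arbitrarily close
            # to zero, where such relative errors make error checking
            # unreliable, so each sample keeps all entries one sign.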
if np.random.rand() > 0.5: dat_npy = -dat_npy if nan_prob > 0: dat_npy[np.random.rand(*shape) < nan_prob] = np.nan sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims)) if sum_groundtruth.shape == (): sum_groundtruth = np.array([sum_groundtruth]) grad_nd = mx.nd.empty(shape) outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape)) keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy, outdata=sum_groundtruth, axis=axes, keepdims=keepdims, keepdim_shape=keepdim_shape) net = b.bind(default_context(), args={'a': mx.nd.array(dat_npy)}, args_grad={'a': grad_nd}) net.forward(is_train=True) equal_forward = almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, 1E-4, 1E-4) assert equal_forward net.backward(out_grads=mx.nd.array(outgrad_npy)) bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape) equal_backward = almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, 1E-4, 1E-4) assert equal_backward test_none_axis = [True, False] for test_none in test_none_axis: test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum), lambda outgrad, data, outdata, axis, keepdims, keepdim_shape: outgrad.reshape(keepdim_shape), mx.symbol.sum, test_none_axis=test_none) test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean), lambda outgrad, data, outdata, axis, keepdims, keepdim_shape: outgrad.reshape(keepdim_shape)/(data.size/outdata.size), mx.symbol.mean, test_none_axis=test_none) test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod), lambda outgrad, data, outdata, axis, keepdims, keepdim_shape: outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data), mx.symbol.prod, test_none_axis=test_none) test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum), lambda outgrad, data, outdata, axis, keepdims, keepdim_shape: np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)), mx.symbol.nansum, 0.3, test_none_axis=test_none) test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod), lambda outgrad, data, outdata, axis, keepdims, keepdim_shape: np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data)), mx.symbol.nanprod, 0.3, test_none_axis=test_none) # grad of max and min are sensitive to the precision of the calculation. # Force numpy to match mxnet's float32. 
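        # A minimal numpy sketch of the argmax-mask gradient that the two
        # checks below rely on (hypothetical helper, never called by the
        # test): the out-grad is routed only to entries that compare equal to
        # the reduced maximum, which is why both sides of the comparison must
        # be taken in the same float32 precision mxnet computes in.
        def _np_max_grad_sketch(data, outgrad, axis):
            data32 = np.float32(data)
            outdata = np.max(data32, axis=axis, keepdims=True)
            mask = np.equal(data32, outdata)  # nonzero exactly at max positions
            return outgrad * mask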
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max), lambda outgrad, data, outdata, axis, keepdims, keepdim_shape: outgrad.reshape(keepdim_shape) * (np.equal(np.float32(data), outdata.reshape(keepdim_shape))), mx.symbol.max) test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min), lambda outgrad, data, outdata, axis, keepdims, keepdim_shape: outgrad.reshape(keepdim_shape) * (np.equal(np.float32(data), outdata.reshape(keepdim_shape))), mx.symbol.min) test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm), lambda outgrad, data, outdata, axis, keepdims, keepdim_shape: outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)), mx.symbol.norm, test_exclude=False, test_none_axis=test_none) @with_seed() def test_broadcast(): sample_num = 200 for i in range(sample_num): # Generate random data that has ndim between 1-7 and all the shape dims between 1-5 ndim = np.random.randint(1, 6) target_shape = np.random.randint(1, 6, size=(ndim,)) axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1)))) shape = target_shape.copy() size = tuple([shape[ele] for ele in axis]) for ele in axis: shape[ele] = 1 target_shape_with_zero = list(target_shape) for idx in range(len(target_shape_with_zero)): if idx not in axis: target_shape_with_zero[idx] = 0 break a = mx.symbol.Variable('a') sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size) sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape)) sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero)) sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to) def test_broadcasting_ele(sym_bcast): dat_npy = np.random.rand(*shape) groundtruth = dat_npy grad_nd = mx.nd.empty(shape) outgrad_npy = np.random.rand(*target_shape) grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True, numpy_reduce_func=np.sum) net = sym_bcast.bind(default_context(), args={'a': mx.nd.array(dat_npy)}, args_grad={'a': grad_nd}) net.forward(is_train=True) assert (net.outputs[0].shape == target_shape).all() assert_almost_equal(net.outputs[0], groundtruth, rtol=1e-4) net.backward(out_grads=mx.nd.array(outgrad_npy)) assert_almost_equal(grad_nd, grad_groundtruth, rtol=1e-4) test_broadcasting_ele(sym_bcast_axis) test_broadcasting_ele(sym_bcast_to) test_broadcasting_ele(sym_bcast_to_with_zero) test_broadcasting_ele(sym_bcast_like) @with_seed() def test_transpose(): for ndim in range(1, 7): for t in range(5): dims = list(np.random.randint(1, 10, size=ndim)) axes = list(range(ndim)) random.shuffle(axes) axes = tuple(axes) x = mx.nd.array(np.random.normal(size=dims)) y = mx.nd.transpose(x, axes=axes) assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy()) y = mx.nd.transpose(x) assert_allclose(np.transpose(x.asnumpy()), y.asnumpy()) @with_seed() def test_pseudo2dtranspose(): def getTwoInts(mn, mx): n1 = np.random.randint(mn, mx) n2 = np.random.randint(mn, mx-1) n2 = n2 if n2 < n1 else n2+1 return tuple(np.sort([n1, n2])) def getTranspAxes(ndim): axes = list(range(ndim)) n1, n2 = getTwoInts(0,ndim) return tuple(axes[:n1]+axes[n2:]+axes[n1:n2]) for ndim in range(2, 7): for dt in ['int8', 'half', 'int32', 'int64']: for _ in range(5): dims = list(np.random.randint(5, 20, size=ndim)) axes = getTranspAxes(ndim) x = mx.nd.array(np.random.normal(size=dims), dtype=dt) y = mx.nd.transpose(x, axes=axes) assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy()) 
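# A small self-contained sketch of what "pseudo-2D" means in the test above
# (hypothetical helper, not collected as a test): getTranspAxes keeps a
# leading block of axes in place and swaps the two contiguous trailing
# blocks, so on the trailing axes the permutation is equivalent to collapsing
# each block with a reshape, taking a plain 2-D transpose, and reshaping back
# -- presumably the specialized path the test name refers to.
def _pseudo2d_transpose_sketch():
    x = np.random.normal(size=(2, 3, 4, 5))      # two axis blocks: (2, 3) and (4, 5)
    direct = np.transpose(x, axes=(2, 3, 0, 1))  # swap the blocks
    via_2d = x.reshape(2 * 3, 4 * 5).T.reshape(4, 5, 2, 3)
    assert np.array_equal(direct, via_2d)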
@with_seed() def test_big_transpose(): n = [1] d = list(np.random.randint(132, 160, size=1)) hw = list(np.random.randint(256, 320, size=2)) c = [10] dims = n + d + hw + c axes = (0,4,1,2,3) x_np = np.random.normal(size=dims).astype('uint8') x = mx.nd.array(x_np, dtype='uint8') y = mx.nd.transpose(x, axes=axes) assert_allclose(np.transpose(x_np, axes=axes), y.asnumpy().astype('uint8')) axes = (0,2,3,4,1) z = mx.nd.transpose(y, axes=axes) assert_allclose(x_np, z.asnumpy().astype('uint8')) def test_larger_transpose(): x = mx.nd.random.normal(shape=(50,51)) y = mx.nd.transpose(x) assert_allclose(np.transpose(x.asnumpy()), y.asnumpy()) @with_seed() def test_expand_dims(): for ndim in range(1, 6): for axis in range(-ndim + 1, ndim): x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim))) y = mx.nd.array(x) x1 = np.expand_dims(x, axis=axis) y1 = mx.nd.expand_dims(y, axis=axis) assert_allclose(x1, y1.asnumpy()) assert_allclose(x1.shape, y1.shape) @with_seed() def test_crop(): for ndim in range(1, 6): for t in range(5): dims = [] begin = [] end = [] idx = [] for i in range(ndim): d = random.randint(1, 5) b = random.randint(0, d-1) e = random.randint(b+1, d) if b == 0 and random.randint(0, 1): b = None elif b != 0 and random.randint(0, 1): b -= d if e == d and random.randint(0, 1): e = None elif e != d and random.randint(0, 1): e -= d dims.append(d) begin.append(b) end.append(e) idx.append(slice(b, e)) x = mx.nd.array(np.random.normal(size=dims)) y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end)) assert_allclose(x.asnumpy()[idx], y.asnumpy()) vx = mx.sym.Variable('x') vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end)) check_numeric_gradient(vy, [x.asnumpy()]) @with_seed() def test_slice_axis(): for ndim in range(1, 6): shape = np.random.randint(1, 11, size=(ndim,)) for t in range(ndim): d = shape[t] b = random.randint(0, d-1) e = random.randint(b+1, d) if np.random.rand() > 0.6: e = None else: if e < d and np.random.rand() > 0.5: e = e - d if np.random.rand() > 0.5: b = b - d idx = [] for i in range(ndim): idx.append(slice(0, shape[i])) idx[t] = slice(b, e) X = mx.symbol.Variable('X') x = mx.nd.array(np.random.normal(size=shape)) Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e) xgrad = mx.nd.empty(x.shape) exec1 = Y.bind(default_context(), args = [x], args_grad = {'X': xgrad}) exec1.forward(is_train=True) y = exec1.outputs[0] assert_allclose(x.asnumpy()[idx], y.asnumpy()) exec1.backward([y]) xx = x.asnumpy() xx[:] = 0.0 xx[idx] = x.asnumpy()[idx] assert_allclose(xx, xgrad.asnumpy()) x_grad_npy = np.random.normal(size=x.shape) xgrad = mx.nd.array(x_grad_npy) exec2 = Y.bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add") exec2.forward(is_train=True) exec2.backward([exec2.outputs[0]]) xx = np.zeros(shape=x.shape, dtype=np.float32) xx[idx] = x.asnumpy()[idx] assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5) @with_seed() def test_slice_like(): for ndim in range(1, 6): from_shape = np.random.randint(1, 11, size=(ndim,)) shape = [s + np.random.randint(0, 3) for s in from_shape] for t in range(ndim): if t > 0: axes = np.random.randint(0, ndim, size=t).tolist() else: axes = [] idx = [] for i in range(ndim): idx.append(slice(0, shape[i])) if i in axes or not axes: idx[i] = slice(0, from_shape[i]) if axes: pos = np.random.randint(0, t) if axes[pos] > 0: axes[pos] -= ndim # negative index X = mx.symbol.Variable('X') X_1 = mx.symbol.Variable('X1') x = mx.nd.array(np.random.normal(size=shape)) x1 = mx.nd.array(np.random.normal(size=from_shape)) Y = 
mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes) xgrad = mx.nd.empty(x.shape) xgrad1 = mx.nd.empty(x1.shape) exec1 = Y.bind(default_context(), args = [x, x1], args_grad = {'X': xgrad, 'X1': xgrad1}) exec1.forward(is_train=True) y = exec1.outputs[0] assert_allclose(x.asnumpy()[idx], y.asnumpy()) exec1.backward([y]) xx = x.asnumpy() xx[:] = 0.0 xx[idx] = x.asnumpy()[idx] assert_allclose(xx, xgrad.asnumpy()) assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy()) @with_seed() def test_slice_like_different_types(): x = [[ 1., 2., 3., 4.], [ 5., 6., 7., 8.], [ 9., 10., 11., 12.]] y = [[ 0., 0., 0.], [ 0., 0., 0.]] x = mx.nd.array(x) y = mx.nd.array(y).astype('int32') z = mx.nd.slice_like(x, y) assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]]) @with_seed() def test_reshape_like_different_types(): x = mx.nd.zeros((2, 3)) y = mx.nd.array([[1, 2], [3, 4], [5, 6]]) y = mx.nd.array(y).astype('int32') z = mx.nd.reshape_like(x, y) assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]]) @with_seed() def test_flip(): for ndim in range(1, 6): for t in range(5): dims = [random.randint(1,10) for i in range(ndim)] axis = random.randint(0, ndim-1) idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)] x = mx.nd.array(np.random.normal(size=dims)) y = mx.nd.flip(x, axis=axis) assert_allclose(x.asnumpy()[idx], y.asnumpy()) @with_seed() def test_stn(): import sys np.set_printoptions(threshold=sys.maxsize) num_filter = 2 # conv of loc net kernel = (3, 3) # conv of loc net num_hidden = 6 # fc of loc net for n in [1, 2, 3, 4]: for c in [1, 2, 3, 4]: for h in [5, 9, 13, 17]: # for convenience test, this third and forth input dim should be 4x + 1 for w in [5, 9, 13, 17]: data_shape = (n, c, h, w) target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2)) data = mx.sym.Variable(name="data") loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv") loc = mx.sym.Flatten(data=loc) loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc") stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape, transform_type="affine", sampler_type="bilinear") arg_names = stn.list_arguments() arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape) # check shape assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1]) dev = default_context() #dev = mx.gpu(0) args = {} args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev) args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev) args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev) args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev) args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev) grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes] exe = stn.bind(dev, args=args, args_grad=grad_grad) exe.forward(is_train=True) out = exe.outputs[0] # check forward assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4) out_grad = mx.nd.ones(out.shape, ctx=dev) exe.backward([out_grad]) # check backward assert_almost_equal(out_grad, grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4) def test_stn_valid_sampling(): target_shape = ( 28, 28, ) src_shape = ( 42, 42, ) data = mx.sym.Variable(name="data") loc = mx.sym.Variable(name="loc") data_array = np.zeros(( 1, 1, ) + src_shape) # Have an ever so slight rotation. 
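    # For reference: the six numbers below form a row-major 2x3 affine theta
    #   [[t11, t12, t13],     x_s = t11*x_t + t12*y_t + t13
    #    [t21, t22, t23]],    y_s = t21*x_t + t22*y_t + t23
    # over normalized [-1, 1] grid coordinates (the same layout the
    # loc_fc_bias identity of test_stn above relies on).  This theta is
    # approximately [[0, 1, 0], [1, 0, 0]] -- an axis swap -- plus tiny
    # perturbations, so every sampled point stays inside the source image.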
loc_array = np.array( [[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901, -0.000919065]]) stn = mx.sym.SpatialTransformer( data=data, loc=loc, target_shape=target_shape, transform_type="affine", sampler_type="bilinear") grad_req = {k: 'write' for k in stn.list_arguments()} grads = { 'data': mx.nd.array(np.zeros_like(data_array)), 'loc': mx.nd.array(np.zeros_like(loc_array)) } executor = stn.bind( ctx=default_context(), args={'data': mx.nd.array(data_array), 'loc': mx.nd.array(loc_array)}, grad_req=grad_req, args_grad=grads) executor.forward(is_train=True) executor.backward(mx.nd.ones(( 1, 1, ) + target_shape)) @with_seed() def test_dot(): ctx = default_context() dtypes = ['float32', 'float64'] ndims = [2] if ctx.device_type == 'gpu': dtypes += ['float16'] ndims += [1] # Test normal dot. for ndim in ndims: for data_type in dtypes: tol = 1e-2 if data_type == 'float16' else 1e-3 for m in range(1, 5): for k in range(1, 5): if ndim == 1 and k != 1: pass for n in range(1, 5): a_shape = (m, k) if ndim == 2 else (m,) b_shape = (k, n) if ndim == 2 else (n,) a_npy = np.random.normal(0, 1, (m, k)) a_npy = a_npy.astype(data_type) b_npy = np.random.normal(0, 1, (k, n)) b_npy = b_npy.astype(data_type) c_npy = np.empty((m, n), dtype=data_type) ograd_npy = np.random.normal(0, 1, (m, n)) ograd_npy = ograd_npy.astype(data_type) agrad_npy = np.empty((m, k), dtype=data_type) bgrad_npy = np.empty((k, n), dtype=data_type) c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :]) bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :]) agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T) a = mx.sym.Variable('a', dtype=data_type) b = mx.sym.Variable('b', dtype=data_type) c = mx.sym.dot(a, b) exe = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape) outputs = exe.forward(is_train=True, a=a_npy, b=b_npy) assert_almost_equal(outputs[0], c_npy, rtol=tol, atol=tol) exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)]) assert_almost_equal(exe.grad_dict['a'], agrad_npy, rtol=tol, atol=tol) assert_almost_equal(exe.grad_dict['b'], bgrad_npy, rtol=tol, atol=tol) # Test dot with transpose flag using gradient checker. 
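    # The gradients being checked numerically below correspond analytically to
    # the agrad/bgrad formulas used earlier (a hypothetical numpy sketch, not
    # called by the test): for C = A.dot(B) with out-grad G,
    #   dA = G.dot(B.T)   and   dB = A.T.dot(G);
    # a transpose flag moves the transpose onto the corresponding gradient,
    # e.g. for C = A.T.dot(B), dA = (G.dot(B.T)).T = B.dot(G.T).
    def _np_dot_grads_sketch(a, b, g):
        return g.dot(b.T), a.T.dot(g)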
    def dot_sym(data_type):
        x = mx.sym.Variable('x', dtype=data_type)
        y = mx.sym.Variable('y', dtype=data_type)
        return mx.sym.dot(x, y)

    def dot_sym_xT(data_type):
        x = mx.sym.Variable('x', dtype=data_type)
        y = mx.sym.Variable('y', dtype=data_type)
        return mx.sym.dot(x, y, transpose_a=True)

    def dot_sym_yT(data_type):
        x = mx.sym.Variable('x', dtype=data_type)
        y = mx.sym.Variable('y', dtype=data_type)
        return mx.sym.dot(x, y, transpose_b=True)

    def dot_sym_xT_yT(data_type):
        x = mx.sym.Variable('x', dtype=data_type)
        y = mx.sym.Variable('y', dtype=data_type)
        return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)

    for data_type in dtypes:
        for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
            m1_npy = np.random.uniform(-1, 1, ashape)
            m1_npy = m1_npy.astype(data_type)
            m2_npy = np.random.uniform(-1, 1, bshape)
            m2_npy = m2_npy.astype(data_type)
            check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy],
                                   numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
            check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy],
                                   numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
            check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T],
                                   numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
            check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T],
                                   numeric_eps=1e-1, rtol=2e-2, atol=1e-3)

@with_seed()
def test_batch_dot():
    ctx = default_context()
    dtypes = ['float32', 'float64']
    if ctx.device_type == 'gpu':
        dtypes += ['float16']

    for data_type in dtypes:
        for batch_size in range(1, 5):
            for m in range(1, 5):
                for k in range(1, 5):
                    for n in range(1, 5):
                        transpose_a = (np.random.rand() > 0.5)
                        transpose_b = (np.random.rand() > 0.5)
                        a_npy = np.random.normal(0, 1, (batch_size, m, k))
                        a_npy = a_npy.astype(data_type)
                        b_npy = np.random.normal(0, 1, (batch_size, k, n))
                        b_npy = b_npy.astype(data_type)
                        c_npy = np.empty((batch_size, m, n), dtype=data_type)
                        ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
                        ograd_npy = ograd_npy.astype(data_type)
                        agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
                        bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
                        # keep the initial gradients independent of a_npy/b_npy:
                        # they seed the grad_req='add' accumulation check below
                        a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
                        a_init_grad_npy = a_init_grad_npy.astype(data_type)
                        b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
                        b_init_grad_npy = b_init_grad_npy.astype(data_type)
                        for i in range(batch_size):
                            c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
                            bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
                            agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
                        a = mx.sym.Variable('a', dtype=data_type)
                        b = mx.sym.Variable('b', dtype=data_type)
                        c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
                        if transpose_a:
                            a_npy = np.transpose(a_npy, axes=(0, 2, 1))
                            agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
                            a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
                        if transpose_b:
                            b_npy = np.transpose(b_npy, axes=(0, 2, 1))
                            bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
                            b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
                        exe = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape, grad_req='write')
                        exe_add = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape, grad_req='add')
                        exe_add.grad_dict['a'][:] = a_init_grad_npy
                        exe_add.grad_dict['b'][:] = b_init_grad_npy
                        outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
                        assert_almost_equal(outputs[0], c_npy,
                                            rtol=1e-2 if data_type == 'float16' else 1e-3,
                                            atol=1e-2 if data_type == 'float16' else 1e-4)
                        exe.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
                        assert_almost_equal(exe.grad_dict['a'], agrad_npy, rtol=1e-2 if data_type
== 'float16' else 1e-3, atol=1e-2 if data_type == 'float16' else 1e-4) assert_almost_equal(exe.grad_dict['b'], bgrad_npy, rtol=1e-2 if data_type == 'float16' else 1e-3, atol=1e-2 if data_type == 'float16' else 1e-4) exe_add.forward(is_train=True, a=a_npy, b=b_npy) exe_add.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)]) assert_almost_equal(exe_add.grad_dict['a'], agrad_npy + a_init_grad_npy, rtol=1e-2 if data_type == 'float16' else 1e-3, atol=1e-2 if data_type == 'float16' else 1e-4) assert_almost_equal(exe_add.grad_dict['b'], bgrad_npy + b_init_grad_npy, rtol=1e-2 if data_type == 'float16' else 1e-3, atol=1e-2 if data_type == 'float16' else 1e-4) def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply): img1 = mx.sym.Variable('img1') img2 = mx.sym.Variable('img2') return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement, stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply) def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply): # compute output's dimension paddedbottomheight = data1.shape[2] + 2 * pad_size paddedbottomwidth = data1.shape[3] + 2 * pad_size kernel_radius = (kernel_size - 1) // 2 border_size = max_displacement + kernel_radius top_width = (paddedbottomwidth - border_size * 2) // stride1 top_height = (paddedbottomheight - border_size * 2) // stride1 neighborhood_grid_radius = max_displacement // stride2 neighborhood_grid_width = neighborhood_grid_radius * 2 + 1 top_channels = neighborhood_grid_width * neighborhood_grid_width out = np.zeros((data1.shape[0], top_channels, top_height, top_width)) tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth)) tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth)) tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:] tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:] for i in range(top_height): for j in range(top_width): for nbatch in range(data1.shape[0]): # x1,y1 is the location in data1 , i,j is the location in output x1 = j * stride1 + max_displacement y1 = i * stride1 + max_displacement for top_channel in range(top_channels): s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2 s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2 # location in data2 x2 = x1 + s2o y2 = y1 + s2p for h in range(kernel_size): for w in range(kernel_size): for channel in range(data1.shape[1]): if is_multiply: out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w] else: out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w]) out /= float(kernel_size**2*data1.shape[1]) return out,tmp1,tmp2 def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply): # compute output's dimension paddedbottomheight = data1.shape[2] + 2 * pad_size paddedbottomwidth = data1.shape[3] + 2 * pad_size kernel_radius = (kernel_size - 1) // 2 border_size = max_displacement + kernel_radius top_width = (paddedbottomwidth - border_size * 2) // stride1 top_height = (paddedbottomheight - border_size * 2) // stride1 neighborhood_grid_radius = max_displacement // stride2 neighborhood_grid_width = neighborhood_grid_radius * 2 
+ 1 top_channels = neighborhood_grid_width * neighborhood_grid_width out = np.zeros((data1.shape[0], top_channels, top_height, top_width)) tmp1_grad = np.zeros(tmp1.shape) tmp2_grad = np.zeros(tmp2.shape) for i in range(top_height): for j in range(top_width): for nbatch in range(data1.shape[0]): # x1,y1 is the location in data1 , i,j is the location in output x1 = j * stride1 + max_displacement y1 = i * stride1 + max_displacement for top_channel in range(top_channels): s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2 s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2 # location in data2 x2 = x1 + s2o y2 = y1 + s2p for h in range(kernel_size): for w in range(kernel_size): for channel in range(data1.shape[1]): if is_multiply: tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w] tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w] else: sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1 tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn) tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1]) tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1]) return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]], def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype): img1 = np.random.random(data_shape) img1 = img1.astype(dtype) img2 = np.random.random(data_shape) img2 = img2.astype(dtype) net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply) net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply ) exe1 = net1.simple_bind(default_context(),img1=img1.shape,img2=img1.shape) exe1.arg_dict['img1'][:] = img1 exe1.arg_dict['img2'][:] = img2 #cpu forward exe1.forward(is_train=True) # python forward forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply) # forward error assert_almost_equal(exe1.outputs[0], forward_result, rtol=1e-4, atol=1e-4) # out_grad a = np.ones(forward_result.shape) out_grad1 = mx.nd.array(a,default_context()) # cpu backward exe1.backward(out_grads=out_grad1) # python backward grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply) # backward error assert_almost_equal(exe1.grad_dict['img1'], grad1, rtol=1e-3, atol=1e-4) assert_almost_equal(exe1.grad_dict['img2'], grad2, rtol=1e-3, atol=1e-4) @with_seed() def test_correlation(): def test_infer_type(dtype): a = mx.sym.Variable('a') b = mx.sym.Variable('b') corr = mx.sym.Correlation(data1=a, data2=b) arg_type1, out_type1, _ = corr.infer_type(a=dtype) if arg_type1[0] != np.dtype(dtype) and arg_type1[1] != np.dtype(dtype) and out_type1[0] != np.dtype(dtype): msg = npt.npt.build_err_msg([a, b], err_msg="Inferred type from a is not as expected, " "Expected :%s %s %s, Got: %s %s %s" % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]), names=['a', 'b']) raise AssertionError(msg) arg_type2, out_type2, _ = corr.infer_type(b=dtype) if arg_type2[0] != np.dtype(dtype) and arg_type2[1] != np.dtype(dtype) and out_type2[0] != 
np.dtype(dtype): msg = npt.npt.build_err_msg([a, b], err_msg="Inferred type from b is not as expected, " "Expected :%s %s %s, Got: %s %s %s" % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]), names=['a', 'b']) raise AssertionError(msg) for dtype in ['float16', 'float32']: test_infer_type(dtype) unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype) unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype) unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype) unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype) unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype) unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype) unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype) unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype) unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype) @with_seed() def test_support_vector_machine_l1_svm(): xpu = default_context() shape = (20, 10) X = mx.symbol.Variable('X') L = mx.symbol.Variable('L') Y = mx.symbol.SVMOutput(data=X, label=L, use_linear=True) x = mx.nd.empty(shape, ctx = xpu) l = mx.nd.empty((shape[0],), ctx = xpu) x_np = np.random.rand(*shape) l_np = np.random.randint(0, shape[1], (shape[0],)) x[:] = x_np l[:] = l_np grad = mx.nd.empty(shape, ctx = xpu) exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad}) exec1.forward(is_train=True) assert_almost_equal(x_np, exec1.outputs[0]) exec1.backward() l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1])) l_mask = np.array(l_mask, dtype=np.float32)*2 -1 grad_np = (-1) * l_mask * np.greater(1 - l_mask * x_np, 0) assert_almost_equal(grad_np, grad) @with_seed() def test_support_vector_machine_l2_svm(): xpu = default_context() shape = (20, 10) X = mx.symbol.Variable('X') L = mx.symbol.Variable('L') Y = mx.symbol.SVMOutput(data=X, label=L) x = mx.nd.empty(shape, ctx = xpu) l = mx.nd.empty((shape[0],), ctx = xpu) x_np = np.random.rand(*shape) x_np = x_np.astype(np.float32) l_np = np.random.randint(0, shape[1], (shape[0],)) x[:] = x_np l[:] = l_np grad = mx.nd.empty(shape, ctx = xpu) exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad}) exec1.forward(is_train=True) assert_almost_equal(x_np, exec1.outputs[0]) exec1.backward() l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1])) l_mask = np.array(l_mask, dtype=np.float32)*2 -1 grad_np = (-2)*l_mask*np.maximum(1-l_mask*x_np,0) grad_np = grad_np.astype(np.float32) assert_almost_equal(grad_np, grad) # Seed set because the test is not robust enough to operate on random data @with_seed(1234) def test_roipooling(): data = mx.symbol.Variable(name='data') rois = mx.symbol.Variable(name='rois') test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1) x1 = np.random.rand(4, 3, 12, 8).astype('float32') x2 = np.array([[0, 1.1, 
1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32') check_numeric_gradient(sym=test, location=[x1, x2], grad_nodes={'data':'write', 'rois':'null'}, numeric_eps=1e-4, rtol=1e-1, atol=1e-4) check_numeric_gradient(sym=test, location=[x1, x2], grad_nodes={'data':'add', 'rois':'null'}, numeric_eps=1e-4, rtol=1e-1, atol=1E-4) def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"): # bind with label X = mx.symbol.Variable('X', dtype=dtype) Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width) x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu) # numpy result pad_grouped = list(zip(*[iter(list(pad_width))] * 2)) np_out = np.pad(x.asnumpy(), pad_grouped, mode) # mxnet result grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype) exec1 = Y.bind(xpu, args = [x], args_grad = {'X': grad}) exec1.forward(is_train=True) out = exec1.outputs[0] # compare numpy + mxnet assert_almost_equal(out, np_out) # grad check check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2) @with_seed() def test_pad(): ctx = default_context() shape1 = (2, 3, 3, 5) pad1 = (0, 0, 0, 0, 1, 2, 3, 4) shape2 = (2, 3, 3, 5, 4) pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1) # note: this op doesn't support ints yet. Add tests when supported dtypes = ["float16", "float32", "float64"] for dtype in dtypes: check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype) check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype) check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype) check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype) check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype) check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype) def np_instance_norm(data, weight, bias, eps): spatial_dims = data.shape[2::] num_spatial_vals = np.prod(np.array(spatial_dims)) scale = 1/float(num_spatial_vals) sum_axis = tuple(range(2, data.ndim)) mean = scale * np.sum(data, axis = sum_axis) mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape) var = scale * np.sum((data - mean)**2, axis = sum_axis) var = np.reshape(np.repeat(var, num_spatial_vals), data.shape) weightBatch = np.tile(weight, (data.shape[0], 1)) weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape) biasBatch = np.tile(bias, (data.shape[0], 1)) biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape) return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch def check_instance_norm_with_shape(shape, xpu): # bind with label eps = 0.001 X = mx.symbol.Variable('X') G = mx.symbol.Variable('G') B = mx.symbol.Variable('B') Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps) x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu) gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu) beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu) np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps) exec1 = Y.bind(xpu, args = {'X':x, 'G':gamma, 'B':beta}) exec1.forward(is_train=False) out = exec1.outputs[0] assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4) check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()}, numeric_eps=1e-2, rtol=1e-2, atol=1e-2) @with_seed() def test_instance_normalization(): check_instance_norm_with_shape((1, 1, 1), default_context()) check_instance_norm_with_shape((2, 1, 2), default_context()) check_instance_norm_with_shape((2,4,5,6), default_context()) check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context()) def 
check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
    ctx = default_context()
    data = mx.symbol.Variable('data')
    out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
    in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
    # calculate numpy results
    if mode == 'channel':
        assert in_data.ndim > 2
        np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
        np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
        np_out = np.multiply(in_data, np_norm)
    elif mode == 'spatial':
        assert in_data.ndim > 2
        s = in_data.shape
        np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        # integer repeat count (// rather than /) keeps np.repeat happy on python 3
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
        np_out = np.multiply(in_data, np_norm.reshape(s))
    elif mode == 'instance':
        assert in_data.ndim > 1
        s = in_data.shape
        np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
        np_out = np.multiply(in_data, np_norm.reshape(s))
    else:
        raise RuntimeError('Unknown l2 normalization mode')
    exe = out.simple_bind(ctx=ctx, data=in_data.shape)
    output = exe.forward(is_train=True, data=in_data)
    # compare numpy + mxnet
    assert_almost_equal(exe.outputs[0], np_out,
                        rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
    # check gradient
    check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)

@with_seed()
def test_l2_normalization():
    for dtype in ['float16', 'float32', 'float64']:
        for mode in ['channel', 'spatial', 'instance']:
            nbatch = random.randint(1, 4)
            nchannel = random.randint(3, 5)
            height = random.randint(4, 6)
            check_l2_normalization((nbatch, nchannel, height), mode, dtype)
            width = random.randint(5, 7)
            check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)

def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
                              forward_check_eps=1E-3, backward_check_eps=1E-3,
                              npy_grad_check=True, finite_grad_check=True):
    def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
        if axis < 0:
            axis += data.ndim
        broadcast_shape = [1 for _ in range(data.ndim)]
        broadcast_shape[axis] = data.shape[axis]
        mean = data.mean(axis=axis, keepdims=True).astype(dtype)
        var = data.var(axis=axis, keepdims=True).astype(dtype)
        std = np.sqrt(var + dtype(eps)).astype(dtype)
        out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
              np.reshape(beta, broadcast_shape)
        return out

    def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
        if axis < 0:
            axis += data.ndim
        exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
        data_mean = data.mean(axis=axis, keepdims=True)
        data_var = data.var(axis=axis, keepdims=True)
        data_std = np.sqrt(data_var + eps)
        centered_data = (data - data_mean) / data_std
        gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
        beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
        w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
            / data_std
        data_grad = w - w.mean(axis=axis, keepdims=True)\
                    - centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
        gamma_grad = gamma_grad.reshape((-1,))
        beta_grad = beta_grad.reshape((-1,))
        return data_grad, gamma_grad, beta_grad

    ctx = default_context()
    data = np.random.normal(0, 1, in_shape).astype(dtype)
    gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
    beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
    data_s = mx.symbol.Variable('data')
    gamma_s = mx.symbol.Variable('gamma')
    beta_s =
mx.symbol.Variable('beta') out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps) exe = out_s.simple_bind(ctx, data=in_shape) exe.arg_dict['data'][:] = data exe.arg_dict['gamma'][:] = gamma exe.arg_dict['beta'][:] = beta out_nd = exe.forward()[0] out = npy_layer_norm(data, gamma, beta, axis, eps) assert_almost_equal(out, out_nd, forward_check_eps, forward_check_eps) if finite_grad_check: for req in ['write', 'add']: check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta}, grad_nodes={'data': req, 'gamma': req, 'beta': req}, numeric_eps=1e-2, rtol=1e-2, atol=1e-2) if npy_grad_check: # Test for grad_req = write out_grad = np.random.normal(0, 1, in_shape).astype(dtype) exe = out_s.simple_bind(ctx, data=in_shape, grad_req='write') exe.arg_dict['data'][:] = data exe.arg_dict['gamma'][:] = gamma exe.arg_dict['beta'][:] = beta exe.forward() exe.backward([mx.nd.array(out_grad, ctx=ctx)]) gt_data_grad, gt_gamma_grad, gt_beta_grad =\ npy_layer_norm_grad(data, gamma, out_grad, axis, eps) assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps) assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps) assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps) # Test for grad_req = add out_grad = np.random.normal(0, 1, in_shape).astype(dtype) init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype) init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype) init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype) exe = out_s.simple_bind(ctx, data=in_shape, grad_req='add') exe.arg_dict['data'][:] = data exe.arg_dict['gamma'][:] = gamma exe.arg_dict['beta'][:] = beta exe.grad_dict['data'][:] = init_data_grad exe.grad_dict['gamma'][:] = init_gamma_grad exe.grad_dict['beta'][:] = init_beta_grad exe.forward() exe.backward([mx.nd.array(out_grad, ctx=ctx)]) gt_data_grad, gt_gamma_grad, gt_beta_grad = \ npy_layer_norm_grad(data, gamma, out_grad, axis, eps) assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps) assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps) assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps) @with_seed() def test_norm(): try: import scipy assert LooseVersion(scipy.__version__) >= LooseVersion('0.1') from scipy.linalg import norm as sp_norm except (AssertionError, ImportError): print("Could not import scipy.linalg.norm or scipy is too old. 
" "Falling back to numpy.linalg.norm which is not numerically stable.") from numpy.linalg import norm as sp_norm def l1norm(input_data, axis=0, keepdims=True): return np.sum(abs(input_data), axis=axis, keepdims=keepdims) def l2norm(input_data, axis=0, keepdims=True): return sp_norm(input_data, axis=axis, keepdims=keepdims) ctx = default_context() data = mx.symbol.Variable('data') in_data_dim = random_sample([2,3,4], 1)[0] in_shape = rand_shape_nd(in_data_dim, dim=5) epsilon = 1e-3 acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64, np.int32: np.int32, np.int64: np.int64} dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64', np.int32: 'int32', np.int64: 'int64'} is_windows = sys.platform.startswith('win') for enforce_safe_acc in ["1", "0"]: if is_windows: if enforce_safe_acc == "0": break enforce_safe_acc = "0" if "MXNET_SAFE_ACCUMULATION" not in os.environ else os.environ["MXNET_SAFE_ACCUMULATION"] else: os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc for order in [1, 2]: for dtype in [np.float16, np.float32, np.float64]: for i in range(in_data_dim): for out_dtype in ['float32', 'float64']: backward_dtype = np.float32 if out_dtype == 'float32' else np.float64 accumulation_type = acc_type[dtype] if enforce_safe_acc == "0": backward_dtype = dtype out_dtype = dtype_to_str[dtype] accumulation_type = dtype skip_backward = 'int' in out_dtype in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type) in_data[abs(in_data) < epsilon] = 2 * epsilon norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True) npy_out = l1norm(in_data, i) if order is 1 else l2norm(in_data, i) npy_out_backward = np.sign(in_data) if order is 1 else in_data/npy_out check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)], rtol=1e-2 if dtype == np.float16 else 1e-3, atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype) if dtype is not np.float16 and not skip_backward: check_symbolic_backward(norm_sym, [in_data.astype(dtype)], [np.ones(npy_out.shape).astype(out_dtype)], [npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype) # Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509 # check gradient if dtype is not np.float16 and not skip_backward: check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon, rtol=1e-1, atol=1e-3, dtype=backward_dtype) if i < in_data_dim-1: norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True) npy_out = l1norm(in_data, (i, i+1)) if order is 1 else l2norm(in_data, (i, i+1)) npy_out_backward = np.sign(in_data) if order is 1 else in_data/npy_out check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)], rtol=1e-2 if dtype is np.float16 else 1e-3, atol=1e-4 if dtype is np.float16 else 1e-5, ctx=ctx) if dtype is not np.float16 and not skip_backward: check_symbolic_backward(norm_sym, [in_data], [np.ones(npy_out.shape).astype(out_dtype)], [npy_out_backward.astype(out_dtype)], rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype) # check gradient if dtype is not np.float16 and not skip_backward: check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon, rtol=1e-1, atol=1e-3, dtype=backward_dtype) def test_layer_norm(): for enforce_safe_acc in ["1", "0"]: os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc for dtype, forward_check_eps, backward_check_eps in zip([np.float16, np.float32, np.float64], [1E-2, 1E-3, 1E-4], [1E-2, 1E-3, 1E-4]): if dtype != 
np.float16: in_shape_l, finite_grad_check_l = [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False] else: in_shape_l, finite_grad_check_l = [(10, 6, 5), (10, 10)], [True, True] # large input + fp16 does not pass the forward check for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l): for axis in range(-len(in_shape), len(in_shape)): for eps in [1E-2, 1E-3]: if dtype == np.float16: npy_grad_check = False else: npy_grad_check = True check_layer_normalization(in_shape, axis, eps, dtype=dtype, forward_check_eps=forward_check_eps, backward_check_eps=backward_check_eps, npy_grad_check=npy_grad_check, finite_grad_check=finite_grad_check) # Numpy Implementation of Sequence Ops def sequence_last_numpy(array, lengths, axis): # create new array of dims [batch, seqlen, ...] array2 = np.moveaxis(array, axis, 1) dims = array2.shape if lengths is None: return array2[:, -1] lengths = list(lengths) return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])]) def sequence_mask_numpy(array, lengths, axis, value): if lengths is None: return array arrayMask = array.copy() # conform to [batch, seqlen, ...] arrayMask = np.moveaxis(arrayMask, axis, 1) shape = arrayMask.shape lengths = list(lengths) for i in range(shape[0]): arrayMask[i, int(lengths[i]):] = value return np.moveaxis(arrayMask, 1, axis) def sequence_reverse_numpy(array, lengths, axis): rarray = array.copy() # conform to [batch, seqlen, ...] rarray = np.moveaxis(rarray, axis, 1) shape = rarray.shape if lengths is None: lengths = [shape[1]] * shape[0] lengths = list(lengths) for i in range(shape[0]): j = int(lengths[i]) rarray[i,:j] = rarray[i,:j][::-1] return np.moveaxis(rarray, 1, axis) def check_sequence_func(ftype, mask_value=0, axis=0): # bind with label xpu = default_context() X = mx.symbol.Variable('X') L = mx.symbol.Variable('L') # lengths shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)] for seqlenQ in [True, False]: for ary_dtype in [np.float32]: for idx_dtype in [np.int32, np.float32]: for s in shapes: x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu) batch = s[1] if (axis == 0) else s[0] seqlen = s[axis] l_np = np.random.randint(1, seqlen + 1, batch) l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu) if not seqlenQ: l_np = None args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis} if seqlenQ: args['sequence_length'] = L if ftype == "last": Y = mx.symbol.SequenceLast(**args) np_out = sequence_last_numpy(x.asnumpy(), l_np, axis) elif ftype == "mask": args['value'] = mask_value Y = mx.symbol.SequenceMask(**args) np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value) elif ftype == "reverse": Y = mx.symbol.SequenceReverse(**args) np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis) fargs = [x, l] if seqlenQ else [x] gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()] check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy") check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'}, numeric_eps=1e-2, rtol=1e-2) check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'}, numeric_eps=1e-3, rtol=1e-2, atol=1E-4) check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'}, numeric_eps=1e-3, rtol=1e-2, atol=1E-4) @with_seed() @unittest.skip("Flaky test: https://github.com/apache/incubator-mxnet/issues/11395") def test_sequence_last(): check_sequence_func("last", axis=0) check_sequence_func("last", axis=1) @with_seed() def test_sequence_mask(): check_sequence_func("mask", axis = 0, mask_value=-2.3) check_sequence_func("mask", axis = 1, mask_value=0.3) def 
check_sequence_reverse(xpu): # sample data arr = np.array( [[[ 1., 2., 3.], [ 4., 5., 6.]], [[ 7., 8., 9.], [ 10., 11., 12.]], [[ 13., 14., 15.], [ 16., 17., 18.]]]) arr1 = np.array( [[[ 13., 14., 15.], [ 16., 17., 18.]], [[ 7., 8., 9.], [ 10., 11., 12.]], [[ 1., 2., 3.], [ 4., 5., 6.]]]) arr2 = np.array( [[[ 7., 8., 9.], [ 10., 11., 12.]], [[ 1., 2., 3.], [ 4., 5., 6.]], [[ 13., 14., 15.], [ 16., 17., 18.]]]) arr3 = np.array( [[[ 7., 8., 9.], [ 16., 17., 18.]], [[ 1., 2., 3.], [ 10., 11., 12.]], [[ 13., 14., 15.], [ 4., 5., 6.]]]) # test for matrix case seq_len_1 = [1, 2, 2] arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32) arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32) def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False): # MxNet symbol creation seq = mx.sym.Variable('seq') if sequence_length and use_sequence_length: seq_len = mx.sym.Variable('seq_len') else: # ensure that both are disabled, not just one seq_len=None use_sequence_length=False rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length) # MxNet symbol execution if sequence_length: bound = rev.bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)}) else: bound = rev.bind(xpu, {'seq': mx.nd.array(arr)}) fwd = bound.forward() return fwd[0].asnumpy() # test cases assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1) assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1) assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2) assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3) assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5) @with_seed() def test_sequence_reverse(): check_sequence_func("reverse", axis=0) check_sequence_reverse(mx.cpu()) def mathematical_core_binary(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call1, backward_numpy_call2, data1_init=2., data2_init=3., grad_init=2.): data1 = mx.symbol.Variable('data1') data2 = mx.symbol.Variable('data2') shape = (3, 4) data_tmp1 = np.random.rand(3, 4) data_tmp2 = np.random.rand(3, 4) data_tmp1[:] = data1_init data_tmp2[:] = data2_init arr_data1 = mx.nd.array(data_tmp1) arr_data2 = mx.nd.array(data_tmp2) arr_grad1 = mx.nd.empty(shape) arr_grad2 = mx.nd.empty(shape) test = forward_mxnet_call(data1, data2) exe_test = test.bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2]) exe_test.forward(is_train=True) out = exe_test.outputs[0] npout = forward_numpy_call(data_tmp1, data_tmp2) assert_almost_equal(out, npout) out_grad = mx.nd.empty(shape) out_grad[:] = grad_init exe_test.backward(out_grad) npout_grad = np.ones(shape) npout_grad[:] = grad_init npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2) npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2) assert_almost_equal(arr_grad1, npout_grad1) assert_almost_equal(arr_grad2, npout_grad2) def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.): data = mx.symbol.Variable('data') shape = (3, 4) data_tmp = np.ones(shape) data_tmp[:] = data_init arr_data = mx.nd.array(data_tmp) arr_grad = mx.nd.empty(shape) arr_grad[:] = 3 test = forward_mxnet_call(data) exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad]) exe_test.forward(is_train=True) 
out = exe_test.outputs[0]
    npout = forward_numpy_call(data_tmp)
    assert_almost_equal(out, npout)

    out_grad = mx.nd.empty(shape)
    out_grad[:] = grad_init
    npout_grad = out_grad.asnumpy()
    temp = backward_numpy_call(data_tmp)
    npout_grad = npout_grad * temp
    exe_test.backward(out_grad)
    assert_almost_equal(arr_grad, npout_grad)


@with_seed()
def test_special_functions_using_scipy():
    try:
        from scipy import special as scipy_special
    except ImportError:
        print("Could not import scipy. Skipping unit tests for special functions")
        return

    # gamma
    mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
                      lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)

    # gammaln
    mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
                      lambda x: scipy_special.psi(x), 0.5, 0.5)

    # erf
    mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
                      lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)

    # erfinv
    mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
                      lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)


def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:] = data_init
    arr_data = mx.nd.array(data_tmp)

    test = forward_mxnet_call(data)
    exe_test = test.bind(default_context(), args=[arr_data])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0]
    npout = forward_numpy_call(data_tmp)
    assert_almost_equal(out, npout)


@with_seed()
def test_mathematical():
    # rsqrt
    mathematical_core("rsqrt", lambda x: mx.sym.rsqrt(x), lambda x: 1 / np.sqrt(x),
                      lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
    # tan
    mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x),
                      lambda x: np.tan(x) ** 2 + 1)
    # arcsin
    mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
                      lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
    # arccos
    mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
                      lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
    # arctan
    mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
                      lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
    # hypot
    mathematical_core_binary("hypot",
                             lambda x, y: mx.sym.hypot(x, y),
                             lambda x, y: np.hypot(x, y),
                             lambda x, y: x / np.hypot(x, y),
                             lambda x, y: y / np.hypot(x, y),
                             0.5, 0.5, 0.5)
    # hypot scalar
    mathematical_core("hypot scalar", lambda x: mx.sym.hypot(x, 3), lambda x: np.hypot(x, 3),
                      lambda x: x / np.hypot(x, 3), 0.5, 0.5)
    # degrees
    mathematical_core("degrees", lambda x: mx.sym.degrees(x), lambda x: np.degrees(x),
                      lambda x: 180. / np.pi, 0.5, 0.5)
    # radians
    mathematical_core("radians", lambda x: mx.sym.radians(x), lambda x: np.radians(x),
                      lambda x: np.pi / 180., 0.6, 1)
    # sinh
    mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x),
                      lambda x: np.cosh(x))
    # cosh
    mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x),
                      lambda x: np.sinh(x), 5, 5)
    # tanh
    mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x),
                      lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
    # arcsinh
    mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
                      lambda x: 1. / (x ** 2 + 1.) ** (1. / 2.))
    # arccosh
    mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
                      lambda x: 1. / (x ** 2 - 1.) ** (1. / 2.))
    # arctanh
    mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
                      lambda x: -1. / (x ** 2 - 1.), 0.5)
    # log1p
    mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
                      lambda x: 1. / (1.0 + x), 0.5, 0.5)
    # expm1
    mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
                      lambda x: np.exp(x), 0.5, 0.5)
    # log10
    mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
                      lambda x: 1. / (x * np.log(10.)))
    # log2
    mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
                      lambda x: 1. / (x * np.log(2.)))
    # rint
    rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
    # fix
    rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))


@with_seed()
def test_clip():
    data = mx.symbol.Variable('data')
    shape = (30, 30)
    data_tmp = np.random.uniform(-1, 1, shape).astype('float32')
    test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
    check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
    check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
                            [np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])])

    # Test monitor on symbol using clip
    def simple_callback(name, arr):
        pass

    exe = test.simple_bind(ctx=mx.current_context(), data=shape)
    exe.set_monitor_callback(simple_callback, monitor_all=True)
    exe.forward(is_train=True)
    exe.backward(out_grads=mx.nd.ones(shape))
    mx.nd.waitall()


@with_seed()
def test_init():
    def test_basic_val_init(sym_func, np_func, shape, dtype):
        x = sym_func(shape=shape, dtype=dtype)
        exe = x.bind(default_context(), args=[], args_grad=[])
        exe.forward(is_train=True)
        assert_almost_equal(exe.outputs[0], np_func(shape=shape, dtype=dtype))
        assert exe.outputs[0].asnumpy().dtype == dtype

    def test_arange():
        # General Random Tests
        dtype_list = [np.float32, np.float64, np.int32, np.uint8]
        config_list = [(10,),
                       (0, 10),
                       (5, 100, 4),
                       (50, -50, -2),
                       (-100, 100, 1),
                       (1.3, 456.6, 1.3)]
        for dtype in dtype_list:
            for config in config_list:
                repeats = random.choice([1, 3])
                np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
                nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
                assert_almost_equal(np_out, nd_out)

    def test_arange_inferstop():
        s = mx.sym.arange(start=0, stop=None, infer_range=True)
        s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
        exe = s.bind(ctx=mx.cpu(), args={})
        exe.forward()
        assert_almost_equal(exe.outputs[0], np.array([0, 1, 2, 3, 4]))

    def test_arange_like():
        shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
        axis_list = [0, -1]
        for sh in shape_list:
            for axis in axis_list:
                val = np.random.rand(*sh)
                data = mx.nd.array(val)
                nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
                np_out = np.arange(start=0, stop=sh[axis])
                assert_almost_equal(nd_out.asnumpy(),
np_out) def test_arange_like_without_axis(): shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)] for sh in shape_list: val = np.random.rand(*sh) data = mx.nd.array(val) nd_out = mx.nd.contrib.arange_like(data, start=0) np_out = np.arange(start=0, stop=val.size) assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh)) test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32) test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32) test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16) test_arange() test_arange_inferstop() test_arange_like() test_arange_like_without_axis() @with_seed() def test_order(): ctx = default_context() def gt_topk(dat, axis, ret_typ, k, is_ascend): if ret_typ == "indices": if is_ascend: indices = np.arange(k) else: indices = np.arange(-1, -k-1, -1) ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap') elif ret_typ == "value": if is_ascend: indices = np.arange(k) else: indices = np.arange(-1, -k-1, -1) ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap') else: assert dat.shape == (5, 5, 5, 5) assert axis is None or axis == 1 ret = np.zeros(dat.shape) if is_ascend: indices = np.arange(k) else: indices = np.arange(-1, -k-1, -1) gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap') if axis is None: ret.ravel()[gt_argsort] = 1 else: for i in range(5): for j in range(5): for k in range(5): ret[i, gt_argsort[i, :, j, k], j, k] = 1 return ret dshape = (5, 5, 5, 5) a_npy = np.arange(np.prod(dshape)).astype(np.float32) np.random.shuffle(a_npy) a_npy = a_npy.reshape(dshape) a = mx.sym.Variable('a') def get_large_matrix(): data = np.array([np.arange(300096).astype(np.float32)]) data = np.repeat(data, 100, axis=0) np.apply_along_axis(np.random.shuffle, 1, data) return data large_matrix_npy = get_large_matrix() for axis in [1, 3, None]: for is_ascend in [True, False]: b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend) if axis is None: out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend) else: out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend) check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx) check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy]) b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5) check_symbolic_backward(sym=b, location={'a': large_matrix_npy}, out_grads=[np.random.normal(size=(100, 5))], expected=[np.zeros((100, 300096))]) check_symbolic_forward(b, location={'a': large_matrix_npy}, expected=[gt_topk(dat=large_matrix_npy, axis=1, ret_typ="indices", k=5, is_ascend=is_ascend)]) b = mx.sym.argsort(a, axis=1, is_ascend=False) check_symbolic_backward(sym=b, location={'a': a_npy}, out_grads=[np.random.normal(size=(5, 5, 5, 5))], expected=[np.zeros((5, 5, 5, 5))]) check_symbolic_forward(b, location={'a': a_npy}, expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5, is_ascend=False)]) b = mx.sym.argmax(a, axis=1, keepdims=True) check_symbolic_backward(sym=b, location={'a': a_npy}, out_grads=[np.random.normal(size=(5, 5, 5, 5))], expected=[np.zeros((5, 5, 5, 5))]) check_symbolic_forward(b, location={'a': a_npy}, expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1, is_ascend=False)]) b = mx.sym.argmin(a, axis=1, keepdims=True) check_symbolic_backward(sym=b, location={'a': a_npy}, out_grads=[np.random.normal(size=(5, 5, 5, 5))], expected=[np.zeros((5, 5, 5, 5))]) check_symbolic_forward(b, location={'a': a_npy}, 
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1, is_ascend=True)]) for dtype in [np.float16, np.float32, np.float64]: dshape = (5, 5, 5, 5) a_npy = np.arange(np.prod(dshape)).astype(dtype) np.random.shuffle(a_npy) a_npy = a_npy.reshape(dshape) a = mx.sym.Variable('a') for axis in [1, 3, None]: K = [1, 3, 5, 7] if axis is None else [1, 3, 5] for k in K: for is_ascend in [True, False]: b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k) out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend) check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx) check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy]) b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5) check_symbolic_backward(sym=b, location={'a': large_matrix_npy}, out_grads=[np.random.normal(size=(100, 5))], expected=[np.zeros((100, 300096))]) check_symbolic_forward(b, location={'a': large_matrix_npy}, expected=[gt_topk(dat=large_matrix_npy, axis=1, ret_typ="indices", k=5, is_ascend=is_ascend)]) b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3) check_symbolic_backward(sym=b, location={'a': a_npy}, out_grads=[np.random.normal(size=(5, 5, 5, 3))], expected=[np.zeros((5, 5, 5, 5))]) check_symbolic_forward(b, location={'a': a_npy}, expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3, is_ascend=False)]) b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3) check_symbolic_backward(sym=b, location={'a': a_npy}, out_grads=[np.random.normal(size=(5, 5, 5, 5))], expected=[np.zeros((5, 5, 5, 5))]) check_symbolic_forward(b, location={'a': a_npy}, expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3, is_ascend=True)]) @with_seed() def test_blockgrad(): a = mx.sym.Variable('a') b = mx.sym.BlockGrad(a) exe = b.simple_bind(ctx=default_context(), a=(10, 10)) a_npy = np.random.rand(10, 10) exe.forward(is_train=True, a=a_npy) assert_almost_equal(exe.outputs[0], a_npy) exe.backward() # No error if BlockGrad works @with_seed() def test_take(): def grad_helper(grad_in, axis, idx): if axis == 0: if axis == len(grad_in.shape) - 1: grad_in[idx] += 1.0 else: grad_in[idx, :] += 1.0 elif axis == 1: if axis == len(grad_in.shape) - 1: grad_in[:, idx] += 1.0 else: grad_in[:, idx, :] += 1.0 elif axis == 2: if axis == len(grad_in.shape) - 1: grad_in[:, :, idx] += 1.0 else: grad_in[:, :, idx, :] += 1.0 elif axis == 3: if axis == len(grad_in.shape) - 1: grad_in[:, :, :, idx] += 1.0 else: grad_in[:, :, :, idx, :] += 1.0 elif axis == 4: grad_in[:, :, :, :, idx] += 1.0 else: raise ValueError("axis %d is not supported..." 
% axis) def check_output_n_grad(data_shape, idx_shape, axis, mode, out_of_range=True): data = mx.sym.Variable('a') idx = mx.sym.Variable('indices') idx = mx.sym.BlockGrad(idx) result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode) exe = result.simple_bind(default_context(), a=data_shape, indices=idx_shape, axis=axis, mode=mode) data_real = np.random.normal(size=data_shape).astype('float32') if out_of_range: idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape) if mode == 'raise': idx_real[idx_real == 0] = 1 idx_real *= data_shape[axis] else: idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape) if axis < 0: axis += len(data_shape) grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32') grad_in = np.zeros(data_shape, dtype='float32') exe.arg_dict['a'][:] = mx.nd.array(data_real) exe.arg_dict['indices'][:] = mx.nd.array(idx_real) exe.forward(is_train=True) if out_of_range and mode == 'raise': try: mx_out = exe.outputs[0].asnumpy() except MXNetError as e: return else: # Did not raise exception assert False, "did not raise %s" % MXNetError.__name__ assert_almost_equal(exe.outputs[0], np.take(data_real, idx_real, axis=axis, mode=mode)) for i in np.nditer(idx_real): if mode == 'clip': i = np.clip(i, 0, data_shape[axis]) grad_helper(grad_in, axis, i) exe.backward([mx.nd.array(grad_out)]) assert_almost_equal(exe.grad_dict['a'], grad_in) def check_autograd_req(): row_len = 2 col_len = 8 shape = (row_len, col_len) sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32") sc.attach_grad() i = mx.nd.array([0], dtype="int64") j = mx.nd.array([0], dtype="int64") with mx.autograd.record(train_mode=True): xs = [] for _ in range(row_len): x_i = [] for _ in range(col_len): x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0) x_i.append(x_ij) j = j + 1 i = i + 1 j = j - col_len # reset j xs.append(mx.nd.stack(*x_i)) x = mx.nd.stack(*xs) x = x.sum() x.backward() assert_almost_equal(np.ones(sc.grad.shape), sc.grad) for mode in ['clip', 'wrap', 'raise']: for data_ndim in range(1, 5): for idx_ndim in range(1, 4): for axis in range(-data_ndim, data_ndim): data_shape = () for _ in range(data_ndim): data_shape += (np.random.randint(low=1, high=5), ) idx_shape = () for _ in range(idx_ndim): idx_shape += (np.random.randint(low=1, high=5), ) if mode == 'raise': check_output_n_grad(data_shape, idx_shape, axis, 'raise', False) check_output_n_grad(data_shape, idx_shape, axis, mode) check_autograd_req() @with_seed() def test_grid_generator(): # transform_type = affine test_case = [(20,21),(4,3),(6,12),(15,17)] for target_shape in test_case: affine_matrix = mx.sym.Variable('affine') grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape) exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='write') # check forward exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]]) exe.forward(is_train=True) output = exe.outputs[0].asnumpy() output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0 output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0 xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1])) assert_almost_equal(output[0,0], yv.T) assert_almost_equal(output[0,1], xv.T) # check backward out_grad = np.random.normal(size=(1,2)+target_shape) exe.backward(mx.nd.array(out_grad)) tmp = np.zeros((3,target_shape[0]*target_shape[1])) tmp[0] = -1.0 + 
(np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1)) tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1)) tmp[2] = 1 grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6) assert_almost_equal(exe.grad_dict['affine'], grad_est, rtol=1e-3, atol=1e-5) # check addto exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='add') grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape) exe.grad_dict['affine'][:] = grid_grad_npy exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]]) exe.forward(is_train=True) exe.backward(mx.nd.array(out_grad)) assert_almost_equal(exe.grad_dict['affine'], grad_est + grid_grad_npy, rtol=1e-2, atol=1e-5) # transform_type = warp test_case = [(12,21),(4,3),(6,12)] for target_shape in test_case: flow = mx.sym.Variable('flow') grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape) exe = grid.simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write') # check forward exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape) exe.forward(is_train=True) output = exe.outputs[0].asnumpy() output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0 output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0 xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1) assert_almost_equal(output[0,0], yv.T) assert_almost_equal(output[0,1], xv.T) # check backward out_grad = np.random.normal(size=(1,2)+target_shape) exe.backward(mx.nd.array(out_grad)) grad_est = np.zeros((1,2)+target_shape) grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0) grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0) assert_almost_equal(exe.grad_dict['flow'], grad_est, rtol=1e-3) # check addto exe_add = grid.simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add') flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape) exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape) exe_add.grad_dict['flow'][:] = flow_grad_npy exe_add.forward(is_train=True) exe_add.backward(mx.nd.array(out_grad)) assert_almost_equal(exe_add.grad_dict['flow'], grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5) @with_seed() def test_index2d(): for _ in range(30): n = np.random.randint(1, 100) m = np.random.randint(1, 500) data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context()) x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32') r = mx.nd.batch_take(data, x) assert_almost_equal(r, data.asnumpy()[np.arange(n), x.asnumpy()]) @with_seed() def test_cast(): for srctype in [np.int32, np.float32, np.float16]: for dsttype in [np.float32, np.int32, np.float16]: x = mx.sym.Variable('x', dtype=srctype) y = mx.sym.Cast(x, dtype=dsttype) exe = y.simple_bind(ctx=default_context(), x=(10, 10)) assert exe.arg_arrays[0].dtype == srctype assert exe.outputs[0].dtype == dsttype X = np.random.uniform(-10, 10, size=(10, 10)) exe.arg_arrays[0][:] = X exe.forward(is_train=True) exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context())) assert_almost_equal(exe.outputs[0], X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5) assert_almost_equal(exe.grad_arrays[0], X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5) def get_cast_op_data(): FP16_FRACTION_BITS = 10 FP32_FRACTION_BITS = 23 FP32_EXP_MIN = -126 FP32_EXP_MAX = 127 # generate test cases in the vicinity of representable float16 
mantissas # and mid-way between them, but over the full range of float32 exponents. for sign_bit in [0, 1]: for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2): denominator = 2**(FP16_FRACTION_BITS + 1) for numerator in range(0, denominator): fraction = numerator / float(denominator) for y in [-1.0, 0.0, 1.0]: small_delta = y / 2**FP32_FRACTION_BITS val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta) yield val # Add np.nan as a final data value to process yield np.nan # Test requires all platforms to round float32->float16 with same round-to-nearest-even policy. @with_seed() def test_cast_float32_to_float16(): input_np = np.array(list(get_cast_op_data())).astype(np.float32) # The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed # as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722 expected_output = input_np.astype(np.float64).astype(np.float16) def check_cast(op, input_np, expected_output): x = mx.sym.Variable('x', dtype=np.float32) sym = op(x, dtype=np.float16) ctx = default_context() exe = sym.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)}) assert exe.arg_arrays[0].dtype == np.float32 assert exe.outputs[0].dtype == np.float16 exe.forward(is_train=True) sym_output = exe.outputs[0].asnumpy() for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output): assert (model_fp16_val == np_fp16_val) or \ (np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \ 'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format( fp32_val, model_fp16_val, np_fp16_val) check_cast(mx.sym.Cast, input_np, expected_output) check_cast(mx.sym.amp_cast, input_np, expected_output) @with_seed() def test_amp_multicast(): x = mx.sym.Variable('x', dtype=np.float16) y = mx.sym.Variable('y', dtype=np.float32) z = mx.sym.Variable('z', dtype=np.float16) ctx = default_context() res = mx.sym.amp_multicast(x, y, z, num_outputs=3) exe = res.bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx), 'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx), 'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)}) exe.forward(is_train=True) out1, out2, out3 = exe.outputs assert out1.asnumpy().dtype == np.float32 assert out2.asnumpy().dtype == np.float32 assert out3.asnumpy().dtype == np.float32 def check_amp_multicast(input_np, expected_output): x = mx.sym.Variable('x', dtype=np.float16) y = mx.sym.Variable('y', dtype=np.float32) z = mx.sym.Variable('z', dtype=np.float16) ctx = default_context() res = mx.sym.amp_multicast(x, y, z, num_outputs=3) exe = res.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx), 'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx), 'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)}) exe.forward(is_train=True) sym_output = exe.outputs[0].asnumpy() for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output): assert (model_fp16_val == np_fp16_val) or \ (np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \ 'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format( fp32_val, model_fp16_val, np_fp16_val) input_np = np.array(list(get_cast_op_data()), dtype=np.float16) expected_output = input_np.astype(np.float32) check_amp_multicast(input_np, expected_output) @with_seed() def test_all_finite(): data = mx.sym.Variable("data", dtype=np.float32) data2 = mx.sym.Variable("data2", dtype=np.float32) finite_arr = mx.nd.array([[0, 0]]) 
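# all_finite / multi_all_finite return a scalar flag: 1 when every element of
# every input array is finite, 0 as soon as any inf or nan is present. Both
# variants are exercised below on a clean array and an inf-poisoned one.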
inf_arr = mx.nd.array([[np.inf, np.inf]]) z = mx.sym.all_finite(data) ctx = default_context() exe = z.bind(ctx, {'data': inf_arr}) exe.forward(is_train=False) sym_output = exe.outputs[0].asnumpy() assert sym_output[0] == 0 exe = z.bind(ctx, {'data': finite_arr}) exe.forward(is_train=False) sym_output = exe.outputs[0].asnumpy() assert sym_output[0] == 1 z = mx.sym.multi_all_finite(data, data2, num_arrays=2) exe = z.bind(ctx, {'data': finite_arr, 'data2': inf_arr}) exe.forward(is_train=False) sym_output = exe.outputs[0].asnumpy() assert sym_output[0] == 0 z = mx.sym.multi_all_finite(data, data2, num_arrays=2) exe = z.bind(ctx, {'data': finite_arr, 'data2': finite_arr}) exe.forward(is_train=False) sym_output = exe.outputs[0].asnumpy() assert sym_output[0] == 1 @with_seed() def test_repeat(): def test_repeat_forward(): ndim_max = 6 # max number of dims of the ndarray size_max = 10 # max number of elements in each dim repeats = 3 for ndim in range(1, ndim_max+1): shape = () for i in range(0, ndim): shape += (np.random.randint(1, size_max+1), ) a = np.random.random_sample(size=shape) aa = np.repeat(a, repeats) b = mx.nd.array(a, ctx=default_context()) bb = mx.nd.repeat(b, repeats) assert_almost_equal(aa, bb) for axis in range(0, ndim): aa = np.repeat(a, repeats, axis) bb = mx.nd.repeat(b, repeats, axis) assert_almost_equal(aa, bb) def test_repeat_backward(axis): data = mx.sym.Variable('data') n1 = 3 n2 = 4 shape = (n1, n2) data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape) arr_data = mx.nd.array(data_tmp) arr_grad = mx.nd.empty(shape) repeats = 2 test = mx.sym.repeat(data, repeats=repeats, axis=axis) exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad]) npout_grad = np.random.randint(0, 10, n1 * n2 * repeats) if axis == 0: npout_grad = npout_grad.reshape(n1 * repeats, n2) elif axis == 1: npout_grad = npout_grad.reshape(n1, n2 * repeats) else: raise RuntimeError("Invalid axis value") out_grad = mx.nd.array(npout_grad) exe.backward(out_grad) expected_grad = np.zeros(shape) if axis == 0: for i in range(shape[0]): for j in range(shape[1]): k = i * repeats expected_grad[i][j] = sum(npout_grad[k:k + repeats, j]) elif axis == 1: for j in range(shape[1]): for i in range(shape[0]): k = j * repeats expected_grad[i][j] = sum(npout_grad[i, k:k + repeats]) else: raise RuntimeError("Invalid axis value") assert_almost_equal(expected_grad, arr_grad, rtol=1e-3) def test_repeat_numeric_gradient(): data = mx.sym.Variable('data') n1 = 3 n2 = 4 shape = (n1, n2) data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape) repeats = 2 test = mx.sym.repeat(data, repeats=repeats, axis=0) check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2) test_repeat_forward() test_repeat_backward(axis=0) test_repeat_backward(axis=1) test_repeat_numeric_gradient() @with_seed() def test_reverse(): data = mx.symbol.Variable('data') shape = (5, 5, 5) data_tmp = np.random.uniform(-1, 1, shape) test = mx.sym.reverse(data, axis=[1, 2]) grad = np.random.uniform(-1, 1, shape) check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2) check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]]) check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]]) @with_seed() def test_tile(): def test_normal_case(): ndim_min = 1 ndim_max = 5 # max number of dims of the ndarray size_max = 10 # max number of elements in each dim length_max = 3 # max length of reps rep_max = 10 # max number of tiling in each dim for ndim in range(ndim_min, ndim_max+1): shape = [] for i in range(1, ndim+1): 
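# each dimension gets a random extent in [1, size_max]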
shape.append(np.random.randint(1, size_max+1)) shape = tuple(shape) a = np.random.randint(0, 100, shape) b = mx.nd.array(a, dtype=a.dtype) reps_len = np.random.randint(1, length_max+1) reps_tuple = () for i in range(1, reps_len): reps_tuple += (np.random.randint(1, rep_max), ) reps_array = np.asarray(reps_tuple) a_tiled = np.tile(a, reps_array) b_tiled = mx.nd.tile(b, reps_tuple).asnumpy() assert same(a_tiled, b_tiled) def test_empty_tensor(): shape = (2, 3, 0, 4) with mx.np_shape(): a = np.array([], dtype=np.int32).reshape(shape) b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype) reps = (2, 4, 6) a_tiled = np.tile(a, reps) b_tiled = mx.nd.tile(b, reps).asnumpy() assert same(a_tiled, b_tiled) def test_empty_reps(): a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32) b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype) a_tiled = np.tile(a, ()) b_tiled = mx.nd.tile(b, ()).asnumpy() assert same(a_tiled, b_tiled) def test_tile_backward(): data = mx.sym.Variable('data') n1 = 2 n2 = 2 shape = (n1, n2) data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape) arr_data = mx.nd.array(data_tmp) arr_grad = mx.nd.empty(shape) reps1 = 2 reps2 = 2 reps = (reps1, reps2) test = mx.sym.tile(data, reps=reps) exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad]) npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2) out_grad = mx.nd.array(npout_grad) exe.backward(out_grad) expected_grad = np.zeros(shape) for i in range(shape[0]): for j in range(shape[1]): expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2])) assert_almost_equal(expected_grad, arr_grad, rtol=1e-3) def test_tile_numeric_gradient(): data = mx.sym.Variable('data') n1 = 2 n2 = 2 shape = (n1, n2) data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape) reps1 = 2 reps2 = 2 reps = (reps1, reps2) test = mx.sym.tile(data, reps=reps) check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2) def test_invalid_reps(): data = mx.nd.arange(16).reshape((4, 4)) assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3)) assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3)) test_normal_case() with mx.np_shape(): test_empty_tensor() test_empty_reps() test_tile_backward() test_tile_numeric_gradient() test_invalid_reps() @with_seed() def test_one_hot(): def test_normal_case(index_type=np.int32): ndim_max = 6 dim_size_max = 20 depth = int(dim_size_max / 2) on_value = 1 off_value = 0 for ndim in range(1, ndim_max+1): shape = () for i in range(1, ndim+1): shape += (np.random.randint(1, dim_size_max+1), ) indices = np.random.randint(-dim_size_max, dim_size_max+1, size=np.prod(shape)).reshape(shape) mx_one_hot_array = mx.nd.one_hot( mx.nd.array(indices, ctx=default_context(), dtype=index_type), depth=depth, dtype=np.int32) expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32) expected_array[:] = off_value indices_1d = indices.flatten() row = 0 for idx in indices_1d: if 0 <= idx < depth: expected_array[row, idx] = on_value row += 1 expected_array = expected_array.reshape(shape + (depth, )) one_hot_array = mx_one_hot_array.asnumpy() assert same(expected_array, one_hot_array) def test_empty_indices(): shape = (2, 0, 9, 3) with mx.np_shape(): indices = np.array([]).reshape(shape) depth = 10 mx_one_hot_array = mx.nd.one_hot( mx.nd.array(indices, ctx=default_context(), dtype=np.int32), depth=depth, dtype=np.int32 ).asnumpy() expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,)) assert same(expected_array, 
mx_one_hot_array) def test_zero_depth(): shape = (2, 4, 9, 3) indices = np.ones(shape) depth = 0 mx_one_hot_array = mx.nd.one_hot( mx.nd.array(indices, ctx=default_context(), dtype=np.int32), depth=depth, dtype=np.int32).asnumpy() expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, )) assert same(expected_array, mx_one_hot_array) test_normal_case(index_type=np.int32) test_normal_case(index_type=np.float64) test_normal_case(index_type=np.float32) test_normal_case(index_type=np.float16) with mx.np_shape(): test_empty_indices() test_zero_depth() @with_seed() def test_where(): def get_forward_expected_output(condition, x, y): original_shape = x.shape out = np.zeros(original_shape) if condition.shape == x.shape: for index, c in np.ndenumerate(condition): if c != 0: out[index] = x[index] else: out[index] = y[index] elif condition.shape == (x.shape[0], ): s = x.shape m = s[0] n = int(np.prod(s)/s[0]) x2d = x.reshape((m, n)) y2d = y.reshape((m, n)) out = out.reshape((m, n)) for i in range(0, m): if condition[i] != 0: for j in range(0, n): out[i, j] = x2d[i, j] else: for j in range(0, n): out[i, j] = y2d[i, j] else: raise RuntimeError("Invalid condition shape for where op") out = out.reshape(original_shape) return out def get_forward_inputs_same_shape(shape): condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape) x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape) y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape) return condition_np, x_np, y_np def get_forward_inputs_condition_vector(shape): condition_np = np.random.randint(0, 2, shape[0]) x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape) y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape) return condition_np, x_np, y_np def get_backward_input(shape): return np.random.randint(20, 30, np.prod(shape)).reshape(shape) def get_backward_expected_outputs(grad_in, condition): shape = grad_in.shape grad_cond = np.zeros(condition.shape) grad_x = np.empty(shape) grad_y = np.empty(shape) for index, c in np.ndenumerate(condition): if 0 != c: grad_x[index] = grad_in[index] grad_y[index] = 0 else: grad_x[index] = 0 grad_y[index] = grad_in[index] return grad_cond, grad_x, grad_y def test_where_helper(shape, same_shape): if same_shape: condition_np, x_np, y_np = get_forward_inputs_same_shape(shape) else: condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape) out_expected = get_forward_expected_output(condition_np, x_np, y_np) grad_in_np = get_backward_input(shape) grad_expected_cond, grad_expected_x, grad_expected_y\ = get_backward_expected_outputs(grad_in_np, condition_np) condition = mx.sym.Variable('condition') x = mx.sym.Variable('x') y = mx.sym.Variable('y') grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32) where_sym = mx.sym.where(condition, x, y) # test req='write' where_exe_write = where_sym.simple_bind(ctx=default_context(), condition=condition_np.shape, x=x_np.shape, y=y_np.shape, grad_req='write') # test forward req='write' outputs = where_exe_write.forward(is_train=True, condition=condition_np, x=x_np, y=y_np) assert same(outputs[0].asnumpy(), out_expected) # test backward req='write' where_exe_write.backward(grad_in_mx) assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x) assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y) assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond) # test req='add' x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape) y_grad_init = 
np.random.randint(40, 50, np.prod(shape)).reshape(shape)
        where_exe_add = where_sym.simple_bind(ctx=default_context(),
                                              condition=condition_np.shape,
                                              x=x_np.shape, y=y_np.shape,
                                              grad_req='add')
        where_exe_add.grad_dict['x'][:] = x_grad_init
        where_exe_add.grad_dict['y'][:] = y_grad_init
        # test forward req='add'
        outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
        assert same(outputs[0].asnumpy(), out_expected)
        # test backward req='add'
        where_exe_add.backward(grad_in_mx)
        x_ograd = where_exe_add.grad_dict['x'].asnumpy()
        y_ograd = where_exe_add.grad_dict['y'].asnumpy()
        assert same(x_ograd, grad_expected_x + x_grad_init)
        assert same(y_ograd, grad_expected_y + y_grad_init)

    def test_where_numeric_gradient(shape, same_shape):
        condition = mx.sym.Variable('condition')
        x = mx.sym.Variable('x')
        y = mx.sym.Variable('y')
        where_sym = mx.sym.where(condition, x, y)
        if same_shape:
            condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
        else:
            condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
        check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])

    def test_invalid_shape():
        condition = mx.sym.Variable('condition')
        x = mx.sym.Variable('x')
        y = mx.sym.Variable('y')
        where_sym = mx.sym.where(condition, x, y)

        assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2, 3], [4, 5], [6, 7]]),
                                                y=mx.nd.array([[8, 9], [10, 11], [12, 13]]),
                                                condition=mx.nd.array([1, 0])), MXNetError)

        assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2, 3], [4, 5], [6, 7]]),
                                             y=mx.nd.array([[8, 9], [10, 11], [12, 13]]),
                                             condition=mx.nd.array([1, 0])), MXNetError)

    def test_1d_cond():
        cond = mx.nd.array([1, 0, 1])
        x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
        y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
        expect_out = np.array([[2, 3], [9, 10], [6, 7]])
        out = mx.nd.where(cond, x, y).asnumpy()
        assert same(expect_out, out)

    test_where_helper((5, 9), True)
    test_where_helper((5, 9), False)
    test_where_helper((5, 7, 9), True)
    test_where_helper((5, 7, 9), False)
    test_where_helper((10, 8, 15, 3), True)
    test_where_helper((10, 8, 15, 3), False)
    test_where_numeric_gradient((5, 9), True)
    test_where_numeric_gradient((5, 9), False)
    test_where_numeric_gradient((5, 7, 9), True)
    test_where_numeric_gradient((5, 7, 9), False)
    test_invalid_shape()
    test_1d_cond()


@with_seed()
def test_softmin():
    for ndim in range(1, 5):
        for dtype in [np.float16, np.float32, np.float64]:
            rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
            shape = np.random.randint(1, 5, size=ndim)
            axis = np.random.randint(-ndim, ndim)
            data = np.random.uniform(-2, 2, size=shape).astype(dtype)
            data = data / 10 if dtype is np.float16 else data
            sym = mx.sym.softmin(axis=axis)
            expected_fwd = np_softmax(-data, axis=axis)
            expected_bwd = np.zeros(shape)
            check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
            for req in ['null', 'add', 'write']:
                check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
                                        rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
            if dtype is not np.float16:
                check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)


@with_seed()
def test_new_softmax():
    for ndim in range(1, 5):
        shape = np.random.randint(1, 5, size=ndim)
        axis = np.random.randint(-ndim, ndim)
        data = np.random.uniform(-2, 2, size=shape)
        sym = mx.sym.softmax(axis=axis)
        expected_fwd = np_softmax(data, axis=axis)
        expected_bwd = np.zeros(shape)
        check_symbolic_forward(sym, [data], [expected_fwd])
        for req in ['null', 'add', 'write']:
            check_symbolic_backward(sym, [data],
[np.ones(expected_fwd.shape)], [expected_bwd], rtol=1e-2, atol=1e-3, grad_req=req) check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3) @with_seed() def test_softmax_with_temperature(): for ndim in range(1, 5): shape = np.random.randint(1, 5, size=ndim) data = np.random.uniform(-2, 2, size=shape) for temp in range(1, 11): sym = mx.sym.softmax(axis=0, temperature=temp) expected_fwd = np_softmax(data, axis=0, temperature=temp) expected_bwd = np.zeros(shape) check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3) check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3) check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3) @with_seed() def test_log_softmax(): for ndim in range(1, 5): for _ in range(5): shape = np.random.randint(1, 5, size=ndim) axis = np.random.randint(0, ndim) data = np.random.uniform(-2, 2, size=shape) sym = mx.sym.log_softmax(axis=axis-ndim) check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)]) check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3) def test_softmax_with_large_inputs(): def softmax_forward(input_data, true_output): data = mx.sym.Variable('data') out1 = data.softmax(axis=1) exec1 = out1.bind(default_context(), args={'data': input_data}) exec1.forward()[0].wait_to_read() ndarr = exec1.outputs[0][0][0][0] assert_almost_equal(ndarr, true_output, rtol=1e-5, atol=1e-5) softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0])) softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0])) softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0])) softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0])) @with_seed() def test_softmax_dtype(): def check_dtypes_almost_equal(op_name, atol, rtol, grad_atol, grad_rtol, idtype, ref_dtype, odtype=None): op = getattr(mx.nd, op_name) input_data = mx.random.uniform(shape=(100, 500)) dtype_input = input_data.astype(idtype) ref_input = input_data.astype(ref_dtype) dtype_input.attach_grad() ref_input.attach_grad() with mx.autograd.record(): dtype_softmax = op(dtype_input, axis=-1, dtype=odtype) ref_softmax = op(ref_input, axis=-1, dtype=odtype) assert_almost_equal(dtype_softmax, ref_softmax, rtol=rtol, atol=atol) dtype_softmax.backward() ref_softmax.backward() assert_almost_equal(dtype_input.grad, ref_input.grad, rtol=grad_rtol, atol=grad_atol) import sys is_windows = sys.platform.startswith('win') enforce_safe_acc = os.environ.get("MXNET_SAFE_ACCUMULATION", "0") if not is_windows or enforce_safe_acc == "1": os.environ["MXNET_SAFE_ACCUMULATION"] = "1" check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32') check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32') check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64') check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64') check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32') check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32') check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64') check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64') check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2, 'float16', 'float32') check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2, 'float16', 'float32', 'float32') check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 
1e-3, 'float32', 'float64') check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3, 'float32', 'float64', 'float64') @with_seed() def test_softmax_with_length(): def np_softmax_with_length(data, length): res = np.zeros(data.shape) for i in range(length.shape[0]): for j in range(length.shape[1]): leng = int(length[i, j]) res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j]) return res ndim = 3 shape = rand_shape_nd(ndim, dim=10) len_shape = list(shape) del len_shape[1] len_shape = tuple(len_shape) for dtype in [np.float16, np.float32, np.float64]: mx_data = rand_ndarray(shape, dtype=dtype) np_data = mx_data.asnumpy() np_length = np.random.randint(1, shape[1] + 1, len_shape) mx_length = mx.nd.array(np_length, dtype=np.int32) np_out = np_softmax_with_length(np_data, np_length) data = mx.sym.Variable("data") length = mx.sym.Variable("length") mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1) location = {"data": mx_data, "length": mx_length} rtol = 1e-2 if dtype == np.float16 else 1e-3 atol = 1e-4 if dtype == np.float16 else 1e-5 check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy") check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)], [np.zeros(shape), np.zeros(len_shape, dtype=np.int32)], rtol=1e-2, atol=1e-3, dtype="asnumpy") @with_seed() def test_pick(): def test_pick_helper(index_type=np.int32): for mode in ['clip', 'wrap']: ndim = np.random.randint(1, 5) bshape = np.random.randint(1, 10, size=ndim) axis = np.random.randint(0, ndim) sshape = bshape.copy() sshape[axis] = 1 data = np.random.uniform(-1, 1, size=bshape) if mode == 'wrap': index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape) else: index = np.random.randint(0, bshape[axis], size=sshape) exp = [] for i in range(ndim): if i == axis: if mode == 'wrap': exp.append(index % bshape[axis]) else: exp.append(index) else: ishape = [1 for _ in range(ndim)] ishape[i] = bshape[i] exp.append(np.arange(bshape[i]).reshape(ishape)) expected = data[exp] data = mx.nd.array(data, dtype='float32') index = mx.nd.array(index, dtype=index_type) out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode) assert_almost_equal(out.asnumpy(), expected) data_holder = data index_holder = index data = mx.sym.Variable('data') index = mx.sym.Variable('index') sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode) check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data']) test_pick_helper(np.int32) test_pick_helper(np.float32) def check_ctc_loss(acts, labels, loss_truth, contrib=False): in_var = mx.sym.Variable('input') labels_var = mx.sym.Variable('labels') if contrib: ctc = mx.sym.contrib.ctc_loss(in_var, labels_var) else: ctc = mx.sym.ctc_loss(in_var, labels_var) acts_nd = mx.nd.array(acts, ctx=default_context()) labels_nd = mx.nd.array(labels, ctx=default_context()) exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd]) # test forward with grad calc exe.forward(is_train=True) outTest = exe.outputs[0].copy() # test forward without grad calc exe.forward(is_train=False) outTrain = exe.outputs[0] # make sure losses calculated with both modes are the same assert_almost_equal(outTest, outTrain) # test against ground truth, if available if loss_truth is not None: assert_almost_equal(outTest, loss_truth) # test grad check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3) @with_seed() def test_ctc_loss(): # Test 1: check that batches are same + check against Torch WarpCTC acts = 
np.array([ [[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]], [[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]], [[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]], dtype=np.float32) labels = np.array([[2, 3, 0], [2, 3, 0]]) true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch for contrib in [False, True]: check_ctc_loss(acts, labels, true_loss, contrib=contrib) # Test 2: acts2 = np.array([ [[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]], [[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]], [[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32) labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32) true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch for contrib in [False, True]: check_ctc_loss(acts2, labels2, true_loss, contrib=contrib) # Test 3: check use integer type as label labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32) true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch for contrib in [False, True]: check_ctc_loss(acts2, labels3, true_loss, contrib=contrib) @with_seed() def test_ctc_loss_with_large_classes(): ctx = default_context() num_classes = 6000 seq_len = 8 batch_size = 2 data = np.empty((num_classes, 0)) for i in range(seq_len * batch_size) : row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1) data = np.append(data, row/13, axis=1) data = data.reshape(seq_len, batch_size, num_classes) label = np.array([ [100, 200, 300, 400, 500, 0, 0, 0], [1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32) nd_data = mx.nd.array(data) nd_label = mx.nd.array(label) loss = mx.nd.ctc_loss(data=nd_data, label=nd_label) expected_loss = np.array([688.02826, 145.34462]) assert_almost_equal(loss, expected_loss) @with_seed() def test_ctc_loss_grad(): def check_ctc_loss_grad(blank_label, contrib=False): # from tf vocab_size = 5 max_label_len = 5 padding_mask = -1+ (blank_label=='first') targets_0 = [0, 1, 2, 1, 0] loss_log_prob_0 = -3.34211 input_prob_matrix_0 = np.asarray( [[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553], [0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436], [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688], [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533], [0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]], dtype=np.float32) gradient_log_prob_0 = np.asarray( [[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553], [0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436], [0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688], [0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533], [-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]], dtype=np.float32) targets_1 = [0, 1, 1, 0] loss_log_prob_1 = -5.42262 input_prob_matrix_1 = np.asarray( [[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508], [0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549], [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456], [0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345], [0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]], dtype=np.float32) gradient_log_prob_1 = np.asarray( [[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508], [0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549], [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544], [0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345], [-0.576714, 0.315517, 
0.0338439, 0.0393744, 0.0339315, 0.154046]], dtype=np.float32) inputs = [ np.vstack( [input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]]) for t in range(5) ] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)] inputs = np.log(np.asarray(inputs, dtype=np.float32)) grad_truth = np.array([ np.vstack( [gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]]) for t in range(5) ] + 2 * [np.zeros((2, vocab_size+1), np.float32)]) if blank_label == 'first': inputs = np.roll(inputs, 1, axis=2) grad_truth = np.roll(grad_truth, 1, axis=2) labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x)) for x in [targets_0, targets_1]])+(blank_label == 'first')) seq_lens = np.array([5, 5], dtype=np.int32) label_lens = np.array([5, 4], dtype=np.int32) loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32) with default_context(): data = mx.nd.array(inputs) label = mx.nd.array(labels) data.attach_grad() with mx.autograd.record(): if contrib: l = mx.contrib.ndarray.CTCLoss(data, label, use_data_lengths=True, use_label_lengths=True, data_lengths=mx.nd.array(seq_lens), label_lengths=mx.nd.array(label_lens), blank_label=blank_label) else: l = mx.ndarray.CTCLoss(data, label, use_data_lengths=True, use_label_lengths=True, data_lengths=mx.nd.array(seq_lens), label_lengths=mx.nd.array(label_lens), blank_label=blank_label) l.backward() assert_almost_equal(l, loss_truth, atol=1e-5, rtol=1e-5) assert_almost_equal(data.grad, grad_truth, atol=1e-5, rtol=1e-5) for contrib in [False, True]: for label in ['first', 'last']: check_ctc_loss_grad(label, contrib=contrib) @with_seed() def test_quantization_op(): min0 = mx.nd.array([0.0]) max0 = mx.nd.array([1.0]) a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]]) qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8') a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32') qa_real = mx.nd.array([[18, 75], [77, 109]]) a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]]) print(a_.asnumpy()) print(a_real.asnumpy()) assert same(qa.asnumpy(), qa_real.asnumpy()) assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2) @with_seed() def test_index_copy(): x = mx.nd.zeros((5,3)) t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]]) index = mx.nd.array([0,4,2], dtype=np.int64) tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]]) x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]]) t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]]) t.attach_grad() with mx.autograd.record(): out = mx.nd.contrib.index_copy(x, index, t) out.backward() assert same(out.asnumpy(), tensor.asnumpy()) assert same(t.grad.asnumpy(), t_grad.asnumpy()) x.attach_grad() t.attach_grad() with mx.autograd.record(): out = mx.nd.contrib.index_copy(x, index, t) out.backward() assert same(out.asnumpy(), tensor.asnumpy()) assert same(x.grad.asnumpy(), x_grad.asnumpy()) assert same(t.grad.asnumpy(), t_grad.asnumpy()) @with_seed() def test_boolean_mask(): data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]]) index = mx.nd.array([0, 1, 0]) data.attach_grad() with mx.autograd.record(): out = mx.nd.contrib.boolean_mask(data, index) out.backward() data.grad.wait_to_read() expected = np.array([[4, 5, 6]]) expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]]) assert same(out.asnumpy(), expected) assert same(data.grad.asnumpy(), expected_grad) # test 0-size output mx.set_np_shape(True) data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]]) index = mx.nd.array([0, 0, 0]) data.attach_grad() with mx.autograd.record(): out = mx.nd.contrib.boolean_mask(data, 
index)
    out.backward()
    data.grad.wait_to_read()
    expected = np.zeros((0, 3))
    expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
    assert same(out.asnumpy(), expected)
    assert same(data.grad.asnumpy(), expected_grad)
    mx.set_np_shape(False)

    # test gradient
    shape = (100, 30)
    a = mx.nd.random.randint(0, 100, shape=shape)
    a.attach_grad()
    bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
    ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
    mx_grad = mx.nd.zeros_like(a)
    mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
    T = 3
    for _ in range(T):
        with mx.autograd.record():
            b = mx.nd.contrib.boolean_mask(a, bi)
            c = mx.nd.contrib.boolean_mask(a, ci)
            su = b.sum() + c.sum()
        su.backward()
    grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape) - 1))
    grad = np.tile(grad, (1,) + shape[1:])
    # grad_req='add' accumulates over the T backward passes
    grad *= T
    assert_allclose(a.grad.asnumpy(), grad)
    a_np = a.asnumpy()
    assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
    assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])


@with_seed()
def test_div_sqrt_dim():
    data_tmp = np.random.normal(0, 1, (5, 10, 8))
    data = mx.symbol.Variable('data')
    test = mx.sym.contrib.div_sqrt_dim(data)
    check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
    check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])


@with_seed()
def test_reciprocal_op():
    eps = 2**(-11)
    data_tmp = np.random.rand(3, 4) * 10 - 5
    # Avoid possible division by 0 errors and finite difference method inaccuracies.
    # Factor of 6 below set empirically, depends on eps.
    # Issue exposed by seed 879579887.
    # Replace problematic inputs with 1.0.
    data_tmp[abs(data_tmp) < 6 * eps] = 1.0
    data = mx.symbol.Variable('data')
    test = mx.sym.reciprocal(data)
    check_numeric_gradient(test, [data_tmp], numeric_eps=eps)
    check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])


@with_seed()
def test_cbrt_op():
    eps = 2**(-11)
    data_tmp = np.random.rand(3, 4) * 10 - 5
    # Avoid finite difference method inaccuracies due to infinite gradient at the origin.
    # Factor of 4 below set empirically, depends on eps.
    # Issue exposed by seed 553872106.
    # Replace problematic inputs with 1.0.
    data_tmp[abs(data_tmp) < 4 * eps] = 1.0
    data = mx.symbol.Variable('data')
    test = mx.sym.cbrt(data)
    check_numeric_gradient(test, [data_tmp], numeric_eps=eps)
    check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])


@with_seed()
def test_rcbrt_op():
    eps = 2**(-11)
    data_tmp = np.random.rand(3, 4) * 10 - 5
    # Avoid possible division by 0 errors and finite difference method inaccuracies.
    # Factor of 4 below set empirically, depends on eps.
    # Issue exposed by seed 788174893.
    # Replace problematic inputs with 1.0.
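# d/dx x**(-1/3) = -(1/3) * x**(-4/3) diverges at the origin, so the exclusion
# band of 4*eps below keeps the finite-difference gradient check well-conditioned.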
data_tmp[abs(data_tmp) < 4*eps] = 1.0 data = mx.symbol.Variable('data') test = mx.sym.rcbrt(data) check_numeric_gradient(test, [data_tmp], numeric_eps = eps) check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)]) @with_seed() def test_custom_op(): class Sqr(mx.operator.CustomOp): def forward(self, is_train, req, in_data, out_data, aux): if in_data[0].stype == 'default': aux[0][:] = 1 self.assign(out_data[0], req[0], in_data[0]*in_data[0]) else: inp = in_data[0] csr_m = inp.data * inp.data out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape) self.assign(out_data[0], req[0], out) if (in_data[0].stype == 'csr'): assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray)) def backward(self, req, out_grad, in_data, out_data, in_grad, aux): self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0])) if in_data[0].stype == 'default': assert (aux[0].asnumpy() == 1).all() @mx.operator.register("sqr") class SqrProp(mx.operator.CustomOpProp): def __init__(self): super(SqrProp, self).__init__(need_top_grad=True) def list_arguments(self): return ['data'] def list_outputs(self): return ['output'] def list_auxiliary_states(self): return ['aux'] def infer_shape(self, in_shape): return in_shape, [in_shape[0]], [in_shape[0]] def infer_type(self, in_type): return in_type, [in_type[0]], [in_type[0]] def infer_storage_type(self, in_stype): if in_stype[0] == 'default': return ['default'], ['default'], ['default'] return ['csr'], ['csr'], ['csr'] def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype): if in_stype[0] == 'default': return ['default'], ['default'], ['default'], ['default'], ['default'] return ['default'], ['csr'], ['csr'], ['csr'], ['csr'] def create_operator(self, ctx, shapes, dtypes): return Sqr() data = mx.symbol.Variable('data') aux = mx.symbol.Variable('aux') op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr') x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10))) aux = mx.nd.zeros_like(x) check_numeric_gradient(op, [x], [aux]) data = mx.symbol.cast(data, dtype='float64') op = mx.symbol.cast(op, dtype='float32') check_numeric_gradient(op, [x], [aux]) data = mx.symbol.Variable('data', stype='csr') aux = mx.symbol.Variable('aux') op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr') x = x.tostype('csr') aux = mx.nd.zeros_like(x) check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"}) x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10))) x2 = x2.tostype('csr') aux2 = mx.nd.zeros_like(x2) x2.attach_grad() with mx.autograd.record(): output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr') output.backward() expected_output = mx.nd.sparse.square(x2) expected_grad = 2 * x2 rtol = 1e-4 atol = 1e-6 assert_almost_equal(output, expected_output, rtol=rtol, atol=atol) assert_almost_equal(x2.grad, expected_grad, rtol=rtol, atol=atol) # test for backward compatibility, i.e. 
    class Mult(mx.operator.CustomOp):
        def forward(self, is_train, req, in_data, out_data, aux):
            self.assign(out_data[0], req[0], in_data[0]*in_data[1])

        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            self.assign(in_grad[0], req[0], in_data[1])
            self.assign(in_grad[1], req[1], in_data[0])

    @mx.operator.register("mult")
    class MultProp(mx.operator.CustomOpProp):
        def __init__(self):
            super(MultProp, self).__init__(need_top_grad=True)

        def list_arguments(self):
            return ['lhs', 'rhs']

        def list_outputs(self):
            return ['output']

        def infer_shape(self, in_shape):
            return in_shape, [in_shape[0]], []

        def create_operator(self, ctx, shapes, dtypes):
            return Mult()

    lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
    rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
    lhs.attach_grad()
    rhs.attach_grad()
    with mx.autograd.record():
        y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
        y.backward()
    assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
    assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)

    class MultNoGrad(mx.operator.CustomOp):
        def forward(self, is_train, req, in_data, out_data, aux):
            self.assign(out_data[0], req[0], in_data[0]*in_data[1])

        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            self.assign(in_grad[0], req[0], in_data[1])
            self.assign(in_grad[1], req[1], in_data[0])

    @mx.operator.register("mult_no_grad")
    class MultNoGradProp(mx.operator.CustomOpProp):
        def __init__(self):
            super(MultNoGradProp, self).__init__(need_top_grad=False)

        def list_arguments(self):
            return ['lhs', 'rhs']

        def list_outputs(self):
            return ['output']

        def infer_shape(self, in_shape):
            return in_shape, [in_shape[0]], []

        def create_operator(self, ctx, shapes, dtypes):
            return MultNoGrad()

        def infer_storage_type_backward(self, ograd_stype, in_stype,
                                        out_stype, igrad_stype, aux_stype):
            return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype

    with mx.autograd.record():
        y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
        y2.backward()
    assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
    assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)

    class NoInputOp(mx.operator.CustomOp):
        def __init__(self, length, depth):
            super(NoInputOp, self).__init__()
            self.output = np.ones(shape=(length, depth), dtype=np.float32)

        def forward(self, is_train, req, in_data, out_data, aux):
            self.assign(out_data[0], req[0], self.output)

        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            pass

    @mx.operator.register("no_input_op")
    class NoInputOpProp(mx.operator.CustomOpProp):
        def __init__(self, length, depth):
            super(NoInputOpProp, self).__init__()
            self.length = int(length)
            self.depth = int(depth)

        def list_arguments(self):
            return []

        def list_outputs(self):
            return ['output']

        def infer_shape(self, in_shape):
            return [], [(self.length, self.depth)], []

        def infer_type(self, in_type):
            return [], [np.float32], []

        def create_operator(self, ctx, shapes, dtypes):
            return NoInputOp(length=self.length, depth=self.depth)

    with mx.autograd.record():
        x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
    assert_almost_equal(x, np.ones(shape=(10, 10), dtype=np.float32))
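
# The test above exercises the full CustomOp contract: the CustomOp subclass
# implements forward/backward, while the registered CustomOpProp declares the
# interface; infer_shape returns three lists (argument, output, auxiliary-state
# shapes) and infer_storage_type_backward returns five storage-type lists in
# the order (ograd, input, output, igrad, aux). A minimal dense-only sketch of
# the same pattern (the class name is illustrative and not registered or used
# by the tests):
class _IdentityOpSketch(mx.operator.CustomOp):
    def forward(self, is_train, req, in_data, out_data, aux):
        # identity: copy the input through, honoring the write/add request `req`
        self.assign(out_data[0], req[0], in_data[0])

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # the gradient of the identity is the incoming output gradient
        self.assign(in_grad[0], req[0], out_grad[0])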

@with_seed()
def test_custom_op_fork():
    # test custom operator fork
    # see https://github.com/apache/incubator-mxnet/issues/14396
    class AdditionOP(mx.operator.CustomOp):
        def __init__(self):
            super(AdditionOP, self).__init__()

        def forward(self, is_train, req, in_data, out_data, aux):
            out_data[0][:] = in_data[0] + in_data[1]

        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            in_grad[0][:] = out_grad[0]
            in_grad[1][:] = out_grad[0]

    @mx.operator.register("AdditionOP")
    class AdditionOPProp(mx.operator.CustomOpProp):
        def __init__(self):
            super(AdditionOPProp, self).__init__()

        def list_arguments(self):
            return ['a', 'b']

        def list_outputs(self):
            return ['output']

        def infer_shape(self, in_shape):
            return in_shape, [in_shape[0]]

        def create_operator(self, ctx, shapes, dtypes):
            return AdditionOP()

    if not sys.platform.startswith('win'):  # no fork in windows
        def custom_add():
            a = mx.nd.array([1, 2, 3])
            b = mx.nd.array([4, 5, 6])
            c = mx.nd.Custom(a, b, op_type='AdditionOP')
            assert_almost_equal((a + b).asnumpy(), c.asnumpy())

        custom_add()
        from multiprocessing import Process
        p = Process(target=custom_add)
        p.daemon = True
        p.start()
        p.join(5)
        assert not p.is_alive() and p.exitcode == 0


def _build_dot_custom(fun_forward, name):
    class Dot(mx.operator.CustomOp):
        def __init__(self):
            super(Dot, self).__init__()

        def forward(self, is_train, req, in_data, out_data, aux):
            fun_forward(in_data, out_data)

        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            pass

    @mx.operator.register(name)
    class DotProp(mx.operator.CustomOpProp):
        def __init__(self):
            super(DotProp, self).__init__()

        def list_arguments(self):
            return ['a', 'b']

        def list_outputs(self):
            return ['output']

        def infer_shape(self, in_shape):
            return in_shape, [(in_shape[0][0], in_shape[1][1])]

        def create_operator(self, ctx, shapes, dtypes):
            return Dot()


@with_seed()
def test_custom_op_exc():
    # test except handling
    # see https://github.com/apache/incubator-mxnet/pull/14693
    # 1. error in python code
    def custom_exc1():
        def f(in_data, out_data):
            assert False
            out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
        _build_dot_custom(f, 'Dot1')
        a = mx.nd.zeros((4, 1))
        b = mx.nd.zeros((1, 4))
        c = mx.nd.Custom(a, b, op_type='Dot1')
        c.wait_to_read()
    assert_raises(MXNetError, custom_exc1)

    # 2. error in pushing operator to engine
    def custom_exc2():
        def f(in_data, out_data):
            out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
        _build_dot_custom(f, 'Dot2')
        a = mx.nd.zeros((4, 2))
        b = mx.nd.zeros((1, 4))
        # trigger error by invalid input shapes of operands
        c = mx.nd.Custom(a, b, op_type='Dot2')
        c.wait_to_read()
    assert_raises(MXNetError, custom_exc2)
    # 3. error in real execution
    if default_context().device_type == 'cpu':
        def custom_exc3():
            def f(in_data, out_data):
                dot = mx.nd.dot(in_data[0], in_data[1])
                # input to Cholesky factorization should be
                # symmetric positive-definite, error will be
                # triggered in op execution on cpu
                out_data[0][:] = mx.nd.linalg.potrf(dot)
                out_data[0].wait_to_read()
            _build_dot_custom(f, 'Dot3')
            a = mx.nd.zeros((2, 1))
            b = mx.nd.zeros((1, 2))
            c = mx.nd.Custom(a, b, op_type='Dot3')
            c.wait_to_read()
        assert_raises(MXNetError, custom_exc3)

        def custom_exc4():
            def f(in_data, out_data):
                dot = mx.nd.dot(in_data[0], in_data[1])
                # input to Cholesky factorization should be
                # symmetric positive-definite, error will be
                # triggered in op execution on cpu
                out_data[0][:] = mx.nd.linalg.potrf(dot)
            _build_dot_custom(f, 'Dot4')
            a = mx.nd.zeros((2, 1))
            b = mx.nd.zeros((1, 2))
            c = mx.nd.Custom(a, b, op_type='Dot4')
            c.wait_to_read()
        assert_raises(MXNetError, custom_exc4)


@with_seed()
def test_psroipooling():
    for num_rois in [1, 2]:
        for num_classes, num_group in itertools.product([2, 3], [2, 3]):
            for image_height, image_width in itertools.product([168, 224], [168, 224]):
                for grad_nodes in [['im_data']]:
                    spatial_scale = 0.0625
                    # note: the deprecated np.int alias is replaced with the builtin int here
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
                    im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
                    rois_data = np.zeros([num_rois, 5])
                    rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
                    rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))

                    im_data_var = mx.symbol.Variable(name="im_data")
                    rois_data_var = mx.symbol.Variable(name="rois_data")
                    op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var,
                                                     spatial_scale=spatial_scale,
                                                     group_size=num_group, pooled_size=num_group,
                                                     output_dim=num_classes, name='test_op')
                    rtol, atol = 1e-2, 1e-3
                    check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
                                           grad_nodes=grad_nodes)


@with_seed()
def test_psroipooling_with_type():
    arg_params = {
        'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}

    # plain psroipooling
    sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
    ctx_list = [{'ctx': mx.cpu(0),
                 'psroipool_data': (1, 18, 14, 14),
                 'psroipool_rois': (2, 5),
                 'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
                {'ctx': mx.cpu(0),
                 'psroipool_data': (1, 18, 14, 14),
                 'psroipool_rois': (2, 5),
                 'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
                {'ctx': mx.cpu(0),
                 'psroipool_data': (1, 18, 14, 14),
                 'psroipool_rois': (2, 5),
                 'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
                ]

    check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
                                               'psroipool_rois': 'null'}, arg_params=arg_params)
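
# In the two ROI-pooling tests above, spatial_scale=0.0625 (= 1/16) maps ROI
# coordinates given in input-image pixels onto a feature map that is 16x
# smaller, which is why feat_height/feat_width are image size * 0.0625
# (e.g. 224 * 0.0625 = 14, matching the (1, 18, 14, 14) data shapes).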

@with_seed()
def test_deformable_convolution():
    for num_batch in [1, 2]:
        for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
            for input_height, input_width in itertools.product([5, 6], [5, 6]):
                for dilate in [(1, 1), (2, 2)]:
                    for grad_nodes in [['im_data'], ['offset_data'], ['weight']]:
                        output_height = input_height
                        output_width = input_width
                        im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
                        offset_data = \
                            np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
                            * 0.8 + 0.1
                        weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
                        bias = np.zeros(num_channel_data)

                        im_data_var = mx.symbol.Variable(name="im_data")
                        offset_data_var = mx.symbol.Variable(name="offset_data")
                        weight_var = mx.symbol.Variable(name="weight")
                        bias_var = mx.symbol.Variable(name="bias")
                        op = mx.sym.contrib.DeformableConvolution(name='test_op', data=im_data_var,
                                                                  offset=offset_data_var,
                                                                  weight=weight_var, bias=bias_var,
                                                                  num_filter=num_channel_data, pad=dilate,
                                                                  kernel=(3, 3), stride=(1, 1), dilate=dilate,
                                                                  num_deformable_group=num_deformable_group)
                        if grad_nodes[0] == 'offset_data':
                            # wider tolerance needed for coordinate differential
                            rtol, atol = 1.0, 1e-2
                        else:
                            rtol, atol = 0.05, 1e-3
                        # By now we only have gpu implementation
                        if default_context().device_type == 'gpu':
                            check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
                                                   grad_nodes=grad_nodes, ctx=mx.gpu(0))


def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h,
                              sample_per_part, part_size, output_dim, num_classes, trans_std,
                              feat_h, feat_w):
    num_rois = input_rois.shape[0]
    output_offset = input_offset.copy()
    # simulate deformable psroipooling forward function
    for roi_idx in range(num_rois):
        sub_rois = input_rois[roi_idx, :].astype(np.float32)
        img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
        roi_start_w = round(x0) * spatial_scale - 0.5
        roi_start_h = round(y0) * spatial_scale - 0.5
        roi_end_w = round(x1 + 1) * spatial_scale - 0.5
        roi_end_h = round(y1 + 1) * spatial_scale - 0.5
        roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
        bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
        sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
        for c_top in range(output_dim):
            channel_each_cls = output_dim / num_classes
            class_id = int(c_top / channel_each_cls)
            for ph in range(pooled_h):
                for pw in range(pooled_w):
                    part_h = int(math.floor(float(ph) / pooled_h * part_size))
                    part_w = int(math.floor(float(pw) / pooled_w * part_size))
                    trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
                    trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
                    bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
                    need_check = True
                    while need_check:
                        pass_check = True
                        for ih in range(sample_per_part):
                            for iw in range(sample_per_part):
                                h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
                                w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
                                if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
                                    continue
                                w = min(max(w, 0.1), feat_w - 1.1)
                                h = min(max(h, 0.1), feat_h - 1.1)
                                # if the following condition holds, the sampling location is not differentiable
                                # therefore we need to re-do the sampling process
                                if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 \
                                        or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
                                    trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
                                    pass_check = False
                                    break
                            if not pass_check:
                                break
                        if pass_check:
                            output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
                            output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
                            need_check = False

    return output_offset
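
# Why _validate_sample_location re-draws offsets: bilinear interpolation is
# piecewise linear in (h, w), so its derivative is discontinuous whenever a
# sampling point lands (numerically) on an integer grid line. Near such points
# the finite-difference gradient check below would disagree with the analytic
# gradient, hence offsets are resampled until every sampling location is at
# least ~1e-3 away from the grid.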

@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
@with_seed()
def test_deformable_psroipooling():
    sample_per_part = 4
    trans_std = 0.1
    for num_rois in [1, 2]:
        for num_classes, num_group in itertools.product([2, 3], [2, 3]):
            for image_height, image_width in itertools.product([160, 224], [160, 224]):
                for grad_nodes in [['im_data'], ['offset_data']]:
                    spatial_scale = 0.0625
                    stride = int(1 / spatial_scale)
                    # note: the deprecated np.int alias is replaced with the builtin int here
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
                    im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
                    rois_data = np.zeros([num_rois, 5])
                    rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
                    rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
                    offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
                    # at certain points, the bilinear interpolation function may be non-differentiable
                    # to avoid this, we check whether the input locates on the valid points
                    offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale,
                                                            num_group, num_group, sample_per_part,
                                                            num_group, num_classes, num_classes,
                                                            trans_std, feat_height, feat_width)

                    im_data_var = mx.symbol.Variable(name="im_data")
                    rois_data_var = mx.symbol.Variable(name="rois_data")
                    offset_data_var = mx.symbol.Variable(name="offset_data")
                    op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
                                                               trans=offset_data_var,
                                                               spatial_scale=spatial_scale,
                                                               sample_per_part=4, group_size=num_group,
                                                               pooled_size=num_group,
                                                               output_dim=num_classes, trans_std=0.1,
                                                               no_trans=False, name='test_op')
                    rtol, atol = 1e-2, 1e-3
                    # By now we only have gpu implementation
                    if default_context().device_type == 'gpu':
                        check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
                                               grad_nodes=grad_nodes, ctx=mx.gpu(0))


def _gemm_test_helper(dtype, grad_check, rtol_fw = 1e-7, atol_fw = 1e-9):
    num_eps = 1e-6
    rtol_bw = 1e-5
    atol_bw = 1e-6

    data1 = mx.symbol.Variable('data1')
    data2 = mx.symbol.Variable('data2')
    data3 = mx.symbol.Variable('data3')

    check_fw = lambda sym, location, expected :\
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)
    check_grad = lambda sym, location:\
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
                               atol=atol_bw, dtype=dtype)
    rep_3x = lambda a, m, n :\
        np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))

    shape1 = (2, 3)
    shape2 = (3, 2)
    shape3 = (3, 3)
    shape4 = (2, 2)
    data_in1 = np.random.uniform(1, 10, shape1).astype(dtype)
    data_in2 = np.random.uniform(1, 10, shape2).astype(dtype)
    data_in3 = np.random.uniform(1, 10, shape3).astype(dtype)
    data_in4 = np.random.uniform(1, 10, shape4).astype(dtype)
    # Check all transpositions of gemm operator.
    data_in1_t = np.transpose(data_in1)
    data_in2_t = np.transpose(data_in2)
    res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
    test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
    check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
    if grad_check == 1:
        check_grad(test_gemm, [data_in1, data_in2, data_in4])
    res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
    test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
                                   transpose_a=True, transpose_b=True)
    check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
    if grad_check == 1:
        check_grad(test_gemm, [data_in1, data_in2, data_in3])
    res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
    test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
                                   transpose_a=True)
    check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
    if grad_check == 1:
        check_grad(test_gemm, [data_in1, data_in1, data_in3])
    res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
    test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
                                   transpose_b=True)
    check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
    if grad_check == 1:
        check_grad(test_gemm, [data_in1, data_in1, data_in4])

    # Check batch of gemm.
    a = rep_3x(data_in1, 2, 3)
    b = rep_3x(data_in2, 3, 2)
    c = rep_3x(data_in4, 2, 2)
    r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
    r = rep_3x(r, 2, 2)
    test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
    check_fw(test_gemm, [a, b, c], [r])
    if grad_check == 1:
        check_grad(test_gemm, [a, b, c])
    # Check for different axis that describes matrix rows.
    a2 = np.copy(np.swapaxes(a, 0, 2))
    b2 = np.copy(np.swapaxes(b, 0, 2))
    c2 = np.copy(np.swapaxes(c, 0, 2))
    r2 = np.copy(np.swapaxes(r, 0, 2))
    test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
    check_fw(test_gemm, [a2, b2, c2], [r2])
    if grad_check == 1:
        check_grad(test_gemm, [a2, b2, c2])
    a2 = np.copy(np.swapaxes(a, 1, 2))
    b2 = np.copy(np.swapaxes(b, 1, 2))
    c2 = np.copy(np.swapaxes(c, 1, 2))
    r2 = np.copy(np.swapaxes(r, 1, 2))
    test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
    check_fw(test_gemm, [a2, b2, c2], [r2])
    if grad_check == 1:
        check_grad(test_gemm, [a2, b2, c2])

    # Check gemm2 operator same way as gemm.
    res_gemm = 4. * np.dot(data_in1, data_in2)
    test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
    check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
    if grad_check == 1:
        check_grad(test_gemm, [data_in1, data_in2])
    res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
    test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
                                    transpose_b=True)
    check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
    if grad_check == 1:
        check_grad(test_gemm, [data_in1, data_in2])
    res_gemm = 4. * np.dot(data_in1_t, data_in1)
    test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
    check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
    if grad_check == 1:
        check_grad(test_gemm, [data_in1, data_in1])
    res_gemm = 4. * np.dot(data_in1, data_in1_t)
    test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
    check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
    if grad_check == 1:
        check_grad(test_gemm, [data_in1, data_in1])

    # Check batch of gemm2.
    a = rep_3x(data_in1, 2, 3)
    b = rep_3x(data_in2, 3, 2)
    r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
    test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
    check_fw(test_gemm, [a, b], [r])
    if grad_check == 1:
        check_grad(test_gemm, [a, b])
    a2 = np.copy(np.swapaxes(a, 0, 2))
    b2 = np.copy(np.swapaxes(b, 0, 2))
    r2 = np.copy(np.swapaxes(r, 0, 2))
    test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
    check_fw(test_gemm, [a2, b2], [r2])
    if grad_check == 1:
        check_grad(test_gemm, [a2, b2])
    a2 = np.copy(np.swapaxes(a, 1, 2))
    b2 = np.copy(np.swapaxes(b, 1, 2))
    r2 = np.copy(np.swapaxes(r, 1, 2))
    test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
    check_fw(test_gemm, [a2, b2], [r2])
    if grad_check == 1:
        check_grad(test_gemm, [a2, b2])
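
# The helper above validates mx.sym.linalg.gemm against its NumPy definition.
# For reference, a minimal sketch of the semantics being checked, assuming the
# same np import as this file (the function name is illustrative only): gemm
# computes alpha * dot(A, B) + beta * C, with optional transposes of A and B.
def _np_gemm_sketch(A, B, C, alpha=1., beta=0., transpose_a=False, transpose_b=False):
    A = A.T if transpose_a else A
    B = B.T if transpose_b else B
    return alpha * np.dot(A, B) + beta * C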

# Test gemm separately from other la-operators.
@with_seed()
def test_gemm():
    _gemm_test_helper(np.float64, True)
    os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
    _gemm_test_helper(np.float32, False, rtol_fw = 1e-5, atol_fw = 1e-7)
    if default_context().device_type == 'gpu':
        os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "1"
        _gemm_test_helper(np.float32, False, rtol_fw = 2e-5, atol_fw = 2e-7)
        os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"


# Helper functions for test_laop

def _make_symm_symbol(a, ndims):
    assert ndims >= 2
    tr_shape = list(range(ndims))
    tr_shape[-1] = ndims-2
    tr_shape[-2] = ndims-1
    tr_shape = tuple(tr_shape)
    return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))

def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
    assert ndims >= 2
    # The last two dimensions must both be m
    # Create mask for lower triangle and diagonal
    index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
    lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
    for j in range(1, m):
        part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
        index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
        part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
        lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
    if not lower:
        lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
        lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
    shp = tuple([1]*(ndims-2) + [m, m])
    lt_mask = mx.sym.reshape(lt_mask, shape=shp)
    return mx.sym.broadcast_mul(a, lt_mask)

# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@with_seed()
def test_laop():
    dtype = np.float64
    rtol_fw = 1e-7
    atol_fw = 1e-9
    num_eps = 2e-6
    rtol_bw = 1e-5
    atol_bw = 1e-5
    # enable numerical checking of gradients
    grad_check = 1

    data1 = mx.symbol.Variable('data1')
    data2 = mx.symbol.Variable('data2')

    rep_3x = lambda a, m, n :\
        np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))

    def check_fw_grad(sym, location, expected):
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)
        if grad_check == 1:
            check_numeric_gradient(sym, location, numeric_eps=num_eps,
                                   rtol=rtol_bw, atol=atol_bw, dtype=dtype)

    matrix = np.array([[9., 3., -6., 12.],
                       [3., 26., -7., -11.],
                       [-6., -7., 9., 7.],
                       [12., -11., 7., 65.]])
    trian = np.array([[3., 0., 0., 0.],
                      [1., 5., 0., 0.],
                      [-2., -1., 2., 0.],
                      [4., -3., 6., 2.]])
    pow = np.array([[2., 1., 1., 1.],
                    [1., 4., 1., 1.],
                    [1., 1., 8., 1.],
                    [1., 1., 1., 16.]])
    inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
                    [0.05/3., 0.05, 0.05, 0.],
                    [2.65, 0.05, 2.5, -0.75],
                    [-2.5/3., 0., -0.75, 0.25]])
    ident = np.eye(4)
    shape = (4, 4, 1, 1)
    ones = mx.nd.ones(shape).asnumpy()

    for lower in [True, False]:
        upper = not lower

        # Tests with trivial 1x1 matrices.
        data_in = np.random.uniform(1, 10, shape)
        # test potrf
        # Note: Have to symmetrize input, for gradient test to work
        res_potrf = np.sqrt(data_in)
        test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
        check_fw_grad(test_potrf, [data_in], [res_potrf])
        # test potri
        res_potri = np.divide(ones, data_in * data_in)
        test_potri = mx.sym.linalg.potri(data1, lower=lower)
        check_fw_grad(test_potri, [data_in], [res_potri])
        # test trsm
        trian_in = data_in * 7.
        test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
        check_fw_grad(test_trsm, [trian_in, data_in], [ones])
        # test trmm
        trian_in = np.divide(ones, trian_in)
        test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
                                       rightside=True, lower=lower)
        check_fw_grad(test_trmm, [trian_in, data_in], [ones])
        # test sumlogdiag
        res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
        test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
        check_fw_grad(test_sumlogdiag, [data_in], [res_sumlogdiag])

        # more elaborate example of Cholesky factorization
        low_trian = trian
        if upper:
            trian = np.transpose(trian)

        # test potrf
        test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
        a = rep_3x(matrix, 4, 4)
        r = rep_3x(trian, 4, 4)
        check_fw_grad(test_potrf, [a], [r])

        #test potri
        data1_ltri = _make_triangle_symm(
            data1, ndims=4, m=4, lower=lower, dtype=dtype)
        test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
        a = rep_3x(trian, 4, 4)
        r = rep_3x(inv, 4, 4)
        check_fw_grad(test_potri, [a], [r])

        # test trsm
        test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
        b = rep_3x(matrix, 4, 4)
        r = rep_3x(7. * np.transpose(low_trian), 4, 4)
        check_fw_grad(test_trsm, [a, b], [r])

        test_trsm2 = mx.sym.linalg.trsm(
            data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
        r = rep_3x(-2. * low_trian, 4, 4)
        check_fw_grad(test_trsm2, [a, b], [r])

        test_trsm3 = mx.sym.linalg.trsm(
            data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
        b = rep_3x(np.transpose(low_trian), 4, 4)
        r = rep_3x(0.5 * ident, 4, 4)
        check_fw_grad(test_trsm3, [a, b], [r])

        test_trsm4 = mx.sym.linalg.trsm(
            data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
        b = rep_3x(low_trian, 4, 4)
        r = rep_3x(-0.5 * ident, 4, 4)
        check_fw_grad(test_trsm4, [a, b], [r])

        # test trmm
        test_trmm = mx.sym.linalg.trmm(
            data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
        a = [a, rep_3x(matrix, 4, 4)]
        r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
        check_fw_grad(test_trmm, a, [r])

        test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
        r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
        check_fw_grad(test_trmm2, a, [r])

        test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
        r = rep_3x(np.dot(matrix, trian), 4, 4)
        check_fw_grad(test_trmm3, a, [r])

        test_trmm4 = mx.sym.linalg.trmm(
            data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
        r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
        check_fw_grad(test_trmm4, a, [r])

        # test sumlogdiag
        r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
        check_fw_grad(test_sumlogdiag, [rep_3x(pow, 4, 4)], [r])


# Tests for operators linalg.syrk, linalg.gelqf

def _gelqf_combined_symbol(a):
    q, l = mx.sym.linalg.gelqf(a)
    q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
    l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
    return mx.sym.Group([q_qt, l_q])

# NOTE: If we leave the unused output dangling, things break if dtype=np.float64. Namely, the
# backward gradient for the unused output is of dtype np.float32 then.
# ==> Very annoying!
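
# Background for the gelqf tests below: mx.sym.linalg.gelqf computes an LQ
# factorization A = L Q of an m x n input with m <= n, where Q has orthonormal
# rows. _gelqf_combined_symbol above therefore checks both dot(Q, Q.T) == I
# (via syrk) and dot(L, Q) == A (via trmm). An equivalent NumPy check, as a
# sketch (NumPy's qr factorizes A.T = Q' R, so L = R.T and Q = Q'.T; signs may
# differ from gelqf's convention):
def _np_lq_sketch(A):
    Qt, R = np.linalg.qr(A.T)
    return R.T, Qt.T  # L, Q with A == L @ Q and Q @ Q.T == I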
def _gelqf_first_output(a):
    q, l = mx.sym.linalg.gelqf(a)
    bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
    return mx.sym.broadcast_add(q, bogus_scal)

def _gelqf_second_output(a):
    q, l = mx.sym.linalg.gelqf(a)
    bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
    return mx.sym.broadcast_add(l, bogus_scal)

def _syevd_combined_symbol(a):
    u, lam = mx.sym.linalg.syevd(a)
    u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
    lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
    ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
                                   transpose_b=False, name='Ut_L_U')
    return mx.sym.Group([u_ut, ut_lam_u])

@with_seed()
def test_laop_2():
    dtype = np.float64
    rtol_fw = 1e-7
    atol_fw = 1e-9
    num_eps = 1e-6
    rtol_bw = 1e-5
    atol_bw = 1e-6
    # enable numerical checking of gradients
    grad_check = 1

    data1 = mx.symbol.Variable('data1')

    check_fw = lambda sym, location, expected :\
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)
    check_grad = lambda sym, location:\
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
                               atol=atol_bw, dtype=dtype)
    rep_3x = lambda a, m, n :\
        np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))

    # Tests for linalg.syrk
    mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
    for m, n, alpha in mnalpha_lst:
        #print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
        data_in1 = np.random.uniform(1, 10, (m, n))
        res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
        test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
        check_fw(test_syrk1, [data_in1], [res_syrk1])
        if grad_check == 1:
            check_grad(test_syrk1, [data_in1])
        res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
        test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
        check_fw(test_syrk2, [data_in1], [res_syrk2])
        if grad_check == 1:
            check_grad(test_syrk2, [data_in1])
        # Batch mode (3x the same thing)
        a_batch = rep_3x(data_in1, m, n)
        r1_batch = rep_3x(res_syrk1, m, m)
        check_fw(test_syrk1, [a_batch], [r1_batch])
        if grad_check == 1:
            check_grad(test_syrk1, [a_batch])
        r2_batch = rep_3x(res_syrk2, n, n)
        check_fw(test_syrk2, [a_batch], [r2_batch])
        if grad_check == 1:
            check_grad(test_syrk2, [a_batch])

    # Tests for linalg.gelqf
    # Currently disabled on GPU as they need cuda8
    # and MxNet builds use cuda 7.5
    if not (default_context() == mx.cpu()):
        return
    test_gelqf2 = _gelqf_combined_symbol(data1)  # Outputs (dot(Q, Q.T), dot(L, Q))
    test_gelqf_q = _gelqf_first_output(data1)  # Output Q (L is not dangling)
    test_gelqf_l = _gelqf_second_output(data1)  # Output L (Q is not dangling)
    mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
    for m, n in mn_lst:
        #print('gelqf: m={}, n={}'.format(m, n))
        data_in1 = np.random.normal(0., 10., (m, n))
        res_eye = np.eye(m)
        res_a = data_in1
        check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
        if grad_check == 1:
            # A => Q
            check_grad(test_gelqf_q, [data_in1])
            # A => L
            check_grad(test_gelqf_l, [data_in1])
        # Batch mode (3x the same thing)
        a_batch = rep_3x(data_in1, m, n)
        reye_batch = rep_3x(res_eye, m, m)
        ra_batch = a_batch
        check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
        if grad_check == 1:
            # A => Q
            check_grad(test_gelqf_q, [a_batch])
            # A => L
            check_grad(test_gelqf_l, [a_batch])


# Tests for operator linalg.syevd

def _syevd_first_output(a):
    u, lam = mx.sym.linalg.syevd(a)
    bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
    return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
    u, lam = mx.sym.linalg.syevd(a)
    bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
    return mx.sym.broadcast_add(lam, bogus_scal)

def _syevd_forward(a):
    lam, ut = np.linalg.eig(a)
    ind = np.argsort(lam)
    lam = lam[ind]
    u = ut[:, ind].T
    for i in range(0, a.shape[0]):
        _syevd_forw_eigvec_sign(u[i])
    return u, lam

def _syevd_forw_eigvec_sign(v):
    ind = np.argmax(np.abs(v))
    if v[ind] < 0.:
        v[:] = -v

def _syevd_backward(grad_u, grad_l, u, l):
    n = l.size
    assert grad_l.size == n
    assert grad_u.shape == (n, n)
    assert u.shape == (n, n)
    temp = np.dot(grad_u, u.T)
    temp2 = np.diag(grad_l)
    for i in range(1, n):
        for j in range(0, i):
            denom = 2. * (l[i] - l[j])
            elem = (temp[i, j] - temp[j, i])/denom
            temp2[i, j] = elem
            temp2[j, i] = elem
    temp3 = np.dot(u.T, temp2)
    return np.dot(temp3, u)

# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_3():
    # Currently disabled on GPU as syevd needs cuda8
    # and MxNet builds use cuda 7.5
    if not (default_context() == mx.cpu()):
        return
    dtype = np.float64
    rtol_fw = 1e-6
    atol_fw = 1e-6
    num_eps = 1e-4
    rtol_bw = 1e-2
    atol_bw = 1e-2
    # enable numerical checking of gradients
    grad_check = 1

    data1 = mx.symbol.Variable('data1')

    check_fw = lambda sym, location, expected :\
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)
    check_grad = lambda sym, location:\
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
                               atol=atol_bw, dtype=dtype)
    rep_3x = lambda a, m, n :\
        np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
    check_bw = lambda sym, location, out_grads, expected :\
        check_symbolic_backward(sym, location, out_grads, expected,
                                rtol=rtol_fw, atol=atol_fw, dtype=dtype)

    # Tests for linalg.syevd
    test_syevd2 = _syevd_combined_symbol(data1)  # Outputs (U U^T, U^T (diag L) U)
    data1_s2 = _make_symm_symbol(data1, ndims=2)
    test_syevd_u_2 = _syevd_first_output(data1_s2)
    test_syevd_l_2 = _syevd_second_output(data1_s2)
    data1_s4 = _make_symm_symbol(data1, ndims=4)
    test_syevd_u_4 = _syevd_first_output(data1_s4)
    test_syevd_l_4 = _syevd_second_output(data1_s4)
    n_lst = [4, 1, 2, 10, 14]
    for n in n_lst:
        #print('\n** syevd: n={}'.format(n))
        data_in1 = np.random.normal(0., 10., (n, n))
        data_in1 = 0.5 * (data_in1 + data_in1.T)
        res_eye = np.eye(n)
        res_a = data_in1
        check_fw(test_syevd2, [data_in1], [res_eye, res_a])
        # Check backward
        grad_u = np.random.normal(0., 2., (n, n))
        grad_l = np.random.normal(0., 2., (n,))
        bw_u, bw_l = _syevd_forward(data_in1)
        grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
        check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
        if grad_check == 1:
            # A => U
            check_grad(test_syevd_u_2, [data_in1])
            # A => L
            check_grad(test_syevd_l_2, [data_in1])
        # Batch mode (3x the same thing)
        a_batch = rep_3x(data_in1, n, n)
        reye_batch = rep_3x(res_eye, n, n)
        ra_batch = a_batch
        check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
        if grad_check == 1:
            # A => U
            check_grad(test_syevd_u_4, [a_batch])
            # A => L
            check_grad(test_syevd_l_4, [a_batch])
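
# Note on _syevd_backward above: for the symmetric eigendecomposition
# A = U^T diag(l) U used here (rows of U are eigenvectors), the reverse-mode
# gradient assembled by that helper is
#     dA = U^T M U,  with  M[i, i] = dl[i]  and, for i != j,
#     M[i, j] = (G[i, j] - G[j, i]) / (2 * (l[i] - l[j])),  where G = dU U^T.
# The formula assumes distinct eigenvalues; that is why test_laop_3 pins a
# seed rather than running on arbitrary random matrices, where near-degenerate
# eigenvalues would make the check unstable.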

# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
@with_seed()
def test_laop_4():
    # Currently disabled on GPU as syevd needs cuda8
    # and MxNet builds use cuda 7.5
    if not (default_context() == mx.cpu()):
        return

    rtol_fw = 1e-6
    atol_fw = 1e-6

    data1 = mx.symbol.Variable('data1')

    check_fw = lambda sym, location, expected, dtype :\
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)

    a_np = np.array([[1., 2.], [2., 4.]])
    u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
    l_np = np.array([0., 5.])
    test_syevd = mx.sym.linalg.syevd(data1)
    # float64
    #print('float64')
    check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
    # float32
    #print('float32')
    check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)

def test_laop_5():
    # tests for diagonal and triangular matrix extraction and generation
    data = mx.symbol.Variable('data')
    # test complete range of small matrices to cover corner cases
    for n in range(1, 5):
        # test batched and non-batched processing
        for b in range(3):
            shape = (n, n) if b == 0 else (b, n, n)
            data_in = np.random.uniform(1, 10, shape)
            # test all legal offsets of the diagonal
            for offs in range(1-n, n):
                # test extraction of diagonal
                test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
                res_diag = np.diagonal(data_in, offset=offs) if b==0 \
                    else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
                check_symbolic_forward(test_diag, [data_in], [res_diag])
                check_numeric_gradient(test_diag, [data_in])
                # test generation of diagonal matrix
                test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
                res_diag2 = None
                if b == 0:
                    res_diag2 = np.diagflat(res_diag, k=offs)
                else:
                    for i in range(b):
                        res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
                        res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
                check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
                check_numeric_gradient(test_diag2, [res_diag])
                # check both settings for parameter "lower" in case of zero offset
                lower_vals = [True] if offs != 0 else [True, False]
                for lower in lower_vals:
                    # test extraction of triangle by doing a full roundtrip as the intermediate extracted
                    # triangle has different orderings than numpy.
                    test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
                    test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
                    extracts_lower = (offs < 0) or ((offs == 0) and lower)
                    res_trian = None
                    if b == 0:
                        res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
                    else:
                        for i in range(b):
                            res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
                            res = np.reshape(res, (1, n, n))
                            res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
                    check_symbolic_forward(test_trian, [data_in], [res_trian])
                    check_numeric_gradient(test_trian, [data_in])

# Tests for linalg.inverse
@with_seed()
@unittest.skip("Test crashes https://github.com/apache/incubator-mxnet/issues/15975")
def test_laop_6():
    dtype = np.float64
    rtol_fw = 1e-7
    atol_fw = 1e-9
    num_eps = 1e-6
    rtol_bw = 1e-4
    atol_bw = 1e-6

    data = mx.symbol.Variable('data')

    check_fw = lambda sym, location, expected:\
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)
    check_grad = lambda sym, location:\
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
                               atol=atol_bw, dtype=dtype)

    ## det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so it's always invertible;
    ## det is away from zero, so the value of logdet is stable
    v = np.random.random(4)
    a = np.eye(4) + np.outer(v, v)
    a = np.tile(a, (3, 1, 1))
    permute_mat = np.eye(4)[[1, 0, 2, 3]]

    # test matrix inverse
    r = np.eye(4)
    r = np.tile(r, (3, 1, 1))
    test_inverse = mx.sym.linalg.inverse(data)
    test_eye = mx.sym.linalg.gemm2(data, test_inverse)
    check_fw(test_eye, [a], [r])
    check_grad(test_inverse, [a])

    # test matrix determinant
    # det
    r = np.linalg.det(a)
    test_det = mx.sym.linalg.det(data)
    check_fw(test_det, [a], [r])
    check_grad(test_det, [a])
    # test slogdet
    r1 = np.array([1., 1., 1.])
    r2 = np.log(np.abs(np.linalg.det(a)))
    test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
    check_fw(test_sign, [a], [r1])
    check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
    check_fw(test_logabsdet, [a], [r2])
    check_grad(test_logabsdet, [a])


@with_seed()
def test_stack():
    for _ in range(100):
        ndim = random.randint(1, 5)
        axis = random.randint(0, ndim)
        if random.randint(0, 1):
            axis = axis - ndim - 1
        nin = random.randint(1, 3)
        dshape = [random.randint(1, 5) for _ in range(ndim)]
        inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
        output = np.stack(inputs, axis=axis)
        sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
        out = mx.sym.stack(*sym_ins, axis=axis)
        check_symbolic_forward(out, inputs, [output])
        check_numeric_gradient(out, inputs)


@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/14288")
def test_dropout():
    def zero_count(array, ratio):
        zeros = 0
        for i in array:
            if i == 0:
                zeros += 1
            elif math.isnan(i):
                assert ratio == 1  # Only valid for ratio = 1
                zeros += 1
        return zeros

    def check_correctness(executor, input, ratio):
        input = input.ravel()
        output = executor.outputs[0].asnumpy().ravel()
        input_sum = np.sum(input)
        output_sum = np.sum(output)

        # Make sure input zeroes are none (test data setup check)
        assert zero_count(input, ratio) == 0

        # count number of zeroes in output
        output_zeroes = zero_count(output, ratio)

        # Hopefully should be within ratio/2 %
        error = abs(output_sum - input_sum) / input_sum
        if ratio == 1.0:
            assert output_zeroes == len(input)
        elif ratio > 0.2:
            assert output_zeroes > 0
            assert error < (ratio/2)
        elif ratio == 0:
            assert output_zeroes == 0

    def check_dropout_ratio(ratio, shape, cudnn_off=True):
        # test dropout
        x = mx.sym.var('data')
        y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
        exe = y.simple_bind(ctx=default_context(), data=shape)

        if ratio == 1:
            max_value = float('nan')
        else:
            max_value = 1 if ratio == 0 else 1/ratio

        if ratio == 1:
            min_value = float('nan')
        else:
            min_value = 1 if ratio == 0 else 0

        exe.arg_arrays[0][:] = 1
        exe.forward(is_train=True)

        if not math.isnan(max_value):
            assert exe.outputs[0].asnumpy().max() > 0
        else:
            assert math.isnan(exe.outputs[0].asnumpy().max())
        if not math.isnan(min_value):
            assert exe.outputs[0].asnumpy().min() == min_value
        else:
            assert math.isnan(exe.outputs[0].asnumpy().min())

        check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)

        if ratio == 0.5:
            exe.backward([mx.nd.ones(shape)])
            assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()

            exe.forward(is_train=False)
            assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
            exe.backward([mx.nd.ones(shape)], is_train=False)
            assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()

            # test permanent dropout
            x = mx.sym.var('data')
            y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
            exe = y.simple_bind(ctx=default_context(), data=shape)

            exe.arg_arrays[0][:] = 1
            exe.forward(is_train=True)
            assert exe.outputs[0].asnumpy().max() == max_value
            assert exe.outputs[0].asnumpy().min() == min_value
            exe.backward([mx.nd.ones(shape)])
            assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()

            exe.forward(is_train=False)
            assert exe.outputs[0].asnumpy().max() == max_value
            assert exe.outputs[0].asnumpy().min() == min_value
            exe.backward([mx.nd.ones(shape)], is_train=False)
            assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()

    def get_slice(x, axis, idx):
        ix = ()
        for i in range(x.ndim):
            if i == axis:
                ix += (idx,)
            else:
                ix += (slice(None, None, None),)
        return x[ix]

    def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
        compactshape = list(shape)
        for axis in axes:
            compactshape[axis] = 1
        compactx = mx.random.uniform(shape=tuple(compactshape))
        broadcastx = compactx.broadcast_to(shape)
        dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
        for axis in axes:
            target = get_slice(dropouty, axis, 0).asnumpy()
            for i in range(1, shape[axis]):
                assert(get_slice(dropouty, axis, i).asnumpy() == target).all()

    def check_passthrough(ratio, shape, cudnn_off=True):
        # test inference_mode forward and then backward
        a = mx.random.uniform(shape=shape)
        a.attach_grad()
        with mx.autograd.record(train_mode=False):
            b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off)  # dropout acts as identity
        b.backward()
        assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())

    shape = (100, 100)
    check_dropout_ratio(0.5, shape)
    check_dropout_ratio(0.0, shape)
    check_dropout_ratio(1.0, shape)
    check_dropout_ratio(0.75, shape)
    check_dropout_ratio(0.25, shape)
    check_dropout_ratio(0.5, shape, cudnn_off=False)
    check_dropout_ratio(0.0, shape, cudnn_off=False)
    check_dropout_ratio(1.0, shape, cudnn_off=False)
    check_dropout_ratio(0.75, shape, cudnn_off=False)
    check_dropout_ratio(0.25, shape, cudnn_off=False)

    check_passthrough(0.5, shape)
    check_passthrough(0.0, shape)
    check_passthrough(1.0, shape)
    check_passthrough(0.5, shape, cudnn_off=False)
    check_passthrough(0.0, shape, cudnn_off=False)
    check_passthrough(1.0, shape, cudnn_off=False)

    nshape = (10, 10, 10, 10)
    with mx.autograd.train_mode():
        check_dropout_axes(0.25, nshape, axes = (0,))
        check_dropout_axes(0.25, nshape, axes = (1,))
        check_dropout_axes(0.25, nshape, axes = (2,))
        check_dropout_axes(0.25, nshape, axes = (3,))
        check_dropout_axes(0.25, nshape, axes = (0, 1))
        check_dropout_axes(0.25, nshape, axes = (0, 2))
        check_dropout_axes(0.25, nshape, axes = (0, 3))
        check_dropout_axes(0.25, nshape, axes = (1, 2))
        check_dropout_axes(0.25, nshape, axes = (1, 3))
        check_dropout_axes(0.25, nshape, axes = (2, 3))
        check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
        check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
        check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
        check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)


@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
@with_seed()
def test_scatter_gather_nd():
    def check(data, idx):
        data.attach_grad()
        with mx.autograd.record():
            y = mx.nd.gather_nd(data, idx)
            y.backward(y)
        npidx = tuple(i.asnumpy() for i in idx)
        assert (data.asnumpy()[npidx] == y.asnumpy()).all()
        npdata = np.zeros_like(data.asnumpy())
        npdata[npidx] = y.asnumpy()
        assert (npdata == data.grad.asnumpy()).all()
        assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() ==
                data.grad.asnumpy()).all()
    for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
        data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
        idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
        check(data, idx)

        idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
        check(data, idx)

        data = mx.nd.array([2, 3, 0], dtype=dtype)
        idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
        assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()

        data = mx.nd.array([2, 3, 0], dtype=dtype)
        idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
        assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() ==
                [[0, 0], [0, 5]]).all()
        data_npy = np.random.randint(0, 10, (100,))
        data = mx.nd.array(data_npy, dtype=dtype)
        idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
        assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
        if dtype == 'int64':
            data = mx.nd.array([2123162361283621, -31231236374787,
                                -112372937128970, -1378278798172378], dtype=dtype)
            idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
            assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() ==
                    data.asnumpy().sum())

def compare_forw_backw_unary_op(
        name, forward_mxnet_call, forward_numpy_call,
        backward_numpy_call, shape, input_low, input_high, rtol, atol,
        dtype=np.float32):
    check_fw = lambda sym, location, expected :\
        check_symbolic_forward(sym, location, expected, rtol=rtol,
                               atol=atol, dtype=dtype)
    check_bw = lambda sym, location, out_grads, expected :\
        check_symbolic_backward(sym, location, out_grads, expected,
                                rtol=rtol, atol=atol, dtype=dtype)
    op_name = 'unary_op={}, dtype={}'.format(name, dtype)
    data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
    # Comparison: Forward expression
    data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
    res_np = forward_numpy_call(data_np)
    op_ex = mx.sym.broadcast_add(
        forward_mxnet_call(data), mx.sym.zeros_like(data), name=op_name)
    check_fw(op_ex, [data_np], [res_np])
    # Comparison: Backward expression
    res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
    data_grad = backward_numpy_call(data_np) * res_grad
    check_bw(op_ex, [data_np], [res_grad], [data_grad])

def finite_diff_unary_op(
        name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
        num_eps):
    # Finite difference tests are done in float64
    dtype = np.float64
    check_grad = lambda sym, location:\
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
                               atol=atol, dtype=dtype)
    data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
    data = mx.symbol.Variable('data', dtype=dtype)
    op_name = 'unary_op={}, dtype={}'.format(name, dtype)
    op_ex = mx.sym.broadcast_add(
        forward_mxnet_call(data), mx.sym.zeros_like(data), name=op_name)
    check_grad(op_ex, [data_np])
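
# For reference, the smooth-L1 checked by np_smooth_l1 / np_smooth_l1_grad
# below is the standard piecewise definition with parameter sigma:
#     f(x) = 0.5 * (sigma * x)^2      if |x| < 1 / sigma^2
#     f(x) = |x| - 0.5 / sigma^2      otherwise
# so the gradient is sigma^2 * x inside the quadratic region and sign(x)
# outside; the two pieces agree in value and slope at |x| = 1 / sigma^2.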
def np_smooth_l1(x, sigma):
    issq = 1. / sigma / sigma
    absx = np.abs(x)
    temp = x * sigma
    return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)

def np_smooth_l1_grad(x, sigma):
    ssq = sigma * sigma
    return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))

# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@with_seed(192837465)
def test_unary_math_operators():
    have_scipy = True
    try:
        from scipy import special as scipy_special
    except:
        print("Could not import scipy. Skipping unit tests for special functions")
        have_scipy = False
    shape=(9, 10)
    dtype_l = [np.float64, np.float32, np.float16]
    rtol_l = [1e-7, 1e-6, 1e-2]
    rtol_less_l = [1e-6, 1e-5, 1e-2]
    atol_l = [1e-7, 1e-6, 1e-2]
    atol_less_l = [1e-6, 1e-5, 1e-2]
    rtol_fd = 1e-5
    atol_fd = 1e-6
    num_eps = 1e-6
    unary_ops = {
        'arccos' : [lambda x: mx.sym.arccos(x),
                    lambda x: np.arccos(x),
                    lambda x: -1. / np.sqrt(1. - x ** 2.),
                    -0.95, 0.95],
        'arccosh': [lambda x: mx.sym.arccosh(x),
                    lambda x: np.arccosh(x),
                    lambda x: 1. / np.sqrt(x ** 2 - 1.),
                    1.05, 10.0],
        'arcsin': [lambda x: mx.sym.arcsin(x),
                   lambda x: np.arcsin(x),
                   lambda x: 1. / np.sqrt(1. - x ** 2),
                   -0.95, 0.95],
        'arcsinh': [lambda x: mx.sym.arcsinh(x),
                    lambda x: np.arcsinh(x),
                    lambda x: 1. / np.sqrt(x**2 + 1.),
                    -5.0, 5.0],
        'arctan': [lambda x: mx.sym.arctan(x),
                   lambda x: np.arctan(x),
                   lambda x: 1. / (x ** 2. + 1.),
                   -5.0, 5.0],
        'arctanh': [lambda x: mx.sym.arctanh(x),
                    lambda x: np.arctanh(x),
                    lambda x: 1. / (1. - x ** 2),
                    -0.95, 0.95],
        'cbrt': [lambda x: mx.sym.cbrt(x),
                 lambda x: np.cbrt(x),
                 lambda x: 1. / (3. * np.cbrt(x) ** 2),
                 -10.0, 10.0],
        'cos': [lambda x: mx.sym.cos(x),
                lambda x: np.cos(x),
                lambda x: -np.sin(x),
                -5.0, 5.0],
        'cosh': [lambda x: mx.sym.cosh(x),
                 lambda x: np.cosh(x),
                 lambda x: np.sinh(x),
                 -2.0, 2.0],
        'exp': [lambda x: mx.sym.exp(x),
                lambda x: np.exp(x),
                lambda x: np.exp(x),
                -4.0, 4.0],
        'expm1': [lambda x: mx.sym.expm1(x),
                  lambda x: np.expm1(x),
                  lambda x: np.exp(x),
                  -0.1, 0.1],
        'log': [lambda x: mx.sym.log(x),
                lambda x: np.log(x),
                lambda x: 1. / x,
                0.01, 100.0],
        'log10': [lambda x: mx.sym.log10(x),
                  lambda x: np.log10(x),
                  lambda x: 1. / (x * np.log(10.)),
                  0.01, 100.0],
        'log2': [lambda x: mx.sym.log2(x),
                 lambda x: np.log2(x),
                 lambda x: 1. / (x * np.log(2.)),
                 0.01, 100.0],
        'log1p': [lambda x: mx.sym.log1p(x),
                  lambda x: np.log1p(x),
                  lambda x: 1. / (1. + x),
                  -0.1, 0.1],
        'rcbrt': [lambda x: mx.sym.rcbrt(x),
                  lambda x: 1. / np.cbrt(x),
                  lambda x: -1. / (3. * x * np.cbrt(x)),
                  0.01, 100.0],
        'reciprocal': [lambda x: mx.sym.reciprocal(x),
                       lambda x: 1. / x,
                       lambda x: -1. / (x ** 2),
                       0.01, 100.0],
        'relu': [lambda x: mx.sym.relu(x),
                 lambda x: np.maximum(x, 0.),
                 lambda x: 1. * (x > 0.),
                 -5.0, 5.0],
        'rsqrt': [lambda x: mx.sym.rsqrt(x),
                  lambda x: 1. / np.sqrt(x),
                  lambda x: -0.5 / (x * np.sqrt(x)),
                  0.01, 100.0],
        'sigmoid': [lambda x: mx.sym.sigmoid(x),
                    lambda x: 1. / (np.exp(-x) + 1.),
                    lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
                    -3.0, 3.0],
        'softsign': [lambda x: mx.sym.softsign(x),
                     lambda x: x / (1. + np.abs(x)),
                     lambda x: 1. / np.square(1. + np.abs(x)),
                     -3.0, 3.0],
        'sin': [lambda x: mx.sym.sin(x),
                lambda x: np.sin(x),
                lambda x: np.cos(x),
                -5.0, 5.0],
        'sinh': [lambda x: mx.sym.sinh(x),
                 lambda x: np.sinh(x),
                 lambda x: np.cosh(x),
                 -2.0, 2.0],
        'sqrt': [lambda x: mx.sym.sqrt(x),
                 lambda x: np.sqrt(x),
                 lambda x: 0.5 / np.sqrt(x),
                 0.01, 100.0],
        'tan': [lambda x: mx.sym.tan(x),
                lambda x: np.tan(x),
                lambda x: np.tan(x) ** 2 + 1.,
                -1.5, 1.5],
        'tanh': [lambda x: mx.sym.tanh(x),
                 lambda x: np.tanh(x),
                 lambda x: 1. - np.tanh(x) ** 2,
                 -4.0, 4.0],
        'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
                           lambda x: np_smooth_l1(x, 1.),
                           lambda x: np_smooth_l1_grad(x, 1.),
                           -2.0, 2.0],
        'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
                                  lambda x: np_smooth_l1(x, 1.),
                                  lambda x: np_smooth_l1_grad(x, 1.),
                                  -2.0, 2.0],
        'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
                           lambda x: np_smooth_l1(x, 2.),
                           lambda x: np_smooth_l1_grad(x, 2.),
                           -1.0, 1.0]
    }
    if have_scipy:
        unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
                              lambda x: scipy_special.gamma(x),
                              lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
                              0.01, 5.0]
        unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
                                lambda x: scipy_special.gammaln(x),
                                lambda x: scipy_special.psi(x),
                                0.01, 20.0]
    # Loop over operators
    for name, op in unary_ops.items():
        # Loop over dtype's
        for ind in range(len(dtype_l)):
            dtype = dtype_l[ind]
            if name == 'gammaln' or name == 'gamma':
                rtol = rtol_less_l[ind]
                atol = atol_less_l[ind]
            else:
                rtol = rtol_l[ind]
                atol = atol_l[ind]
            compare_forw_backw_unary_op(
                name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
                dtype)
        # Finite difference testing
        finite_diff_unary_op(
            name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)

def compare_forw_backw_binary_op(
        name, forward_mxnet_call, forward_numpy_call,
        backward1_numpy_call, backward2_numpy_call, shape, input1_low,
        input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
    check_fw = lambda sym, location, expected :\
        check_symbolic_forward(sym, location, expected, rtol=rtol,
                               atol=atol, dtype=dtype)
    check_bw = lambda sym, location, out_grads, expected :\
        check_symbolic_backward(sym, location, out_grads, expected,
                                rtol=rtol, atol=atol, dtype=dtype)
    op_name = 'binary_op={}, dtype={}'.format(name, dtype)
    data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
    data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
    # Comparison: Forward expression
    data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
    data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
    res_np = forward_numpy_call(data1_np, data2_np)
    op_ex = mx.sym.broadcast_add(
        forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
        name=op_name)
    check_fw(op_ex, [data1_np, data2_np], [res_np])
    # Comparison: Backward expression
    res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
    data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
    data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
    check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])

def finite_diff_binary_op(
        name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
        input2_high, rtol, atol, num_eps):
    # Finite difference tests are done in float64
    dtype = np.float64
    check_grad = lambda sym, location:\
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
                               atol=atol, dtype=dtype)
    data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
    data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
    data1 = mx.symbol.Variable('data1', dtype=dtype)
    data2 = mx.symbol.Variable('data2', dtype=dtype)
    op_name = 'binary_op={}, dtype={}'.format(name, dtype)
    op_ex = mx.sym.broadcast_add(
        forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
        name=op_name)
    check_grad(op_ex, [data1_np, data2_np])

# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
@with_seed()
def test_binary_math_operators():
    shape=(9, 10)
    dtype_l = [np.float64, np.float32, np.float16]
    rtol_l = [1e-7, 1e-6, 1e-2]
    atol_l = [1e-7, 1e-6, 1e-2]
    rtol_fd = 1e-5
    atol_fd = 1e-6
    num_eps = 1e-6
    binary_ops = {
        'hypot' : [lambda x, y: mx.sym.hypot(x, y),
                   lambda x, y: np.hypot(x, y),
                   lambda x, y: x / np.hypot(x, y),
                   lambda x, y: y / np.hypot(x, y),
                   -5.0, 5.0, -5.0, 5.0],
        'pow': [lambda x, y: mx.sym.pow(x, y),
                lambda x, y: np.power(x, y),
                lambda x, y: np.power(x, y - 1.) * y,
                lambda x, y: np.power(x, y) * np.log(x),
                0.2, 5.0, -4.0, 4.0],
        'power': [lambda x, y: mx.sym.power(x, y),
                  lambda x, y: np.power(x, y),
                  lambda x, y: np.power(x, y - 1.) * y,
                  lambda x, y: np.power(x, y) * np.log(x),
                  0.2, 5.0, -4.0, 4.0]
    }
    # Loop over operators
    for name, op in binary_ops.items():
        # Loop over dtype's
        for ind in range(len(dtype_l)):
            dtype = dtype_l[ind]
            compare_forw_backw_binary_op(
                name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
                op[7], rtol_l[ind], atol_l[ind], dtype)
        # Finite difference testing
        finite_diff_binary_op(
            name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
            num_eps)

@with_seed()
def test_softmax():
    check_softmax_with_shape((3, 4), default_context(), preserve_shape=False)
    check_softmax_with_shape((3, 4), default_context(), preserve_shape=True)
    check_softmax_with_shape((3, 4, 2), default_context(), preserve_shape=True)
    check_softmax_grad(default_context())
    check_smoothed_softmax_grad(default_context())
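
# Background for the normalization test below: for softmax cross-entropy the
# input gradient is softmax(data) - one_hot(label); SoftmaxOutput additionally
# scales it by grad_scale and, depending on `normalization`, divides by the
# batch size ('batch') or by the number of non-ignored labels ('valid'). The
# reference computation inside the test reproduces exactly this.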
'valid': scale /= H * W data_grad *= scale assert_almost_equal(data.grad.asnumpy(), data_grad.asnumpy()) for multi_output in [False, True]: for use_ignore in [False, True]: for normalization in ['null', 'batch', 'valid']: _softmaxoutput_normalization( multi_output, use_ignore, normalization) @with_seed() def test_slice(): def test_slice_forward_backward(a, index): a_np = a.asnumpy() begin = [] end = [] step = [] for slice_i in index: begin.append(slice_i.start) end.append(slice_i.stop) step.append(slice_i.step) b = mx.nd.slice(a, begin=begin, end=end, step=step) b_np = a_np[index] assert same(b.asnumpy(), b_np) data = mx.sym.Variable('data') slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step) expected_in_grad = np.zeros_like(a_np) expected_in_grad[index] = b_np check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad]) shape = (16, 14, 17, 20) arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape) index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)), (slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)), (slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)), (slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)), (slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))] for index in index_list: test_slice_forward_backward(arr, index) # check numeric gradient in_data = np.arange(36).reshape(2, 2, 3, 3) data = mx.sym.Variable('data') slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1]) check_numeric_gradient(slice_sym, [in_data]) def test_slice_partial_infer(): def check_slice_partial_infer(data, begin, end, step, expected_out_shape): out = mx.sym.slice(data, begin=begin, end=end, step=step) assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1] def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape): out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end) assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1] var1 = mx.sym.var(name="data", shape=(0, 20)) check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10)) check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5)) check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7)) check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7)) check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7)) check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7)) check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3)) var1 = mx.sym.var(name="data", shape=(10, 0)) check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0)) check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0)) with mx.np_shape(): var1 = mx.sym.var(name="data", shape=(-1, 20)) check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10)) check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5)) check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7)) check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7)) check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7)) check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7)) check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3)) var1 = mx.sym.var(name='data', shape=(10, -1)) check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1)) check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1)) @with_seed() def test_float16_min_max(): """Test for issue: 
https://github.com/apache/incubator-mxnet/issues/9007""" a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16') assert a.dtype == np.float16 assert np.finfo('float16').min == mx.nd.min(a).asscalar() assert np.finfo('float16').max == mx.nd.max(a).asscalar() @with_seed() @mx.use_np_shape def test_zero_size_min_max(): def min(): a = mx.nd.zeros(shape=(5, 0)) a.min() def max(): a = mx.nd.zeros(shape=(5, 0)) a.max() assert_raises(MXNetError, min) assert_raises(MXNetError, max) @with_seed() def test_squeeze_op(): def check_squeeze_op(shape, axis=None): data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape) if axis is None: out = mx.nd.squeeze(data).asnumpy() out_expected = np.squeeze(data.asnumpy()) else: out = mx.nd.squeeze(data, axis=axis).asnumpy() out_expected = np.squeeze(data.asnumpy(), axis=axis) if out.shape == (1,): # as an exception (1, 1, 1) will be squeezed to (1,) out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))])) assert same(out, out_expected) # check forward check_squeeze_op((1, 5, 1, 3, 1), 0) check_squeeze_op((1, 5, 1, 3, 1), 2) check_squeeze_op((1, 5, 1, 3, 1), 4) check_squeeze_op((1, 5, 1, 3, 1), (0, 4)) check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4)) check_squeeze_op((1, 5, 1, 3, 1)) check_squeeze_op((1, 1, 1, 1)) # check gradient data = mx.symbol.Variable('data') shape = (1, 2, 1, 3, 1) data_tmp = np.ones(shape) test = mx.sym.squeeze(data) check_numeric_gradient(test, [data_tmp]) test = mx.sym.squeeze(data, axis=2) check_numeric_gradient(test, [data_tmp]) test = mx.sym.squeeze(data, axis=(2, 4)) check_numeric_gradient(test, [data_tmp]) @with_seed() def test_adaptive_avg_pool_op(): def py_adaptive_avg_pool(x, height, width): # 2D per frame adaptive avg pool def adaptive_avg_pool_frame(x, y): isizeH, isizeW = x.shape osizeH, osizeW = y.shape for oh in range(osizeH): istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH)) iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH)) kH = iendH - istartH for ow in range(osizeW): istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW)) iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW)) kW = iendW - istartW xsum = 0 for ih in range(kH): for iw in range(kW): xsum += x[istartH+ih][istartW+iw] y[oh][ow] = xsum / kH / kW B,C,_,_ = x.shape y = np.empty([B,C,height, width], dtype=x.dtype) for b in range(B): for c in range(C): adaptive_avg_pool_frame(x[b][c], y[b][c]) return y def check_adaptive_avg_pool_op(shape, output_height, output_width=None): x = mx.nd.random.uniform(shape=shape) if output_width is None: y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height) npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height) else: y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width)) npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width) assert_almost_equal(y.asnumpy(), npy) shape = (2, 2, 10, 10) for i in range(1, 11): check_adaptive_avg_pool_op(shape, i) for j in range(1, 11): check_adaptive_avg_pool_op(shape, i, j) @with_seed() def test_bilinear_resize_op(): def py_bilinear_resize(x, outputHeight, outputWidth): batch, channel, inputHeight, inputWidth = x.shape if outputHeight == inputHeight and outputWidth == inputWidth: return x y = np.empty([batch, channel, outputHeight, outputWidth]) rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0 rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0 for h2 in range(outputHeight): h1r = 1.0 * h2 * rheight h1 
= int(np.floor(h1r)) h1lambda = h1r - h1 h1p = 1 if h1 < (inputHeight - 1) else 0 for w2 in range(outputWidth): w1r = 1.0 * w2 * rwidth w1 = int(np.floor(w1r)) w1lambda = w1r - w1 w1p = 1 if w1 < (inputWidth - 1) else 0 for b in range(batch): for c in range(channel): y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \ w1lambda*x[b][c][h1][w1+w1p]) + \ h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \ w1lambda*x[b][c][h1+h1p][w1+w1p]) return y def py_bilinear_resize_backward(x, incoming_grads, mode='size'): data1 = np.zeros_like(x) data2 = incoming_grads batchsize = data1.shape[0] channels = data1.shape[1] height1 = data1.shape[2] width1 = data1.shape[3] height2 = data2.shape[2] width2 = data2.shape[3] rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0 rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0 # special case: just copy if height1 == height2 and width1 == width2: data1 += data2 return [data1] for h2 in range(0, height2): for w2 in range(0, width2): h1r = rheight * h2 h1 = int(h1r) h1p = 1 if (h1 < height1 - 1) else 0 h1lambda = h1r - h1 h0lambda = 1 - h1lambda # w1r = rwidth * w2 w1 = int(w1r) w1p = 1 if (w1 < width1 - 1) else 0 w1lambda = w1r - w1 w0lambda = 1 - w1lambda # for n in range(0, batchsize): for c in range(0, channels): d2val = data2[n][c][h2][w2] data1[n][c][h1][w1] += h0lambda * w0lambda * d2val data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val if mode == 'like': return data1, np.zeros_like(incoming_grads) return [data1] def check_bilinear_resize_op(shape, height, width): x = mx.nd.random.uniform(shape=shape) y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width) assert_almost_equal(y, py_bilinear_resize(x.asnumpy(), height, width)) x_scale = width / shape[-1] y_scale = height / shape[-2] y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale) assert_almost_equal(y, py_bilinear_resize(x.asnumpy(), height, width)) def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None): x = mx.nd.random.uniform(shape=shape) original_h = shape[2] original_w = shape[3] if mode == 'odd_scale': assert scale_height is not None and scale_width is not None new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \ int((original_h - 1) * scale_height) + 1 new_w = int(original_w * scale_width) if (original_w % 2) == 0 \ else int((original_w - 1) * scale_width) + 1 y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height, scale_width=scale_width, mode='odd_scale') elif mode == 'to_even_down': new_h = original_h if (original_h % 2) == 0 else original_h - 1 new_w = original_w if (original_w % 2) == 0 else original_w - 1 y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down') elif mode == 'to_even_up': new_h = original_h if (original_h % 2) == 0 else original_h + 1 new_w = original_w if (original_w % 2) == 0 else original_w + 1 y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up') elif mode == 'to_odd_down': new_h = original_h if (original_h % 2) == 1 else original_h - 1 new_w = original_w if (original_w % 2) == 1 else original_w - 1 y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down') elif mode == 'to_odd_up': new_h = original_h if (original_h % 2) == 1 else original_h + 1 new_w = original_w if (original_w % 2) == 1 else original_w + 1 y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up') elif mode == 'like': x_1 = 
mx.nd.random.uniform(shape=shape_1) new_h = x_1.shape[2] new_w = x_1.shape[3] y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like') new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int') new_shape_got = np.array(y.shape, dtype='int') data_sym = mx.sym.var('data') data_np = x.asnumpy() expected = py_bilinear_resize(data_np, new_h, new_w) out_grads = np.ones([shape[0], shape[1], new_h, new_w]) expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode) assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format( str(new_shape_desired.tolist()), str(new_shape_got.tolist()))) assert_almost_equal(y.asnumpy(), expected, 1e-3, 0) if mode != 'like': resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode) check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5) check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5) check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4) else: data_sym_like = mx.sym.var('data_like') resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode) date_np_like = x_1.asnumpy() check_symbolic_forward(resize_sym, [data_np, date_np_like], [expected], rtol=1e-3, atol=1e-5) check_symbolic_backward(resize_sym, [data_np, date_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5) check_numeric_gradient(resize_sym, [data_np, date_np_like], rtol=1e-2, atol=1e-4) shape = (2, 2, 10, 10) check_bilinear_resize_op(shape, 5, 5) check_bilinear_resize_op(shape, 10, 10) check_bilinear_resize_op(shape, 15, 15) check_bilinear_resize_op(shape, 3, 7) check_bilinear_resize_op(shape, 13, 17) shape = (2, 2, 20, 20) check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale') check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale') check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale') check_bilinear_resize_modes_op(shape, mode='to_even_down') check_bilinear_resize_modes_op(shape, mode='to_even_up') check_bilinear_resize_modes_op(shape, mode='to_odd_down') check_bilinear_resize_modes_op(shape, mode='to_odd_up') shape = (2, 2, 21, 21) check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale') check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale') check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale') check_bilinear_resize_modes_op(shape, mode='to_even_down') check_bilinear_resize_modes_op(shape, mode='to_even_up') check_bilinear_resize_modes_op(shape, mode='to_odd_down') check_bilinear_resize_modes_op(shape, mode='to_odd_up') shape_0 = (2, 2, 21, 21) shape_1 = (2, 2, 10, 10) check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like') check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like') def test_multi_proposal_op(): # paramters feature_stride = 16 scales = (8, 16, 32) ratios = (0.5, 1, 2) rpn_pre_nms_top_n = 12000 rpn_post_nms_top_n = 2000 threshold = 0.7 rpn_min_size = 16 batch_size = 20 feat_len = (1000 + 15) // 16 H, W = feat_len, feat_len num_anchors = len(scales) * len(ratios) count_anchors = H * W * num_anchors ''' cls_prob: (batch_size, 2 * num_anchors, H, W) bbox_pred: (batch_size, 4 * num_anchors, H, W) im_info: (batch_size, 3) ''' cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = 
np.float32) bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32) im_info = mx.nd.empty((batch_size, 3), dtype = np.float32) cls_prob = mx.nd.array(np.random.random(cls_prob.shape)) bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape)) for i in range(batch_size): im_size = np.random.randint(100, feat_len * feature_stride, size = (2,)) im_scale = np.random.randint(70, 100) / 100.0 im_info[i, :] = [im_size[0], im_size[1], im_scale] def get_sub(arr, i): new_shape = list(arr.shape) new_shape[0] = 1 res = arr[i].reshape(new_shape) return res def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n): single_proposal = [] single_score = [] for i in range(batch_size): rois, score = mx.nd.contrib.Proposal( cls_prob = get_sub(cls_prob, i), bbox_pred = get_sub(bbox_pred, i), im_info = get_sub(im_info, i), feature_stride = feature_stride, scales = scales, ratios = ratios, rpn_pre_nms_top_n = rpn_pre_nms_top_n, rpn_post_nms_top_n = rpn_post_nms_top_n, threshold = threshold, rpn_min_size = rpn_min_size, output_score = True) single_proposal.append(rois) single_score.append(score) multi_proposal, multi_score = mx.nd.contrib.MultiProposal( cls_prob = cls_prob, bbox_pred = bbox_pred, im_info = im_info, feature_stride = feature_stride, scales = scales, ratios = ratios, rpn_pre_nms_top_n = rpn_pre_nms_top_n, rpn_post_nms_top_n = rpn_post_nms_top_n, threshold = threshold, rpn_min_size = rpn_min_size, output_score = True) single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape) single_score = mx.nd.stack(*single_score).reshape(multi_score.shape) single_proposal_np = single_proposal.asnumpy() multi_proposal_np = multi_proposal.asnumpy() single_score_np = single_score.asnumpy() multi_score_np = multi_score.asnumpy() # check rois x1,y1,x2,y2 assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:]) # check rois batch_idx for i in range(batch_size): start = i * rpn_post_nms_top_n end = start + rpn_post_nms_top_n assert (multi_proposal_np[start:end, 0] == i).all() # check score assert np.allclose(single_score_np, multi_score_np) def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n): im_info_sym = mx.sym.Variable('im_info') cls_prob_sym = mx.sym.Variable('cls_prob') bbox_pred_sym = mx.sym.Variable('bbox_pred') sym = mx.sym.contrib.MultiProposal( cls_prob = cls_prob_sym, bbox_pred = bbox_pred_sym, im_info = im_info_sym, feature_stride = feature_stride, scales = scales, ratios = ratios, rpn_pre_nms_top_n = rpn_pre_nms_top_n, rpn_post_nms_top_n = rpn_post_nms_top_n, threshold = threshold, rpn_min_size = rpn_min_size, output_score = False) location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()] expected = [np.zeros_like(e) for e in location] out_grads = [np.ones((rpn_post_nms_top_n, 5))] check_symbolic_backward(sym, location, out_grads, expected) check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n) check_forward(rpn_pre_nms_top_n, 1500) check_forward(1000, 500) check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n) @with_seed() def test_quadratic_function(): def f(x, a, b, c): return a * x**2 + b * x + c a = np.random.random_sample() b = np.random.random_sample() c = np.random.random_sample() data = mx.symbol.Variable('data') quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c) for dtype in [np.float16, np.float32, np.float64]: tol = 1e-2 if dtype is np.float16 else 1e-5 for ndim in range(1, 6): shape = rand_shape_nd(ndim, 5) data_np = np.random.randn(*shape).astype(dtype) expected = f(data_np, a, b, c) backward_expected = 2 * a 
* data_np + b # check imperative forward output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c) assert_almost_equal(output, expected, rtol=tol, atol=tol) # check forward check_symbolic_forward(quad_sym, [data_np], [expected], rtol=tol, atol=tol) # check backward check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)], [backward_expected], rtol=tol, atol=tol) # check backward using finite difference check_numeric_gradient(quad_sym, [data_np], atol=0.001) def allclose_function(contexts): def getRandom(base, percent = 1.): return base * (1 + percent * (2 * np.random.random_sample() - 1.) / 100) title = 'exp' for ctx in contexts: title += ' cpu' if ctx == mx.cpu() else ' gpu' title += ' nElem shape' num_ctx = len(contexts) result = [False, False] for dtype in [np.float16, np.float32, np.float64]: rtol = getRandom(1e-2 if dtype is np.float16 else 1e-5) atol = getRandom(1e-4 if dtype is np.float16 else 1e-7) print('\nnumpy.{}: atol = {} rtol = {}'.format(dtype.__name__, atol, rtol)) print(title) for ndim in range(1, 10): shape = rand_shape_nd(ndim, 8) a_np = np.random.randn(*shape).astype(dtype) b_np = (a_np + np.random.randn(*shape).astype(dtype) / 10000000).astype(dtype) expected = np.allclose(a_np, b_np, rtol, atol) for n, ctx in enumerate(contexts): a_ctx = mx.nd.array(a_np, dtype = dtype, ctx=ctx) b_ctx = mx.nd.array(b_np, dtype = dtype, ctx=ctx) output = mx.nd.contrib.allclose(a_ctx, b_ctx, rtol=rtol, atol=atol) result[n] = output.asnumpy() == 1 if expected != result[n]: # Preparing the output of elements of the array, which are considered as "not close" AND # corresponding elements of comparison CPU/GPU/Python vectors, which are considered as "close" v_ctx = 'CPU' if ctx == mx.cpu() else 'GPU' if expected: v_cmp = 'Python' a_b = a_ctx.asnumpy() b_b = b_ctx.asnumpy() a_g = np.asarray(a_np) b_g = np.asarray(b_np) else: v_cmp = v_ctx v_ctx = 'Python' a_b = np.asarray(a_np) b_b = np.asarray(b_np) a_g = a_ctx.asnumpy() b_g = b_ctx.asnumpy() print('\n *** Violations found on %s, but not on %s side ***' % (v_ctx, v_cmp)) frmt = " a[{0:d}]: b[{0:d}]:" \ " abs(a[{0:d}]-b[{0:d}]) - atol + rtol*abs(b[{0:d}]):" # Define the indices of all violations and corresponding values of coordinates bad_indexes = np.abs(a_b - b_b) >= atol + rtol * abs(b_b) a_values = [a_b[bad_indexes], a_g[bad_indexes]] b_values = [b_b[bad_indexes], b_g[bad_indexes]] idx = np.asarray(np.where(bad_indexes == True)) idx = idx.reshape(1, idx.size) idx_flat = np.asarray(np.where(bad_indexes.flatten() == True)).flatten() for i in range(len(a_values[0])): flat_idx = idx_flat[i] print('{}: index = {} flat_index = {}'.format('%4d'%i, idx[i], flat_idx)) print(frmt.format(flat_idx)) for j in range(2): diff = np.abs(a_values[j][i]-b_values[j][i]) - atol + rtol*abs(b_values[j][i]) print('{}: {} {} {}'.format('%6s'%v_ctx, a_values[j][i], b_values[j][i], diff)) if num_ctx == 1: print(' {0:d} {1:d} {2:10d} {3:}'.format(expected, result[0], np.prod(shape), shape)) else: print(' {0:d} {1:d} {2:d} {3:10d} {4:}'.format(expected, result[0], result[1], np.prod(shape), shape)) if expected != result[0] or num_ctx > 1 and expected != result[1]: assert False @with_seed() def test_allclose_function(): allclose_function([default_context()]) @with_seed() def test_histogram(): def f(x, bins=10, range=None): return np.histogram(x, bins, range=range) for ndim in range(1, 6): shape = rand_shape_nd(ndim) x = rand_ndarray(shape, stype='default', dtype=np.float64) mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64) 
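        # mx.nd.histogram mirrors np.histogram and is exercised below in both
        # of its calling conventions: an integer bin count together with an
        # explicit (low, high) range, and a monotonically increasing array of
        # explicit bin edges (mx_bins above).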
np_bins = mx_bins.asnumpy() bin_cnt = random.randint(2, 10) bin_range = (-2.5, 2.5) mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range) np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range) assert_almost_equal(mx_bins1, np_bins1) assert_almost_equal(mx_histo1, np_histo1, rtol=1e-3, atol=1e-5) mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins) np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins) assert_almost_equal(mx_histo2, np_histo2, rtol=1e-3, atol=1e-5) assert_almost_equal(mx_bins2, np_bins2, rtol=1e-3, atol=1e-5) data = mx.sym.Variable("data") bins = mx.sym.Variable("bins") histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range) histo2 = mx.sym.histogram(a=data, bins=bins) executor1 = histo1.bind(ctx=default_context(), args={"data" : x}) executor1.forward(is_train=False) assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False) executor2 = histo2.bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins}) executor2.forward(is_train=False) assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False) def test_op_output_names_monitor(): def check_name(op_sym, expected_names): output_names = [] def get_output_names_callback(name, arr): output_names.append(py_str(name)) op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null') op_exe.set_monitor_callback(get_output_names_callback, monitor_all=False) try: op_exe.forward() mx.nd.waitall() except mx.base.MXNetError: # skip errors since test is to check output names pass for output_name, expected_name in zip(output_names, expected_names): assert output_name == expected_name is_windows = sys.platform.startswith('win') if (is_windows): # Windows doesn't support set environment variable on the fly, so disable it for now pass else: # Disable subgraph in case subgraph will replace symbol os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE" data = mx.sym.Variable('data', shape=(10, 3, 10, 10)) conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv') check_name(conv_sym, ['conv_output']) deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv') check_name(deconv_sym, ['deconv_output']) fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc') check_name(fc_sym, ['fc_output']) lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn') check_name(lrn_sym, ['lrn_output', 'lrn_tmp_norm']) act_sym = mx.sym.Activation(data, act_type='relu', name='act') check_name(act_sym, ['act_output']) cc_sym = mx.sym.concat(data, data, dim=0, name='concat') check_name(cc_sym, ['concat_output']) sm_sym = mx.sym.softmax(data, name='softmax') check_name(sm_sym, ['softmax_output']) sa_sym = mx.sym.SoftmaxActivation(data, name='softmax') check_name(sa_sym, ['softmax_output']) us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest', name='upsampling') check_name(us_sym, ['upsampling_output']) us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg', name='pooling') check_name(us_sym, ['pooling_output']) del os.environ['MXNET_SUBGRAPH_BACKEND'] def test_op_all_names_monitor(): def check_name(op_sym, expected_names): output_names = [] def get_output_names_callback(name, arr): output_names.append(py_str(name)) op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null') op_exe.set_monitor_callback(get_output_names_callback, monitor_all=True) try: op_exe.forward() mx.nd.waitall() except mx.base.MXNetError: # skip errors since test is 
to check all names pass for output_name, expected_name in zip(output_names, expected_names): assert output_name == expected_name is_windows = sys.platform.startswith('win') if (is_windows): # Windows doesn't support set environment variable on the fly, so disable it for now pass else: # Disable subgraph in case subgraph will replace symbol os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE" data = mx.sym.Variable('data', shape=(10, 3, 10, 10)) conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv') check_name(conv_sym, ['data', 'conv_data', 'conv_weight', 'conv_weight', 'conv_bias', 'conv_bias', 'conv_output']) deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv') check_name(deconv_sym, ['data', 'deconv_data', 'deconv_weight', 'deconv_weight', 'deconv_output']) fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc') check_name(fc_sym, ['data', 'fc_data', 'fc_weight', 'fc_weight', 'fc_bias', 'fc_bias', 'fc_output']) lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn') check_name(lrn_sym, ['data', 'lrn_data', 'lrn_output', 'lrn_tmp_norm']) act_sym = mx.sym.Activation(data, act_type='relu', name='act') check_name(act_sym, ['data', 'act_input0', 'act_output']) cc_sym = mx.sym.concat(data, data, dim=0, name='concat') check_name(cc_sym, ['data', 'concat_arg0', 'data', 'concat_arg1', 'concat_output']) sm_sym = mx.sym.softmax(data, name='softmax') check_name(sm_sym, ['data', 'softmax_data', 'softmax_output']) length = mx.sym.Variable("length", shape=(10, 10, 10)) sm_sym = mx.sym.softmax(data, length, axis=1, use_length=True, name='softmax') check_name(sm_sym, ['data', 'softmax_data', 'length', 'softmax_length', 'softmax_output']) sa_sym = mx.sym.SoftmaxActivation(data, name='softmax') check_name(sa_sym, ['data', 'softmax_input0', 'softmax_output']) us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest', name='upsampling') check_name(us_sym, ['data', 'upsampling_arg0', 'upsampling_output']) us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg', name='pooling') check_name(us_sym, ['data', 'pooling_data', 'pooling_output']) del os.environ['MXNET_SUBGRAPH_BACKEND'] @with_seed() @unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915") def test_activation(): shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)] dtype_l = [np.float64, np.float32, np.float16] rtol_l = [1e-7, 1e-6, 1e-2] atol_l = [1e-7, 1e-6, 1e-2] rtol_fd = 1e-5 atol_fd = 1e-6 num_eps = 1e-6 unary_ops = { 'relu': [lambda x: mx.sym.Activation(x, act_type='relu'), lambda x: np.maximum(x, 0.), lambda x: 1. * (x > 0.), -5.0, 5.0], 'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'), lambda x: 1. / (np.exp(-x) + 1.), lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.), -3.0, 3.0], 'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, -4.0, 4.0], 'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'), lambda x: np.log(1. + np.exp(x)), lambda x: 1. - 1 / (1 + np.exp(x)), -3.0, 3.0], 'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'), lambda x: x / (1. + np.abs(x)), lambda x: 1. / np.square(1. 
+ np.abs(x)), -3.0, 3.0], } # Loop over operators for name, op in unary_ops.items(): # Loop over shapes for shape in shapes: # Loop over dtype's for ind in range(len(dtype_l)): dtype = dtype_l[ind] rtol = rtol_l[ind] atol = atol_l[ind] compare_forw_backw_unary_op( name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol, dtype) # Finite difference testing finite_diff_unary_op( name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps) @with_seed() def test_ravel(): # be aware that check_symbolic_forward will use float type internally # for the arrays and that limits the representable flat index range. # Taking dim==4 and a range of [0,..,100] for the data can already # cause precision issues and break this test. for dim in [1, 2, 3, 4]: data = np.random.randint(50, size=(dim, 500)) shape = tuple(np.add(np.amax(data, axis=1), [1])) a = mx.sym.Variable('a') ravel_npy = np.ravel_multi_index(data, shape) b = mx.sym.ravel_multi_index(a, shape=shape) check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy]) c = mx.sym.unravel_index(a, shape=shape) check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data]) # Test with leading dimension set to -1. shape2 = shape shape2 = (-1,)+shape[1:] b = mx.sym.ravel_multi_index(a, shape=shape2) check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy]) c = mx.sym.unravel_index(a, shape=shape2) check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data]) def test_context_num_gpus(): try: # Note: the test is run both on GPU and CPU hosts, so that we can not assert # on a specific number here. assert mx.context.num_gpus() >= 0 except mx.MXNetError as e: # Note: On a CPU only host CUDA sometimes is not able to determine the number # of GPUs if str(e).find("CUDA") == -1: raise e @with_seed() def test_op_roi_align(): T = np.float32 def assert_same_dtype(dtype_a, dtype_b): ''' Assert whether the two data type are the same Parameters ---------- dtype_a, dtype_b: type Input data types to compare ''' assert dtype_a == dtype_b,\ TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b)) def bilinear_interpolate(bottom, height, width, y, x): if y < -1.0 or y > height or x < -1.0 or x > width: return T(0.0), [] x = T(max(0.0, x)) y = T(max(0.0, y)) x_low = int(x) y_low = int(y) if x_low >= width - 1: x_low = x_high = width - 1 x = T(x_low) else: x_high = x_low + 1 if y_low >= height - 1: y_low = y_high = height - 1 y = T(y_low) else: y_high = y_low + 1 ly = y - T(y_low) lx = x - T(x_low) hy = T(1.0) - ly hx = T(1.0) - lx v1 = bottom[y_low, x_low] v2 = bottom[y_low, x_high] v3 = bottom[y_high, x_low] v4 = bottom[y_high, x_high] w1 = hy * hx w2 = hy * lx w3 = ly * hx w4 = ly * lx assert_same_dtype(w1.dtype, T) assert_same_dtype(w2.dtype, T) assert_same_dtype(w3.dtype, T) assert_same_dtype(w4.dtype, T) val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4 assert_same_dtype(val.dtype, T) grad = [(y_low, x_low, w1), (y_low, x_high, w2), (y_high, x_low, w3), (y_high, x_high, w4) ] return val, grad def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio, position_sensitive, dy): N, C, H, W = data.shape R = rois.shape[0] PH, PW = pooled_size assert rois.ndim == 2,\ ValueError( 'The ndim of rois should be 2 rather than %d' % rois.ndim) assert rois.shape[1] == 5,\ ValueError( 'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1]) assert_same_dtype(data.dtype, T) assert_same_dtype(rois.dtype, T) C_out = C // PH // PW if position_sensitive else C out = np.zeros((R, C_out, PH, PW), dtype=T) 
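        # Reference ROI Align semantics: each output bin averages
        # roi_bin_grid_h * roi_bin_grid_w bilinear samples taken at regular
        # offsets inside the bin; the backward pass scatters the incoming
        # gradient through the same four bilinear weights per sample, which
        # is what the (y, x, w) triples returned by bilinear_interpolate
        # encode.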
dx = np.zeros_like(data) drois = np.zeros_like(rois) for r in range(R): batch_ind = int(rois[r, 0]) sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale) roi_w = T(max(ew - sw, 1.0)) roi_h = T(max(eh - sh, 1.0)) bin_h = roi_h / T(PH) bin_w = roi_w / T(PW) bdata = data[batch_ind] if sampling_ratio > 0: roi_bin_grid_h = roi_bin_grid_w = sampling_ratio else: roi_bin_grid_h = int(np.ceil(roi_h / T(PH))) roi_bin_grid_w = int(np.ceil(roi_w / T(PW))) count = T(roi_bin_grid_h * roi_bin_grid_w) for c in range(C_out): for ph in range(PH): for pw in range(PW): val = T(0.0) c_in = c * PH * PW + ph * PW + pw if position_sensitive else c for iy in range(roi_bin_grid_h): y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \ bin_h / T(roi_bin_grid_h) for ix in range(roi_bin_grid_w): x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \ bin_w / T(roi_bin_grid_w) v, g = bilinear_interpolate( bdata[c_in], H, W, y, x) assert_same_dtype(v.dtype, T) val += v # compute grad for qy, qx, qw in g: assert_same_dtype(qw.dtype, T) dx[batch_ind, c_in, qy, qx] += dy[r, c, ph, pw] * qw / count out[r, c, ph, pw] = val / count assert_same_dtype(out.dtype, T) return out, [dx, drois] def test_roi_align_value(sampling_ratio=0, position_sensitive=False): ctx = default_context() dtype = np.float32 dlen = 224 N, C, H, W = 5, 3, 16, 16 R = 7 pooled_size = (3, 4) C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C spatial_scale = H * 1.0 / dlen data = mx.nd.array( np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype) center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype) wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype) batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx) pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1) rois = mx.nd.concat(batch_ind, pos, dim=1) data.attach_grad() rois.attach_grad() with mx.autograd.record(): output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size, spatial_scale=spatial_scale, sample_ratio=sampling_ratio, position_sensitive=position_sensitive) C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C dy = mx.nd.random.uniform(-1, 1, (R, C_out) + pooled_size, ctx=ctx, dtype=dtype) output.backward(dy) real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size, spatial_scale, sampling_ratio, position_sensitive, dy.asnumpy()) assert_almost_equal(output, real_output, atol=1e-3) assert_almost_equal(data.grad, dx, atol=1e-3) assert_almost_equal(rois.grad, drois, atol=1e-3) # modified from test_roipooling() def test_roi_align_autograd(sampling_ratio=0): ctx = default_context() data = mx.symbol.Variable(name='data') rois = mx.symbol.Variable(name='rois') test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1, sample_ratio=sampling_ratio) x1 = np.random.rand(4, 1, 12, 12).astype('float64') x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2]], dtype='float64') check_numeric_gradient(sym=test, location=[x1, x2], grad_nodes={'data': 'write', 'rois': 'null'}, numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx) check_numeric_gradient(sym=test, location=[x1, x2], grad_nodes={'data': 'add', 'rois': 'null'}, numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx) test_roi_align_value() test_roi_align_value(sampling_ratio=2) test_roi_align_value(position_sensitive=True) test_roi_align_autograd() @with_seed() def test_op_rroi_align(): T = np.float32 def assert_same_dtype(dtype_a, dtype_b): ''' Assert whether 
the two data type are the same Parameters ---------- dtype_a, dtype_b: type Input data types to compare ''' assert dtype_a == dtype_b,\ TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b)) def bilinear_interpolate(bottom, height, width, y, x): if y < -1.0 or y > height or x < -1.0 or x > width: return T(0.0) x = T(max(0.0, x)) y = T(max(0.0, y)) x_low = int(x) y_low = int(y) if x_low >= width - 1: x_low = x_high = width - 1 x = T(x_low) else: x_high = x_low + 1 if y_low >= height - 1: y_low = y_high = height - 1 y = T(y_low) else: y_high = y_low + 1 ly = y - T(y_low) lx = x - T(x_low) hy = T(1.0) - ly hx = T(1.0) - lx v1 = bottom[y_low, x_low] v2 = bottom[y_low, x_high] v3 = bottom[y_high, x_low] v4 = bottom[y_high, x_high] w1 = hy * hx w2 = hy * lx w3 = ly * hx w4 = ly * lx assert_same_dtype(w1.dtype, T) assert_same_dtype(w2.dtype, T) assert_same_dtype(w3.dtype, T) assert_same_dtype(w4.dtype, T) val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4 assert_same_dtype(val.dtype, T) return val def rroialign_forward(data, rois, pooled_size, spatial_scale, sampling_ratio): N, C, H, W = data.shape R = rois.shape[0] PH, PW = pooled_size assert rois.ndim == 2,\ ValueError( 'The ndim of rois should be 2 rather than %d' % rois.ndim) assert rois.shape[1] == 6,\ ValueError( 'The length of the axis 1 of rois should be 6 rather than %d' % rois.shape[1]) assert_same_dtype(data.dtype, T) assert_same_dtype(rois.dtype, T) out = np.zeros((R, C, PH, PW), dtype=T) for r in range(R): batch_ind = int(rois[r, 0]) roi_center_w, roi_center_h, roi_w, roi_h = rois[r, 1:5] * T(spatial_scale) roi_theta = T(rois[r,5] * np.pi / 180.0) roi_w = T(max(roi_w, 1.0)) roi_h = T(max(roi_h, 1.0)) bin_h = roi_h / T(PH) bin_w = roi_w / T(PW) bdata = data[batch_ind] if sampling_ratio > 0: roi_bin_grid_h = roi_bin_grid_w = sampling_ratio else: roi_bin_grid_h = int(np.ceil(roi_h / T(PH))) roi_bin_grid_w = int(np.ceil(roi_w / T(PW))) count = T(roi_bin_grid_h * roi_bin_grid_w) roi_start_h = T(-roi_h / 2.0) roi_start_w = T(-roi_w / 2.0) for c in range(C): for ph in range(PH): for pw in range(PW): val = T(0.0) for iy in range(roi_bin_grid_h): yy = roi_start_h + T(ph) * bin_h + (T(iy) + T(0.5)) * \ bin_h / T(roi_bin_grid_h) for ix in range(roi_bin_grid_w): xx = roi_start_w + T(pw) * bin_w + (T(ix) + T(0.5)) * \ bin_w / T(roi_bin_grid_w) x = xx * np.cos(roi_theta, dtype=T) + yy * np.sin(roi_theta, dtype=T) + roi_center_w y = yy * np.cos(roi_theta, dtype=T) - xx * np.sin(roi_theta, dtype=T) + roi_center_h v = bilinear_interpolate( bdata[c], H, W, y, x) assert_same_dtype(v.dtype, T) val += v out[r, c, ph, pw] = val / count assert_same_dtype(out.dtype, T) return out def test_rroi_align_value(sampling_ratio=-1): ctx = default_context() if ctx.device_type == 'gpu': print('skipped testing rroi align for gpu since it is not supported yet') return dtype = np.float32 dlen = 224 N, C, H, W = 5, 3, 16, 16 R = 7 pooled_size = (3, 4) spatial_scale = H * 1.0 / dlen data = mx.nd.array( np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype) center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype) wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype) theta = mx.nd.random.uniform(0, 180, (R,1), ctx=ctx, dtype=dtype) batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx) pos = mx.nd.concat(center_xy, wh, theta, dim=1) rois = mx.nd.concat(batch_ind, pos, dim=1) output = mx.nd.contrib.RROIAlign(data, rois, pooled_size=pooled_size, spatial_scale=spatial_scale, sampling_ratio=sampling_ratio) real_output 
= rroialign_forward(data.asnumpy(), rois.asnumpy(),
                                          pooled_size, spatial_scale,
                                          sampling_ratio)
        assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)

    test_rroi_align_value()
    test_rroi_align_value(sampling_ratio=2)


@with_seed()
def test_diag():
    # Test 2d input
    h = np.random.randint(2, 9)
    w = np.random.randint(2, 9)
    a_np = np.random.random((h, w)).astype(np.float32)
    a = mx.nd.array(a_np).astype('float32')

    for k in [0, 1, -1, np.random.randint(-min(h, w) + 1, min(h, w))]:
        assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))

    # invalid k
    k = max(h, w) + 1
    assertRaises(MXNetError, mx.nd.diag, a, k=k)

    # Test 2d backward, k=0
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data)
    check_numeric_gradient(diag_sym, [a_np])

    # Test 2d backward, k=1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=1)
    check_numeric_gradient(diag_sym, [a_np])

    # Test 2d backward, k=-1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=-1)
    check_numeric_gradient(diag_sym, [a_np])

    # test 1d input
    d = np.random.randint(2, 9)
    a_np = np.random.random((d))
    a = mx.nd.array(a_np)

    # k is random
    k = np.random.randint(-d, d)
    assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))

    # Test 1d backward, k=0
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data)
    check_numeric_gradient(diag_sym, [a_np])

    # Test 1d backward, k=1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=1)
    check_numeric_gradient(diag_sym, [a_np])

    # Test 1d backward, k=-1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=-1)
    check_numeric_gradient(diag_sym, [a_np])

    # Test 4d input
    x1 = np.random.randint(3, 9)
    x2 = np.random.randint(3, 9)
    x3 = np.random.randint(3, 9)
    x4 = np.random.randint(3, 9)
    a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
    a = mx.nd.array(a_np).astype('float32')

    # k = 0, axis1=0, axis2=1
    r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
    assert_almost_equal(r, np.diagonal(a_np, offset=0, axis1=0, axis2=1))

    # k = 1, axis1=1, axis2=0
    r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
    assert_almost_equal(r, np.diagonal(a_np, offset=1, axis1=1, axis2=0))

    # k = -1, axis1=1, axis2=3
    r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
    assert_almost_equal(r, np.diagonal(a_np, offset=-1, axis1=1, axis2=3))

    # k = 2, axis1=-2, axis2=0
    r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
    assert_almost_equal(r, np.diagonal(a_np, offset=2, axis1=-2, axis2=0))

    # Test 4d backward, k=0, axis1=3, axis2=0
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
    check_numeric_gradient(diag_sym, [a_np])

    # Test 4d backward, k=1, axis1=1, axis2=2
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
    check_numeric_gradient(diag_sym, [a_np])

    # Test 4d backward, k=-1, axis1=2, axis2=0
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
    check_numeric_gradient(diag_sym, [a_np])

    # Test 4d backward, k=-2, axis1=1, axis2=-1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
    check_numeric_gradient(diag_sym, [a_np])


@with_seed()
def test_depthtospace():
    def f(x, blocksize):
        b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
        tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
        y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
        return y

    block = random.randint(2, 4)
    rand_mul1 = random.randint(1, 4)
    n = random.randint(1, 5)
    c = block * block * rand_mul1
    h = random.randint(1, 5)
    w = random.randint(1, 5)
    shape_inp = (n, c, h, w)
    data = rand_ndarray(shape_inp, 'default')
    data_np = data.asnumpy()
    expected = f(data_np, block)
    output = mx.nd.depth_to_space(data, block)
    assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)

    shape_out = (n, c // (block ** 2), h * block, w * block)
    data = mx.sym.Variable('data')
    dts_sym = mx.sym.depth_to_space(data, block)
    check_numeric_gradient(dts_sym, [np.ones(shape_inp)])

    check_symbolic_forward(dts_sym, [data_np], [expected])
    check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)],
                            [np.ones(shape_inp)])

    def test_invalid_depth_dim():
        invalid_shape_inp = (n, block - 1, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.depth_to_space, data, block)

    def test_invalid_space_dim():
        invalid_shape_inp = (n, block ** 2, 0, block + 1)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.depth_to_space, data, block)

    def test_invalid_block_size():
        block = 0
        invalid_shape_inp = (n, c, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.depth_to_space, data, block)

    test_invalid_depth_dim()
    test_invalid_space_dim()
    test_invalid_block_size()


@with_seed()
def test_spacetodepth():
    def f(x, blocksize):
        b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
        tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
        y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
        return y

    block = random.randint(2, 4)
    rand_mul1 = random.randint(1, 4)
    rand_mul2 = random.randint(1, 4)
    n = random.randint(1, 5)
    c = random.randint(1, 5)
    h = block * rand_mul1
    w = block * rand_mul2
    shape_inp = (n, c, h, w)
    data = rand_ndarray(shape_inp, 'default')
    data_np = data.asnumpy()
    expected = f(data_np, block)
    output = mx.nd.space_to_depth(data, block)
    assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)

    shape_out = (n, c * (block ** 2), h // block, w // block)
    data = mx.sym.Variable('data')
    dts_sym = mx.sym.space_to_depth(data, block)
    check_numeric_gradient(dts_sym, [np.ones(shape_inp)])

    check_symbolic_forward(dts_sym, [data_np], [expected])
    check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)],
                            [np.ones(shape_inp)])

    def test_invalid_space_dim():
        invalid_shape_inp = (n, c, block - 1, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.space_to_depth, data, block)

    def test_invalid_block_size():
        block = 0
        invalid_shape_inp = (n, c, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.space_to_depth, data, block)

    def test_invalid_depth_dim():
        invalid_shape_inp = (n, 0, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.space_to_depth, data, block)

    test_invalid_space_dim()
    test_invalid_block_size()
    test_invalid_depth_dim()


@with_seed()
def test_softmax_cross_entropy():
    def f_sm_ce(data, label):
        return np.sum(-np.log(data) * label)

    data = mx.sym.Variable('data')
    label = mx.sym.Variable('label')
    sym = mx.sym.softmax_cross_entropy(data=data, label=label)
    num_labels = random.randint(100, 200)
    batch_size = random.randint(100, 200)
    np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
    np_sm = np_softmax(np_data)
    np_label = np.random.randint(0, num_labels, (batch_size, ))
    np_one_hot_label = np.zeros((batch_size, num_labels))
    np_one_hot_label[np.arange(batch_size), np_label] = 1.
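    # The scalar reference below is the total cross entropy over the batch,
    # i.e. sum_i -log(softmax(data)[i, label_i]), computed through the
    # one-hot labels; softmax_cross_entropy should reproduce it within the
    # given tolerances.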
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5) @with_seed() def test_split_v2(): dim = random.randint(2, 6) shape = rand_shape_nd(dim) axis = random.randint(-dim, dim-1) axis_size = shape[axis] samples = random.randint(0, axis_size - 1) indices = sorted(random.sample([i for i in range(1, axis_size)], samples)) indices = tuple(indices) mx_data = rand_ndarray(shape) np_data = mx_data.asnumpy() np_out = np.split(np_data, indices_or_sections=indices, axis=axis) data = mx.sym.Variable("data") sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis) check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5) out_grad = [np.ones(arr.shape) for arr in np_out] check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)]) @with_seed() def test_moments(): dim = random.randint(2, 5) shape = rand_shape_nd(dim, dim=5) axes = [i for i in range(dim)] test_dims = random.sample(axes, random.randint(1, dim)) test_axes = tuple(sorted(test_dims)) np_a = np.random.uniform(-1.0, 1.0, shape) a = mx.nd.array(np_a) for keepdims in [True, False]: eps = 1e-3 np_a[abs(np_a) < eps] = 2 * eps np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims) np_var = np.var(np_a, axis=test_axes, keepdims=keepdims) mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes) N = np_a.size / np_mean.size mx_sym = mx.sym.Variable("data") mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims) mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1]) if len(np_mean.shape) == 0: np_mean = np_mean.reshape(mx_mean.shape) np_var = np_var.reshape(mx_var.shape) assert np_mean.shape == mx_mean.shape assert np_var.shape == mx_var.shape check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5) check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4) @with_seed() def test_invalid_kernel_size(): invalid_kernel_size = 28 assert_exception( mx.nd.Correlation, MXNetError, mx.nd.array(np.random.rand(1, 1, 28, 28)), mx.nd.array(np.random.rand(1, 1, 28, 28)), kernel_size=invalid_kernel_size) @with_seed() def test_valid_kernel_size(): valid_kernel_size = 9 mx.nd.Correlation( mx.nd.array(np.random.rand(1, 1, 28, 28)), mx.nd.array(np.random.rand(1, 1, 28, 28)), kernel_size=valid_kernel_size) @with_seed() def test_valid_max_pooling_pad_type_same(): import math input_data = mx.nd.array(np.random.rand(1,1,10)) stride = 2 kernel = 2 output_data=mx.nd.Pooling( input_data, kernel=kernel, stride=stride, pad=(0,0,0), pool_type='max', name='pooling', pooling_convention="same") assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2]) @with_seed() def test_invalid_max_pooling_pad_type_same(): import math input_data = mx.nd.array(np.random.rand(1,1,10)) stride = 2 kernel = 2 pad = 2 assert_exception( mx.nd.Pooling, MXNetError, input_data, stride=stride, kernel=kernel, pad=pad, pool_type='max', name='pooling', pooling_convention="same") @with_seed() def test_image_normalize(): # Part 1 - Test 3D input with 3D mean/std shape_3d = (3, 28, 28) mean = (0, 1, 2) std = (3, 2, 1) data_in_3d = mx.nd.random.uniform(0, 1, shape_3d) data_expected_3d = data_in_3d.asnumpy() data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0 data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0 data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0 data = mx.symbol.Variable('data') img_norm_sym = 
mx.sym.image.normalize(data=data, mean=mean, std=std) # check forward check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d], rtol=1e-5, atol=1e-5) # Gradient is 1/std_dev grad_expected_3d = np.ones(shape_3d) grad_expected_3d[:][:][0] = 1 / 3.0 grad_expected_3d[:][:][1] = 1 / 2.0 grad_expected_3d[:][:][2] = 1 / 1.0 # check backward check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)], expected=[grad_expected_3d], rtol=1e-5, atol=1e-5) # check backward using finite difference check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001) # Part 2 - Test 4D input with 3D mean/std shape_4d = (2, 3, 28, 28) data_in_4d = mx.nd.random.uniform(0, 1, shape_4d) data_expected_4d = data_in_4d.asnumpy() data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0 data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0 data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0 data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0 data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0 data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0 # check forward check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d], rtol=1e-5, atol=1e-5) # Gradient is 1/std_dev grad_expected_4d = np.ones(shape_4d) grad_expected_4d[0][:][:][0] = 1 / 3.0 grad_expected_4d[0][:][:][1] = 1 / 2.0 grad_expected_4d[0][:][:][2] = 1 / 1.0 grad_expected_4d[1][:][:][0] = 1 / 3.0 grad_expected_4d[1][:][:][1] = 1 / 2.0 grad_expected_4d[1][:][:][2] = 1 / 1.0 # check backward check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)], expected=[grad_expected_4d], rtol=1e-5, atol=1e-5) # check backward using finite difference check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001) # Part 3 - Test 3D input with scalar mean/std shape_3d = (3, 28, 28) mean = 1.0 std = 2.0 data_in_3d = mx.nd.random.uniform(0, 1, shape_3d) data_expected_3d = data_in_3d.asnumpy() data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0 data = mx.symbol.Variable('data') img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std) # check forward check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d], rtol=1e-5, atol=1e-5) # Gradient is 1/std_dev grad_expected_3d = np.ones(shape_3d) grad_expected_3d[:][:][:] = 1 / 2.0 # check backward check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)], expected=[grad_expected_3d], rtol=1e-5, atol=1e-5) # check backward using finite difference check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001) # Part 4 - Test 4D input with scalar mean/std shape_4d = (2, 3, 28, 28) data_in_4d = mx.nd.random.uniform(0, 1, shape_4d) data_expected_4d = data_in_4d.asnumpy() data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0 # check forward check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d], rtol=1e-5, atol=1e-5) # Gradient is 1/std_dev grad_expected_4d = np.ones(shape_4d) grad_expected_4d[:][:][:][:] = 1 / 2.0 # check backward check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)], expected=[grad_expected_4d], rtol=1e-5, atol=1e-5) # check backward using finite difference check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001) @with_seed() def test_index_array(): def test_index_array_default(): for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]: data = mx.symbol.Variable("data") index_array = 
mx.sym.contrib.index_array(data) input_array = np.ones(shape) mgrid = np.mgrid[tuple(slice(0, x) for x in shape)] expected = np.stack(mgrid, axis=-1) check_symbolic_forward(index_array, [input_array], [expected]) check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)]) @mx.use_np_shape def test_index_array_default_zero_dim(): data = mx.symbol.Variable("data") index_array = mx.sym.contrib.index_array(data) input_array = np.ones(()) expected = np.zeros((0,)) check_symbolic_forward(index_array, [input_array], [expected]) check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)]) @mx.use_np_shape def test_index_array_default_zero_size(): data = mx.symbol.Variable("data") index_array = mx.sym.contrib.index_array(data) input_array = np.ones((0, 0, 0)) expected = np.zeros((0, 0, 0, 3)) check_symbolic_forward(index_array, [input_array], [expected]) check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)]) def test_index_array_select_axes(): shape = (5, 7, 11, 13, 17, 19) for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]: data = mx.symbol.Variable("data") index_array = mx.sym.contrib.index_array(data, axes=axes) input_array = np.ones(shape) mgrid = np.mgrid[tuple(slice(0, x) for x in shape)] expected = np.stack(mgrid, axis=-1)[..., axes] check_symbolic_forward(index_array, [input_array], [expected]) check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)]) @mx.use_np_shape def test_index_array_select_axes_zero_size(): data = mx.symbol.Variable("data") index_array = mx.sym.contrib.index_array(data, axes=(2, 1)) input_array = np.ones((0, 0, 0, 0)) expected = np.zeros((0, 0, 2)) check_symbolic_forward(index_array, [input_array], [expected]) check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)]) test_index_array_default() test_index_array_default_zero_dim() test_index_array_default_zero_size() test_index_array_select_axes() test_index_array_select_axes_zero_size() @with_seed() def test_scalar_tensor_creation(): assertRaises(MXNetError, mx.nd.zeros, shape=()) assertRaises(MXNetError, mx.nd.ones, shape=()) with mx.np_shape(): data_mx = mx.nd.ones(shape=()) data_np = np.ones((), dtype=data_mx.dtype) assert same(data_mx.asnumpy(), data_np) @with_seed() def test_zero_size_tensor_creation(): assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0)) assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0)) with mx.np_shape(): data_mx = mx.nd.ones(shape=(0, 1, 0, 4)) data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype) assert same(data_mx.asnumpy(), data_np) @with_seed() def test_concat_with_zero_size_tensor(): with mx.np_shape(): data1 = mx.nd.ones((0, 8, 12)) data2 = mx.nd.ones((3, 8, 12)) data3 = mx.nd.ones((0, 8, 12)) ret = mx.nd.Concat(data1, data2, data3, dim=0) assert ret.shape == (3, 8, 12) data1 = mx.nd.ones((0, 3, 10)) data2 = mx.nd.ones((0, 4, 10)) data3 = mx.nd.ones((0, 5, 10)) ret = mx.nd.Concat(data1, data2, data3, dim=1) assert ret.shape == (0, 12, 10) @with_seed() def test_np_shape_decorator(): @mx.use_np_shape def check_scalar_one(): """Generate scalar one tensor""" return mx.nd.ones(shape=()) assert check_scalar_one.__name__ == "check_scalar_one" assert check_scalar_one.__doc__ == "Generate scalar one tensor" assert check_scalar_one().shape == () for active in [True, False]: with mx.np_shape(active=active): assert 
check_scalar_one.__name__ == "check_scalar_one" assert check_scalar_one.__doc__ == "Generate scalar one tensor" assert check_scalar_one().shape == () @mx.use_np_shape def check_concat(shape1, shape2, axis): data1 = mx.nd.ones(shape1) data2 = mx.nd.ones(shape2) ret = mx.nd.Concat(data1, data2, dim=axis) expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis) assert ret.shape == expected_ret.shape check_concat((0, 3, 4), (5, 3, 4), 0) check_concat((8, 0, 5), (8, 7, 5), 1) check_concat((8, 0, 0), (8, 0, 0), 2) for active in [True, False]: check_concat((0, 3, 4), (5, 3, 4), 0) check_concat((8, 0, 5), (8, 7, 5), 1) check_concat((8, 0, 0), (8, 0, 0), 2) @with_seed() def test_add_n(): data_shape = (2, 2) input_num = 5 data = [mx.nd.random.uniform(shape=data_shape) for i in range(input_num)] rslt = mx.nd.zeros(shape=data_shape) for i in range(input_num): rslt += data[i] add_n_rslt = mx.nd.add_n(*data, out=data[0]) assert_almost_equal(rslt.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5) def test_get_all_registered_operators(): ops = get_all_registered_operators() ok_(isinstance(ops, list)) ok_(len(ops) > 0) ok_('Activation' in ops) def test_get_operator_arguments(): operator_arguments = get_operator_arguments('Activation') ok_(isinstance(operator_arguments, OperatorArguments)) ok_(operator_arguments.names == ['data', 'act_type']) ok_(operator_arguments.types == ['NDArray-or-Symbol', "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"]) ok_(operator_arguments.narg == 2) def test_transpose_infer_shape_back(): o1 = mx.sym.ones(shape=[2,3]) o2 = mx.sym.ones(shape=[-1,-1]) t = mx.sym.transpose(o2) b = o1 + t x = b.bind(mx.cpu(), args={}) y = x.forward() assert(y[0].shape == (2,3)) def test_transpose_infer_shape_mixed(): o1 = mx.sym.ones(shape=[2,-1]) o2 = mx.sym.ones(shape=[3,-1]) t = mx.sym.transpose(o2) b = o1 + t x = b.bind(mx.cpu(), args={}) y = x.forward() assert(y[0].shape == (2,3)) @with_seed() def test_sample_normal_default_shape(): # Test case from https://github.com/apache/incubator-mxnet/issues/16135 s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5])) assert s.shape == (1,) s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=()) assert s.shape == (1,) s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=1) assert s.shape == (1, 1) s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=(1,)) assert s.shape == (1, 1) def test_min_max_inf(): dtypes = [np.float32, np.double] elem_list = [-1, 1, 0, np.inf, -np.inf] for dtype in dtypes: for a in elem_list: for b in elem_list: data_np = np.array([a, b], dtype=dtype) data_mx = mx.nd.array(data_np, dtype=dtype) min_data_np, max_data_np = data_np.min(), data_np.max() min_data_mx, max_data_mx = data_mx.min(), data_mx.max() assert_array_equal(min_data_np, min_data_mx.asnumpy()) assert_array_equal(max_data_np, max_data_mx.asnumpy()) if __name__ == '__main__': import nose nose.runmodule()
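# Usage note: with the nose entry point above, a single test from this module
# can be run in isolation, e.g. `nosetests test_operator.py:test_histogram`
# (the exact module path depends on the checkout). The @with_seed() decorator
# logs the RNG seed of a failing test so the run can be reproduced.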
adb_touch_sampler.py
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.

This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.

Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""

import logging
import threading
import time
import cv2
import subprocess
import re
import os
import platform
import traceback

from .adb_event_parser import TouchPoint
from . import adb_event_parser as ADBEventParser
from WrappedDeviceAPI.deviceAPI.mobileDevice.android.plugin.Platform_plugin.PlatformWeTest import AdbTool
from WrappedDeviceAPI.deviceAPI.mobileDevice.android.plugin.Platform_plugin.PlatformWeTest import GetInstance

SCREEN_ORI_LANDSCAPE = 0  # landscape orientation
SCREEN_ORI_PORTRAIT = 1   # portrait orientation

LOG = logging.getLogger('action_sampler')


class ADBTouchSampler(object):
    def __init__(self, device_id=None):
        # store the device id on the instance
        self.__device_id = device_id
        self.__device = AdbTool(device_id)
        self.__device_instance = GetInstance()
        self.__rotation = SCREEN_ORI_LANDSCAPE
        self.__touchXMax = None
        self.__touchYMax = None
        self._proc_getevent = None

    def init(self, long_edge, short_edge):
        self.__is_thread_running = False
        self.__device_instance.init(self.__device_id, long_edge=long_edge, standalone=True)
        device_info, err_msg = self.__device_instance.get_device_info()
        if not device_info:
            raise Exception(err_msg)
        max_contacts = device_info.touch_slot_number
        real_short_edge = int(long_edge * device_info.display_width / device_info.display_height)
        self.__screenCaptureHeight = long_edge
        self.__screenCaptureWidth = real_short_edge
        self.__short_edge_ratio = round(short_edge / real_short_edge, 3)

        output = self.__device.cmd("shell", "-x", "getevent", "-lp").communicate()[0].decode("utf-8")
        if output and output.find("ABS_MT_SLOT") != -1:
            LOG.info('This is Type B device')
            self.__parser = ADBEventParser.ADBEventParserTypeB(max_contacts)
        else:
            LOG.info('This is Type A device')
            self.__parser = ADBEventParser.ADBEventParserTypeA(max_contacts)

        self.__touchXMax, self.__touchYMax = device_info.touch_width, device_info.touch_height

        for i in range(50):
            err_code, img = self.__device_instance.get_image()
            if img is not None:
                break
            time.sleep(1)

        self.detect_rotation()

        self._proc_getevent = self.__device.raw_cmd('shell', '-x', 'getevent', '-l')

        # Start a thread that parses the output of `adb shell getevent` in real time.
        def pull_thread_main():
            self.__is_thread_running = True
            while self.__is_thread_running:
                line = self._proc_getevent.stdout.readline().strip()
                if not line:
                    continue
                else:
                    self.__parser.parse(line)

        self._t = threading.Thread(target=pull_thread_main)
        self._t.setDaemon(True)
        self._t.start()

    def __kill_unused_adb(self):
        if platform.platform().lower().startswith('window'):
            output = subprocess.Popen('tasklist /FI "IMAGENAME eq adb.exe" /FI "STATUS ne running" /NH',
                                      shell=True, stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE).communicate()[0].decode('UTF8')
            result = re.findall(r'adb.exe\s+(\d+)\s+', output)
            for pid in result:
                os.system('taskkill /F /PID %s' % pid)

    def deinit(self):
        """
        Stop recording.
        :return:
        """
        self.__is_thread_running = False
        self._t.join(10)
        if self._proc_getevent:
            self._proc_getevent.kill()
            self._proc_getevent = None
        self.__kill_unused_adb()
        self.__device_instance.deinit()

    def detect_rotation(self):
        """
        Detect the current screen rotation.
        :return:
        """
        rotation = self.__device_instance.get_rotation()
        if rotation % 2 == 0:
            # portrait
            self.__rotation = SCREEN_ORI_PORTRAIT
        else:
            # landscape
            self.__rotation = SCREEN_ORI_LANDSCAPE

    def get_sample(self):
        # frame = self.__screen.GetScreen()
        err_code, frame = self.__device_instance.get_image()
        if err_code != 0:
            return None

        if frame is not None:
            h, w = frame.shape[:2]
            if h < w:
                h = int(h * self.__short_edge_ratio)
            else:
                w = int(w * self.__short_edge_ratio)
            frame = cv2.resize(frame.copy(), (w, h))

        ret_points = []
        points = self.__parser.get_touch_points()
        # Convert the touch points into the coordinate system of the captured frame.
        for p in points:
            if p is None:
                continue
            else:
                if p.x is None or p.y is None:
                    continue
                else:
                    if self.__rotation == SCREEN_ORI_PORTRAIT:
                        x = int(p.x / self.__touchXMax * self.__screenCaptureWidth * self.__short_edge_ratio)
                        y = int(p.y / self.__touchYMax * self.__screenCaptureHeight)
                    else:
                        x = int(p.y / self.__touchYMax * self.__screenCaptureHeight)
                        y = int(self.__screenCaptureWidth * self.__short_edge_ratio
                                - p.x / self.__touchXMax * self.__screenCaptureWidth * self.__short_edge_ratio)
                    ret_points.append(TouchPoint(p.trackingId, x, y))
        return frame, ret_points
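# Illustrative sketch (editor's addition): the rotation-aware mapping applied
# inside get_sample(), factored out as a pure function. All parameter names
# here are hypothetical stand-ins for the private attributes the class keeps.
def _map_touch_to_frame(touch_x, touch_y, touch_x_max, touch_y_max,
                        capture_w, capture_h, short_edge_ratio, portrait):
    if portrait:
        # Portrait: touch axes align with the frame; scale each axis directly.
        x = int(touch_x / touch_x_max * capture_w * short_edge_ratio)
        y = int(touch_y / touch_y_max * capture_h)
    else:
        # Landscape: the panel is rotated 90 degrees relative to the frame, so
        # touch-y maps to frame-x and touch-x is mirrored into frame-y.
        x = int(touch_y / touch_y_max * capture_h)
        y = int(capture_w * short_edge_ratio
                - touch_x / touch_x_max * capture_w * short_edge_ratio)
    return x, y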
EDL.py
import demistomock as demisto from CommonServerPython import * from CommonServerUserPython import * import re from copy import deepcopy from base64 import b64decode from multiprocessing import Process from gevent.pywsgi import WSGIServer from tempfile import NamedTemporaryFile from flask import Flask, Response, request from netaddr import IPAddress, iprange_to_cidrs from typing import Callable, List, Any, Dict, cast, Tuple from ssl import SSLContext, SSLError, PROTOCOL_TLSv1_2 class Handler: @staticmethod def write(msg): demisto.info(msg) ''' GLOBAL VARIABLES ''' INTEGRATION_NAME: str = 'EDL' PAGE_SIZE: int = 200 DEMISTO_LOGGER: Handler = Handler() APP: Flask = Flask('demisto-edl') EDL_VALUES_KEY: str = 'dmst_edl_values' EDL_LIMIT_ERR_MSG: str = 'Please provide a valid integer for EDL Size' EDL_MISSING_REFRESH_ERR_MSG: str = 'Refresh Rate must be "number date_range_unit", examples: (2 hours, 4 minutes, ' \ '6 months, 1 day, etc.)' ''' REFORMATTING REGEXES ''' _PROTOCOL_RE = re.compile('^(?:[a-z]+:)*//') _PORT_RE = re.compile(r'^((?:[a-z]+:)*//([a-z0-9\-\.]+)|([a-z0-9\-\.]+))(?:\:[0-9]+)*') _URL_WITHOUT_PORT = r'\g<1>' _INVALID_TOKEN_RE = re.compile(r'(?:[^\./+=\?&]+\*[^\./+=\?&]*)|(?:[^\./+=\?&]*\*[^\./+=\?&]+)') DONT_COLLAPSE = "Don't Collapse" COLLAPSE_TO_CIDR = "To CIDRS" COLLAPSE_TO_RANGES = "To Ranges" ''' HELPER FUNCTIONS ''' def list_to_str(inp_list: list, delimiter: str = ',', map_func: Callable = str) -> str: """ Transforms a list to an str, with a custom delimiter between each list item """ str_res = "" if inp_list: if isinstance(inp_list, list): str_res = delimiter.join(map(map_func, inp_list)) else: raise AttributeError('Invalid inp_list provided to list_to_str') return str_res def get_params_port(params: dict = demisto.params()) -> int: """ Gets port from the integration parameters """ port_mapping: str = params.get('longRunningPort', '') err_msg: str port: int if port_mapping: err_msg = f'Listen Port must be an integer. {port_mapping} is not valid.' 
if ':' in port_mapping: port = try_parse_integer(port_mapping.split(':')[1], err_msg) else: port = try_parse_integer(port_mapping, err_msg) else: raise ValueError('Please provide a Listen Port.') return port def refresh_edl_context(indicator_query: str, limit: int = 0, collapse_ips: str = DONT_COLLAPSE, panos_compatible: bool = True, url_port_stripping: bool = True) -> str: """ Refresh the cache values and format using an indicator_query to call demisto.searchIndicators Parameters: indicator_query (str): Query that determines which indicators to include in the EDL (Cortex XSOAR indicator query syntax) limit (int): The maximum number of indicators to include in the EDL collapse_ips (str): Whether to collapse IPs to Ranges or CIDRs or not at all panos_compatible (bool): Whether to make the indicators PANOS compatible or not url_port_stripping (bool): Whether to strip the port from URL indicators (if a port is present) or not Returns: List(IoCs in output format) """ now = datetime.now() offset = 0 # poll indicators into edl from demisto iocs = find_indicators_to_limit(indicator_query, limit, offset, panos_compatible, url_port_stripping) out_dict, actual_indicator_amount = create_values_for_returned_dict(iocs, collapse_ips=collapse_ips) if collapse_ips != DONT_COLLAPSE: while actual_indicator_amount < limit: # from where to start the new poll and how many results should be fetched new_offset = len(iocs) + offset + actual_indicator_amount - 1 new_limit = limit - actual_indicator_amount # poll additional indicators into list from demisto new_iocs = find_indicators_to_limit(indicator_query, new_limit, new_offset) # in case no additional indicators exist - exit if len(new_iocs) == 0: break # add the new results to the existing results iocs += new_iocs # reformat the output out_dict, actual_indicator_amount = create_values_for_returned_dict(iocs, collapse_ips=collapse_ips) out_dict["last_run"] = date_to_timestamp(now) demisto.setIntegrationContext(out_dict) return out_dict[EDL_VALUES_KEY] def find_indicators_to_limit(indicator_query: str, limit: int, offset: int = 0, panos_compatible: bool = True, url_port_stripping: bool = False) -> list: """ Finds indicators using demisto.searchIndicators Parameters: indicator_query (str): Query that determines which indicators to include in the EDL (Cortex XSOAR indicator query syntax) limit (int): The maximum number of indicators to include in the EDL offset (int): The starting index from which to fetch incidents panos_compatible (bool): Whether to make the indicators PANOS compatible or not url_port_stripping (bool): Whether to strip the port from URL indicators (if a port is present) or not Returns: list: The IoCs list up until the amount set by 'limit' """ if offset: next_page = int(offset / PAGE_SIZE) # set the offset from the starting page offset_in_page = offset - (PAGE_SIZE * next_page) else: next_page = 0 offset_in_page = 0 # the second returned variable is the next page - it is implemented for a future use of repolling iocs, _ = find_indicators_to_limit_loop(indicator_query, limit, next_page=next_page, panos_compatible=panos_compatible, url_port_stripping=url_port_stripping) # if offset in page is bigger than the amount of results returned return empty list if len(iocs) <= offset_in_page: return [] return iocs[offset_in_page:limit + offset_in_page] def find_indicators_to_limit_loop(indicator_query: str, limit: int, total_fetched: int = 0, next_page: int = 0, last_found_len: int = PAGE_SIZE, panos_compatible: bool = True, url_port_stripping: bool = 
False): """ Finds indicators using while loop with demisto.searchIndicators, and returns result and last page Parameters: indicator_query (str): Query that determines which indicators to include in the EDL (Cortex XSOAR indicator query syntax) limit (int): The maximum number of indicators to include in the EDL total_fetched (int): The amount of indicators already fetched next_page (int): The page we are up to in the loop last_found_len (int): The amount of indicators found in the last fetch panos_compatible (bool): Whether to make the indicators PANOS compatible or not url_port_stripping (bool): Whether to strip the port from URL indicators (if a port is present) or not Returns: (tuple): The iocs and the last page """ iocs: List[dict] = [] if not last_found_len: last_found_len = total_fetched while last_found_len == PAGE_SIZE and limit and total_fetched < limit: formatted_iocs = [] fetched_iocs = demisto.searchIndicators(query=indicator_query, page=next_page, size=PAGE_SIZE).get('iocs', []) if panos_compatible or url_port_stripping: for ioc in fetched_iocs: ioc_value = ioc.get('value', '') if url_port_stripping: ioc_value = _PORT_RE.sub(_URL_WITHOUT_PORT, ioc_value) if panos_compatible: # protocol stripping ioc_value = _PROTOCOL_RE.sub('', ioc_value) # mix of text and wildcard in domain field handling ioc_value = _INVALID_TOKEN_RE.sub('*', ioc_value) # for PAN-OS *.domain.com does not match domain.com # we should provide both # this could generate more than num entries according to PAGE_SIZE if ioc_value.startswith('*.'): ioc_object_copy = deepcopy(ioc) ioc_object_copy['value'] = ioc_value.lstrip('*.') formatted_iocs.append(ioc_object_copy) ioc['value'] = ioc_value formatted_iocs.append(ioc) iocs.extend(formatted_iocs) else: iocs.extend(fetched_iocs) last_found_len = len(fetched_iocs) total_fetched += last_found_len next_page += 1 return iocs, next_page def ip_groups_to_cidrs(ip_range_groups: list): """Collapse ip groups list to CIDRs Args: ip_range_groups (list): a list of lists containing connected IPs Returns: list. a list of CIDRs. """ ip_ranges = [] # type:List for group in ip_range_groups: # handle single ips if len(group) == 1: ip_ranges.append(str(group[0])) continue min_ip = group[0] max_ip = group[-1] moved_ip = False # CIDR must begin with an even LSB # if the first ip does not - separate it from the rest of the range if (int(str(min_ip).split('.')[-1]) % 2) != 0: ip_ranges.append(str(min_ip)) min_ip = group[1] moved_ip = True # CIDR must end with uneven LSB # if the last ip does not - separate it from the rest of the range if (int(str(max_ip).split('.')[-1]) % 2) == 0: ip_ranges.append(str(max_ip)) max_ip = group[-2] moved_ip = True # if both min and max ips were shifted and there are only 2 ips in the range # we added both ips by the shift and now we move to the next range if moved_ip and len(group) == 2: continue else: ip_ranges.append(str(iprange_to_cidrs(min_ip, max_ip)[0].cidr)) return ip_ranges def ip_groups_to_ranges(ip_range_groups: list): """Collapse ip groups list to ranges. Args: ip_range_groups (list): a list of lists containing connected IPs Returns: list. a list of Ranges. """ ip_ranges = [] # type:List for group in ip_range_groups: # handle single ips if len(group) == 1: ip_ranges.append(str(group[0])) continue min_ip = group[0] max_ip = group[-1] ip_ranges.append(str(min_ip) + "-" + str(max_ip)) return ip_ranges def ips_to_ranges(ips: list, collapse_ips): """Collapse IPs to Ranges or CIDRs. Args: ips (list): a list of IP strings. 
        collapse_ips (str): Whether to collapse to Ranges or CIDRs.

    Returns:
        list. a list of Ranges or CIDRs.
    """
    ips_range_groups = []  # type:List
    ips = sorted(ips)
    if len(ips) > 0:
        ips_range_groups.append([ips[0]])
    if len(ips) > 1:
        for ip in ips[1:]:
            appended = False
            for group in ips_range_groups:
                if IPAddress(int(ip) + 1) in group or IPAddress(int(ip) - 1) in group:
                    group.append(ip)
                    group.sort()
                    appended = True
            if not appended:
                ips_range_groups.append([ip])
    if collapse_ips == COLLAPSE_TO_RANGES:
        return ip_groups_to_ranges(ips_range_groups)
    else:
        return ip_groups_to_cidrs(ips_range_groups)


def create_values_for_returned_dict(iocs: list, collapse_ips: str = DONT_COLLAPSE) -> Tuple[dict, int]:
    """
    Create a dictionary for output values
    """
    formatted_indicators = []
    ipv4_formatted_indicators = []
    ipv6_formatted_indicators = []
    for ioc in iocs:
        value = ioc.get('value')
        type = ioc.get('indicator_type')
        if value:
            if collapse_ips != DONT_COLLAPSE and type == 'IP':
                ipv4_formatted_indicators.append(IPAddress(value))
            elif collapse_ips != DONT_COLLAPSE and type == 'IPv6':
                ipv6_formatted_indicators.append(IPAddress(value))
            else:
                formatted_indicators.append(value)

    if len(ipv4_formatted_indicators) > 0:
        ipv4_formatted_indicators = ips_to_ranges(ipv4_formatted_indicators, collapse_ips)
        formatted_indicators.extend(ipv4_formatted_indicators)

    if len(ipv6_formatted_indicators) > 0:
        ipv6_formatted_indicators = ips_to_ranges(ipv6_formatted_indicators, collapse_ips)
        formatted_indicators.extend(ipv6_formatted_indicators)
    return {EDL_VALUES_KEY: list_to_str(formatted_indicators, '\n')}, len(formatted_indicators)


def get_edl_ioc_values(on_demand, limit, indicator_query='', last_run=None, cache_refresh_rate=None,
                       collapse_ips: str = DONT_COLLAPSE, panos_compatible: bool = True,
                       url_port_stripping: bool = False) -> str:
    """
    Get the ioc list to return in the edl
    """
    # on_demand ignores cache
    if on_demand:
        values_str = get_ioc_values_str_from_context()
    else:
        if last_run:
            cache_time, _ = parse_date_range(cache_refresh_rate, to_timestamp=True)
            if last_run <= cache_time:
                values_str = refresh_edl_context(indicator_query, limit=limit,
                                                 panos_compatible=panos_compatible,
                                                 url_port_stripping=url_port_stripping,
                                                 collapse_ips=collapse_ips)
            else:
                values_str = get_ioc_values_str_from_context()
        else:
            values_str = refresh_edl_context(indicator_query, limit=limit,
                                             panos_compatible=panos_compatible,
                                             url_port_stripping=url_port_stripping,
                                             collapse_ips=collapse_ips)
    return values_str


def get_ioc_values_str_from_context() -> str:
    """
    Extracts output values from cache
    """
    cache_dict = demisto.getIntegrationContext()
    return cache_dict.get(EDL_VALUES_KEY, '')


def try_parse_integer(int_to_parse: Any, err_msg: str) -> int:
    """
    Tries to parse an integer, and if fails will throw DemistoException with given err_msg
    """
    try:
        res = int(int_to_parse)
    except (TypeError, ValueError):
        raise DemistoException(err_msg)
    return res


def validate_basic_authentication(headers: dict, username: str, password: str) -> bool:
    """
    Checks whether the authentication is valid.
:param headers: The headers of the http request :param username: The integration's username :param password: The integration's password :return: Boolean which indicates whether the authentication is valid or not """ credentials: str = headers.get('Authorization', '') if not credentials or 'Basic ' not in credentials: return False encoded_credentials: str = credentials.split('Basic ')[1] credentials: str = b64decode(encoded_credentials).decode('utf-8') if ':' not in credentials: return False credentials_list = credentials.split(':') if len(credentials_list) != 2: return False user, pwd = credentials_list return user == username and pwd == password ''' ROUTE FUNCTIONS ''' @APP.route('/', methods=['GET']) def route_edl_values() -> Response: """ Main handler for values saved in the integration context """ params = demisto.params() credentials = params.get('credentials') if params.get('credentials') else {} username: str = credentials.get('identifier', '') password: str = credentials.get('password', '') if username and password: headers: dict = cast(Dict[Any, Any], request.headers) if not validate_basic_authentication(headers, username, password): err_msg: str = 'Basic authentication failed. Make sure you are using the right credentials.' demisto.debug(err_msg) return Response(err_msg, status=401) panos_compatible: bool = params.get('panos_compatible', False) url_port_stripping: bool = params.get('url_port_stripping', False) values = get_edl_ioc_values( on_demand=params.get('on_demand'), limit=try_parse_integer(params.get('edl_size'), EDL_LIMIT_ERR_MSG), last_run=demisto.getIntegrationContext().get('last_run'), indicator_query=params.get('indicators_query'), cache_refresh_rate=params.get('cache_refresh_rate'), panos_compatible=panos_compatible, url_port_stripping=url_port_stripping, collapse_ips=params.get('collapse_ips') ) return Response(values, status=200, mimetype='text/plain') ''' COMMAND FUNCTIONS ''' def test_module(args, params): """ Validates: 1. Valid port. 2. Valid cache_refresh_rate """ get_params_port(params) on_demand = params.get('on_demand', None) if not on_demand: try_parse_integer(params.get('edl_size'), EDL_LIMIT_ERR_MSG) # validate EDL Size was set query = params.get('indicators_query') # validate indicators_query isn't empty if not query: raise ValueError('"Indicator Query" is required. Provide a valid query.') cache_refresh_rate = params.get('cache_refresh_rate', '') if not cache_refresh_rate: raise ValueError(EDL_MISSING_REFRESH_ERR_MSG) # validate cache_refresh_rate value range_split = cache_refresh_rate.split(' ') if len(range_split) != 2: raise ValueError(EDL_MISSING_REFRESH_ERR_MSG) try_parse_integer(range_split[0], 'Invalid time value for the Refresh Rate. Must be a valid integer.') if not range_split[1] in ['minute', 'minutes', 'hour', 'hours', 'day', 'days', 'month', 'months', 'year', 'years']: raise ValueError( 'Invalid time unit for the Refresh Rate. 
Must be minutes, hours, days, months, or years.') parse_date_range(cache_refresh_rate, to_timestamp=True) run_long_running(params, is_test=True) return 'ok', {}, {} def run_long_running(params, is_test=False): """ Start the long running server :param params: Demisto params :param is_test: Indicates whether it's test-module run or regular run :return: None """ certificate: str = params.get('certificate', '') private_key: str = params.get('key', '') certificate_path = str() private_key_path = str() try: port = get_params_port(params) ssl_args = dict() if (certificate and not private_key) or (private_key and not certificate): raise DemistoException('If using HTTPS connection, both certificate and private key should be provided.') if certificate and private_key: certificate_file = NamedTemporaryFile(delete=False) certificate_path = certificate_file.name certificate_file.write(bytes(certificate, 'utf-8')) certificate_file.close() private_key_file = NamedTemporaryFile(delete=False) private_key_path = private_key_file.name private_key_file.write(bytes(private_key, 'utf-8')) private_key_file.close() context = SSLContext(PROTOCOL_TLSv1_2) context.load_cert_chain(certificate_path, private_key_path) ssl_args['ssl_context'] = context demisto.debug('Starting HTTPS Server') else: demisto.debug('Starting HTTP Server') server = WSGIServer(('', port), APP, **ssl_args, log=DEMISTO_LOGGER) if is_test: server_process = Process(target=server.serve_forever) server_process.start() time.sleep(5) server_process.terminate() else: server.serve_forever() except SSLError as e: ssl_err_message = f'Failed to validate certificate and/or private key: {str(e)}' demisto.error(ssl_err_message) raise ValueError(ssl_err_message) except Exception as e: demisto.error(f'An error occurred in long running loop: {str(e)}') raise ValueError(str(e)) finally: if certificate_path: os.unlink(certificate_path) if private_key_path: os.unlink(private_key_path) def update_edl_command(args, params): """ Updates the EDL values and format on demand """ on_demand = demisto.params().get('on_demand') if not on_demand: raise DemistoException( '"Update EDL On Demand" is off. If you want to update the EDL manually please toggle it on.') limit = try_parse_integer(args.get('edl_size', params.get('edl_size')), EDL_LIMIT_ERR_MSG) print_indicators = args.get('print_indicators') query = args.get('query') collapse_ips = args.get('collapse_ips') indicators = refresh_edl_context(query, limit=limit, collapse_ips=collapse_ips) hr = tableToMarkdown('EDL was updated successfully with the following values', indicators, ['Indicators']) if print_indicators == 'true' else 'EDL was updated successfully' return hr, {}, indicators def main(): """ Main """ params = demisto.params() credentials = params.get('credentials') if params.get('credentials') else {} username: str = credentials.get('identifier', '') password: str = credentials.get('password', '') if (username and not password) or (password and not username): err_msg: str = 'If using credentials, both username and password should be provided.' 
demisto.debug(err_msg) raise DemistoException(err_msg) command = demisto.command() demisto.debug('Command being called is {}'.format(command)) commands = { 'test-module': test_module, 'edl-update': update_edl_command } try: if command == 'long-running-execution': run_long_running(params) else: readable_output, outputs, raw_response = commands[command](demisto.args(), params) return_outputs(readable_output, outputs, raw_response) except Exception as e: err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]' return_error(err_msg) if __name__ in ['__main__', '__builtin__', 'builtins']: main()
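# Illustrative sketch (editor's addition): how the IP-collapsing helpers above
# behave. Consecutive addresses merge into one group; COLLAPSE_TO_CIDR then
# applies the even/odd last-octet boundary rules from ip_groups_to_cidrs, so a
# run that starts on an odd address is split. The expected outputs below are
# what the code above should produce, assuming netaddr's default formatting.
def _sketch_ip_collapsing():
    ips = [IPAddress('1.1.1.1'), IPAddress('1.1.1.2'),
           IPAddress('1.1.1.3'), IPAddress('2.2.2.2')]
    print(ips_to_ranges(list(ips), COLLAPSE_TO_RANGES))
    # -> ['1.1.1.1-1.1.1.3', '2.2.2.2']
    print(ips_to_ranges(list(ips), COLLAPSE_TO_CIDR))
    # -> ['1.1.1.1', '1.1.1.2/31', '2.2.2.2']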
common.py
# Copyright (C) 2008 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import errno import getopt import getpass import imp import os import platform import re import shutil import subprocess import sys import tempfile import threading import time import zipfile try: from hashlib import sha1 as sha1 except ImportError: from sha import sha as sha1 LOCAL_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')) RELEASETOOLS_DIR = os.path.abspath(os.path.join(LOCAL_DIR, '../../../build/tools/releasetools')) sys.path.append(RELEASETOOLS_DIR) # missing in Python 2.4 and before if not hasattr(os, "SEEK_SET"): os.SEEK_SET = 0 class Options(object): pass OPTIONS = Options() OPTIONS.search_path = "out/host/linux-x86" OPTIONS.verbose = False OPTIONS.tempfiles = [] OPTIONS.device_specific = None OPTIONS.extras = {} OPTIONS.info_dict = None # Values for "certificate" in apkcerts that mean special things. SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL") class ExternalError(RuntimeError): pass def Run(args, **kwargs): """Create and return a subprocess.Popen object, printing the command line on the terminal if -v was specified.""" if OPTIONS.verbose: print " running: ", " ".join(args) return subprocess.Popen(args, **kwargs) def CloseInheritedPipes(): """ Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds before doing other work.""" if platform.system() != "Darwin": return for d in range(3, 1025): try: stat = os.fstat(d) if stat is not None: pipebit = stat[0] & 0x1000 if pipebit != 0: os.close(d) except OSError: pass def LoadInfoDict(zip): """Read and parse the META/misc_info.txt key/value pairs from the input target files and return a dict.""" d = {} try: for line in zip.read("META/misc_info.txt").split("\n"): line = line.strip() if not line or line.startswith("#"): continue k, v = line.split("=", 1) d[k] = v except KeyError: # ok if misc_info.txt doesn't exist pass # backwards compatibility: These values used to be in their own # files. Look for them, in case we're processing an old # target_files zip. 
if "mkyaffs2_extra_flags" not in d: try: d["mkyaffs2_extra_flags"] = zip.read("META/mkyaffs2-extra-flags.txt").strip() except KeyError: # ok if flags don't exist pass if "recovery_api_version" not in d: try: d["recovery_api_version"] = zip.read("META/recovery-api-version.txt").strip() except KeyError: raise ValueError("can't find recovery API version in input target-files") if "tool_extensions" not in d: try: d["tool_extensions"] = zip.read("META/tool-extensions.txt").strip() except KeyError: # ok if extensions don't exist pass try: data = zip.read("META/imagesizes.txt") for line in data.split("\n"): if not line: continue name, value = line.split(" ", 1) if not value: continue if name == "blocksize": d[name] = value else: d[name + "_size"] = value except KeyError: pass def makeint(key): if key in d: d[key] = int(d[key], 0) makeint("recovery_api_version") makeint("blocksize") makeint("system_size") makeint("userdata_size") makeint("cache_size") makeint("recovery_size") makeint("boot_size") d["fstab"] = LoadRecoveryFSTab(zip) return d def LoadRecoveryFSTab(zip): class Partition(object): pass try: data = zip.read("RECOVERY/RAMDISK/etc/recovery.fstab") except KeyError: print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab in %s." % zip data = "" d = {} for line in data.split("\n"): line = line.strip() if not line or line.startswith("#"): continue pieces = line.split() if not (3 <= len(pieces) <= 7): raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,)) p = Partition() p.mount_point = pieces[0] p.fs_type = pieces[1] p.device = pieces[2] p.length = 0 options = None if len(pieces) >= 4 and pieces[3] != 'NULL': if pieces[3].startswith("/"): p.device2 = pieces[3] if len(pieces) >= 5: options = pieces[4] else: p.device2 = None options = pieces[3] else: p.device2 = None if options: options = options.split(",") for i in options: if i.startswith("length="): p.length = int(i[7:]) else: print "%s: unknown option \"%s\"" % (p.mount_point, i) d[p.mount_point] = p return d def DumpInfoDict(d): for k, v in sorted(d.items()): print "%-25s = (%s) %s" % (k, type(v).__name__, v) def BuildBootableImage(sourcedir, fs_config_file): """Take a kernel, cmdline, and ramdisk directory from the input (in 'sourcedir'), and turn them into a boot image. 
  Return the image data, or None if sourcedir does not appear to
  contain files for building the requested image."""

  if (not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK) or
      not os.access(os.path.join(sourcedir, "kernel"), os.F_OK)):
    return None

  ramdisk_img = tempfile.NamedTemporaryFile()
  img = tempfile.NamedTemporaryFile()

  if os.access(fs_config_file, os.F_OK):
    cmd = ["mkbootfs", "-f", fs_config_file, os.path.join(sourcedir, "RAMDISK")]
  else:
    cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
  p1 = Run(cmd, stdout=subprocess.PIPE)
  p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

  p2.wait()
  p1.wait()
  assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
  assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

  # Check whether a U-Boot image was requested.
  fn = os.path.join(sourcedir, "ubootargs")
  if os.access(fn, os.F_OK):
    cmd = ["mkimage"]
    for argument in open(fn).read().rstrip("\n").split(" "):
      cmd.append(argument)
    cmd.append("-d")
    cmd.append(os.path.join(sourcedir, "kernel") + ":" + ramdisk_img.name)
    cmd.append(img.name)
  else:
    cmd = ["mkbootimg", "--kernel", os.path.join(sourcedir, "kernel")]

    fn = os.path.join(sourcedir, "cmdline")
    if os.access(fn, os.F_OK):
      cmd.append("--cmdline")
      cmd.append(open(fn).read().rstrip("\n"))

    fn = os.path.join(sourcedir, "base")
    if os.access(fn, os.F_OK):
      cmd.append("--base")
      cmd.append(open(fn).read().rstrip("\n"))

    fn = os.path.join(sourcedir, "pagesize")
    if os.access(fn, os.F_OK):
      cmd.append("--pagesize")
      cmd.append(open(fn).read().rstrip("\n"))

    fn = os.path.join(sourcedir, "ramdiskaddr")
    if os.access(fn, os.F_OK):
      cmd.append("--ramdiskaddr")
      cmd.append(open(fn).read().rstrip("\n"))

    cmd.extend(["--ramdisk", ramdisk_img.name,
                "--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  img.seek(0, os.SEEK_SET)
  data = img.read()

  ramdisk_img.close()
  img.close()

  return data


def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir):
  """Return a File object (with name 'name') with the desired bootable
  image.  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name
  'prebuilt_name', otherwise construct it from the source files in
  'unpack_dir'/'tree_subdir'."""

  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)
  else:
    print "building image from target_files %s..." % (tree_subdir,)
    fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
    return File(name, BuildBootableImage(
        os.path.join(unpack_dir, tree_subdir),
        os.path.join(unpack_dir, fs_config)))


def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
""" tmp = tempfile.mkdtemp(prefix="targetfiles-") OPTIONS.tempfiles.append(tmp) def unzip_to_dir(filename, dirname): cmd = ["unzip", "-o", "-q", filename, "-d", dirname] if pattern is not None: cmd.append(pattern) p = Run(cmd, stdout=subprocess.PIPE) p.communicate() if p.returncode != 0: raise ExternalError("failed to unzip input target-files \"%s\"" % (filename,)) m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE) if m: unzip_to_dir(m.group(1), tmp) unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES")) filename = m.group(1) else: unzip_to_dir(filename, tmp) return tmp, zipfile.ZipFile(filename, "r") def GetKeyPasswords(keylist): """Given a list of keys, prompt the user to enter passwords for those which require them. Return a {key: password} dict. password will be None if the key has no password.""" no_passwords = [] need_passwords = [] devnull = open("/dev/null", "w+b") for k in sorted(keylist): # We don't need a password for things that aren't really keys. if k in SPECIAL_CERT_STRINGS: no_passwords.append(k) continue p = Run(["openssl", "pkcs8", "-in", k+".pk8", "-inform", "DER", "-nocrypt"], stdin=devnull.fileno(), stdout=devnull.fileno(), stderr=subprocess.STDOUT) p.communicate() if p.returncode == 0: no_passwords.append(k) else: need_passwords.append(k) devnull.close() key_passwords = PasswordManager().GetPasswords(need_passwords) key_passwords.update(dict.fromkeys(no_passwords, None)) return key_passwords def SignFile(input_name, output_name, key, password, align=None, whole_file=False): """Sign the input_name zip/jar/apk, producing output_name. Use the given key and password (the latter may be None if the key does not have a password. If align is an integer > 1, zipalign is run to align stored files in the output zip on 'align'-byte boundaries. If whole_file is true, use the "-w" option to SignApk to embed a signature that covers the whole file in the archive comment of the zip file. """ if align == 0 or align == 1: align = None if align: temp = tempfile.NamedTemporaryFile() sign_name = temp.name else: sign_name = output_name check = (sys.maxsize > 2**32) if check is True: cmd = ["java", "-Xmx2048m", "-jar", os.path.join(OPTIONS.search_path, "framework", "signapk.jar")] else: cmd = ["java", "-Xmx1024m", "-jar", os.path.join(OPTIONS.search_path, "framework", "signapk.jar")] if whole_file: cmd.append("-w") cmd.extend([key + ".x509.pem", key + ".pk8", input_name, sign_name]) p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE) if password is not None: password += "\n" p.communicate(password) if p.returncode != 0: raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,)) if align: p = Run(["zipalign", "-f", str(align), sign_name, output_name]) p.communicate() if p.returncode != 0: raise ExternalError("zipalign failed: return code %s" % (p.returncode,)) temp.close() def CheckSize(data, target, info_dict): """Check the data string passed against the max size limit, if any, for the given target. Raise exception if the data is too big. 
Print a warning if the data is nearing the maximum size.""" if target.endswith(".img"): target = target[:-4] mount_point = "/" + target if info_dict["fstab"]: if mount_point == "/userdata": mount_point = "/data" p = info_dict["fstab"][mount_point] fs_type = p.fs_type device = p.device if "/" in device: device = device[device.rfind("/")+1:] limit = info_dict.get(device + "_size", None) if not fs_type or not limit: return if fs_type == "yaffs2": # image size should be increased by 1/64th to account for the # spare area (64 bytes per 2k page) limit = limit / 2048 * (2048+64) size = len(data) pct = float(size) * 100.0 / limit msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit) if pct >= 99.0: raise ExternalError(msg) elif pct >= 95.0: print print " WARNING: ", msg print elif OPTIONS.verbose: print " ", msg def ReadApkCerts(tf_zip): """Given a target_files ZipFile, parse the META/apkcerts.txt file and return a {package: cert} dict.""" certmap = {} for line in tf_zip.read("META/apkcerts.txt").split("\n"): line = line.strip() if not line: continue m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+' r'private_key="(.*)"$', line) if m: name, cert, privkey = m.groups() if cert in SPECIAL_CERT_STRINGS and not privkey: certmap[name] = cert elif (cert.endswith(".x509.pem") and privkey.endswith(".pk8") and cert[:-9] == privkey[:-4]): certmap[name] = cert[:-9] else: raise ValueError("failed to parse line from apkcerts.txt:\n" + line) return certmap COMMON_DOCSTRING = """ -p (--path) <dir> Prepend <dir>/bin to the list of places to search for binaries run by this script, and expect to find jars in <dir>/framework. -s (--device_specific) <file> Path to the python module containing device-specific releasetools code. -x (--extra) <key=value> Add a key/value pair to the 'extras' dict, which device-specific extension code may look at. -v (--verbose) Show command lines being executed. -h (--help) Display this usage message and exit. """ def Usage(docstring): print docstring.rstrip("\n") print COMMON_DOCSTRING def ParseOptions(argv, docstring, extra_opts="", extra_long_opts=(), extra_option_handler=None): """Parse the options in argv and return any arguments that aren't flags. docstring is the calling module's docstring, to be displayed for errors and -h. 
  extra_opts and extra_long_opts are for flags defined by the caller,
  which are processed by passing them to extra_option_handler."""

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "device_specific=", "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError, err:
    Usage(docstring)
    print "**", str(err), "**"
    sys.exit(2)

  path_specified = False

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                        os.pathsep + os.environ["PATH"])

  return args


def Cleanup():
  for i in OPTIONS.tempfiles:
    if os.path.isdir(i):
      shutil.rmtree(i)
    else:
      os.remove(i)


class PasswordManager(object):
  def __init__(self):
    self.editor = os.getenv("EDITOR", None)
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor
    is available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      for i in missing:
        current[i] = ""

      if not first:
        print "key file %s still missing some passwords." % (self.pwfile,)
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current):
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.iteritems()):
      if v:
        result[k] = v
      else:
        while True:
          result[k] = getpass.getpass("Enter password for %s key> "
                                      % (k,)).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    os.chmod(self.pwfile, 0600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    sorted_list = [(not v, k, v) for (k, v) in current.iteritems()]
    sorted_list.sort()
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[ %s ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
first_line = i + 4 f.close() p = Run([self.editor, "+%d" % (first_line,), self.pwfile]) _, _ = p.communicate() return self.ReadFile() def ReadFile(self): result = {} if self.pwfile is None: return result try: f = open(self.pwfile, "r") for line in f: line = line.strip() if not line or line[0] == '#': continue m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line) if not m: print "failed to parse password file: ", line else: result[m.group(2)] = m.group(1) f.close() except IOError, e: if e.errno != errno.ENOENT: print "error reading password file: ", str(e) return result def ZipWriteStr(zip, filename, data, perms=0644): # use a fixed timestamp so the output is repeatable. zinfo = zipfile.ZipInfo(filename=filename, date_time=(2009, 1, 1, 0, 0, 0)) zinfo.compress_type = zip.compression zinfo.external_attr = perms << 16 zip.writestr(zinfo, data) class DeviceSpecificParams(object): module = None def __init__(self, **kwargs): """Keyword arguments to the constructor become attributes of this object, which is passed to all functions in the device-specific module.""" for k, v in kwargs.iteritems(): setattr(self, k, v) self.extras = OPTIONS.extras if self.module is None: path = OPTIONS.device_specific if not path: return try: if os.path.isdir(path): info = imp.find_module("releasetools", [path]) else: d, f = os.path.split(path) b, x = os.path.splitext(f) if x == ".py": f = b info = imp.find_module(f, [d]) self.module = imp.load_module("device_specific", *info) except ImportError: print "unable to load device-specific module; assuming none" def _DoCall(self, function_name, *args, **kwargs): """Call the named function in the device-specific module, passing the given args and kwargs. The first argument to the call will be the DeviceSpecific object itself. If there is no module, or the module does not define the function, return the value of the 'default' kwarg (which itself defaults to None).""" if self.module is None or not hasattr(self.module, function_name): return kwargs.get("default", None) return getattr(self.module, function_name)(*((self,) + args), **kwargs) def FullOTA_Assertions(self): """Called after emitting the block of assertions at the top of a full OTA package. Implementations can add whatever additional assertions they like.""" return self._DoCall("FullOTA_Assertions") def FullOTA_InstallBegin(self): """Called at the start of full OTA installation.""" return self._DoCall("FullOTA_InstallBegin") def FullOTA_InstallEnd(self): """Called at the end of full OTA installation; typically this is used to install the image for the device's baseband processor.""" return self._DoCall("FullOTA_InstallEnd") def IncrementalOTA_Assertions(self): """Called after emitting the block of assertions at the top of an incremental OTA package. 
Implementations can add whatever additional assertions they like.""" return self._DoCall("IncrementalOTA_Assertions") def IncrementalOTA_VerifyBegin(self): """Called at the start of the verification phase of incremental OTA installation; additional checks can be placed here to abort the script before any changes are made.""" return self._DoCall("IncrementalOTA_VerifyBegin") def IncrementalOTA_VerifyEnd(self): """Called at the end of the verification phase of incremental OTA installation; additional checks can be placed here to abort the script before any changes are made.""" return self._DoCall("IncrementalOTA_VerifyEnd") def IncrementalOTA_InstallBegin(self): """Called at the start of incremental OTA installation (after verification is complete).""" return self._DoCall("IncrementalOTA_InstallBegin") def IncrementalOTA_InstallEnd(self): """Called at the end of incremental OTA installation; typically this is used to install the image for the device's baseband processor.""" return self._DoCall("IncrementalOTA_InstallEnd") class File(object): def __init__(self, name, data): self.name = name self.data = data self.size = len(data) self.sha1 = sha1(data).hexdigest() @classmethod def FromLocalFile(cls, name, diskname): f = open(diskname, "rb") data = f.read() f.close() return File(name, data) def WriteToTemp(self): t = tempfile.NamedTemporaryFile() t.write(self.data) t.flush() return t def AddToZip(self, z): ZipWriteStr(z, self.name, self.data) DIFF_PROGRAM_BY_EXT = { ".gz" : "imgdiff", ".zip" : ["imgdiff", "-z"], ".jar" : ["imgdiff", "-z"], ".apk" : ["imgdiff", "-z"], ".img" : "imgdiff", } class Difference(object): def __init__(self, tf, sf): self.tf = tf self.sf = sf self.patch = None def ComputePatch(self): """Compute the patch (as a string of data) needed to turn sf into tf. Returns the same tuple as GetPatch().""" tf = self.tf sf = self.sf ext = os.path.splitext(tf.name)[1] diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff") ttemp = tf.WriteToTemp() stemp = sf.WriteToTemp() ext = os.path.splitext(tf.name)[1] try: ptemp = tempfile.NamedTemporaryFile() if isinstance(diff_program, list): cmd = copy.copy(diff_program) else: cmd = [diff_program] cmd.append(stemp.name) cmd.append(ttemp.name) cmd.append(ptemp.name) p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) _, err = p.communicate() if err or p.returncode != 0: print "WARNING: failure running %s:\n%s\n" % (diff_program, err) return None diff = ptemp.read() finally: ptemp.close() stemp.close() ttemp.close() self.patch = diff return self.tf, self.sf, self.patch def GetPatch(self): """Return a tuple (target_file, source_file, patch_data). patch_data may be None if ComputePatch hasn't been called, or if computing the patch failed.""" return self.tf, self.sf, self.patch def ComputeDifferences(diffs): """Call ComputePatch on all the Difference objects in 'diffs'.""" print len(diffs), "diffs to compute" # Do the largest files first, to try and reduce the long-pole effect. by_size = [(i.tf.size, i) for i in diffs] by_size.sort(reverse=True) by_size = [i[1] for i in by_size] lock = threading.Lock() diff_iter = iter(by_size) # accessed under lock def worker(): try: lock.acquire() for d in diff_iter: lock.release() start = time.time() d.ComputePatch() dur = time.time() - start lock.acquire() tf, sf, patch = d.GetPatch() if sf.name == tf.name: name = tf.name else: name = "%s (%s)" % (tf.name, sf.name) if patch is None: print "patching failed! 
%s" % (name,) else: print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % ( dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name) lock.release() except Exception, e: print e raise # start worker threads; wait for them all to finish. threads = [threading.Thread(target=worker) for i in range(OPTIONS.worker_threads)] for th in threads: th.start() while threads: threads.pop().join() # map recovery.fstab's fs_types to mount/format "partition types" PARTITION_TYPES = { "ext2": "EMMC", "ext3": "EMMC", "ext4": "EMMC", "emmc": "EMMC", "mtd": "MTD", "yaffs2": "MTD", "vfat": "EMMC" } def GetTypeAndDevice(mount_point, info): fstab = info["fstab"] if fstab: return PARTITION_TYPES[fstab[mount_point].fs_type], fstab[mount_point].device else: return None
xla_client_test.py
# Lint as: python3 # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the Python extension-based XLA client.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import itertools import threading import unittest from absl import flags from absl.testing import absltest from absl.testing import parameterized import numpy as np from tensorflow.compiler.xla.python import xla_client # pylint: disable=g-import-not-at-top try: from tensorflow.compiler.xla.python import custom_call_for_test except ImportError: custom_call_for_test = None try: import portpicker except ImportError: portpicker = None # pylint: enable=g-import-not-at-top bfloat16 = xla_client.bfloat16 ops = xla_client.ops FLAGS = flags.FLAGS # We choose to ignore pylint's complaints about complex comprehensions, which we # use widely for parameterizing tests. # pylint: disable=g-complex-comprehension def TestFactory(xla_backend, cloud_tpu=False): tests = [] if not cloud_tpu: int_dtypes = [np.int32, np.int64, np.uint32, np.uint64] # TODO(phawkins): test np.float16, where supported. float_dtypes = [bfloat16, np.float32, np.float64] complex_dtypes = [np.complex64, np.complex128] standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_] else: int_dtypes = [np.int32, np.uint32] float_dtypes = [np.float32] complex_dtypes = [np.complex64] standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_] dlpack_dtypes = int_dtypes + float_dtypes class ComputationTest(parameterized.TestCase): """Base class for running an XLA Computation through the local client.""" def setUp(self): super(ComputationTest, self).setUp() self.backend = xla_backend() def _NewComputation(self, name=None): if name is None: name = self.id() return xla_client.XlaBuilder(name) def _Execute(self, c, arguments): compiled_c = self.backend.compile(c.build()) return xla_client.execute_with_python_values( compiled_c, arguments, backend=self.backend) def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected): assert expected is not None results = self._Execute(c, arguments) self.assertLen(results, len(expected)) for result, e in zip(results, expected): # Numpy's comparison methods are a bit too lenient by treating inputs as # "array-like", meaning that scalar 4 will be happily compared equal to # [[4]]. We'd like to be more strict so assert shapes as well. 
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(e).shape) assert_func(result, e) def _ExecuteAndCompareExact(self, c, arguments=(), expected=None): self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected) def _ExecuteAndCompareClose(self, c, arguments=(), expected=None, rtol=1e-7, atol=0): self._ExecuteAndAssertWith( functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol), c, arguments, expected) def NumpyArrayF32(*args, **kwargs): """Convenience wrapper to create Numpy arrays with a np.float32 dtype.""" return np.array(*args, dtype=np.float32, **kwargs) def NumpyArrayS32(*args, **kwargs): """Convenience wrapper to create Numpy arrays with a np.int32 dtype.""" return np.array(*args, dtype=np.int32, **kwargs) def NumpyArrayBool(*args, **kwargs): """Convenience wrapper to create Numpy arrays with a np.bool dtype.""" return np.array(*args, dtype=np.bool, **kwargs) class ComputationPrinting(absltest.TestCase): def setUp(self): super(ComputationPrinting, self).setUp() self.backend = xla_backend() def ExampleComputation(self): builder = xla_client.XlaBuilder("acomputation") p0 = ops.Parameter(builder, 0, xla_client.shape_from_pyval(np.float32(0))) p1 = ops.Parameter( builder, 1, xla_client.shape_from_pyval(np.zeros((4,), np.float32))) x = ops.Mul(p0, p1) ops.Add(x, x) return builder.build() def testComputationToHloText(self): computation = self.ExampleComputation() hlo_text = computation.as_hlo_text() self.assertTrue(hlo_text.startswith("HloModule acomputation")) def testComputationToHloGraph(self): computation = self.ExampleComputation() hlo_dot_graph = computation.as_hlo_dot_graph() self.assertTrue(hlo_dot_graph.startswith("digraph ")) def testHloModuleToHloText(self): computation = self.ExampleComputation() hlo_text = computation.as_hlo_module().to_string() self.assertTrue(hlo_text.startswith("HloModule acomputation")) def testHloModuleToHloGraph(self): computation = self.ExampleComputation() hlo_dot_graph = xla_client._xla.hlo_module_to_dot_graph( computation.as_hlo_module()) self.assertTrue(hlo_dot_graph.startswith("digraph ")) @unittest.skipIf(cloud_tpu, "not implemented") def testCompiledHloModuleToHloText(self): computation = self.ExampleComputation() executable = self.backend.compile(computation) hlo_modules = executable.hlo_modules() self.assertLen(hlo_modules, 1) hlo_text = hlo_modules[0].to_string() self.assertTrue(hlo_text.startswith("HloModule acomputation")) self.assertIn("fusion", hlo_text) tests.append(ComputationPrinting) class ComputationHashTest(absltest.TestCase): def testHash(self): builder0 = xla_client.XlaBuilder("computation0") p0 = ops.Parameter(builder0, 0, xla_client.shape_from_pyval(np.float32(0))) p1 = ops.Parameter( builder0, 1, xla_client.shape_from_pyval(np.zeros((4,), np.float32))) ops.Mul(p0, p1) computation0 = builder0.build() builder1 = xla_client.XlaBuilder("computation1") p0 = ops.Parameter(builder1, 0, xla_client.shape_from_pyval(np.float32(0))) p1 = ops.Parameter( builder1, 1, xla_client.shape_from_pyval(np.zeros((4,), np.float32))) ops.Mul(p0, p1) computation1 = builder1.build() self.assertEqual(computation0.hash(), computation1.hash()) tests.append(ComputationHashTest) class ComputationsWithConstantsTest(ComputationTest): """Tests focusing on Constant ops.""" @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in int_dtypes + float_dtypes) def testConstantScalarSum(self, dtype): if dtype == np.int8 and self.backend.platform == "tpu": self.skipTest("TPU 
doesn't support int8") c = self._NewComputation() ops.Add(ops.Constant(c, dtype(1.11)), ops.Constant(c, dtype(3.14))) self._ExecuteAndCompareClose(c, expected=[dtype(1.11) + dtype(3.14)]) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testConstantVectorMul(self, dtype): c = self._NewComputation() ops.Mul( ops.Constant(c, np.array([2.5, 3.3, -1.2, 0.7], dtype)), ops.Constant(c, np.array([-1.2, 2, -2, -3], dtype))) self._ExecuteAndCompareClose( c, expected=[[-3, 6.6, 2.4, -2.1]], rtol=3e-3) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testConstantVectorScalarDiv(self, dtype): c = self._NewComputation() ops.Div( ops.Constant(c, np.array([1.5, 2.5, 3.0, -10.8], dtype=dtype)), ops.Constant(c, dtype(2.0))) self._ExecuteAndCompareClose( c, expected=[[0.75, 1.25, 1.5, -5.4]], rtol=2e-3) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testConstantVectorScalarPow(self, dtype): c = self._NewComputation() ops.Pow( ops.Constant(c, np.array([1.5, 2.5, 3.0], dtype=dtype)), ops.Constant(c, dtype(2.))) self._ExecuteAndCompareClose(c, expected=[[2.25, 6.25, 9.]]) def testIota(self): c = self._NewComputation() ops.Iota(c, xla_client.PrimitiveType.F32, 10) self._ExecuteAndCompareExact( c, expected=[np.arange(10, dtype=np.float32)]) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in int_dtypes) def testBroadcastedIota(self, dtype): c = self._NewComputation() shape = xla_client.Shape.array_shape( xla_client.dtype_to_etype(dtype), (2, 3)) ops.Iota(c, shape, 1) expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=dtype) self._ExecuteAndCompareExact(c, expected=[expected]) def testBooleanAnd(self): c = self._NewComputation() ops.And( ops.Constant(c, NumpyArrayBool([True, False, True, False])), ops.Constant(c, NumpyArrayBool([True, True, False, False]))) self._ExecuteAndCompareExact(c, expected=[[True, False, False, False]]) def testBooleanOr(self): c = self._NewComputation() ops.Or( ops.Constant(c, NumpyArrayBool([True, False, True, False])), ops.Constant(c, NumpyArrayBool([True, True, False, False]))) self._ExecuteAndCompareExact(c, expected=[[True, True, True, False]]) def testBooleanXor(self): c = self._NewComputation() ops.Xor( ops.Constant(c, NumpyArrayBool([True, False, True, False])), ops.Constant(c, NumpyArrayBool([True, True, False, False]))) self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]]) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testSum2D(self, dtype): c = self._NewComputation() ops.Add( ops.Constant(c, np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)), ops.Constant(c, np.array([[1, -1, 1], [-1, 1, -1]], dtype=dtype))) self._ExecuteAndCompareClose(c, expected=[[[2, 1, 4], [3, 6, 5]]]) def testShiftLeft(self): c = self._NewComputation() ops.ShiftLeft( ops.Constant(c, NumpyArrayS32([3])), ops.Constant(c, NumpyArrayS32([2]))) self._ExecuteAndCompareClose(c, expected=[[12]]) def testShiftRightArithmetic(self): c = self._NewComputation() ops.ShiftRightArithmetic( ops.Constant(c, NumpyArrayS32([-2])), ops.Constant(c, NumpyArrayS32([1]))) self._ExecuteAndCompareClose(c, expected=[[-1]]) def testShiftRightLogical(self): c = self._NewComputation() ops.ShiftRightLogical( ops.Constant(c, 
NumpyArrayS32([-1])), ops.Constant(c, NumpyArrayS32([1]))) self._ExecuteAndCompareClose(c, expected=[[2**31 - 1]]) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testSum2DWith1DBroadcastDim0(self, dtype): # sum of a 2D array with a 1D array where the latter is replicated across # dimension 0 to match the former's shape. c = self._NewComputation() ops.Add( ops.Constant(c, np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype)), ops.Constant(c, np.array([10, 20, 30], dtype=dtype)), broadcast_dimensions=(0,)) self._ExecuteAndCompareClose( c, expected=[[[11, 12, 13], [24, 25, 26], [37, 38, 39]]]) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testSum2DWith1DBroadcastDim1(self, dtype): # sum of a 2D array with a 1D array where the latter is replicated across # dimension 1 to match the former's shape. c = self._NewComputation() ops.Add( ops.Constant(c, np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype)), ops.Constant(c, np.array([10, 20, 30], dtype=dtype)), broadcast_dimensions=(1,)) self._ExecuteAndCompareClose( c, expected=[[[11, 22, 33], [14, 25, 36], [17, 28, 39]]]) @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in float_dtypes) def testConstantAxpy(self, dtype): c = self._NewComputation() ops.Add( ops.Mul( ops.Constant(c, dtype(2)), ops.Constant(c, np.array([2.2, 3.3, 4.4, 5.5], dtype=dtype))), ops.Constant(c, np.array([100, -100, 200, -200], dtype))) self._ExecuteAndCompareClose( c, expected=[[104.4, -93.4, 208.8, -189]], rtol=2e-3) def testCustomCall(self): if self.backend.platform != "cpu": self.skipTest("Test requires cpu platform") c = self._NewComputation() for name, fn in custom_call_for_test.cpu_custom_call_targets.items(): xla_client.register_custom_call_target(name, fn, platform="cpu") ops.CustomCallWithLayout( c, b"test_subtract_f32", operands=[ ops.Constant(c, np.float32(1.25)), ops.Constant(c, np.float32(0.5)) ], shape_with_layout=xla_client.Shape.array_shape( np.dtype(np.float32), (), ()), operand_shapes_with_layout=[ xla_client.Shape.array_shape(np.dtype(np.float32), (), ()), xla_client.Shape.array_shape(np.dtype(np.float32), (), ()), ]) self._ExecuteAndCompareClose(c, expected=[0.75]) tests.append(ComputationsWithConstantsTest) class ComputationFromProtoTest(absltest.TestCase): """Test computation execution from HLO proto.""" def setUp(self): super(ComputationFromProtoTest, self).setUp() self.backend = xla_backend() def testExecuteFromProto(self): # Build the HLO proto b = xla_client.XlaBuilder("computation") ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2))) serialized_proto = b.build().as_serialized_hlo_module_proto() # Load and execute the proto c = xla_client.XlaComputation(serialized_proto) ans, = xla_client.execute_with_python_values( self.backend.compile(c), (), backend=self.backend) np.testing.assert_equal(ans, np.int32(3)) tests.append(ComputationFromProtoTest) class ParametersTest(ComputationTest): """Tests focusing on Parameter ops and argument-passing.""" @parameterized.named_parameters({ "testcase_name": "_{}".format(dtype.__name__), "dtype": dtype, } for dtype in int_dtypes) def testScalarTimesVector(self, dtype): c = self._NewComputation() arg0 = np.array(3, dtype=dtype) arg1 = np.array([10, 15, -2, 7], dtype=dtype) p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0)) p1 = ops.Parameter(c, 1, 
    @parameterized.named_parameters({
        "testcase_name": "_{}".format(dtype.__name__),
        "dtype": dtype,
    } for dtype in float_dtypes)
    def testConstantAxpy(self, dtype):
      c = self._NewComputation()
      ops.Add(
          ops.Mul(
              ops.Constant(c, dtype(2)),
              ops.Constant(c, np.array([2.2, 3.3, 4.4, 5.5], dtype=dtype))),
          ops.Constant(c, np.array([100, -100, 200, -200], dtype)))
      self._ExecuteAndCompareClose(
          c, expected=[[104.4, -93.4, 208.8, -189]], rtol=2e-3)

    def testCustomCall(self):
      if self.backend.platform != "cpu":
        self.skipTest("Test requires cpu platform")
      c = self._NewComputation()
      for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
        xla_client.register_custom_call_target(name, fn, platform="cpu")
      ops.CustomCallWithLayout(
          c,
          b"test_subtract_f32",
          operands=[
              ops.Constant(c, np.float32(1.25)),
              ops.Constant(c, np.float32(0.5))
          ],
          shape_with_layout=xla_client.Shape.array_shape(
              np.dtype(np.float32), (), ()),
          operand_shapes_with_layout=[
              xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
              xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
          ])
      self._ExecuteAndCompareClose(c, expected=[0.75])

  tests.append(ComputationsWithConstantsTest)

  class ComputationFromProtoTest(absltest.TestCase):
    """Test computation execution from HLO proto."""

    def setUp(self):
      super(ComputationFromProtoTest, self).setUp()
      self.backend = xla_backend()

    def testExecuteFromProto(self):
      # Build the HLO proto
      b = xla_client.XlaBuilder("computation")
      ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2)))
      serialized_proto = b.build().as_serialized_hlo_module_proto()

      # Load and execute the proto
      c = xla_client.XlaComputation(serialized_proto)
      ans, = xla_client.execute_with_python_values(
          self.backend.compile(c), (), backend=self.backend)
      np.testing.assert_equal(ans, np.int32(3))

  tests.append(ComputationFromProtoTest)

  class ParametersTest(ComputationTest):
    """Tests focusing on Parameter ops and argument-passing."""

    @parameterized.named_parameters({
        "testcase_name": "_{}".format(dtype.__name__),
        "dtype": dtype,
    } for dtype in int_dtypes)
    def testScalarTimesVector(self, dtype):
      c = self._NewComputation()
      arg0 = np.array(3, dtype=dtype)
      arg1 = np.array([10, 15, -2, 7], dtype=dtype)
      p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
      p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
      ops.Mul(p0, p1)
      self._ExecuteAndCompareExact(
          c, arguments=[arg0, arg1], expected=[arg0 * arg1])

    # TODO(phawkins): test comparison harness doesn't support bfloat16
    @parameterized.named_parameters({
        "testcase_name": "_{}".format(dtype.__name__),
        "dtype": dtype,
    } for dtype in float_dtypes if dtype != bfloat16)
    def testScalarMinusVectorExplicitNumbering(self, dtype):
      # Use explicit numbering and pass parameter_num first. Sub is used since
      # it's not commutative and can help catch parameter reversal within the
      # computation.
      c = self._NewComputation()
      arg0 = np.array(2.0, dtype=dtype)
      arg1 = np.array([-2.3, 3.3, -4.3, 5.3], dtype=dtype)
      p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
      p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
      ops.Sub(p1, p0)
      self._ExecuteAndCompareClose(
          c, arguments=[arg0, arg1], expected=[arg1 - arg0])

  tests.append(ParametersTest)

  class BufferTest(ComputationTest):
    """Tests focusing on execution with Buffers."""

    def testConstantSum(self):
      c = self._NewComputation()
      ops.Add(
          ops.Constant(c, np.float32(1.11)), ops.Constant(c, np.float32(3.14)))
      self._ExecuteAndCompareClose(c, expected=[4.25])

    def testOneParameterSum(self):
      c = self._NewComputation()
      ops.Add(
          ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
          ops.Constant(c, np.float32(3.14)))
      self._ExecuteAndCompareClose(
          c, arguments=[NumpyArrayF32(1.11)], expected=[4.25])

    def testTwoParameterSum(self):
      c = self._NewComputation()
      ops.Add(
          ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
          ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0.))))
      self._ExecuteAndCompareClose(
          c,
          arguments=[NumpyArrayF32(1.11), NumpyArrayF32(3.14)],
          expected=[4.25])

    @unittest.skipIf(cloud_tpu, "not implemented")
    def testCannotCallWithDeletedBuffers(self):
      c = self._NewComputation()
      ops.Add(
          ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
          ops.Constant(c, np.float32(3.14)))
      arg = NumpyArrayF32(1.11)
      compiled_c = self.backend.compile(c.build())
      arg_buffer = self.backend.buffer_from_pyval(arg)
      arg_buffer.delete()
      with self.assertRaises(RuntimeError):
        compiled_c.execute([arg_buffer])

    def testShape(self):
      pyval = np.array([[1., 2.]], np.float32)
      local_buffer = self.backend.buffer_from_pyval(pyval)
      xla_shape = local_buffer.shape()
      self.assertEqual(xla_shape.dimensions(), (1, 2))
      self.assertEqual(np.dtype(xla_shape.element_type()),
                       np.dtype(np.float32))

    def testBlockHostUntilReadyWorks(self):
      arg = np.array([[1., 2.]], np.float32)
      arg_buffer = self.backend.buffer_from_pyval(arg)
      arg_buffer.block_host_until_ready()
      # This test merely checks that nothing goes awry when we call
      # block_host_until_ready(); it's difficult to test anything else.

    def testCopyToHost(self):
      arg0 = np.array([[1., 2.]], np.float32)
      arg1 = np.array([[3., 4.]], np.float32)
      arg0_buffer = self.backend.buffer_from_pyval(arg0)
      arg1_buffer = self.backend.buffer_from_pyval(arg1)
      # Prefetch two buffers using copy_to_host_async, and then retrieve their
      # values using to_py.
      arg0_buffer.copy_to_host_async()
      arg0_buffer.copy_to_host_async()  # Duplicate calls don't do anything.
      arg1_buffer.copy_to_host_async()
      np.testing.assert_equal(arg0, arg0_buffer.to_py())
      np.testing.assert_equal(arg1, arg1_buffer.to_py())
      # copy_to_host_async does nothing after to_py is called.
      arg0_buffer.copy_to_host_async()
      np.testing.assert_equal(arg0, arg0_buffer.to_py())
    def testDevice(self):
      x = np.arange(8, dtype=np.int32)
      for device in self.backend.local_devices():
        buf = self.backend.buffer_from_pyval(x, device=device)
        self.assertEqual(buf.device(), device)
        np.testing.assert_equal(x, buf.to_py())

  tests.append(BufferTest)
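  # Usage note for the buffer API exercised above (an illustrative sketch;
  # the variable names here are hypothetical, the methods are the ones the
  # tests call):
  #
  #   buf = backend.buffer_from_pyval(array, device=dev)  # host -> device
  #   buf.copy_to_host_async()   # optional prefetch; safe to call repeatedly
  #   host_array = buf.to_py()   # device -> host (uses the prefetch, if any)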
  class SingleOpTest(ComputationTest):
    """Tests for single ops.

    The goal here is smoke testing - to exercise the most basic functionality
    of single XLA ops. As few additional ops as possible are added around the
    op being tested.
    """

    @parameterized.named_parameters({
        "testcase_name": "_{}".format(dtype.__name__),
        "dtype": dtype,
    } for dtype in float_dtypes)
    def testConcatenate(self, dtype):
      c = self._NewComputation()
      args = (
          ops.Constant(c, np.array([1.0, 2.0, 3.0], dtype=dtype)),
          ops.Constant(c, np.array([4.0, 5.0, 6.0], dtype=dtype)),
      )
      ops.ConcatInDim(c, args, dimension=0)
      self._ExecuteAndCompareExact(
          c, expected=[np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=dtype)])

    @parameterized.named_parameters({
        "testcase_name": "_{}_{}".format(src_dtype.__name__,
                                         dst_dtype.__name__),
        "src_dtype": src_dtype,
        "dst_dtype": dst_dtype,
    } for src_dtype, dst_dtype in itertools.permutations(
        [np.bool, np.int32, np.int64, np.float32, np.float64], 2))
    def testConvertElementType(self, src_dtype, dst_dtype):
      if ((src_dtype in [np.int64, np.float64] or
           dst_dtype in [np.int64, np.float64]) and
          self.backend.platform == "tpu"):
        self.skipTest("TPU doesn't support float64")
      c = self._NewComputation()
      x = np.array([0, 1, 0, 0, 1], dtype=src_dtype)
      ops.ConvertElementType(
          ops.Constant(c, x), xla_client.dtype_to_etype(dst_dtype))

      result = xla_client.execute_with_python_values(
          self.backend.compile(c.build()), (), backend=self.backend)
      self.assertLen(result, 1)
      expected = np.array(x, dtype=dst_dtype)

      self.assertEqual(result[0].shape, expected.shape)
      self.assertEqual(result[0].dtype, expected.dtype)
      np.testing.assert_equal(result[0], expected)

    @parameterized.named_parameters(
        {
            "testcase_name": "_{}_{}".format(src_dtype.__name__,
                                             dst_dtype.__name__),
            "src_dtype": src_dtype,
            "dst_dtype": dst_dtype,
        }
        for dtypes in [[np.int32, np.float32], [np.int64, np.float64]]
        for src_dtype, dst_dtype in itertools.permutations(dtypes, 2))
    def testBitcastConvertType(self, src_dtype, dst_dtype):
      if (np.float64 in (src_dtype, dst_dtype) and
          self.backend.platform == "tpu"):
        self.skipTest("TPU doesn't support float64")
      c = self._NewComputation()
      x = np.array([0, 1, 0, 0, 1], dtype=src_dtype)
      ops.BitcastConvertType(
          ops.Constant(c, x), xla_client.dtype_to_etype(dst_dtype))

      result = xla_client.execute_with_python_values(
          self.backend.compile(c.build()), (), backend=self.backend)
      self.assertLen(result, 1)
      expected = x.view(dst_dtype)

      self.assertEqual(result[0].shape, expected.shape)
      self.assertEqual(result[0].dtype, expected.dtype)
      np.testing.assert_equal(result[0], expected)

    # TODO(b/123523486) implement AllToAll on CPU
    def DISABLED_testAllToAllOneReplica(self):
      samples = [
          NumpyArrayF32([97.0]),
          NumpyArrayF32([64.0, 117.0]),
          NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
      ]
      for lhs in samples[:1]:
        c = self._NewComputation()
        ops.AllToAll(ops.Constant(c, lhs), 0, 0)
        self._ExecuteAndCompareExact(c, expected=[lhs])

    def testCrossReplicaSumOneReplica(self):
      samples = [
          NumpyArrayF32(42.0),
          NumpyArrayF32([97.0]),
          NumpyArrayF32([64.0, 117.0]),
          NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
      ]
      for lhs in samples:
        c = self._NewComputation()
        ops.CrossReplicaSum(ops.Constant(c, lhs))
        self._ExecuteAndCompareExact(c, expected=[lhs])

    def testReplicaId(self):
      c = self._NewComputation()
      _ = ops.ReplicaId(c)
      self._ExecuteAndCompareExact(c, expected=[0])

    def testCrossReplicaSumOneReplicaWithSingletonGroup(self):
      samples = [
          NumpyArrayF32(42.0),
          NumpyArrayF32([97.0]),
          NumpyArrayF32([64.0, 117.0]),
          NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
      ]
      for lhs in samples:
        c = self._NewComputation()
        ops.CrossReplicaSum(
            ops.Constant(c, lhs), xla_client.make_replica_groups([[0]]))
        self._ExecuteAndCompareExact(c, expected=[lhs])

    # TODO(phawkins): np.dot implementation doesn't support bfloat16
    @parameterized.named_parameters({
        "testcase_name": "_{}".format(dtype.__name__),
        "dtype": dtype,
    } for dtype in float_dtypes if dtype != bfloat16)
    def testDotMatrixVector(self, dtype):
      c = self._NewComputation()
      lhs = np.array([[2.0, 3.0], [4.0, 5.0]], dtype=dtype)
      rhs = np.array([[10.0], [20.0]], dtype=dtype)
      ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
      self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])

    # TODO(phawkins): np.dot implementation doesn't support bfloat16
    @parameterized.named_parameters({
        "testcase_name": "_{}".format(dtype.__name__),
        "dtype": dtype,
    } for dtype in float_dtypes if dtype != bfloat16)
    def testDotMatrixMatrix(self, dtype):
      c = self._NewComputation()
      lhs = np.array([[2.0, 3.0], [4.0, 5.0]], dtype=dtype)
      rhs = np.array([[10.0, 20.0], [100.0, 200.0]], dtype=dtype)
      ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
      self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])

    def testDotGeneral(self):
      c = self._NewComputation()
      rng = np.random.RandomState(0)
      lhs = NumpyArrayF32(rng.randn(10, 3, 4))
      rhs = NumpyArrayF32(rng.randn(10, 4, 5))
      dimension_numbers = xla_client.make_dot_dimension_numbers(
          (([2], [1]), ([0], [0])))
      ops.DotGeneral(
          ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers)
      self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)],
                                   rtol=1e-6)

    def testDotGeneralWithDotDimensionNumbersProto(self):
      c = self._NewComputation()
      rng = np.random.RandomState(0)
      lhs = NumpyArrayF32(rng.randn(10, 3, 4))
      rhs = NumpyArrayF32(rng.randn(10, 4, 5))

      dimension_numbers = xla_client.DotDimensionNumbers()
      dimension_numbers.lhs_contracting_dimensions.append(2)
      dimension_numbers.rhs_contracting_dimensions.append(1)
      dimension_numbers.lhs_batch_dimensions.append(0)
      dimension_numbers.rhs_batch_dimensions.append(0)

      ops.DotGeneral(
          ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers)
      self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)],
                                   rtol=1e-6)

    def testDotGeneralWithPrecisionConfig(self):
      c = self._NewComputation()
      rng = np.random.RandomState(0)
      lhs = NumpyArrayF32(rng.randn(10, 3, 4))
      rhs = NumpyArrayF32(rng.randn(10, 4, 5))
      dimension_numbers = xla_client.make_dot_dimension_numbers(
          (([2], [1]), ([0], [0])))
      config = xla_client.PrecisionConfig()
      config.operand_precision.append(config.Precision.HIGH)
      config.operand_precision.append(config.Precision.HIGHEST)
      ops.DotGeneral(
          ops.Constant(c, lhs),
          ops.Constant(c, rhs),
          dimension_numbers,
          precision_config=config)
      self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)],
                                   rtol=1e-6)
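    # Reading make_dot_dimension_numbers (an illustrative note, not from the
    # original file): the argument is ((lhs_contracting, rhs_contracting),
    # (lhs_batch, rhs_batch)). The (([2], [1]), ([0], [0])) used above is a
    # batch matmul over dimension 0, i.e. the NumPy equivalent
    #
    #   np.einsum("bij,bjk->bik", lhs, rhs)   # == np.matmul(lhs, rhs)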
    def testConvGeneralDilatedF32(self):
      c = self._NewComputation()
      a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype(
          "float32")
      lhs = a(1, 1, 2, 3)
      rhs = a(1, 1, 1, 2) * 10
      strides = [1, 1]
      pads = [(1, 0), (0, 1)]
      lhs_dilation = (2, 1)
      rhs_dilation = (1, 1)
      dimension_numbers = xla_client.make_convolution_dimension_numbers(
          ("NCHW", "OIHW", "NCHW"), 2)
      ops.ConvGeneralDilated(
          ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads,
          lhs_dilation, rhs_dilation, dimension_numbers)
      result = np.array([[[
          [0., 0., 0.],
          [10., 20., 0.],
          [0., 0., 0.],
          [40., 50., 0.],
      ]]])
      self._ExecuteAndCompareClose(c, expected=[result])

    def testConvGeneralDilatedF32WithPrecisionConfig(self):
      c = self._NewComputation()
      a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype(
          "float32")
      lhs = a(1, 1, 2, 3)
      rhs = a(1, 1, 1, 2) * 10
      strides = [1, 1]
      pads = [(1, 0), (0, 1)]
      lhs_dilation = (2, 1)
      rhs_dilation = (1, 1)
      dimension_numbers = xla_client.make_convolution_dimension_numbers(
          ("NCHW", "OIHW", "NCHW"), 2)
      config = xla_client.PrecisionConfig()
      config.operand_precision.append(config.Precision.HIGHEST)
      config.operand_precision.append(config.Precision.DEFAULT)
      ops.ConvGeneralDilated(
          ops.Constant(c, lhs),
          ops.Constant(c, rhs),
          strides,
          pads,
          lhs_dilation,
          rhs_dilation,
          dimension_numbers,
          precision_config=config)
      result = np.array([[[
          [0., 0., 0.],
          [10., 20., 0.],
          [0., 0., 0.],
          [40., 50., 0.],
      ]]])
      self._ExecuteAndCompareClose(c, expected=[result])

    def testConvGeneralDilatedPermutedF32(self):
      c = self._NewComputation()
      a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype(
          "float32")
      lhs = a(1, 1, 2, 3)
      rhs = a(1, 1, 1, 2) * 10
      strides = [1, 1]
      pads = [(1, 0), (0, 1)]
      lhs_dilation = (2, 1)
      rhs_dilation = (1, 1)
      dimension_numbers = xla_client.make_convolution_dimension_numbers(
          ("NHWC", "OIHW", "CWNH"), 2)
      ops.ConvGeneralDilated(
          ops.Constant(c, np.transpose(lhs, (0, 2, 3, 1))),
          ops.Constant(c, rhs), strides, pads, lhs_dilation, rhs_dilation,
          dimension_numbers)
      result = np.array([[[[0., 0., 0.], [10., 20., 0.], [0., 0., 0.],
                           [40., 50., 0.]]]])
      self._ExecuteAndCompareClose(
          c, expected=[np.transpose(result, (1, 3, 0, 2))])

    def testConvGeneralDilatedGroupedConvolutionF32(self):
      c = self._NewComputation()
      a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype(
          "float32")
      lhs = a(1, 2, 2, 3)
      rhs = a(2, 1, 1, 2) * 10
      strides = [1, 1]
      pads = [(1, 0), (0, 1)]
      lhs_dilation = (2, 1)
      rhs_dilation = (1, 1)
      dimension_numbers = xla_client.make_convolution_dimension_numbers(
          ("NCHW", "OIHW", "NCHW"), 2)
      feature_group_count = 2
      ops.ConvGeneralDilated(
          ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads,
          lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count)
      result = np.array([[[
          [0., 0., 0.],
          [10., 20., 0.],
          [0., 0., 0.],
          [40., 50., 0.],
      ], [
          [0., 0., 0.],
          [330., 380., 160.],
          [0., 0., 0.],
          [480., 530., 220.],
      ]]])
      self._ExecuteAndCompareClose(c, expected=[result])
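    # Output spatial size for ConvGeneralDilated (an illustrative sketch of
    # the usual convolution shape rule; not code from the original file):
    #
    #   dilated_in = (in - 1) * lhs_dilation + 1
    #   dilated_k  = (k - 1) * rhs_dilation + 1
    #   out = (dilated_in + pad_lo + pad_hi - dilated_k) // stride + 1
    #
    # For the height axis of the first conv test above: in=2, lhs_dilation=2
    # gives dilated_in=3, k=1, pads=(1, 0), stride=1, so
    # out = (3 + 1 + 0 - 1) // 1 + 1 = 4 rows, matching the 4x3 expected
    # result.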
    def testBooleanNot(self):
      c = self._NewComputation()
      arr = NumpyArrayBool([True, False, True])
      ops.Not(ops.Constant(c, arr))
      self._ExecuteAndCompareClose(c, expected=[~arr])

    def testPopulationCount(self):
      c = self._NewComputation()
      arr = NumpyArrayS32([3, 0, 1])
      ops.PopulationCount(ops.Constant(c, arr))
      self._ExecuteAndCompareClose(c, expected=[np.array([2, 0, 1])])

    def testCountLeadingZeros(self):
      c = self._NewComputation()
      arr = NumpyArrayS32([0x7FFF, 0x12345678])
      ops.Clz(ops.Constant(c, arr))
      self._ExecuteAndCompareClose(c, expected=[[17, 3]])

    def testExp(self):
      c = self._NewComputation()
      arr = NumpyArrayF32([3.3, 12.1])
      ops.Exp(ops.Constant(c, arr))
      self._ExecuteAndCompareClose(c, expected=[np.exp(arr)])

    def testExpm1(self):
      c = self._NewComputation()
      arr = NumpyArrayF32([3.3, 12.1])
      ops.Expm1(ops.Constant(c, arr))
      self._ExecuteAndCompareClose(c, expected=[np.expm1(arr)])

    def testRound(self):
      c = self._NewComputation()
      arr = NumpyArrayF32([3.3, 12.1])
      ops.Round(ops.Constant(c, arr))
      self._ExecuteAndCompareClose(c, expected=[np.round(arr)])

    def testLog(self):
      c = self._NewComputation()
      arr = NumpyArrayF32([3.3, 12.1])
      ops.Log(ops.Constant(c, arr))
      self._ExecuteAndCompareClose(c, expected=[np.log(arr)])

    def testLog1p(self):
      c = self._NewComputation()
      arr = NumpyArrayF32([3.3, 12.1])
      ops.Log1p(ops.Constant(c, arr))
      self._ExecuteAndCompareClose(c, expected=[np.log1p(arr)])

    def testNeg(self):
      c = self._NewComputation()
      arr = NumpyArrayF32([3.3, 12.1])
      ops.Neg(ops.Constant(c, arr))
      self._ExecuteAndCompareClose(c, expected=[-arr])

    def testFloor(self):
      c = self._NewComputation()
      arr = NumpyArrayF32([3.3, 12.1])
      ops.Floor(ops.Constant(c, arr))
      self._ExecuteAndCompareClose(c, expected=[np.floor(arr)])

    def testCeil(self):
      c = self._NewComputation()
      arr = NumpyArrayF32([3.3, 12.1])
      ops.Ceil(ops.Constant(c, arr))
      self._ExecuteAndCompareClose(c, expected=[np.ceil(arr)])

    def testAbs(self):
      c = self._NewComputation()
      arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
      ops.Abs(ops.Constant(c, arr))
      self._ExecuteAndCompareClose(c, expected=[np.abs(arr)])

    def testTanh(self):
      c = self._NewComputation()
      arr = NumpyArrayF32([3.3, 12.1])
      ops.Tanh(ops.Constant(c, arr))
      self._ExecuteAndCompareClose(c, expected=[np.tanh(arr)])

    def testTranspose(self):

      def _TransposeAndTest(array, permutation):
        c = self._NewComputation()
        ops.Transpose(ops.Constant(c, array), permutation)
        expected = np.transpose(array, permutation)
        self._ExecuteAndCompareClose(c, expected=[expected])

      _TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
      _TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
      _TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
      _TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])

      arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
      for permutation in itertools.permutations(range(arr.ndim)):
        _TransposeAndTest(arr, permutation)
        _TransposeAndTest(np.asfortranarray(arr), permutation)

    def testEq(self):
      c = self._NewComputation()
      ops.Eq(
          ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])),
          ops.Constant(c, NumpyArrayS32([4, 2, 3, 1])))
      self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]])

    def testNe(self):
      c = self._NewComputation()
      ops.Ne(
          ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])),
          ops.Constant(c, NumpyArrayS32([4, 2, 3, 1])))
      self._ExecuteAndCompareExact(c, expected=[[True, False, False, True]])

      ops.Ne(
          ops.Constant(c, NumpyArrayF32([-2.0, 0.0,
                                         float("nan"),
                                         float("nan")])),
          ops.Constant(c, NumpyArrayF32([2.0, -0.0, 1.0, float("nan")])))
      self._ExecuteAndAssertWith(
          np.testing.assert_allclose,
          c, (),
          expected=[[True, False, True, True]])

    def testGt(self):
      c = self._NewComputation()
      ops.Gt(
          ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
          ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
      self._ExecuteAndCompareExact(
          c, expected=[[False, True, True, False, False]])

    def testGe(self):
      c = self._NewComputation()
      ops.Ge(
          ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
          ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
      self._ExecuteAndCompareExact(
          c, expected=[[True, True, True, False, False]])

    def testLt(self):
      c = self._NewComputation()
      ops.Lt(
          ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
          ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
      self._ExecuteAndCompareExact(
          c, expected=[[False, False, False, True, True]])

    def testLe(self):
      c = self._NewComputation()
      ops.Le(
          ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
          ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
      self._ExecuteAndCompareExact(
          c, expected=[[True, False, False, True, True]])

    def testMax(self):
      c = self._NewComputation()
      ops.Max(
          ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
          ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
      self._ExecuteAndCompareExact(c, expected=[[1.0, 2.0, 3.0, 7.0, 12.0]])
    def testMaxExplicitBroadcastDim0(self):
      c = self._NewComputation()
      ops.Max(
          ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
          ops.Constant(c, NumpyArrayF32([3, 4, 5])),
          broadcast_dimensions=(0,))
      self._ExecuteAndCompareExact(
          c, expected=[[[3, 3, 3], [4, 5, 6], [7, 8, 9]]])

    def testMaxExplicitBroadcastDim1(self):
      c = self._NewComputation()
      ops.Max(
          ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
          ops.Constant(c, NumpyArrayF32([3, 4, 5])),
          broadcast_dimensions=(1,))
      self._ExecuteAndCompareExact(
          c, expected=[[[3, 4, 5], [4, 5, 6], [7, 8, 9]]])

    def testMin(self):
      c = self._NewComputation()
      ops.Min(
          ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
          ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
      self._ExecuteAndCompareExact(c, expected=[[1.0, 0.0, 2.0, 4.0, 9.0]])

    def testPad(self):
      c = self._NewComputation()
      ops.Pad(
          ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
          ops.Constant(c, NumpyArrayF32(0.0)),
          xla_client.make_padding_config([(1, 2, 1), (0, 1, 0)]))
      self._ExecuteAndCompareClose(
          c,
          expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
                     [3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])

    def testPadWithPaddingConfig(self):
      c = self._NewComputation()
      padding_config = xla_client.PaddingConfig()
      for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
        dimension = xla_client.PaddingConfigDimension()
        dimension.edge_padding_low = lo
        dimension.edge_padding_high = hi
        dimension.interior_padding = interior
        padding_config.dimensions.append(dimension)
      ops.Pad(
          ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
          ops.Constant(c, NumpyArrayF32(0.0)), padding_config)
      self._ExecuteAndCompareClose(
          c,
          expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
                     [3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])
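    # Padded-size arithmetic for make_padding_config (an illustrative note):
    # each (lo, hi, interior) triple gives
    #
    #   out = lo + hi + n + (n - 1) * interior
    #
    # so (1, 2, 1) on a length-2 axis gives 1 + 2 + 2 + 1 = 6 rows and
    # (0, 1, 0) on a length-2 axis gives 3 columns, matching the 6x3 expected
    # arrays in the two Pad tests above.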
    def testReshape(self):
      c = self._NewComputation()
      ops.Reshape(
          ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
          dimensions=[0, 1],
          new_sizes=[2, 3])
      self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [4, 5, 6]]])

    def testCollapse(self):
      c = self._NewComputation()
      ops.Collapse(
          ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7,
                                                                     8]]])),
          dimensions=[1, 2])
      self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3, 4], [5, 6, 7, 8]]])

    def testRev(self):
      c = self._NewComputation()
      ops.Rev(
          ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7,
                                                                     8]]])),
          dimensions=[0, 2])
      self._ExecuteAndCompareExact(
          c, expected=[[[[6, 5], [8, 7]], [[2, 1], [4, 3]]]])

    def testReducePrecision(self):
      c = self._NewComputation()
      ops.ReducePrecision(
          ops.Constant(c, NumpyArrayF32([float.fromhex("0x1.32fffep-3")])),
          exponent_bits=8,
          mantissa_bits=7)
      self._ExecuteAndCompareClose(c, expected=[[float.fromhex("0x1.32p-3")]])

    def testClampF32(self):
      c = self._NewComputation()
      ops.Clamp(
          ops.Constant(c, NumpyArrayF32(-1)),
          ops.Constant(c, NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
          ops.Constant(c, NumpyArrayF32(2)))
      self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]])

    def testClampS32(self):
      c = self._NewComputation()
      ops.Clamp(
          ops.Constant(c, NumpyArrayS32(-1)),
          ops.Constant(c, NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
          ops.Constant(c, NumpyArrayS32(2)))
      self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]])

    def testSelect(self):
      c = self._NewComputation()
      ops.Select(
          ops.Constant(c, NumpyArrayBool([True, False, False, True, False])),
          ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 5])),
          ops.Constant(c, NumpyArrayS32([-1, -2, -3, -4, -5])))
      self._ExecuteAndCompareExact(c, expected=[[1, -2, -3, 4, -5]])

    def testSlice(self):
      c = self._NewComputation()
      ops.Slice(
          ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
          [1, 0], [3, 2], [1, 1])
      self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]])

    def testSliceInDim(self):
      c = self._NewComputation()
      ops.SliceInDim(
          ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
          start_index=1,
          limit_index=2,
          stride=1,
          dimno=1)
      self._ExecuteAndCompareExact(c, expected=[[[2], [5], [8]]])
      ops.SliceInDim(
          ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
          start_index=0,
          limit_index=3,
          stride=2,
          dimno=0)
      self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [7, 8, 9]]])

    def testDynamicSlice(self):
      c = self._NewComputation()
      ops.DynamicSlice(
          ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
          [ops.Constant(c, NumpyArrayS32([1, 0]))], [2, 2])
      self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]])

    def testDynamicUpdateSlice(self):
      c = self._NewComputation()
      ops.DynamicUpdateSlice(
          ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
          ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4]])),
          [ops.Constant(c, NumpyArrayS32([1, 1]))])
      self._ExecuteAndCompareExact(
          c, expected=[[[1, 2, 3], [4, 1, 2], [7, 3, 4]]])

    def testTuple(self):
      c = self._NewComputation()
      ops.Tuple(c, [
          ops.Constant(c, np.int32(42)),
          ops.Constant(c, NumpyArrayF32([1.0, 2.0])),
          ops.Constant(c, NumpyArrayBool([True, False, False, True]))
      ])
      result = xla_client.execute_with_python_values(
          self.backend.compile(c.build()), (), backend=self.backend)
      self.assertLen(result, 3)
      np.testing.assert_equal(result[0], 42)
      np.testing.assert_allclose(result[1], [1.0, 2.0])
      np.testing.assert_equal(result[2], [True, False, False, True])

    def testGetTupleElement(self):
      c = self._NewComputation()
      ops.GetTupleElement(
          ops.Tuple(c, [
              ops.Constant(c, np.int32(42)),
              ops.Constant(c, NumpyArrayF32([1.0, 2.0])),
              ops.Constant(c, NumpyArrayBool([True, False, False, True]))
          ]), 1)
      self._ExecuteAndCompareClose(c, expected=[[1.0, 2.0]])

    def testBroadcast(self):
      c = self._NewComputation()
      ops.Broadcast(
          ops.Constant(c, NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
      self._ExecuteAndCompareExact(
          c, expected=[[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30,
                                                             40]]])

    def testBroadcastInDim(self):
      c = self._NewComputation()
      ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [0])
      self._ExecuteAndCompareExact(c, expected=[[[1, 1], [2, 2]]])
      ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [1])
      self._ExecuteAndCompareExact(c, expected=[[[1, 2], [1, 2]]])

    def testRngNormal(self):
      shape = (2, 3)
      c = self._NewComputation()
      ops.RngNormal(
          ops.Constant(c, NumpyArrayF32(0.)),
          ops.Constant(c, NumpyArrayF32(1.)),
          shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32,
                                             shape))
      result = xla_client.execute_with_python_values(
          self.backend.compile(c.build()), (), backend=self.backend)
      # since the result is random, we just check shape and uniqueness
      self.assertLen(result, 1)
      self.assertEqual(result[0].shape, shape)
      self.assertLen(np.unique(result[0]), np.prod(shape))
    def testRngUniformF32(self):
      lo, hi = 2., 4.
      shape = (2, 3)
      c = self._NewComputation()
      ops.RngUniform(
          ops.Constant(c, NumpyArrayF32(lo)),
          ops.Constant(c, NumpyArrayF32(hi)),
          shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32,
                                             shape))
      result = xla_client.execute_with_python_values(
          self.backend.compile(c.build()), (), backend=self.backend)
      # since the result is random, we just check shape, uniqueness, and range
      self.assertLen(result, 1)
      self.assertEqual(result[0].shape, shape)
      self.assertLen(np.unique(result[0]), np.prod(shape))
      self.assertTrue(np.all(lo <= result[0]))
      self.assertTrue(np.all(result[0] < hi))

    def testRngUniformS32(self):
      lo, hi = 2, 4
      shape = (2, 3)
      c = self._NewComputation()
      ops.RngUniform(
          ops.Constant(c, NumpyArrayS32(lo)),
          ops.Constant(c, NumpyArrayS32(hi)),
          shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.S32,
                                             shape))
      result = xla_client.execute_with_python_values(
          self.backend.compile(c.build()), (), backend=self.backend)
      # since the result is random, we just check shape, integrality, and
      # range
      self.assertLen(result, 1)
      self.assertEqual(result[0].shape, shape)
      self.assertEqual(result[0].dtype, np.int32)
      self.assertTrue(np.all(lo <= result[0]))
      self.assertTrue(np.all(result[0] < hi))

    def testCholesky(self):
      l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]],
                   dtype=np.float32)
      c = self._NewComputation()
      ops.Cholesky(ops.Constant(c, np.tril(np.dot(l, l.T))))
      self._ExecuteAndCompareClose(c, expected=[l], rtol=1e-4)

    def testSort(self):
      keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
      c = self._NewComputation()
      ops.Sort(c, [ops.Constant(c, keys)], is_stable=True)
      self._ExecuteAndCompareClose(
          c,
          expected=[np.array([[1, 2, 3, 4], [1, 2, 3, 4]], dtype=np.float32)])

    def testSortKeyVal(self):
      keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
      values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
      c = self._NewComputation()
      ops.Sort(c, (ops.Constant(c, keys), ops.Constant(c, values)),
               dimension=0)
      result = xla_client.execute_with_python_values(
          self.backend.compile(c.build()), (), backend=self.backend)
      self.assertLen(result, 2)
      np.testing.assert_allclose(result[0], [[2, 1, 1, 2], [3, 4, 4, 3]])
      np.testing.assert_equal(result[1], [[0, 5, 2, 7], [4, 1, 6, 3]])

    def testSortCustomComparator(self):
      b = self._NewComputation("comparator")
      p0 = ops.Parameter(b, 0, xla_client.shape_from_pyval(NumpyArrayF32(0)))
      q0 = ops.Parameter(b, 1, xla_client.shape_from_pyval(NumpyArrayF32(0)))
      p1 = ops.Parameter(b, 2, xla_client.shape_from_pyval(NumpyArrayS32(0)))
      q1 = ops.Parameter(b, 3, xla_client.shape_from_pyval(NumpyArrayS32(0)))
      ops.Or(ops.Lt(p0, q0), ops.And(ops.Eq(p0, q0), ops.Gt(p1, q1)))
      comparator = b.build()

      keys = np.array([[2, 3, 1, 3], [3, 1, 2, 2]], dtype=np.float32)
      values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
      c = self._NewComputation()
      ops.Sort(
          c, (ops.Constant(c, keys), ops.Constant(c, values)),
          dimension=1,
          comparator=comparator)
      result = xla_client.execute_with_python_values(
          self.backend.compile(c.build()), (), backend=self.backend)
      self.assertLen(result, 2)
      np.testing.assert_allclose(result[0], [[1, 2, 3, 3], [1, 2, 2, 3]])
      np.testing.assert_equal(result[1], [[2, 0, 3, 1], [5, 7, 6, 4]])
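    # Shape of a Sort comparator (an illustrative note, not from the original
    # file): for N sorted operands the comparator takes 2*N scalar
    # parameters, ordered (lhs_0, rhs_0, lhs_1, rhs_1, ...), and returns a
    # PRED that is True when the lhs tuple should sort before the rhs tuple.
    # The comparator built in testSortCustomComparator above sorts by key
    # ascending (p0 < q0) and breaks ties by value descending (p1 > q1).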
    def testQR(self):
      a = np.array(
          [[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
           [10, 63, 166, 310]],
          dtype=np.float32)
      c = self._NewComputation()
      ops.Tuple(c, ops.QR(ops.Constant(c, a), full_matrices=True))
      q, r = self._Execute(c, ())
      np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4)

    def testEigh(self):
      a = np.array(
          [[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
           [10, 63, 166, 310]],
          dtype=np.float32)
      a = (a + a.T) / 2

      c = self._NewComputation()
      ops.Tuple(c, ops.Eigh(ops.Constant(c, a), lower=True))
      # TODO(b/129396575): Turn this test back on when it passes without
      # fastmath.
      # v, w = self._Execute(c, ())
      # self.assertLess(np.linalg.norm(np.dot(a, v) - w * v), 1e-3)

    def testSVD(self):
      a = np.array(
          [[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
           [10, 63, 166, 310]],
          dtype=np.float32)
      c = self._NewComputation()
      ops.Tuple(c, ops.SVD(ops.Constant(c, a)))
      u, d, v = self._Execute(c, ())
      self.assertLess(np.linalg.norm(a - np.matmul(u * d, v.T)), 1e-3)

    def testTriangularSolve(self):
      a_vals = np.array(
          [[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]],
          dtype=np.float32)
      b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
                        dtype=np.float32)

      c = self._NewComputation()
      ops.TriangularSolve(
          ops.Constant(c, a_vals),
          ops.Constant(c, b_vals),
          left_side=False,
          lower=True,
          transpose_a=ops.TriangularSolveOptions_Transpose.TRANSPOSE,
          unit_diagonal=False)
      self._ExecuteAndCompareClose(
          c,
          expected=[
              np.array([
                  [0.5, 0.08333334, 0.04629629, 0.03367003],
                  [2.5, -0.25, -0.1388889, -0.1010101],
                  [4.5, -0.58333331, -0.32407406, -0.23569024],
              ],
                       dtype=np.float32)
          ],
          rtol=1e-4)

    def testIsConstant(self):
      c = self._NewComputation()
      a = ops.Constant(c, np.int32(3))
      b = ops.Constant(c, np.int32(1))
      x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayS32(0)))
      const_expr = ops.Sub(b, a)
      non_const_expr = ops.Mul(const_expr, x)
      self.assertTrue(c.is_constant(const_expr))
      self.assertFalse(c.is_constant(non_const_expr))

    def testGather(self):
      a = np.arange(9).astype(np.int32).reshape((3, 3))
      indices = np.array([[[0, 2], [2, 1]], [[1, 2], [2, 0]]], dtype=np.int32)
      dnums = xla_client.GatherDimensionNumbers()
      dnums.offset_dims.append(1)
      dnums.offset_dims.append(2)
      dnums.start_index_map.append(0)
      dnums.start_index_map.append(1)
      dnums.index_vector_dim = 2
      c = self._NewComputation()
      ops.Gather(
          ops.Constant(c, a),
          ops.Constant(c, indices),
          dnums,
          slice_sizes=[1, 1])
      g, = self._Execute(c, ())
      expected = np.array([[[[2, 7]]], [[[5, 6]]]], dtype=np.int32)
      np.testing.assert_allclose(g, expected, rtol=1e-4)

    def testFft(self):
      if self.backend.platform == "tpu":
        self.skipTest("TPU only supports 1D FFT")
      shape = [2, 3, 4, 5]
      rng = np.random.RandomState(0)
      a = rng.randn(*shape) + 1.0j * rng.randn(*shape)
      a = a.astype(np.complex64)
      # FFT
      c = self._NewComputation()
      ops.Fft(ops.Constant(c, a), xla_client.FftType.FFT, shape[-3:])
      self._ExecuteAndCompareClose(
          c, expected=[np.fft.fftn(a, axes=(1, 2, 3))], rtol=1e-4)
      # IFFT
      c = self._NewComputation()
      ops.Fft(ops.Constant(c, a), xla_client.FftType.IFFT, shape[-3:])
      self._ExecuteAndCompareClose(
          c, expected=[np.fft.ifftn(a, axes=(1, 2, 3))], rtol=1e-4)
      # RFFT
      b = rng.randn(*shape).astype(np.float32)
      c = self._NewComputation()
      ops.Fft(ops.Constant(c, b), xla_client.FftType.RFFT, shape[-3:])
      self._ExecuteAndCompareClose(
          c, expected=[np.fft.rfftn(b, axes=(1, 2, 3))], rtol=1e-4)
      # IRFFT
      c = self._NewComputation()
      ops.Fft(ops.Constant(c, a), xla_client.FftType.IRFFT, [3, 4, 8])
      self._ExecuteAndCompareClose(
          c, expected=[np.fft.irfftn(a, axes=(1, 2, 3))], rtol=1e-4)

    def testNextAfter(self):
      c = self._NewComputation()
      ops.NextAfter(
          ops.Constant(c, np.array([1, 2], dtype=np.float32)),
          ops.Constant(c, np.array([2, 1], dtype=np.float32)))
      out, = self._Execute(c, ())
      eps = np.finfo(np.float32).eps
      np.testing.assert_equal(
          np.array([eps + 1, 2 - eps], dtype=np.float32), out)
    @parameterized.named_parameters({
        "testcase_name": "_{}".format(dtype.__name__),
        "dtype": dtype,
    } for dtype in float_dtypes)
    def testRegularizedIncompleteBeta(self, dtype):
      x = np.array([0.53787335, 0.24015466, 0.47494545, 0.13567594,
                    0.95114538],
                   dtype=dtype)
      a = np.array([0.00753073, 0.34813385, 0.30485708, 1.29298632,
                    0.51472606],
                   dtype=dtype)
      b = np.array([0.55688389, 0.59794214, 0.42661022, 1.59748339,
                    0.95047677],
                   dtype=dtype)
      c = self._NewComputation()
      ops.RegularizedIncompleteBeta(
          ops.Constant(c, a), ops.Constant(c, b), ops.Constant(c, x))
      expected = np.array(
          [0.98923271, 0.48575411, 0.57952568, 0.12579775, 0.96989155])
      self._ExecuteAndCompareClose(c, expected=[expected], rtol=2e-2)

  tests.append(SingleOpTest)

  class EmbeddedComputationsTest(ComputationTest):
    """Tests for XLA graphs with embedded computations (such as maps)."""

    def _CreateConstantComputation(self, in_dtype, out_dtype):
      """Computation (A) -> B that returns a constant 1 for any input."""
      c = self._NewComputation("constant_{}_{}_one".format(
          in_dtype.__name__, out_dtype.__name__))
      ops.Parameter(c, 0,
                    xla_client.shape_from_pyval(np.array(0, dtype=in_dtype)))
      ops.Constant(c, out_dtype(1))
      return c.build()

    def _CreateMulBy2Computation(self, dtype):
      """Computation (dtype) -> dtype that multiplies its parameter by 2."""
      c = self._NewComputation("mul_f32_by2")
      ops.Mul(
          ops.Parameter(
              c, 0,
              xla_client.shape_from_pyval(np.array(
                  0, dtype=dtype)).with_major_to_minor_layout_if_absent()),
          ops.Constant(c, dtype(2.0)))
      return c.build()

    def _CreateMulF32ByParamComputation(self):
      """Computation (f32) -> f32 that multiplies one parameter by the other."""
      c = self._NewComputation("mul_f32_by_param")
      ops.Mul(
          ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0))),
          ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0))))
      return c.build()

    def _CreateBinaryAddComputation(self, dtype):
      """Computation (dtype, dtype) -> dtype that adds its two parameters."""
      c = self._NewComputation("add_param0_by_param1")
      shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
      shape = shape.with_major_to_minor_layout_if_absent()
      ops.Add(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
      return c.build()

    def _CreateBinaryGeComputation(self, dtype):
      """Computation (dtype, dtype) -> bool that tests param0 >= param1."""
      c = self._NewComputation("param0_ge_param1")
      shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
      shape = shape.with_major_to_minor_layout_if_absent()
      ops.Ge(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
      return c.build()

    def _MakeSample3DArray(self, dtype):
      return np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
                       [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]],
                      dtype=dtype)

    @parameterized.named_parameters({
        "testcase_name": "_{}".format(dtype.__name__),
        "dtype": dtype,
    } for dtype in float_dtypes)
    def testCall(self, dtype):
      c = self._NewComputation()
      ops.Call(
          c,
          self._CreateMulBy2Computation(dtype),
          operands=(ops.Constant(c, dtype(5.0)),))
      self._ExecuteAndCompareClose(c, expected=[10.0])

    @parameterized.named_parameters({
        "testcase_name": "_{}_{}".format(in_dtype.__name__,
                                         out_dtype.__name__),
        "in_dtype": in_dtype,
        "out_dtype": out_dtype,
    } for in_dtype, out_dtype in [[np.float32, np.int32]])
    def testMapEachElementToConstant(self, in_dtype, out_dtype):
      c = self._NewComputation()
      ops.Map(c,
              [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0],
                                        dtype=in_dtype))],
              self._CreateConstantComputation(in_dtype, out_dtype), [0])
      self._ExecuteAndCompareExact(c, expected=[[1, 1, 1, 1]])
    @parameterized.named_parameters({
        "testcase_name": "_{}".format(dtype.__name__),
        "dtype": dtype,
    } for dtype in float_dtypes)
    def testMapMulBy2(self, dtype):
      if dtype == np.float64 and self.backend.platform == "tpu":
        self.skipTest("TPU doesn't support float64")
      c = self._NewComputation()
      ops.Map(c,
              [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))],
              self._CreateMulBy2Computation(dtype), [0])
      self._ExecuteAndCompareClose(c, expected=[[2.0, 4.0, 6.0, 8.0]])

    @parameterized.named_parameters({
        "testcase_name": "_{}".format(dtype.__name__),
        "dtype": dtype,
    } for dtype in float_dtypes)
    def testSimpleMapChain(self, dtype):
      if dtype == np.float64 and self.backend.platform == "tpu":
        self.skipTest("TPU doesn't support float64")
      # Chains a map of constant-out with a map of mul-by-2
      c = self._NewComputation()
      const = ops.Map(
          c, [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))],
          self._CreateConstantComputation(dtype, dtype), [0])
      ops.Map(c, [const], self._CreateMulBy2Computation(dtype), [0])
      self._ExecuteAndCompareClose(c, expected=[[2.0, 2.0, 2.0, 2.0]])

    # TODO(b/154752816): bfloat16 crashes in evaluator.
    @parameterized.named_parameters({
        "testcase_name": "_{}".format(dtype.__name__),
        "dtype": dtype,
    } for dtype in float_dtypes if dtype != bfloat16)
    def testDivVectorsWithMap(self, dtype):

      def DivComputation():
        c = self._NewComputation("div_param0_by_param1")
        shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
        ops.Div(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
        return c.build()

      c = self._NewComputation()
      ops.Map(c, (ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0],
                                           dtype=dtype)),
                  ops.Constant(c, np.array([5.0, 5.0, 4.0, 4.0],
                                           dtype=dtype))),
              DivComputation(), [0])
      self._ExecuteAndCompareClose(
          c, expected=[[0.2, 0.4, 0.75, 1.0]], rtol=1e-3)

    @parameterized.named_parameters({
        "testcase_name": "_{}".format(dtype.__name__),
        "dtype": dtype,
    } for dtype in float_dtypes)
    def testSelectAndScatter(self, dtype):
      if dtype == np.float64 and self.backend.platform == "tpu":
        self.skipTest("TPU doesn't support float64")
      c = self._NewComputation()
      operand = ops.Constant(
          c, np.array([[1., 2., 6.], [4., 5., 3.]], dtype=dtype))
      window_dimensions = (2, 1)
      window_strides = (1, 2)
      padding = xla_client.window_padding_type_to_pad_values(
          xla_client.PaddingType.VALID,
          c.get_shape(operand).dimensions(), window_dimensions,
          window_strides)
      ops.SelectAndScatterWithGeneralPadding(
          operand,
          select=self._CreateBinaryGeComputation(dtype),
          window_dimensions=window_dimensions,
          window_strides=window_strides,
          padding=padding,
          source=ops.Constant(c, np.array([[0.1, 0.2]], dtype=dtype)),
          init_value=ops.Constant(c, np.array(1, dtype=dtype)),
          scatter=self._CreateBinaryAddComputation(dtype))
      self._ExecuteAndCompareClose(
          c, expected=[[[1., 1., 1.2], [1.1, 1., 1.]]], rtol=5e-3)

    @parameterized.named_parameters({
        "testcase_name": "_{}".format(dtype.__name__),
        "dtype": dtype,
    } for dtype in float_dtypes)
    def testReduce1DtoScalar(self, dtype):
      c = self._NewComputation()
      ops.Reduce(
          c,
          operands=[
              ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))
          ],
          init_values=[ops.Constant(c, dtype(0))],
          computation=self._CreateBinaryAddComputation(dtype),
          dimensions_to_reduce=[0])
      self._ExecuteAndCompareClose(c, expected=[10])
    # TODO(phawkins): test comparison harness doesn't support bfloat16
    @parameterized.named_parameters({
        "testcase_name": "_{}_dim{}".format(dtype.__name__, dim),
        "dtype": dtype,
        "dim": dim,
    } for dtype in float_dtypes if dtype != bfloat16 for dim in range(2))
    def testReduce2DTo1D(self, dtype, dim):
      input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
      c = self._NewComputation()
      ops.Reduce(
          c,
          operands=[ops.Constant(c, input_array)],
          init_values=[ops.Constant(c, dtype(0))],
          computation=self._CreateBinaryAddComputation(dtype),
          dimensions_to_reduce=[dim])
      self._ExecuteAndCompareClose(c,
                                   expected=[np.sum(input_array, axis=dim)])

    @parameterized.named_parameters({
        "testcase_name": "_{}_dims[{}]".format(dtype.__name__, dims),
        "dtype": dtype,
        "dims": tuple(dims)
    } for dtype in float_dtypes for dims in itertools.permutations(range(3)))
    def testReduce3DAllPossibleWaysF32(self, dtype, dims):
      input_array = self._MakeSample3DArray(dtype)
      c = self._NewComputation()
      ops.Reduce(
          c,
          operands=[ops.Constant(c, input_array)],
          init_values=[ops.Constant(c, dtype(0))],
          computation=self._CreateBinaryAddComputation(dtype),
          dimensions_to_reduce=dims)
      self._ExecuteAndCompareClose(c,
                                   expected=[np.sum(input_array, axis=dims)])

    @parameterized.named_parameters({
        "testcase_name": "_{}".format(dtype.__name__),
        "dtype": dtype,
    } for dtype in float_dtypes)
    def testReduceWindowValidUnitStrides(self, dtype):
      if dtype == np.float64 and self.backend.platform == "tpu":
        self.skipTest("TPU doesn't support float64")
      input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
      c = self._NewComputation()
      window_dimensions = (2, 1)
      window_strides = (1, 1)
      padding = xla_client.window_padding_type_to_pad_values(
          xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
          window_strides)
      ops.ReduceWindowWithGeneralPadding(
          operand=ops.Constant(c, input_array),
          init_value=ops.Constant(c, dtype(0)),
          computation=self._CreateBinaryAddComputation(dtype),
          window_dimensions=window_dimensions,
          window_strides=window_strides,
          base_dilations=[],
          window_dilations=[],
          padding=padding)
      self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.]]])

    @parameterized.named_parameters({
        "testcase_name": "_{}".format(dtype.__name__),
        "dtype": dtype,
    } for dtype in float_dtypes)
    def testReduceWindowSameUnitStrides(self, dtype):
      if dtype == np.float64 and self.backend.platform == "tpu":
        self.skipTest("TPU doesn't support float64")
      input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
      c = self._NewComputation()
      window_dimensions = (2, 1)
      window_strides = (1, 1)
      padding = xla_client.window_padding_type_to_pad_values(
          xla_client.PaddingType.SAME, input_array.shape, window_dimensions,
          window_strides)
      ops.ReduceWindowWithGeneralPadding(
          operand=ops.Constant(c, input_array),
          init_value=ops.Constant(c, dtype(0)),
          computation=self._CreateBinaryAddComputation(dtype),
          window_dimensions=window_dimensions,
          window_strides=window_strides,
          base_dilations=[],
          window_dilations=[],
          padding=padding)
      self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.], [4., 5., 6.]]])

    @parameterized.named_parameters({
        "testcase_name": "_{}".format(dtype.__name__),
        "dtype": dtype,
    } for dtype in float_dtypes)
    def testReduceWindowValidGeneralStrides(self, dtype):
      if dtype == np.float64 and self.backend.platform == "tpu":
        self.skipTest("TPU doesn't support float64")
      input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
      c = self._NewComputation()
      window_dimensions = (2, 1)
      window_strides = (1, 2)
      padding = xla_client.window_padding_type_to_pad_values(
          xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
          window_strides)
      ops.ReduceWindowWithGeneralPadding(
          operand=ops.Constant(c, input_array),
          init_value=ops.Constant(c, dtype(0)),
          computation=self._CreateBinaryAddComputation(dtype),
          window_dimensions=window_dimensions,
          window_strides=window_strides,
          base_dilations=[],
          window_dilations=[],
          padding=padding)
      self._ExecuteAndCompareClose(c, expected=[[[5., 9.]]])
    @parameterized.named_parameters({
        "testcase_name": "_{}".format(dtype.__name__),
        "dtype": dtype,
    } for dtype in float_dtypes)
    def testWhile(self, dtype):

      def LessThan10Cond():
        c = self._NewComputation("test_lt_10")
        shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
        ops.Lt(ops.Parameter(c, 0, shape), ops.Constant(c, dtype(10.)))
        return c.build()

      cond = LessThan10Cond()
      body = self._CreateMulBy2Computation(dtype)
      c = self._NewComputation()
      init = ops.Constant(c, dtype(1.))
      ops.While(cond, body, init)
      self._ExecuteAndCompareClose(c, expected=[16.])
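    # While semantics in one line (an illustrative note): given init x, XLA
    # evaluates `while cond(x): x = body(x)` and returns the final x. The
    # test above starts at 1.0 with body x *= 2 and cond x < 10, so the trip
    # values are 1, 2, 4, 8, 16 and the loop exits with 16.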
    def testConditionalTrue(self):
      c = self._NewComputation()
      pred = ops.Constant(c, np.bool_(True))
      true_operand = ops.Constant(c, np.float32(3.))
      true_computation = self._CreateMulBy2Computation(np.float32)
      false_operand = ops.Constant(c, np.float32(2.))
      false_computation = self._CreateConstantComputation(
          np.float32, np.float32)
      ops.Conditional(pred, true_operand, true_computation, false_operand,
                      false_computation)
      self._ExecuteAndCompareClose(c, expected=[6.])

    def testConditionalFalse(self):
      c = self._NewComputation()
      pred = ops.Constant(c, np.bool_(False))
      true_operand = ops.Constant(c, np.float32(3.))
      true_computation = self._CreateMulBy2Computation(np.float32)
      false_operand = ops.Constant(c, np.float32(2.))
      false_computation = self._CreateConstantComputation(
          np.float32, np.float32)
      ops.Conditional(pred, true_operand, true_computation, false_operand,
                      false_computation)
      self._ExecuteAndCompareClose(c, expected=[1.])

    @unittest.skipIf(cloud_tpu, "not implemented")
    def testInfeedS32Values(self):
      to_infeed = NumpyArrayS32([1, 2, 3, 4])
      c = self._NewComputation()
      ops.GetTupleElement(
          ops.InfeedWithToken(
              ops.CreateToken(c),
              xla_client.shape_from_pyval(
                  to_infeed[0]).with_major_to_minor_layout_if_absent()), 0)
      compiled_c = self.backend.compile(c.build())
      device = self.backend.local_devices()[0]
      for item in to_infeed:
        device.transfer_to_infeed(item)

      for item in to_infeed:
        result, = xla_client.execute_with_python_values(
            compiled_c, (), backend=self.backend)
        self.assertEqual(result, item)

    @unittest.skipIf(cloud_tpu, "not implemented")
    def testInfeedTuple(self):
      to_infeed = (NumpyArrayS32([1, 2, 3, 4]), NumpyArrayS32([[7], [8]]))
      c = self._NewComputation()
      ops.GetTupleElement(
          ops.InfeedWithToken(
              ops.CreateToken(c),
              xla_client.shape_from_pyval(
                  to_infeed).with_major_to_minor_layout_if_absent()), 0)
      compiled_c = self.backend.compile(c.build())
      device = self.backend.local_devices()[0]
      device.transfer_to_infeed(to_infeed)

      result = xla_client.execute_with_python_values(
          compiled_c, (), backend=self.backend)
      self.assertLen(result, 2)
      np.testing.assert_equal(result[0], to_infeed[0])
      np.testing.assert_equal(result[1], to_infeed[1])

    @unittest.skipIf(cloud_tpu, "not implemented")
    def testInfeedThenOutfeedS32(self):
      to_round_trip = NumpyArrayS32([1, 2, 3, 4])
      c = self._NewComputation()
      x_and_token = ops.InfeedWithToken(
          ops.CreateToken(c),
          xla_client.shape_from_pyval(
              to_round_trip[0]).with_major_to_minor_layout_if_absent())
      x = ops.GetTupleElement(x_and_token, 0)
      token = ops.GetTupleElement(x_and_token, 1)
      outfeed_shape = xla_client.shape_from_pyval(
          to_round_trip[0]).with_major_to_minor_layout_if_absent()
      ops.OutfeedWithToken(x, token, outfeed_shape)

      compiled_c = self.backend.compile(c.build())
      device = self.backend.local_devices()[0]

      for want in to_round_trip:
        execution = threading.Thread(target=lambda: compiled_c.execute([]))
        execution.start()
        device.transfer_to_infeed(want)
        got = device.transfer_from_outfeed(outfeed_shape)
        execution.join()
        self.assertEqual(want, got)

    def testScatter(self):
      a = np.arange(9).astype(np.int32).reshape((3, 3))
      scatter_indices = np.array([0, 2], dtype=np.int32)
      updates = np.array([[10, 20, 30], [70, 80, 90]], dtype=np.int32)

      dnums = xla_client.ScatterDimensionNumbers()
      dnums.update_window_dims.append(1)
      dnums.inserted_window_dims.append(0)
      dnums.scatter_dims_to_operand_dims.append(0)
      dnums.index_vector_dim = 1

      c = self._NewComputation()
      ops.Scatter(
          ops.Constant(c, a), ops.Constant(c, scatter_indices),
          ops.Constant(c, updates), self._CreateBinaryAddComputation(np.int32),
          dnums)
      expected = np.array([[10, 21, 32], [3, 4, 5], [76, 87, 98]],
                          dtype=np.int32)
      self._ExecuteAndCompareClose(c, expected=[expected])

  class ErrorTest(ComputationTest):

    def setUp(self):
      super(ErrorTest, self).setUp()
      self.f32_scalar_2 = NumpyArrayF32(2.0)
      self.s32_scalar_2 = NumpyArrayS32(2)

    def testCompileWithWrongElementTypeInLayout(self):
      c = self._NewComputation()
      c.set_op_metadata(xla_client.CurrentSourceInfoMetadata())
      ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s32_scalar_2))
      c.clear_op_metadata()

      options = xla_client.CompileOptions()
      options.argument_layouts = [
          xla_client.Shape.array_shape(np.dtype(np.float32), [])
      ]

      def TestFun():
        return self.backend.compile(c.build(), compile_options=options)

      self.assertRaisesRegex(
          RuntimeError, r".*Invalid argument shape.*"
          r"expected s32\[\], got f32\[\].*", TestFun)

    def testInvokeWithWrongElementType(self):
      c = self._NewComputation()
      c.set_op_metadata(xla_client.CurrentSourceInfoMetadata())
      ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s32_scalar_2))
      c.clear_op_metadata()

      def TestFun():
        return xla_client.execute_with_python_values(
            self.backend.compile(c.build()), [self.f32_scalar_2], self.backend)

      self.assertRaisesRegex(
          RuntimeError, r"Invalid argument: Argument does not match.*"
          r"want s32\[\], got f32\[\].*", TestFun)

  tests.append(EmbeddedComputationsTest)

  class ComputationRootTest(ComputationTest):
    """Tests related to setting the root of the computation."""

    def testComputationRootDifferentFromLastOp(self):
      c = self._NewComputation()
      x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(2.0)))
      result = ops.Add(x, ops.Constant(c, np.float32(3.14)))
      ops.Add(result, ops.Constant(c, np.float32(1.618)))

      arg = NumpyArrayF32(1.0)
      compiled_c = self.backend.compile(c.build(result))
      ans, = xla_client.execute_with_python_values(
          compiled_c, [arg], backend=self.backend)
      np.testing.assert_allclose(ans, 4.14)

  tests.append(ComputationRootTest)

  class SetShardingTest(ComputationTest):
    """Tests related to set OpSharding."""

    def testSetSharding(self):
      c = self._NewComputation()
      sharding = xla_client.OpSharding()
      sharding.type = sharding.type.REPLICATED
      sharding.tile_assignment_dimensions.extend([1])
      sharding.tile_assignment_devices.extend([0])
      c.set_sharding(sharding)
      x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(2.0)))
      c.clear_sharding()

      result = ops.Add(x, ops.Constant(c, np.float32(3.14)))
      ops.Add(result, ops.Constant(c, np.float32(1.618)))
      arg = NumpyArrayF32(1.0)
      compiled_c = self.backend.compile(c.build(result))
      ans, = xla_client.execute_with_python_values(
          compiled_c, [arg], backend=self.backend)
      np.testing.assert_allclose(ans, 4.14)

  tests.append(SetShardingTest)
  class AliasTest(ComputationTest):

    def testSetUpAlias(self):
      c = self._NewComputation()
      p1 = ops.Parameter(
          c, 0,
          xla_client.shape_from_pyval(
              NumpyArrayF32(1.0)).with_major_to_minor_layout_if_absent())
      p2 = ops.Parameter(
          c, 1,
          xla_client.shape_from_pyval(
              NumpyArrayF32(1.0)).with_major_to_minor_layout_if_absent())
      out = ops.Add(p1, p2)
      c.setup_alias([], 0, [])
      c = c.build(out)
      if self.backend.platform != "tpu":
        with self.assertRaisesRegex(
            RuntimeError, "Buffer aliasing is not supported "
            "by XLA for non-TPU backends"):
          self.backend.compile(c)

  tests.append(AliasTest)

  testcase_shapes = [
      (),
      (1,),
      (2, 3),
      (2, 0),
      (0, 7),
      (4, 1, 2),
      (2, 1, 3),
      (2, 4, 1),
      (3, 1),
      (1, 3),
  ]

  def FormatShapeAndDtype(shape, dtype):
    return "_{}[{}]".format(np.dtype(dtype).name, ",".join(map(str, shape)))

  class DLPackTest(parameterized.TestCase):

    def setUp(self):
      super(DLPackTest, self).setUp()
      self.backend = xla_backend()
      if self.backend.platform not in ("cpu", "gpu"):
        self.skipTest("DLPack requires CPU or GPU")

    # pylint: disable=g-complex-comprehension
    @parameterized.named_parameters({
        "testcase_name": FormatShapeAndDtype(shape, dtype),
        "dtype": dtype,
        "shape": shape
    } for dtype in dlpack_dtypes for shape in testcase_shapes)
    def testRoundTrip(self, dtype, shape):
      x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
      buffer = self.backend.buffer_from_pyval(x)
      dlt = xla_client._xla.buffer_to_dlpack_managed_tensor(buffer)
      del buffer  # Free "buffer" to make sure dlt retains ownership.
      self.assertEqual(type(dlt).__name__, "PyCapsule")
      y = xla_client._xla.dlpack_managed_tensor_to_buffer(dlt, self.backend)
      np.testing.assert_array_equal(x, y.to_py())

    def testTensorsCanBeConsumedOnceOnly(self):
      x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
      buffer = self.backend.buffer_from_pyval(x)
      dlt = xla_client._xla.buffer_to_dlpack_managed_tensor(buffer)

      def ConsumeDLPackTensor():
        _ = xla_client._xla.dlpack_managed_tensor_to_buffer(dlt, self.backend)

      ConsumeDLPackTensor()
      self.assertRaisesRegex(
          RuntimeError, ".*a DLPack tensor may be consumed at most once.*",
          ConsumeDLPackTensor)

  tests.append(DLPackTest)

  class BufferProtocolTest(parameterized.TestCase):

    def setUp(self):
      super(BufferProtocolTest, self).setUp()
      self.backend = xla_backend()
      if self.backend.platform != "cpu":
        self.skipTest("Test requires CPU")

    # pylint: disable=g-complex-comprehension
    @parameterized.named_parameters({
        "testcase_name": FormatShapeAndDtype(shape, dtype),
        "dtype": dtype,
        "shape": shape
    } for dtype in standard_dtypes if dtype != bfloat16
      for shape in testcase_shapes)
    def testRoundTrip(self, dtype, shape):
      x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
      x_ptr = x.__array_interface__["data"][0]
      buffer = self.backend.buffer_from_pyval(x)
      y = np.array(buffer, copy=False)
      y_ptr = y.__array_interface__["data"][0]
      np.testing.assert_array_equal(x, y)
      # If the input was sufficiently aligned, the input and output should
      # alias.
      self.assertTrue((x_ptr & 15) != 0 or x_ptr == y_ptr)
      self.assertEqual(y_ptr, buffer.unsafe_buffer_pointer())

      buffer2 = self.backend.buffer_from_pyval(x, force_copy=True)
      z = np.array(buffer2, copy=False)
      self.assertNotEqual(x.__array_interface__["data"][0],
                          z.__array_interface__["data"][0])

    def testDeleteWithActiveView(self):
      x = np.random.randn(20, 10)
      buffer = self.backend.buffer_from_pyval(x)
      buffer_ptr = buffer.unsafe_buffer_pointer()
      y = np.array(buffer, copy=False)
      buffer.delete()
      # It is still legal to access `y`; the array view must keep it alive.
      np.testing.assert_array_equal(x, y)
      self.assertEqual(y.__array_interface__["data"][0], buffer_ptr)
  tests.append(BufferProtocolTest)

  class ProfilerTest(absltest.TestCase):

    def testTraceMe(self):
      # TODO(phawkins): These tests just check that the TraceMe context
      # manager acts like a context manager and doesn't explode. Ideally we'd
      # check that the profiler saw the traceme too.
      with xla_client.profiler.TraceMe("test1"):
        pass
      with xla_client.profiler.TraceMe("test2", foo=123):
        pass
      with self.assertRaises(ValueError):
        with xla_client.profiler.TraceMe("test3"):
          raise ValueError("test")

    @unittest.skipIf(portpicker is None, "Test requires portpicker")
    def testStartServer(self):
      port = portpicker.pick_unused_port()
      server = xla_client.profiler.start_server(port)
      del server

  tests.append(ProfilerTest)

  return tests


def InstantiateTests(globals_dict, backend_fn, test_prefix="", **kw):
  # Avoid creating a new backend per test (this causes GPU OOM, and is
  # probably inefficient).
  backend_fn = functools.lru_cache(maxsize=None)(backend_fn)
  for klass in TestFactory(backend_fn, **kw):
    test = type(test_prefix + klass.__name__, (klass,), {})
    # Clean up the qualified names of the tests to not include the test
    # factory.
    test.__qualname__ = test.__name__
    globals_dict[test.__name__] = test


if __name__ == "__main__":
  flags.DEFINE_string("backend", "cpu", "Target backend.")
  InstantiateTests(globals(),
                   lambda: xla_client.get_local_backend(FLAGS.backend))
  absltest.main()
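# Typical use of InstantiateTests from a backend-specific test module (an
# illustrative sketch mirroring the __main__ block above; the module and
# prefix names here are hypothetical):
#
#   import xla_client_test
#   xla_client_test.InstantiateTests(
#       globals(), lambda: xla_client.get_local_backend("cpu"),
#       test_prefix="Cpu")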
clone_seven_eval_mini_srcgame_add_map_bn.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

USED_DEVICES = "2,3"
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = USED_DEVICES
import sys
import threading
import time

import tensorflow as tf
from absl import app
from absl import flags
from pysc2 import maps
from pysc2.lib import stopwatch

import lib.config as C
import param as P
import mini_source_agent_add_map_bn as mini_source_agent
from mini_network_add_map_bn import MiniNetwork

# from pysc2.env import sc2_env
from lib import my_sc2_env as sc2_env
from lib.replay_buffer import Buffer
from strategy.terran_agent import DummyTerran
from strategy_env import SimulatePlatform
import unit.protoss_unit as P
import unit.terran_unit as T

from datetime import datetime
import multiprocessing as mp
import numpy as np
from logging import warning as logging

FLAGS = flags.FLAGS
flags.DEFINE_bool("training", True, "Whether to train agents.")
flags.DEFINE_bool("on_server", True, "Whether it is running on the server.")
flags.DEFINE_bool("debug_mode", True, "Whether it is in debug mode.")
flags.DEFINE_integer("num_for_update", 100, "Number of episodes for each train.")
flags.DEFINE_string("log_path", "./logs/", "Path for log.")
flags.DEFINE_string("device", USED_DEVICES, "Device for training.")

# Simple64
flags.DEFINE_string("map", "Simple64", "Name of a map to use.")
flags.DEFINE_bool("render", False, "Whether to render with pygame.")
flags.DEFINE_integer("screen_resolution", 64, "Resolution for screen feature layers.")
flags.DEFINE_integer("minimap_resolution", 64, "Resolution for minimap feature layers.")
flags.DEFINE_enum("agent_race", "P", sc2_env.races.keys(), "Agent's race.")
flags.DEFINE_enum("bot_race", "T", sc2_env.races.keys(), "Bot's race.")
flags.DEFINE_enum("difficulty", "7", sc2_env.difficulties.keys(), "Bot's strength.")
flags.DEFINE_integer("max_agent_steps", 18000, "Total agent steps.")
flags.DEFINE_integer("step_mul", 8, "Game steps per agent step.")

flags.DEFINE_bool("profile", False, "Whether to turn on code profiling.")
flags.DEFINE_bool("trace", False, "Whether to trace the code execution.")
flags.DEFINE_bool("save_replay", False, "Whether to save a replay at the end.")
flags.DEFINE_string("replay_dir", "multi-agent/", "Directory to save replays in.")

# 20200825-101942_mini
# 20200828-160609_source
flags.DEFINE_string("restore_model_path", "./model/20200901-213813_mini/", "path for restoring the model")
flags.DEFINE_bool("restore_model", True, "Whether to restore the old model")
flags.DEFINE_string("restore_from", "mini", "mini (for Thought-Game) or source (for Real game)")
flags.DEFINE_string("restore_to", "source", "mini (for Thought-Game) or source (for Real game)")
flags.DEFINE_bool("load_latest", False, "Load the latest or the best model; default is False (best)")

flags.DEFINE_integer("parallel", 10, "How many processes to run in parallel.")
flags.DEFINE_integer("thread_num", 5, "How many threads to run in each process.")
flags.DEFINE_integer("port_num", 14870, "the starting port used to create the distributed TF cluster")
flags.DEFINE_integer("max_iters", 100, "max training iterations for the RL agent")
flags.DEFINE_string("game_version", None, "game version of SC2")

flags.DEFINE_bool("freeze_head", False, "Whether to freeze the head when training agents.")
flags.DEFINE_bool("use_bn", False, "Whether to use batch_norm in training.")
flags.DEFINE_bool("use_sep_net", False, "Whether to use separate networks for the policy and value models.")
flags.DEFINE_integer("ob_space_add", 0, "Add state space from the thought game.")
game.") flags.DEFINE_integer("act_space_add", 5, "Add action space from thought game.") flags.DEFINE_bool("add_image", False, "Whether add image for input.") flags.DEFINE_bool("partial_restore", False, "Whether use partial_restore.") flags.DEFINE_string("weighted_sum_type", "AddWeight", "add weighted sum type: Add, AddWeight, AdaptiveWeight, AttentionWeight, default is AddWeight") flags.DEFINE_string("initial_type", "original", "weight initial type: original, normal, xavier, he, zero, default is original") FLAGS(sys.argv) # set the play map play_map = C.get_map_class('lib.config.' + FLAGS.map) C.my_sub_pos = play_map.my_sub_pos C.enemy_sub_pos = play_map.enemy_sub_pos C.enemy_main_pos = play_map.enemy_main_pos C.base_camera_pos = play_map.base_camera_pos if not FLAGS.on_server or FLAGS.debug_mode: PARALLEL = 1 THREAD_NUM = 1 MAX_AGENT_STEPS = 18000 DEVICE = ['/gpu:0'] NUM_FOR_UPDATE = 1 TRAIN_ITERS = 1 PORT_NUM = FLAGS.port_num else: PARALLEL = FLAGS.parallel THREAD_NUM = FLAGS.thread_num MAX_AGENT_STEPS = FLAGS.max_agent_steps if USED_DEVICES == '-1': DEVICE = ['/cpu:0'] else: DEVICE = ['/gpu:' + str(dev) for dev in range(len(FLAGS.device.split(',')))] NUM_FOR_UPDATE = FLAGS.num_for_update TRAIN_ITERS = FLAGS.max_iters PORT_NUM = FLAGS.port_num LOG = FLAGS.log_path if not os.path.exists(LOG): os.makedirs(LOG) SERVER_DICT = {"worker": [], "ps": []} # define some global variable UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event() Counter = 0 Waiting_Counter = 0 Update_Counter = 0 Result_List = [] ''' ps -ef |grep liuruoze | grep 'SC2_x64' | awk '{print $2}' | xargs kill -9 kill -9 `ps -ef |grep liuruoze | grep eval_mini_srcgame_add_map_bn | awk '{print $2}' ` ''' def run_thread(agent, game_num, Synchronizer, difficulty): global UPDATE_EVENT, ROLLING_EVENT, Counter, Waiting_Counter, Update_Counter, Result_List num = 0 all_num = 0 proc_name = mp.current_process().name C._FPS = 22.4 / FLAGS.step_mul # 5.6 step_mul = FLAGS.step_mul # 4 C.difficulty = difficulty with sc2_env.SC2Env( map_name=FLAGS.map, agent_race=FLAGS.agent_race, bot_race=FLAGS.bot_race, difficulty=difficulty, step_mul=step_mul, score_index=-1, game_steps_per_episode=MAX_AGENT_STEPS, screen_size_px=(FLAGS.screen_resolution, FLAGS.screen_resolution), minimap_size_px=(FLAGS.minimap_resolution, FLAGS.minimap_resolution), visualize=False, game_version=FLAGS.game_version) as env: # env = available_actions_printer.AvailableActionsPrinter(env) agent.set_env(env) while all_num != game_num * TRAIN_ITERS: agent.play_right_add(verbose=FLAGS.debug_mode) if FLAGS.training: # check if the num of episodes is enough to update num += 1 all_num += 1 reward = agent.result['reward'] Counter += 1 Result_List.append(reward) logging("(diff: %d) %d epoch: %s get %d/%d episodes! return: %d!" % (int(difficulty), Update_Counter, proc_name, len(Result_List), game_num * THREAD_NUM, reward)) # time for update if num == game_num: num = 0 ROLLING_EVENT.clear() # worker stops rolling, wait for update if agent.index != 0 and THREAD_NUM > 1: Waiting_Counter += 1 if Waiting_Counter == THREAD_NUM - 1: # wait for all the workers stop UPDATE_EVENT.set() ROLLING_EVENT.wait() # update! 
else: if THREAD_NUM > 1: UPDATE_EVENT.wait() Synchronizer.wait() # wait for other processes to update agent.update_network(Result_List) Result_List.clear() agent.global_buffer.reset() Synchronizer.wait() Update_Counter += 1 # finish update UPDATE_EVENT.clear() Waiting_Counter = 0 ROLLING_EVENT.set() if FLAGS.save_replay: env.save_replay(FLAGS.replay_dir) agent.reset() def Worker(index, update_game_num, Synchronizer, cluster, model_path, log_path): config = tf.ConfigProto( allow_soft_placement=True, log_device_placement=False, ) config.gpu_options.allow_growth = True worker = tf.train.Server(cluster, job_name="worker", task_index=index, config=config) sess = tf.Session(target=worker.target, config=config) summary_writer = tf.summary.FileWriter(log_path) Net = MiniNetwork(sess=sess, summary_writer=summary_writer, rl_training=FLAGS.training, cluster=cluster, index=index, device=DEVICE[index % len(DEVICE)], ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path, ob_space_add=FLAGS.ob_space_add, act_space_add=FLAGS.act_space_add, freeze_head=FLAGS.freeze_head, use_bn=FLAGS.use_bn, use_sep_net=FLAGS.use_sep_net, restore_model=FLAGS.restore_model, restore_from=FLAGS.restore_from, restore_to=FLAGS.restore_to, load_latest=FLAGS.load_latest, add_image=FLAGS.add_image, partial_restore=FLAGS.partial_restore, weighted_sum_type=FLAGS.weighted_sum_type, initial_type=FLAGS.initial_type) global_buffer = Buffer() agents = [] for i in range(THREAD_NUM): agent = mini_source_agent.MiniSourceAgent(index=i, global_buffer=global_buffer, net=Net, restore_model=FLAGS.restore_model, rl_training=FLAGS.training, strategy_agent=None, ob_space_add=FLAGS.ob_space_add) agents.append(agent) print("Worker %d: waiting for cluster connection..." % index) sess.run(tf.report_uninitialized_variables()) print("Worker %d: cluster ready!" % index) while len(sess.run(tf.report_uninitialized_variables())): print("Worker %d: waiting for variable initialization..." 
% index) time.sleep(1) print("Worker %d: variables initialized" % index) game_num = np.ceil(update_game_num // THREAD_NUM) UPDATE_EVENT.clear() ROLLING_EVENT.set() # Run threads threads = [] for i in range(THREAD_NUM - 1): t = threading.Thread(target=run_thread, args=(agents[i], game_num, Synchronizer, FLAGS.difficulty)) threads.append(t) t.daemon = True t.start() time.sleep(3) run_thread(agents[-1], game_num, Synchronizer, FLAGS.difficulty) for t in threads: t.join() def Parameter_Server(Synchronizer, cluster, log_path, model_path, procs): config = tf.ConfigProto( allow_soft_placement=True, log_device_placement=False, ) config.gpu_options.allow_growth = True server = tf.train.Server(cluster, job_name="ps", task_index=0, config=config) sess = tf.Session(target=server.target, config=config) summary_writer = tf.summary.FileWriter(log_path) Net = MiniNetwork(sess=sess, summary_writer=summary_writer, rl_training=FLAGS.training, cluster=cluster, index=0, device=DEVICE[0 % len(DEVICE)], ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path, ob_space_add=FLAGS.ob_space_add, act_space_add=FLAGS.act_space_add, freeze_head=FLAGS.freeze_head, use_bn=FLAGS.use_bn, use_sep_net=FLAGS.use_sep_net, restore_model=FLAGS.restore_model, restore_from=FLAGS.restore_from, restore_to=FLAGS.restore_to, load_latest=FLAGS.load_latest, add_image=FLAGS.add_image, partial_restore=FLAGS.partial_restore, weighted_sum_type=FLAGS.weighted_sum_type, initial_type=FLAGS.initial_type) agent = mini_source_agent.MiniSourceAgent(index=-1, net=Net, restore_model=FLAGS.restore_model, rl_training=FLAGS.training, ob_space_add=FLAGS.ob_space_add) print("Parameter server: waiting for cluster connection...") sess.run(tf.report_uninitialized_variables()) print("Parameter server: cluster ready!") print("Parameter server: initializing variables...") agent.init_network() print("Parameter server: variables initialized") update_counter = 0 max_win_rate = 0. latest_win_rate = 0. 
while update_counter < TRAIN_ITERS: agent.reset_old_network() # wait for update Synchronizer.wait() logging("Update Network!") # TODO count the time , compare cpu and gpu time.sleep(1) # update finish Synchronizer.wait() logging("Update Network finished!") steps, win_rate = agent.update_summary(update_counter) logging("Steps: %d, win rate: %f" % (steps, win_rate)) update_counter += 1 if win_rate >= max_win_rate: agent.save_model() max_win_rate = win_rate latest_win_rate = win_rate agent.net.save_latest_policy() return max_win_rate, latest_win_rate def _main(unused_argv): # create distribute tf cluster start_port = PORT_NUM SERVER_DICT["ps"].append("localhost:%d" % start_port) for i in range(PARALLEL): SERVER_DICT["worker"].append("localhost:%d" % (start_port + 1 + i)) Cluster = tf.train.ClusterSpec(SERVER_DICT) now = datetime.now() model_path = "./model/" + now.strftime("%Y%m%d-%H%M%S") + "_source/" if not os.path.exists(model_path): os.makedirs(model_path) log_path = "./logs/" + now.strftime("%Y%m%d-%H%M%S") + "_source/" UPDATE_GAME_NUM = NUM_FOR_UPDATE per_update_num = np.ceil(UPDATE_GAME_NUM / PARALLEL) Synchronizer = mp.Barrier(PARALLEL + 1) # Run parallel process procs = [] for index in range(PARALLEL): p = mp.Process(name="Worker_%d" % index, target=Worker, args=(index, per_update_num, Synchronizer, Cluster, model_path, log_path)) procs.append(p) p.daemon = True p.start() time.sleep(1) max_win_rate, latest_win_rate = Parameter_Server(Synchronizer, Cluster, log_path, model_path, procs) print('#######################') print('Best Win_rate:', max_win_rate) print('Latest Win_rate:', latest_win_rate) print('#######################') for p in procs: p.join() ''' if FLAGS.profile: print(stopwatch.sw) ''' if __name__ == "__main__": app.run(_main)
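# --- Usage sketch (not part of the original file) ---------------------------
# A minimal sketch of the between-graph distributed setup that _main(),
# Parameter_Server() and Worker() above assemble: one "ps" job plus a set of
# "worker" jobs built from a tf.train.ClusterSpec. Assumes TensorFlow 1.x, as
# the script does; the localhost ports and single-worker layout here are
# illustrative only.
import tensorflow as tf

cluster = tf.train.ClusterSpec({
    "ps": ["localhost:2222"],        # Parameter_Server() binds its server here
    "worker": ["localhost:2223"],    # each Worker(index) binds to its own port
})

# Each process creates a server for its own job/task, then attaches a session
# to that server's target so variables are shared through the ps job.
server = tf.train.Server(cluster, job_name="worker", task_index=0)
with tf.Session(target=server.target) as sess:
    pass  # build the network against `cluster` here and train, as Worker() does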
eventing_rqg.py
import os, re import zipfile import datetime import logging from threading import Thread from lib.membase.api.rest_client import RestConnection from lib.testconstants import STANDARD_BUCKET_PORT from pytests.eventing.eventing_constants import HANDLER_CODE, HANDLER_CODE_ERROR from pytests.eventing.eventing_base import EventingBaseTest, log from lib.couchbase_helper.tuq_helper import N1QLHelper from string import Template log = logging.getLogger() class EventingRQG(EventingBaseTest): def setUp(self): super(EventingRQG, self).setUp() if self.create_functions_buckets: self.bucket_size = 100 log.info(self.bucket_size) bucket_params = self._create_bucket_params(server=self.server, size=self.bucket_size, replicas=self.num_replicas) self.cluster.create_standard_bucket(name=self.src_bucket_name, port=STANDARD_BUCKET_PORT + 1, bucket_params=bucket_params) self.src_bucket = RestConnection(self.master).get_buckets() self.cluster.create_standard_bucket(name=self.dst_bucket_name, port=STANDARD_BUCKET_PORT + 1, bucket_params=bucket_params) self.cluster.create_standard_bucket(name=self.metadata_bucket_name, port=STANDARD_BUCKET_PORT + 1, bucket_params=bucket_params) self.buckets = RestConnection(self.master).get_buckets() self.gens_load = self.generate_docs(self.docs_per_day) self.expiry = 3 self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql") self.n1ql_helper = N1QLHelper(shell=self.shell, max_verify=self.max_verify, buckets=self.buckets, item_flag=self.item_flag, n1ql_port=self.n1ql_port, full_docs_list=self.full_docs_list, log=self.log, input=self.input, master=self.master, use_rest=True ) self.number_of_handler = self.input.param('number_of_handler', 5) self.number_of_queries = self.input.param('number_of_queries', None) self.template_file=self.input.param('template_file', 'b/resources/rqg/simple_table_db/query_tests_using_templates/query_10000_fields.txt.zip') having_map = {"STRING_FIELD ": "email ", "NUMERIC_FIELD ": "age ", "UPPER_BOUND_VALUE": "8", "LOWER_BOUND_VALUE": "0", "NUMERIC_FIELD_LIST": "age", "STRING_FIELD_LIST": "email", "( LIST )": "[1,2,3]"} update_map = {"STRING_FIELD ": "email ", "NUMERIC_FIELD ": "age ", "UPPER_BOUND_VALUE": "8", "LOWER_BOUND_VALUE": "0", "NUMERIC_FIELD_LIST": "age", "STRING_FIELD_LIST": "email", "( LIST )": "[1,2,3]","STRING_FIELD,NUMERIC_FIELD,DATETIME_FIELD":"email=\"update@a.c\",age=4,created=\"2010-09-15 00:00:00\""} join_map = {"PREVIOUS_TABLE.FIELD":"src_bucket.email","CURRENT_TABLE.FIELD":"_bucket.email","STRING_FIELD ": "email ", "NUMERIC_FIELD ": "age ", "UPPER_BOUND_VALUE": "8", "LOWER_BOUND_VALUE": "0", "NUMERIC_FIELD_LIST": "age", "STRING_FIELD_LIST": "email", "( LIST )": "[1,2,3]"} field_map = {"NUMERIC_VALUE":"0","'%STRING_VALUES%'":"\"%a@b.c%\"","'%STRING_VALUES'":"\"%a@b.c\"","'STRING_VALUES%'":"\"a@b.c%\"","'STRING_VALUES'":"\"a@b.c\""} def tearDown(self): super(EventingRQG, self).tearDown() def test_random_n1ql(self): test_file_path = self.unzip_template( self.template_file) with open(test_file_path) as f: query_list = f.readlines() self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node) log.info(len(query_list)) k = self.number_of_handler if self.number_of_queries is None: s = len(query_list) else: s = self.number_of_queries for j in range(0, s, k): try: threads = [] for i in range(j, j + k): if i >= s: break threads.append(Thread(target=self.create_function_and_deploy, args={query_list[i]})) for thread in threads: thread.start() for thread in threads: thread.join() key = datetime.datetime.now().time() 
self.sleep(10) query = "insert into src_bucket (KEY, VALUE) VALUES (\"" + str(key) + "\",\"doc created\")" self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node) self.sleep(10) self.eventing_stats() except Exception as e: log.error(e) finally: self.undeploy_delete_all_functions() self.delete_temp_handler_code() self.verify_n1ql_stats(s) def test_queries(self): test_file_path = self.template_file with open(test_file_path) as f: query_list = f.readlines() self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node) k = self.number_of_handler if self.number_of_queries is None: s = len(query_list) else: s = self.number_of_queries log.info(s) for j in range(0, s, k): try: threads = [] for i in range(j, j + k): if i >= s: break threads.append(Thread(target=self.create_function_and_deploy, args=(query_list[i], False))) for thread in threads: thread.start() for thread in threads: thread.join() key = datetime.datetime.now().time() query = "insert into src_bucket (KEY, VALUE) VALUES (\""+str(key)+"\",{\"email\":\"a@b.c\"})" self.log.info("insert doc:{}".format(query)) self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node) self.sleep(10) self.eventing_stats() except Exception as e: log.error(e) finally: self.undeploy_delete_all_functions() self.delete_temp_handler_code() self.verify_n1ql_stats(s) def create_function_and_deploy(self, query, replace=True): log.info("creating handler code for :{}".format(query)) if replace: file_path = self.generate_eventing_file(self._convert_template_n1ql(query)) else: file_path = self.generate_eventing_file(query) self.sleep(10) ts = datetime.datetime.now().strftime('%m%d%y%H%M%S%f') body = self.create_save_function_body(self.function_name + str(ts), file_path, dcp_stream_boundary="from_now", worker_count=1, execution_timeout=60) self.deploy_function(body) def _convert_template_n1ql(self, query): n1ql = str(query).replace("BUCKET_NAME", self.src_bucket_name) n1ql = str(n1ql).replace("TRUNCATE", "TRUNC") for k, v in list(self.field_map.items()): n1ql = str(n1ql).replace(k, v) if "HAVING" in n1ql: for k, v in list(self.having_map.items()): n1ql=str(n1ql).replace(k, v) group_fields = re.search(r'GROUP BY(.*?)HAVING', n1ql).group(1) n1ql = n1ql.replace("GROUPBY_FIELDS", group_fields) elif "GROUP BY" in n1ql: for k, v in list(self.having_map.items()): n1ql=str(n1ql).replace(k, v) group_fields = re.search(r'GROUP BY(.*?);', n1ql).group(1) n1ql = n1ql.replace("GROUPBY_FIELDS", group_fields) if "UPDATE" in n1ql: for k, v in list(self.update_map.items()): n1ql = str(n1ql).replace(k, v) n1ql = n1ql.replace(self.src_bucket_name, self.dst_bucket_name) if "JOIN" in n1ql: for k, v in list(self.join_map.items()): n1ql = str(n1ql).replace(k, v) return n1ql def generate_eventing_file(self, query): h_code = self.input.param('handler_code', 'n1ql_with_exec') if h_code == "n1ql_with_exec": handler_code = HANDLER_CODE.N1QL_TEMP else: handler_code = HANDLER_CODE.N1QL_TEMP_WITHOUT_EXEC try: if not os.path.exists(HANDLER_CODE.N1QL_TEMP_PATH): os.makedirs(HANDLER_CODE.N1QL_TEMP_PATH) except OSError as err: print(err) script_dir = os.path.dirname(__file__) abs_file_path = os.path.join(script_dir, handler_code) fh = open(abs_file_path, "r") code = Template(fh.read()).substitute(n1ql=query) fh.close() ts = datetime.datetime.now().strftime('%m%d%y%H%M%S%f') temp_file_path = HANDLER_CODE.N1QL_TEMP_PATH + "f_" + ts + ".js" abs_file_path = os.path.join(script_dir, temp_file_path) fw = open(abs_file_path, "w+") fw.write(code) fw.close() return temp_file_path 
def unzip_template(self, template_path): if "zip" not in template_path: return template_path tokens = template_path.split("/") file_name = tokens[len(tokens) - 1] output_path = template_path.replace(file_name, "") with zipfile.ZipFile(template_path, "r") as z: z.extractall(output_path) template_path = template_path.replace(".zip", "") return template_path def delete_temp_handler_code(self, path=HANDLER_CODE.N1QL_TEMP_PATH): log.info("deleting all the handler codes") script_dir = os.path.dirname(__file__) dirPath = os.path.join(script_dir, path) fileList = os.listdir(dirPath) for fileName in fileList: os.remove(dirPath + "/" + fileName) def verify_n1ql_stats(self, total_query): n1ql_query = "select failed_query from dst_bucket where failed_query is not null" failed = self.n1ql_helper.run_cbq_query(query=n1ql_query, server=self.n1ql_node) n1ql_query = "select passed_query from dst_bucket where passed_query is not null" passed = self.n1ql_helper.run_cbq_query(query=n1ql_query, server=self.n1ql_node) log.info("passed: {}".format(len(passed["results"]))) log.info("failed: {}".format(len(failed["results"]))) assert len(passed["results"]) + len(failed["results"]) == total_query assert len(failed["results"]) == 0, "failed queries are {0}".format(failed["results"])
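# --- Usage sketch (not part of the original file) ---------------------------
# test_random_n1ql() and test_queries() above deploy handlers in batches of
# number_of_handler threads and join the whole batch before inserting the
# trigger document. A standalone sketch of that batch-and-join pattern;
# deploy() is a hypothetical stand-in for create_function_and_deploy().
from threading import Thread

def deploy(query):
    print("deploying handler for:", query.strip())

query_list = ["SELECT 1;", "SELECT 2;", "SELECT 3;", "SELECT 4;", "SELECT 5;"]
k = 2  # batch size, mirroring self.number_of_handler

for j in range(0, len(query_list), k):
    batch = [Thread(target=deploy, args=(q,)) for q in query_list[j:j + k]]
    for t in batch:
        t.start()
    for t in batch:
        t.join()  # the whole batch finishes before the trigger doc would be inserted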
line_reader.py
""" Created on 14 Jul 2017 @author: Bruno Beloff (bruno.beloff@southcoastscience.com) A portable, non-blocking Python line reader. Yes, such a thing exists. And this is it... """ import os from multiprocessing import Process, Queue # -------------------------------------------------------------------------------------------------------------------- class LineReader(object): """ classdocs """ def __init__(self, fileno): """ Constructor """ self.__fileno = fileno self.__queue = Queue() # ---------------------------------------------------------------------------------------------------------------- def start(self): proc = Process(target=self.run) proc.start() return proc def run(self): file = os.fdopen(self.__fileno) try: for line in file: self.__queue.put(str(line).strip()) except (ConnectionError, KeyboardInterrupt, SystemExit): pass self.__queue.put(None) # ---------------------------------------------------------------------------------------------------------------- @property def lines(self): while True: if self.__queue.empty(): line = None else: line = self.__queue.get() if line is None: return yield line # ---------------------------------------------------------------------------------------------------------------- def __str__(self, *args, **kwargs): return "LineReader:{fileno:%s, queue:%s}" % (self.__fileno, self.__queue)
parser.py
import logging
import queue
import threading

import requests

import cfg
from .app_constants import *


class FileParser:
    """
    Description:
        FileParser class contains methods which collectively work together to parse a document. This document
        contains image URLs, one per line. FileParser parses the document and puts the URLs into a queue which
        is consumed by downloader threads.
    Version: 1.0
    Comment:
    """

    def __init__(self, url_fd):
        self.logger = logging.getLogger(__name__)

        # File descriptor to the file containing urls (provided by user through command line argument)
        self.url_fd = url_fd

        # Name of the file containing urls (provided by user through command line argument)
        self.url_fname = self.url_fd.name

        # BOM encoding type (a string) of the plaintext file provided in the argument
        self.encoding = self.detect_bom_markers()

        # This queue holds only serviceable urls which are consumed by downloader threads.
        # FileParser is the producer of urls in this queue, while Downloader is the consumer.
        self.url_queue = queue.Queue(maxsize=50)

        # file parser thread
        self.parser_thread = threading.Thread(target=self.parse_image_url_file)

    def detect_bom_markers(self):
        '''
        This function detects the encoding of the plaintext file provided in the argument by reading the first
        4 bytes of the document.

        Specification Reference: http://unicodebook.readthedocs.io/guess_encoding.html#check-for-bom-markers
        Stackoverflow Reference:
            https://stackoverflow.com/questions/13590749/reading-unicode-file-data-with-bom-chars-in-python

        Return: BOM encoding type (a string)
        '''
        # codecs module provides the constants which are useful for reading and writing to platform dependent files.
        # BOM (Byte Order Mark) is used to detect the endianness of a UTF-16 or UTF-32 byte sequence and is also
        # used as a signature to guess the encoding.
        import codecs  # used to get defined BOM constants

        BOMS = (
            (codecs.BOM_UTF8, "UTF-8-SIG"),
            (codecs.BOM_UTF32_BE, "UTF-32-BE"),
            (codecs.BOM_UTF32_LE, "UTF-32-LE"),
            (codecs.BOM_UTF16_BE, "UTF-16-BE"),
            (codecs.BOM_UTF16_LE, "UTF-16-LE"),
        )

        data = self.url_fd.read(4)  # reading 4 bytes from the beginning of the file

        for bom, encoding in BOMS:
            if data.startswith(bom):
                self.url_fd.close()
                self.url_fd = None
                return encoding

        self.url_fd.close()
        self.url_fd = None

        # For most plaintext programs, UTF-8 is the default encoding signature, which python calls UTF-8-SIG.
        # Reference: https://docs.python.org/2/library/codecs.html
        return "UTF-8-SIG"

    def is_url_serviceable(self, url, reattempt_count=cfg.APP_CFG.get(MAX_DOWNLOAD_REATTEMPTS)):
        """
        Verifies whether or not the url is a downloadable image resource, by retrieving meta-information
        written in the response headers without having to transport the entire content. HTTP HEAD is
        CPU-economic for the application to verify whether or not the url is serviceable.

        :param url: string
        :param reattempt_count: integer (number of reattempts to make if the request times out)
        :return: boolean (True if the url is serviceable, otherwise False)
        """
        try:
            response = requests.head(url, allow_redirects=True, stream=False,
                                     timeout=cfg.APP_CFG[URL_TIMEOUT],
                                     proxies=cfg.APP_CFG[SYSTEM_PROXY])

            # Raises stored HTTPError, if one occurred.
            response.raise_for_status()

        # Reference: http://docs.python-requests.org/en/master/api/#exceptions
        except requests.exceptions.Timeout as t_err:
            # Maybe set up for a retry, or continue in a retry loop
            self.logger.info("For URL: {0} - An exception of type {1} occurred. Arguments:\n{2!r}"
                             .format(url, type(t_err).__name__, t_err.args))

            if not reattempt_count:
                self.logger.debug("URL {} has not been downloaded.".format(url))
                return False

            return self.is_url_serviceable(url, reattempt_count - 1)

        except (requests.exceptions.ConnectionError,    # connection-related errors
                requests.exceptions.HTTPError,          # 401 Unauthorized
                requests.exceptions.URLRequired,        # invalid URL
                requests.exceptions.TooManyRedirects,   # request exceeds the configured number of max redirections
                requests.exceptions.RequestException    # Mother of all requests exceptions. it's doomsday :D
                ) as err:
            self.logger.info("For URL: {0} - An exception of type {1} occurred. Arguments:\n{2!r}"
                             .format(url, type(err).__name__, err.args))
            self.logger.debug("URL {} is not serviceable.".format(url))
            return False

        content_type = response.headers.get('content-type')
        if not content_type:
            return False

        # Verify download resource is an image
        if 'image' in content_type.lower():
            return True
        if 'png' in content_type.lower():
            return True

        self.logger.debug("URL {} is not serviceable.".format(url))
        return False

    def start_parser_thread(self):
        """
        Starts parser thread
        :return:
        """
        self.parser_thread.start()

    def wait_for_parser_thread(self):
        """
        Waits for file parser thread
        :return:
        """
        self.parser_thread.join()

    def parse_image_url_file(self):
        """
        Parses the file that contains one url per line and puts each url into a queue if it is serviceable.
        :return:
        """
        with open(file=self.url_fname, mode='r', encoding=self.encoding, newline=None) as fd:
            for line_terminated in fd:
                # removing newline from line_terminated
                url = line_terminated.rstrip('\n')
                if self.is_url_serviceable(url):
                    self.url_queue.put(item=url, block=True, timeout=None)

        # "EXIT" will be used by downloader threads to terminate themselves
        self.url_queue.put(item="EXIT", block=True, timeout=None)
latency.py
import argparse
import logging
import os
import sys
import time

import requests
import threading

from bitstring import BitArray
from datetime import datetime, timedelta

from npi import get_correct_utc_time
from npi.config import Config
from npi.rtcm import RTCM3

MESSAGE = 100
logging.addLevelName(MESSAGE, "MESSAGE")

logging.basicConfig(level=logging.INFO, format="%(asctime)s | %(levelname)s | (%(threadName)-10s) | %(message)s")
_log = logging.getLogger(__name__)


def main():
    _log.info("Latency tool starting...")

    args = process_arguments()

    # TODO
    # - command line arguments for config file, output directory, ???

    _log.info("Reading configuration from [%s]", "config/config.yaml")
    config = Config("config/config.yaml")

    threads = []
    _log.info("Creating handler thread for each stream...")
    for type, format, id, url, username, password, count in config.get_streams():
        _log.debug("Adding thread for stream url=[%s] username=[%s] id=[%s] count=[%3d]", url, username, id, count)
        t = threading.Thread(target=worker, args=(args, username, password, url, id, count))
        threads.append(t)
        t.start()


def worker(args, username, password, url, id, num_messages):
    from logging.handlers import TimedRotatingFileHandler

    filename = os.path.join("data", "latency", "latency.{id}.dat".format(id=id))

    # TODO use atTime to get the interval start on the epoch boundary rather than whenever it starts
    # handler = TimedRotatingFileHandler(filename=filename, when="S", interval=30, delay=True, utc=True)
    # at = datetime.now().time()
    # # at.replace(hour=at.hour + 1, minute=0, second=0)
    # # handler = TimedRotatingFileHandler(filename=filename, when="S", interval=30, delay=True, utc=True, atTime=at)
    # at = datetime.utcnow().time()
    # # at.replace(minute=at.minute + 1, second=0)
    # # handler = TimedRotatingFileHandler(filename=filename, when="H", interval=1, delay=True, utc=True, atTime=at)
    handler = TimedRotatingFileHandler(filename=filename, when="H", interval=1, delay=True, utc=True)
    handler.setLevel(MESSAGE)

    formatter = logging.Formatter("%(asctime)s.%(msecs)d,%(message)s", datefmt="%Y-%m-%d.%H:%M:%S")
    formatter.converter = time.gmtime
    handler.setFormatter(formatter)

    _log.addHandler(handler)

    if num_messages:
        _log.info("Starting processing username=[%s] url=[%s] filename=[%s] messages=[% 4d]",
                  username, url, filename, num_messages)
    else:
        _log.info("Starting processing username=[%s] url=[%s] filename=[%s]", username, url, filename)

    auth = (username, password)
    headers = {"User-Agent": "NTRIP ACS_PYTHON/0.1", "Ntrip-Version": "Ntrip/2.0"}
    proxies = None
    if args.noproxy:
        proxies = {"http": None, "https": None}

    done = False
    count = 0
    count_skipped = 0
    while not done:
        try:
            _log.info("Connecting...")
            with requests.get(url, auth=auth, headers=headers, proxies=proxies, stream=True, timeout=300) as response:
                _log.info("Connected - starting to read messages...")
                while True:
                    length, message_number, crc, reference_station_id, epoch, epoch_dtm, data = \
                        RTCM3.get_next_message(response.raw)
                    current_dtm = get_correct_utc_time()
                    _log.debug("frame length=[%s] message number=[%s]", length, message_number)

                    if length and message_number and crc:
                        if RTCM3.is_msm_message(message_number):
                            _log.debug("message number=[%d] epoch=[%d] epoch_dtm=[%s] current_dtm=[%s] latency=[%s] bits=[%s]",
                                       message_number, epoch, epoch_dtm, current_dtm, current_dtm - epoch_dtm,
                                       " ".join(["{0:08b}".format(x) for x in data[:12]]))
                            _log.log(MESSAGE, "{},{},{},{},{},{},{}".format(
                                message_number, epoch, epoch_dtm, current_dtm, current_dtm - epoch_dtm, length,
                                " ".join(["{0:08b}".format(x) for x in data[:12]])))
                            count += 1
                        else:
                            count_skipped += 1

                    if num_messages and count >= num_messages:
                        done = True
                        _log.info("Processed [%d] messages skipped [%d] ----> done = [%s]", count, count_skipped, done)

                    if done:
                        break

        except Exception as e:
            _log.info("Caught exception...reconnecting...\n%s", e)

    _log.info("count=[%d] skipped=[%d]", count, count_skipped)


def process_arguments():
    parser = argparse.ArgumentParser(prog=sys.argv[0], description=__name__)

    group = parser.add_mutually_exclusive_group()
    group.add_argument("--quiet", help="Less output (WARNINGS and higher)",
                       action="store_const", dest="log_level", const=logging.WARN)
    group.add_argument("--verbose", help="More output (DEBUG and higher)",
                       action="store_const", dest="log_level", const=logging.DEBUG)
    parser.set_defaults(log_level=logging.INFO)

    parser.add_argument("--no-proxy", help="Don't use proxy", action="store_true", dest="noproxy", default=False)

    args = parser.parse_args()

    _log.setLevel(args.log_level)
    _log.debug("Logging level = [%s]", logging.getLevelName(args.log_level))
    _log.debug("Ignore proxy = [%s]", args.noproxy)

    return args


if __name__ == "__main__":
    main()
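# --- Usage sketch (not part of the original file) ---------------------------
# worker() above routes one CSV record per RTCM message to an hourly
# TimedRotatingFileHandler at the custom MESSAGE level. A self-contained
# sketch of that logging arrangement; the file name and the record content
# are illustrative.
import logging
import time
from logging.handlers import TimedRotatingFileHandler

MESSAGE = 100
logging.addLevelName(MESSAGE, "MESSAGE")

log = logging.getLogger("latency-demo")
handler = TimedRotatingFileHandler(filename="latency.demo.dat", when="H",
                                   interval=1, delay=True, utc=True)
handler.setLevel(MESSAGE)

formatter = logging.Formatter("%(asctime)s.%(msecs)d,%(message)s",
                              datefmt="%Y-%m-%d.%H:%M:%S")
formatter.converter = time.gmtime      # timestamps in UTC, matching utc=True
handler.setFormatter(formatter)
log.addHandler(handler)

log.log(MESSAGE, "1077,432000,2019-01-01 00:00:00,...")  # one row per message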
ddns.py
"""Module of utilities to update the IP address of an URL via DDNS.""" import os import time from threading import Thread import requests from extutils.logger import LoggerSkeleton __all__ = ("activate_ddns_update",) LOGGER = LoggerSkeleton("sys.ddnsupdate", logger_name_env="DDNS_UPDATE") ENABLED = True DDNS_PASSWORD = os.environ.get("DDNS_PASSWORD") if not DDNS_PASSWORD: LOGGER.logger.error("DDNS_PASSWORD not found in environment variables.") ENABLED = False DDNS_HOST = os.environ.get("DDNS_HOST") if not DDNS_HOST: LOGGER.logger.error("DDNS_HOST not found in environment variables.") ENABLED = False DDNS_DOMAIN = os.environ.get("DDNS_DOMAIN") if not DDNS_DOMAIN: LOGGER.logger.error("DDNS_DOMAIN not found in environment variables.") ENABLED = False def ddns_update(interval_sec: int, retry_sec: int = 60): """ A blocking call to update the URL hosting IP via DDNS. :param interval_sec: DDNS updating interval :param retry_sec: time gap to retry upon failure """ while True: try: requests.get(f"http://dynamicdns.park-your-domain.com/update?" f"host={DDNS_HOST}&domain={DDNS_DOMAIN}&password={DDNS_PASSWORD}") LOGGER.logger.info("DDNS updated. Host: %s / Domain: %s", DDNS_HOST, DDNS_DOMAIN) time.sleep(interval_sec) except (requests.exceptions.ConnectionError, ConnectionRefusedError): LOGGER.logger.warning("Failed to update DDNS. ConnectionError. Retry in %d seconds.", retry_sec) time.sleep(retry_sec) def activate_ddns_update(interval_sec: int, retry_sec: int = 60): """ Start a :class:`Thread` to activate DDNS. Will print an error log and **NOT** raising any exceptions if any of the key environment variables is missing: - ``DDNS_PASSWORD``: Password for DDNS - ``DDNS_HOST``: Host of DDNS - ``DDNS_DOMAIN``: Domain of DDNS :param interval_sec: DDNS updating interval :param retry_sec: time gap to retry upon failure """ if ENABLED: Thread(target=ddns_update, args=(interval_sec, retry_sec)).start() else: LOGGER.logger.error("DDNS update service cannot be started " "because some necessary environment variables are not defined.")
test_util.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=invalid-name """Test utils for tensorflow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from collections import OrderedDict import contextlib import functools import gc import itertools import math import os import random import re import tempfile import threading import unittest from absl.testing import parameterized import numpy as np import six _portpicker_import_error = None try: import portpicker # pylint: disable=g-import-not-at-top except ImportError as _error: _portpicker_import_error = _error portpicker = None # pylint: disable=g-import-not-at-top from google.protobuf import descriptor_pool from google.protobuf import text_format from tensorflow.core.framework import graph_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.python import pywrap_tensorflow from tensorflow.python import tf2 from tensorflow.python.client import device_lib from tensorflow.python.client import session from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.eager import tape from tensorflow.python.framework import device as pydev from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import errors_impl from tensorflow.python.framework import importer from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import versions from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_util from tensorflow.python.ops import script_ops from tensorflow.python.ops import variables from tensorflow.python.platform import googletest from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import server_lib from tensorflow.python.util import compat from tensorflow.python.util import deprecation from tensorflow.python.util import nest from tensorflow.python.util import tf_decorator from tensorflow.python.util import tf_inspect from tensorflow.python.util.protobuf import compare from tensorflow.python.util.tf_export import tf_export # If the above import is made available through the BUILD rule, then this # function is overridden and will instead return True and cause Tensorflow # graphs to be compiled with XLA. 
def is_xla_enabled(): return False try: from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top except: pass @tf_export("test.gpu_device_name") def gpu_device_name(): """Returns the name of a GPU device if available or the empty string.""" for x in device_lib.list_local_devices(): if x.device_type == "GPU" or x.device_type == "SYCL": return compat.as_str(x.name) return "" def assert_ops_in_graph(expected_ops, graph): """Assert all expected operations are found. Args: expected_ops: `dict<string, string>` of op name to op type. graph: Graph to check. Returns: `dict<string, node>` of node name to node. Raises: ValueError: If the expected ops are not present in the graph. """ actual_ops = {} gd = graph.as_graph_def() for node in gd.node: if node.name in expected_ops: if expected_ops[node.name] != node.op: raise ValueError("Expected op for node %s is different. %s vs %s" % (node.name, expected_ops[node.name], node.op)) actual_ops[node.name] = node if set(expected_ops.keys()) != set(actual_ops.keys()): raise ValueError("Not all expected ops are present. Expected %s, found %s" % (expected_ops.keys(), actual_ops.keys())) return actual_ops @tf_export("test.assert_equal_graph_def", v1=[]) def assert_equal_graph_def_v2(expected, actual): """Asserts that two `GraphDef`s are (mostly) the same. Compares two `GraphDef` protos for equality, ignoring versions and ordering of nodes, attrs, and control inputs. Node names are used to match up nodes between the graphs, so the naming of nodes must be consistent. This function ignores randomized attribute values that may appear in V2 checkpoints. Args: expected: The `GraphDef` we expected. actual: The `GraphDef` we have. Raises: AssertionError: If the `GraphDef`s do not match. TypeError: If either argument is not a `GraphDef`. """ assert_equal_graph_def(actual, expected, checkpoint_v2=True) @tf_export(v1=["test.assert_equal_graph_def"]) def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False): """Asserts that two `GraphDef`s are (mostly) the same. Compares two `GraphDef` protos for equality, ignoring versions and ordering of nodes, attrs, and control inputs. Node names are used to match up nodes between the graphs, so the naming of nodes must be consistent. Args: actual: The `GraphDef` we have. expected: The `GraphDef` we expected. checkpoint_v2: boolean determining whether to ignore randomized attribute values that appear in V2 checkpoints. Raises: AssertionError: If the `GraphDef`s do not match. TypeError: If either argument is not a `GraphDef`. 
""" assert_equal_graph_def(actual, expected, checkpoint_v2) def assert_equal_graph_def(actual, expected, checkpoint_v2=False): if not isinstance(actual, graph_pb2.GraphDef): raise TypeError( "Expected tf.GraphDef for actual, got %s" % type(actual).__name__) if not isinstance(expected, graph_pb2.GraphDef): raise TypeError( "Expected tf.GraphDef for expected, got %s" % type(expected).__name__) if checkpoint_v2: _strip_checkpoint_v2_randomized(actual) _strip_checkpoint_v2_randomized(expected) diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(), expected.SerializeToString()) if diff: raise AssertionError(compat.as_str(diff)) def assert_meta_graph_protos_equal(tester, a, b): """Compares MetaGraphDefs `a` and `b` in unit test class `tester`.""" # Carefully check the collection_defs tester.assertEqual(set(a.collection_def), set(b.collection_def)) collection_keys = a.collection_def.keys() for k in collection_keys: a_value = a.collection_def[k] b_value = b.collection_def[k] proto_type = ops.get_collection_proto_type(k) if proto_type: a_proto = proto_type() b_proto = proto_type() # Number of entries in the collections is the same tester.assertEqual( len(a_value.bytes_list.value), len(b_value.bytes_list.value)) for (a_value_item, b_value_item) in zip(a_value.bytes_list.value, b_value.bytes_list.value): a_proto.ParseFromString(a_value_item) b_proto.ParseFromString(b_value_item) tester.assertProtoEquals(a_proto, b_proto) else: tester.assertEquals(a_value, b_value) # Compared the fields directly, remove their raw values from the # proto comparison below. a.ClearField("collection_def") b.ClearField("collection_def") # Check the graph_defs. assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True) # Check graph_def versions (ignored by assert_equal_graph_def). tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions) # Compared the fields directly, remove their raw values from the # proto comparison below. a.ClearField("graph_def") b.ClearField("graph_def") tester.assertProtoEquals(a, b) # Matches attributes named via _SHARDED_SUFFIX in # tensorflow/python/training/saver.py _SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part" def _strip_checkpoint_v2_randomized(graph_def): for node in graph_def.node: delete_keys = [] for attr_key in node.attr: attr_tensor_value = node.attr[attr_key].tensor if attr_tensor_value and len(attr_tensor_value.string_val) == 1: attr_tensor_string_value = attr_tensor_value.string_val[0] if (attr_tensor_string_value and re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))): delete_keys.append(attr_key) for attr_key in delete_keys: del node.attr[attr_key] def IsGoogleCudaEnabled(): return pywrap_tensorflow.IsGoogleCudaEnabled() def CudaSupportsHalfMatMulAndConv(): return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv() def IsMklEnabled(): return pywrap_tensorflow.IsMklEnabled() def InstallStackTraceHandler(): pywrap_tensorflow.InstallStacktraceHandler() def NHWCToNCHW(input_tensor): """Converts the input from the NHWC format to NCHW. 
Args: input_tensor: a 4- or 5-D tensor, or an array representing shape Returns: converted tensor or shape array """ # tensor dim -> new axis order new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]} if isinstance(input_tensor, ops.Tensor): ndims = input_tensor.shape.ndims return array_ops.transpose(input_tensor, new_axes[ndims]) else: ndims = len(input_tensor) return [input_tensor[a] for a in new_axes[ndims]] def NHWCToNCHW_VECT_C(input_shape_or_tensor): """Transforms the input from the NHWC layout to NCHW_VECT_C layout. Note: Does not include quantization or type conversion steps, which should be applied afterwards. Args: input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape Returns: tensor or shape array transformed into NCHW_VECT_C Raises: ValueError: if last dimension of `input_shape_or_tensor` is not evenly divisible by 4. """ permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]} is_tensor = isinstance(input_shape_or_tensor, ops.Tensor) temp_shape = ( input_shape_or_tensor.shape.as_list() if is_tensor else input_shape_or_tensor) if temp_shape[-1] % 4 != 0: raise ValueError( "Last dimension of input must be evenly divisible by 4 to convert to " "NCHW_VECT_C.") temp_shape[-1] //= 4 temp_shape.append(4) permutation = permutations[len(temp_shape)] if is_tensor: t = array_ops.reshape(input_shape_or_tensor, temp_shape) return array_ops.transpose(t, permutation) else: return [temp_shape[a] for a in permutation] def NCHW_VECT_CToNHWC(input_shape_or_tensor): """Transforms the input from the NCHW_VECT_C layout to NHWC layout. Note: Does not include de-quantization or type conversion steps, which should be applied beforehand. Args: input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape Returns: tensor or shape array transformed into NHWC Raises: ValueError: if last dimension of `input_shape_or_tensor` is not 4. """ permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]} is_tensor = isinstance(input_shape_or_tensor, ops.Tensor) input_shape = ( input_shape_or_tensor.shape.as_list() if is_tensor else input_shape_or_tensor) if input_shape[-1] != 4: raise ValueError("Last dimension of NCHW_VECT_C must be 4.") permutation = permutations[len(input_shape)] nhwc_shape = [input_shape[a] for a in permutation[:-1]] nhwc_shape[-1] *= input_shape[-1] if is_tensor: t = array_ops.transpose(input_shape_or_tensor, permutation) return array_ops.reshape(t, nhwc_shape) else: return nhwc_shape def NCHWToNHWC(input_tensor): """Converts the input from the NCHW format to NHWC. Args: input_tensor: a 4- or 5-D tensor, or an array representing shape Returns: converted tensor or shape array """ # tensor dim -> new axis order new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]} if isinstance(input_tensor, ops.Tensor): ndims = input_tensor.shape.ndims return array_ops.transpose(input_tensor, new_axes[ndims]) else: ndims = len(input_tensor) return [input_tensor[a] for a in new_axes[ndims]] def skip_if(condition): """Skips the decorated function if condition is or evaluates to True. Args: condition: Either an expression that can be used in "if not condition" statement, or a callable whose result should be a boolean. Returns: The wrapped function """ def real_skip_if(fn): def wrapper(*args, **kwargs): if callable(condition): skip = condition() else: skip = condition if not skip: return fn(*args, **kwargs) return wrapper return real_skip_if def enable_c_shapes(fn): """No-op. TODO(b/74620627): Remove this.""" return fn def with_c_shapes(cls): """No-op. 
TODO(b/74620627): Remove this.""" return cls def enable_control_flow_v2(fn): """Decorator for enabling CondV2 and WhileV2 on a test. Note this enables using CondV2 and WhileV2 after running the test class's setup/teardown methods. In addition to this, callers must import the while_v2 module in order to set the _while_v2 module in control_flow_ops. Args: fn: the function to be wrapped Returns: The wrapped function """ def wrapper(*args, **kwargs): enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2 control_flow_util.ENABLE_CONTROL_FLOW_V2 = True try: return fn(*args, **kwargs) finally: control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old return wrapper def with_control_flow_v2(cls): """Adds methods that call original methods with WhileV2 and CondV2 enabled. Note this enables CondV2 and WhileV2 in new methods after running the test class's setup method. In addition to this, callers must import the while_v2 module in order to set the _while_v2 module in control_flow_ops. If a test function has _disable_control_flow_v2 attr set to True (using the @disable_control_flow_v2 decorator), the v2 function is not generated for it. Example: @test_util.with_control_flow_v2 class ControlFlowTest(test.TestCase): def testEnabledForV2(self): ... @test_util.disable_control_flow_v2("b/xyzabc") def testDisabledForV2(self): ... Generated class: class ControlFlowTest(test.TestCase): def testEnabledForV2(self): ... def testEnabledForV2WithControlFlowV2(self): // Enable V2 flags. testEnabledForV2(self) // Restore V2 flags. def testDisabledForV2(self): ... Args: cls: class to decorate Returns: cls with new test methods added """ if control_flow_util.ENABLE_CONTROL_FLOW_V2: return cls for name, value in cls.__dict__.copy().items(): if (callable(value) and name.startswith(unittest.TestLoader.testMethodPrefix) and not getattr(value, "_disable_control_flow_v2", False)): setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value)) return cls def disable_control_flow_v2(unused_msg): """Decorator for a function in a with_control_flow_v2 enabled test class. Blocks the function from being run with v2 control flow ops. Args: unused_msg: Reason for disabling. Returns: The wrapped function with _disable_control_flow_v2 attr set to True. """ def wrapper(func): func._disable_control_flow_v2 = True return func return wrapper def assert_no_new_pyobjects_executing_eagerly(f): """Decorator for asserting that no new Python objects persist after a test. Runs the test multiple times executing eagerly, first as a warmup and then to let objects accumulate. The warmup helps ignore caches which do not grow as the test is run repeatedly. Useful for checking that there are no missing Py_DECREFs in the C exercised by a bit of Python. """ def decorator(self, **kwargs): """Warms up, gets an object count, runs the test, checks for new objects.""" with context.eager_mode(): gc.disable() # Run the test 2 times as warmup, in an attempt to fill up caches, which # should not grow as the test is run repeatedly below. # # TODO(b/117156879): Running warmup twice is black magic; we have seen # tests that fail with 1 warmup run, and pass with 2, on various versions # of python2.7.x. 
for _ in range(2): f(self, **kwargs) gc.collect() previous_count = len(gc.get_objects()) if ops.has_default_graph(): collection_sizes_before = { collection: len(ops.get_collection(collection)) for collection in ops.get_default_graph().collections } for _ in range(3): f(self, **kwargs) # Note that gc.get_objects misses anything that isn't subject to garbage # collection (C types). Collections are a common source of leaks, so we # test for collection sizes explicitly. if ops.has_default_graph(): for collection_key in ops.get_default_graph().collections: collection = ops.get_collection(collection_key) size_before = collection_sizes_before.get(collection_key, 0) if len(collection) > size_before: raise AssertionError( ("Collection %s increased in size from " "%d to %d (current items %s).") % (collection_key, size_before, len(collection), collection)) # Make sure our collection checks don't show up as leaked memory by # removing references to temporary variables. del collection del collection_key del size_before del collection_sizes_before gc.collect() # There should be no new Python objects hanging around. new_count = len(gc.get_objects()) # In some cases (specifacally on MacOS), new_count is somehow # smaller than previous_count. # Using plain assert because not all classes using this decorator # have assertLessEqual assert new_count <= previous_count, ( "new_count(%d) is not less than or equal to previous_count(%d)" % (new_count, previous_count)) gc.enable() return decorator def assert_no_new_tensors(f): """Decorator for asserting that no new Tensors persist after a test. Mainly useful for checking that code using the Python C API has correctly manipulated reference counts. Clears the caches that it knows about, runs the garbage collector, then checks that there are no Tensor or Tensor-like objects still around. This includes Tensors to which something still has a reference (e.g. from missing Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one of the objects has __del__ defined). Args: f: The test case to run. Returns: The decorated test case. """ def decorator(self, **kwargs): """Finds existing Tensors, runs the test, checks for new Tensors.""" def _is_tensorflow_object(obj): try: return isinstance(obj, (ops.Tensor, variables.Variable, tensor_shape.Dimension, tensor_shape.TensorShape)) except ReferenceError: # If the object no longer exists, we don't care about it. return False tensors_before = set( id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj)) outside_executed_eagerly = context.executing_eagerly() # Run the test in a new graph so that collections get cleared when it's # done, but inherit the graph key so optimizers behave. outside_graph_key = ops.get_default_graph()._graph_key with ops.Graph().as_default(): ops.get_default_graph()._graph_key = outside_graph_key if outside_executed_eagerly: with context.eager_mode(): result = f(self, **kwargs) else: result = f(self, **kwargs) # Make an effort to clear caches, which would otherwise look like leaked # Tensors. 
context.context()._clear_caches() # pylint: disable=protected-access gc.collect() tensors_after = [ obj for obj in gc.get_objects() if _is_tensorflow_object(obj) and id(obj) not in tensors_before ] if tensors_after: raise AssertionError(("%d Tensors not deallocated after test: %s" % ( len(tensors_after), str(tensors_after), ))) return result return decorator def _find_reference_cycle(objects, idx): def get_ignore_reason(obj, blacklist): """Tests whether an object should be omitted from the dependency graph.""" if len(blacklist) > 100: return "<depth limit>" if tf_inspect.isframe(obj): if "test_util.py" in tf_inspect.getframeinfo(obj)[0]: return "<test code>" for b in blacklist: if b is obj: return "<test code>" if obj is blacklist: return "<test code>" return None # Note: this function is meant to help with diagnostics. Its output is purely # a human-readable representation, so you may freely modify it to suit your # needs. def describe(obj, blacklist, leaves_only=False): """Returns a custom human-readable summary of obj. Args: obj: the value to describe. blacklist: same as blacklist in get_ignore_reason. leaves_only: boolean flag used when calling describe recursively. Useful for summarizing collections. """ if get_ignore_reason(obj, blacklist): return "{}{}".format(get_ignore_reason(obj, blacklist), type(obj)) if tf_inspect.isframe(obj): return "frame: {}".format(tf_inspect.getframeinfo(obj)) elif tf_inspect.ismodule(obj): return "module: {}".format(obj.__name__) else: if leaves_only: return "{}, {}".format(type(obj), id(obj)) elif isinstance(obj, list): return "list({}): {}".format( id(obj), [describe(e, blacklist, leaves_only=True) for e in obj]) elif isinstance(obj, tuple): return "tuple({}): {}".format( id(obj), [describe(e, blacklist, leaves_only=True) for e in obj]) elif isinstance(obj, dict): return "dict({}): {} keys".format(id(obj), len(obj.keys())) elif tf_inspect.isfunction(obj): return "function({}) {}; globals ID: {}".format( id(obj), obj.__name__, id(obj.__globals__)) else: return "{}, {}".format(type(obj), id(obj)) def build_ref_graph(obj, graph, reprs, blacklist): """Builds a reference graph as <referrer> -> <list of refferents>. Args: obj: The object to start from. The graph will be built by recursively adding its referrers. graph: Dict holding the graph to be built. To avoid creating extra references, the graph holds object IDs rather than actual objects. reprs: Auxiliary structure that maps object IDs to their human-readable description. blacklist: List of objects to ignore. 
""" referrers = gc.get_referrers(obj) blacklist = blacklist + (referrers,) obj_id = id(obj) for r in referrers: if get_ignore_reason(r, blacklist) is None: r_id = id(r) if r_id not in graph: graph[r_id] = [] if obj_id not in graph[r_id]: graph[r_id].append(obj_id) build_ref_graph(r, graph, reprs, blacklist) reprs[r_id] = describe(r, blacklist) def find_cycle(el, graph, reprs, path): """Finds and prints a single cycle in the dependency graph.""" if el not in graph: return for r in graph[el]: if r in path: logging.error("Reference cycle sample:") for p in path + (r,): logging.error(reprs.get(p, "unknown object " + str(p))) return True else: if find_cycle(r, graph, reprs, path + (r,)): return True return False obj = objects[idx] graph = {} # referrer ID -> object ID reprs = {} # object ID -> description build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason, describe, build_ref_graph, find_cycle)) for k in graph: if find_cycle(k, graph, reprs, ()): return True return False def assert_no_garbage_created(f): """Test method decorator to assert that no garbage has been created. Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters cannot be un-set (i.e. will disable garbage collection for any other unit tests in the same file/shard). Args: f: The function to decorate. Returns: The decorated function. """ def decorator(self, **kwargs): """Sets DEBUG_SAVEALL, runs the test, and checks for new garbage.""" # Force-load `distribution_strategy_context` to prevent GC at # test time when using eager. Remove once b/117329403 is resolved. tape.distribution_strategy_context.get_strategy() gc.disable() previous_debug_flags = gc.get_debug() gc.set_debug(gc.DEBUG_SAVEALL) gc.collect() previous_garbage = len(gc.garbage) result = f(self, **kwargs) gc.collect() new_garbage = len(gc.garbage) if new_garbage > previous_garbage: logging.error( "The decorated test created work for Python's garbage collector, " "likely due to a reference cycle. New objects in cycle(s):") for i, obj in enumerate(gc.garbage[previous_garbage:]): try: logging.error("Object %d of %d", i, len(gc.garbage) - previous_garbage) def _safe_object_str(obj): return "<%s %d>" % (obj.__class__.__name__, id(obj)) logging.error(" Object type: %s", _safe_object_str(obj)) logging.error( " Referrer types: %s", ", ".join( [_safe_object_str(ref) for ref in gc.get_referrers(obj)])) logging.error( " Referent types: %s", ", ".join( [_safe_object_str(ref) for ref in gc.get_referents(obj)])) logging.error(" Object attribute names: %s", dir(obj)) logging.error(" Object __str__:") logging.error(obj) logging.error(" Object __repr__:") logging.error(repr(obj)) except Exception: # pylint: disable=broad-except logging.error("(Exception while printing object)") # When garbage is created, this call can help identify reference cycles, # which are typically the cause of such garbage. if new_garbage > previous_garbage: for i in range(previous_garbage, new_garbage): if _find_reference_cycle(gc.garbage, i): break # This will fail if any garbage has been created, typically because of a # reference cycle. self.assertEqual(previous_garbage, new_garbage) # TODO(allenl): Figure out why this debug flag reset doesn't work. It would # be nice to be able to decorate arbitrary tests in a large test suite and # not hold on to every object in other tests. gc.set_debug(previous_debug_flags) gc.enable() return result return decorator def _combine_named_parameters(**kwargs): """Generate combinations based on its keyword arguments. 
Two sets of returned combinations can be concatenated using +. Their product can be computed using `times()`. Args: **kwargs: keyword arguments of form `option=[possibilities, ...]` or `option=the_only_possibility`. Returns: a list of dictionaries for each combination. Keys in the dictionaries are the keyword argument names. Each key has one value - one of the corresponding keyword argument values. """ if not kwargs: return [OrderedDict()] sort_by_key = lambda k: k[0][0] kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key)) first = list(kwargs.items())[0] rest = dict(list(kwargs.items())[1:]) rest_combined = _combine_named_parameters(**rest) key = first[0] values = first[1] if not isinstance(values, list): values = [values] combinations = [ OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key)) for v in values for combined in rest_combined ] return combinations def generate_combinations_with_testcase_name(**kwargs): """Generate combinations based on its keyword arguments using combine(). This function calls combine() and appends a testcase name to the list of dictionaries returned. The 'testcase_name' key is a required for named parameterized tests. Args: **kwargs: keyword arguments of form `option=[possibilities, ...]` or `option=the_only_possibility`. Returns: a list of dictionaries for each combination. Keys in the dictionaries are the keyword argument names. Each key has one value - one of the corresponding keyword argument values. """ combinations = _combine_named_parameters(**kwargs) named_combinations = [] for combination in combinations: assert isinstance(combination, OrderedDict) name = "".join([ "_{}_{}".format("".join(filter(str.isalnum, key)), "".join( filter(str.isalnum, str(value)))) for key, value in combination.items() ]) named_combinations.append( OrderedDict( list(combination.items()) + [("testcase_name", "_test{}".format(name))])) return named_combinations def run_all_in_graph_and_eager_modes(cls): """Execute all test methods in the given class with and without eager.""" base_decorator = run_in_graph_and_eager_modes for name, value in cls.__dict__.copy().items(): if callable(value) and name.startswith( unittest.TestLoader.testMethodPrefix) and not ( name.startswith("testSkipEager") or name.startswith("test_skip_eager") or name == "test_session"): setattr(cls, name, base_decorator(value)) return cls def build_as_function_and_v1_graph(func=None): """Run a test case in v1 graph mode and inside tf.function in eager mode. WARNING: This decorator can only be used in test cases that statically checks generated graph. Attempting to evaluate graph or function results via. session.run() or self.evaluate() will fail. WARNING: This decorator can only be used for test cases that inherit from absl.testing.parameterized.TestCase. Args: func: Test case function to be decorated. Returns: Decorated test case function. """ def decorator(f): if tf_inspect.isclass(f): raise ValueError( "`run_in_graph_mode_and_function` only supports test methods.") @parameterized.named_parameters(("_v1_graph", "v1_graph"), ("_function", "function")) @functools.wraps(f) def decorated(self, run_mode, *args, **kwargs): if run_mode == "v1_graph": with ops.Graph().as_default(): f(self, *args, **kwargs) elif run_mode == "function": @def_function.function def function_in_eager(): f(self, *args, **kwargs) # Create a new graph for the eagerly executed version of this test for # better isolation. 
        graph_for_eager_test = ops.Graph()
        with graph_for_eager_test.as_default(), context.eager_mode():
          function_in_eager()
        ops.dismantle_graph(graph_for_eager_test)
      else:
        raise ValueError("Unknown run mode %s" % run_mode)

    return decorated

  if func is not None:
    return decorator(func)

  return decorator


def run_in_graph_and_eager_modes(func=None,
                                 config=None,
                                 use_gpu=True,
                                 reset_test=True,
                                 assert_no_eager_garbage=False):
  """Execute the decorated test with and without enabling eager execution.

  This function returns a decorator intended to be applied to test methods in
  a `tf.test.TestCase` class. Doing so will cause the contents of the test
  method to be executed twice - once normally, and once with eager execution
  enabled. This allows unittests to confirm the equivalence between eager
  and graph execution (see `tf.enable_eager_execution`).

  For example, consider the following unittest:

  ```python
  class MyTests(tf.test.TestCase):

    @run_in_graph_and_eager_modes
    def test_foo(self):
      x = tf.constant([1, 2])
      y = tf.constant([3, 4])
      z = tf.add(x, y)
      self.assertAllEqual([4, 6], self.evaluate(z))

  if __name__ == "__main__":
    tf.test.main()
  ```

  This test validates that `tf.add()` has the same behavior when computed with
  eager execution enabled as it does when constructing a TensorFlow graph and
  executing the `z` tensor in a session.

  `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
  `run_in_graph_and_eager_modes` are available decorators for different
  v1/v2/eager/graph combinations.

  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.
    config: An optional config_pb2.ConfigProto to use to configure the session
      when executing graphs.
    use_gpu: If True, attempt to run as many operations as possible on GPU.
    reset_test: If True, tearDown and setUp the test case between the two
      executions of the test (once with and once without eager execution).
    assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
      collector and asserts that no extra garbage has been created when running
      the test with eager execution enabled. This will fail if there are
      reference cycles (e.g. a = []; a.append(a)). Off by default because some
      tests may create garbage for legitimate reasons (e.g. they define a class
      which inherits from `object`), and because DEBUG_SAVEALL is sticky in
      some Python interpreters (meaning that tests which rely on objects being
      collected elsewhere in the unit test file will not work). Additionally,
      checks that nothing still has a reference to Tensors that the test
      allocated.

  Returns:
    Returns a decorator that will run the decorated test method twice:
    once by constructing and executing a graph in a session and once with
    eager execution enabled.
  """

  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError(
          "`run_in_graph_and_eager_modes` only supports test methods. "
          "Did you mean to use `run_all_in_graph_and_eager_modes`?")

    def decorated(self, *args, **kwargs):
      try:
        with context.graph_mode():
          with self.test_session(use_gpu=use_gpu, config=config):
            f(self, *args, **kwargs)
      except unittest.case.SkipTest:
        pass

      def run_eagerly(self, **kwargs):
        if not use_gpu:
          with ops.device("/device:CPU:0"):
            f(self, *args, **kwargs)
        else:
          f(self, *args, **kwargs)

      if assert_no_eager_garbage:
        ops.reset_default_graph()
        run_eagerly = assert_no_new_tensors(
            assert_no_garbage_created(run_eagerly))

      if reset_test:
        # This decorator runs the wrapped test twice.
# Reset the test environment between runs. self.tearDown() self._tempdir = None # Create a new graph for the eagerly executed version of this test for # better isolation. graph_for_eager_test = ops.Graph() with graph_for_eager_test.as_default(), context.eager_mode(): if reset_test: self.setUp() run_eagerly(self, **kwargs) ops.dismantle_graph(graph_for_eager_test) return decorated if func is not None: return decorator(func) return decorator def py_func_if_in_function(f): def decorated(*args, **kwds): if not ops.get_default_graph()._building_function: return f(*args, **kwds) tensor_args = [] tensor_indices = [] for i, arg in enumerate(args): if isinstance(arg, (ops.Tensor, variables.Variable)): tensor_args.append(arg) tensor_indices.append(i) def inner_f(*inner_tensor_args): my_args = list(args) for i, n in zip(tensor_indices, inner_tensor_args): my_args[i] = n return f(*my_args, **kwds) return script_ops.py_func(inner_f, tensor_args, []) return tf_decorator.make_decorator(f, decorated) def also_run_as_tf_function(f): """Runs the decorated test twice--once as is, once inside a tf.function. This allows you to run a test both in eager execution and inside a tf.function, exercising the two execution modes supported in tf 2.0. The test assertions are automatically done inside tf.py_funcs, and tf.function ensures that they run in the proper order and with the proper side effects. Currently variable creation is not supported in tests annotated with this decorator since it's tricky to ensure the variable doesn't get repeatedly created when retracing the tf.function. Args: f: the test method to be decorated Returns: The decorated test method, which will run both in eager and inside a tf.function. """ def decorated(*args, **kwds): def bound_f(): f(*args, **kwds) with context.eager_mode(): # Running in eager mode bound_f() # Running as TF function # TODO(b/121143941): Remove the autograph override. def_function.function(bound_f, autograph=False)() return decorated def deprecated_graph_mode_only(func=None): """Execute the decorated test in graph mode. This function returns a decorator intended to be applied to tests that are not compatible with eager mode. When this decorator is applied, the test body will be run in an environment where API calls construct graphs instead of executing eagerly. `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and `run_in_graph_and_eager_modes` are available decorators for different v1/v2/eager/graph combinations. Args: func: function to be annotated. If `func` is None, this method returns a decorator the can be applied to a function. If `func` is not None this returns the decorator applied to `func`. Returns: Returns a decorator that will run the decorated test method in graph mode. """ def decorator(f): if tf_inspect.isclass(f): setup = f.__dict__.get("setUp") if setup is not None: setattr(f, "setUp", decorator(setup)) for name, value in f.__dict__.copy().items(): if (callable(value) and name.startswith(unittest.TestLoader.testMethodPrefix)): setattr(f, name, decorator(value)) return f def decorated(self, *args, **kwargs): if tf2.enabled(): with context.graph_mode(): return f(self, *args, **kwargs) else: return f(self, *args, **kwargs) return decorated if func is not None: return decorator(func) return decorator run_deprecated_v1 = deprecated_graph_mode_only def run_v1_only(reason, func=None): """Execute the decorated test only if running in v1 mode. This function is intended to be applied to tests that exercise v1 only functionality. 
  If the test is run in v2 mode it will simply be skipped.

  `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
  `run_in_graph_and_eager_modes` are available decorators for different
  v1/v2/eager/graph combinations.

  Args:
    reason: string giving a reason for limiting the test to v1 only.
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.

  Returns:
    Returns a decorator that will conditionally skip the decorated test method.
  """
  if not isinstance(reason, str):
    raise ValueError("'reason' should be string, got {}".format(type(reason)))

  def decorator(f):
    if tf_inspect.isclass(f):
      # To skip an entire test suite class, we only decorate the setUp method
      # to skip all tests. There are cases when setUp is not defined (not
      # overridden in subclasses of TestCase, so not available in f.__dict__
      # below). For those cases, we walk the method resolution order list and
      # pick the first setUp method we find (usually this should be the one in
      # the parent class since that's the TestCase class).
      for cls in type.mro(f):
        setup = cls.__dict__.get("setUp")
        if setup is not None:
          setattr(f, "setUp", decorator(setup))
          break

      return f
    else:
      # If f is just a function, just create a decorator for it and return it
      def decorated(self, *args, **kwargs):
        if tf2.enabled():
          self.skipTest(reason)

        return f(self, *args, **kwargs)

      return decorated

  if func is not None:
    return decorator(func)

  return decorator


def run_v2_only(func=None):
  """Execute the decorated test only if running in v2 mode.

  This function is intended to be applied to tests that exercise v2 only
  functionality. If the test is run in v1 mode it will simply be skipped.

  `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
  `run_in_graph_and_eager_modes` are available decorators for different
  v1/v2/eager/graph combinations.

  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.

  Returns:
    Returns a decorator that will conditionally skip the decorated test method.
  """

  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError("`run_v2_only` only supports test methods.")

    def decorated(self, *args, **kwargs):
      if not tf2.enabled():
        self.skipTest("Test is only compatible with v2")

      return f(self, *args, **kwargs)

    return decorated

  if func is not None:
    return decorator(func)

  return decorator


def run_gpu_only(func=None):
  """Execute the decorated test only if a GPU is available.

  This function is intended to be applied to tests that require the presence
  of a GPU. If a GPU is absent, it will simply be skipped.

  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.

  Returns:
    Returns a decorator that will conditionally skip the decorated test method.
  """

  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError("`run_gpu_only` only supports test methods.")

    def decorated(self, *args, **kwargs):
      if not is_gpu_available():
        self.skipTest("Test requires GPU")

      return f(self, *args, **kwargs)

    return decorated

  if func is not None:
    return decorator(func)

  return decorator


def run_cuda_only(func=None):
  """Execute the decorated test only if a GPU is available.

  This function is intended to be applied to tests that require the presence
  of a CUDA GPU.
If a CUDA GPU is absent, it will simply be skipped. Args: func: function to be annotated. If `func` is None, this method returns a decorator the can be applied to a function. If `func` is not None this returns the decorator applied to `func`. Returns: Returns a decorator that will conditionally skip the decorated test method. """ def decorator(f): if tf_inspect.isclass(f): raise ValueError("`run_cuda_only` only supports test methods.") def decorated(self, *args, **kwargs): if not is_gpu_available(cuda_only=True): self.skipTest("Test requires CUDA GPU") return f(self, *args, **kwargs) return decorated if func is not None: return decorator(func) return decorator @tf_export("test.is_gpu_available") def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None): """Returns whether TensorFlow can access a GPU. Args: cuda_only: limit the search to CUDA gpus. min_cuda_compute_capability: a (major,minor) pair that indicates the minimum CUDA compute capability required, or None if no requirement. Returns: True if a gpu device of the requested kind is available. """ def compute_capability_from_device_desc(device_desc): # TODO(jingyue): The device description generator has to be in sync with # this file. Another option is to put compute capability in # DeviceAttributes, but I avoided that to keep DeviceAttributes # target-independent. Reconsider this option when we have more things like # this to keep in sync. # LINT.IfChange match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc) # LINT.ThenChange(//tensorflow/core/\ # common_runtime/gpu/gpu_device.cc) if not match: return 0, 0 return int(match.group(1)), int(match.group(2)) try: for local_device in device_lib.list_local_devices(): if local_device.device_type == "GPU": if (min_cuda_compute_capability is None or compute_capability_from_device_desc( local_device.physical_device_desc) >= min_cuda_compute_capability): return True if local_device.device_type == "SYCL" and not cuda_only: return True return False except errors_impl.NotFoundError as e: if not all(x in str(e) for x in ["CUDA", "not find"]): raise e else: logging.error(str(e)) return False @contextlib.contextmanager def device(use_gpu): """Uses gpu when requested and available.""" if use_gpu and is_gpu_available(): dev = "/device:GPU:0" else: dev = "/device:CPU:0" with ops.device(dev): yield @contextlib.contextmanager def use_gpu(): """Uses gpu when requested and available.""" with device(use_gpu=True): yield @contextlib.contextmanager def force_gpu(): """Force the gpu to be used.""" with ops.device("/device:GPU:0"): yield @contextlib.contextmanager def force_cpu(): """Force the cpu to be used.""" with ops.device("/device:CPU:0"): yield class CapturedWrites(object): """A utility class to load the captured writes made to a stream.""" def __init__(self, capture_location): self.capture_location = capture_location def contents(self): """Get the captured writes as a single string.""" with open(self.capture_location) as tmp_file: output_data = "".join(tmp_file.readlines()) return output_data class FakeEagerSession(object): """Fake session so tests that conditionally use placeholders can use eager. There are a number of tests that conditionally use placeholders for shape inference. The pattern is demonstrated here: ```python with self.cached_session() as sess: if static_shape: y = math_ops.matmul(x, ...) feed_dict = {} else: x_ph = array_ops.placeholder(...) y = math_ops.matmul(x_ph, ...) 
feed_dict = {x_ph: x} val = sess.run(y, feed_dict=feed_dict) ``` Since the feed_dict is empty when not using placeholders we should be able to call self.evaluate(), however this requires rewriting the test case. This class should be considered a stop-gap solution to get tests running with eager with minimal changes to the actual test. """ def __init__(self, test_case): self._test_case = test_case def run(self, fetches, *args, **kwargs): """Evalaute `fetches`. Fail if additional args are specified. Args: fetches: A Tensor or a nested list/tuple of Tensors. *args: Positional arguments **kwargs: Keyword arguments Raises: RuntimeError: If args or kwargs are specified. Returns: Tensors as numpy values. """ feed_dict = kwargs.pop("feed_dict", {}) if feed_dict: raise RuntimeError( "feed_dict is not supported when eager execution is enabled " "(in this case, sess.run(t) is shorthand for t.numpy()") if args or kwargs: raise RuntimeError( "Optional args are not supported when eager execution is enabled " "(in this case, sess.run(t) is shorthand for t.numpy()") return self._test_case.evaluate(fetches) class ErrorLoggingSession(session.Session): """Wrapper around a Session that logs errors in run().""" def run(self, *args, **kwargs): try: return super(ErrorLoggingSession, self).run(*args, **kwargs) except Exception as e: # pylint: disable=broad-except # Note: disable the logging for OutOfRangeError, which makes the output # of tf.data tests hard to read, because OutOfRangeError is used as the # signal completion if not isinstance(e, errors.OutOfRangeError): logging.error(str(e)) raise def use_deterministic_cudnn(func): """Disable autotuning during the call to this function. Some tests want to base assertions on a graph being isomorphic with a copy. To ensure this, this decorator disables autotuning. Args: func: Function to run with CUDNN autotuning turned off. Returns: Decorated function. """ def decorator(f): def decorated(self, *args, **kwargs): original_var = os.environ.get("TF_CUDNN_DETERMINISTIC", "") os.environ["TF_CUDNN_DETERMINISTIC"] = "true" result = f(self, *args, **kwargs) os.environ["TF_CUDNN_DETERMINISTIC"] = original_var return result return decorated if func is not None: return decorator(func) return decorator # The description is just for documentation purposes. def disable_xla(description): def disable_xla_impl(func): """Execute the test method only if xla is not enabled.""" def decorator(func): def decorated(self, *args, **kwargs): if is_xla_enabled(): return else: return func(self, *args, **kwargs) return decorated if func is not None: return decorator(func) return decorator return disable_xla_impl # The description is just for documentation purposes. def disable_all_xla(description): def disable_all_impl(cls): """Execute all test methods in this class only if xla is not enabled.""" base_decorator = disable_xla for name in dir(cls): value = getattr(cls, name) if callable(value) and name.startswith( "test") and not name == "test_session": setattr(cls, name, base_decorator(description)(value)) return cls return disable_all_impl class EagerSessionWarner(object): def __getattr__(self, attr): raise AttributeError( "Trying to access properties or call methods on the result of " "self.session(), self.cached_session(), etc while eager execution " "is enabled. 
If you're porting this test case to TF 2.0, either " "adapt the test to work with eager execution or insert a call to " "tf.disable_eager_execution() in the main() function of this test " "file.") @tf_export("test.TestCase") class TensorFlowTestCase(googletest.TestCase): """Base class for tests that need to test TensorFlow.""" def __init__(self, methodName="runTest"): # pylint: disable=invalid-name super(TensorFlowTestCase, self).__init__(methodName) if is_xla_enabled(): os.putenv( "TF_XLA_FLAGS", "--tf_xla_auto_jit=2 --tf_xla_min_cluster_size=1 " "--tf_xla_enable_lazy_compilation=false " + os.getenv("TF_XLA_FLAGS", "")) self._threads = [] self._tempdir = None self._cached_session = None def setUp(self): self._ClearCachedSession() random.seed(random_seed.DEFAULT_GRAPH_SEED) np.random.seed(random_seed.DEFAULT_GRAPH_SEED) # Note: The following line is necessary because some test methods may error # out from within nested graph contexts (e.g., via assertRaises and # assertRaisesRegexp), which may leave ops._default_graph_stack non-empty # under certain versions of Python. That would cause # ops.reset_default_graph() to throw an exception if the stack were not # cleared first. ops._default_graph_stack.reset() # pylint: disable=protected-access ops.reset_default_graph() random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED) # Avoiding calling setUp() for the poorly named test_session method. if self.id().endswith(".test_session"): self.skipTest("Not a test.") def tearDown(self): for thread in self._threads: thread.check_termination() self._ClearCachedSession() def _ClearCachedSession(self): if self._cached_session is not None: self._cached_session.close() self._cached_session = None def get_temp_dir(self): """Returns a unique temporary directory for the test to use. If you call this method multiple times during in a test, it will return the same folder. However, across different runs the directories will be different. This will ensure that across different runs tests will not be able to pollute each others environment. If you need multiple unique directories within a single test, you should use tempfile.mkdtemp as follows: tempfile.mkdtemp(dir=self.get_temp_dir()): Returns: string, the path to the unique temporary directory created for this test. """ if not self._tempdir: self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir()) return self._tempdir @contextlib.contextmanager def captureWritesToStream(self, stream): """A context manager that captures the writes to a given stream. This context manager captures all writes to a given stream inside of a `CapturedWrites` object. When this context manager is created, it yields the `CapturedWrites` object. The captured contents can be accessed by calling `.contents()` on the `CapturedWrites`. For this function to work, the stream must have a file descriptor that can be modified using `os.dup` and `os.dup2`, and the stream must support a `.flush()` method. The default python sys.stdout and sys.stderr are examples of this. Note that this does not work in Colab or Jupyter notebooks, because those use alternate stdout streams. Example: ```python class MyOperatorTest(test_util.TensorFlowTestCase): def testMyOperator(self): input = [1.0, 2.0, 3.0, 4.0, 5.0] with self.captureWritesToStream(sys.stdout) as captured: result = MyOperator(input).eval() self.assertStartsWith(captured.contents(), "This was printed.") ``` Args: stream: The stream whose writes should be captured. 
This stream must have a file descriptor, support writing via using that file descriptor, and must have a `.flush()` method. Yields: A `CapturedWrites` object that contains all writes to the specified stream made during this context. """ stream.flush() fd = stream.fileno() tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir()) tmp_file = open(tmp_file_path, "w") orig_fd = os.dup(fd) os.dup2(tmp_file.fileno(), fd) try: yield CapturedWrites(tmp_file_path) finally: tmp_file.close() os.dup2(orig_fd, fd) def _AssertProtoEquals(self, a, b, msg=None): """Asserts that a and b are the same proto. Uses ProtoEq() first, as it returns correct results for floating point attributes, and then use assertProtoEqual() in case of failure as it provides good error messages. Args: a: a proto. b: another proto. msg: Optional message to report on failure. """ if not compare.ProtoEq(a, b): compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg) def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None): """Asserts that message is same as parsed expected_message_ascii. Creates another prototype of message, reads the ascii message into it and then compares them using self._AssertProtoEqual(). Args: expected_message_maybe_ascii: proto message in original or ascii form. message: the message to validate. msg: Optional message to report on failure. """ msg = msg if msg else "" if isinstance(expected_message_maybe_ascii, type(message)): expected_message = expected_message_maybe_ascii self._AssertProtoEquals(expected_message, message) elif isinstance(expected_message_maybe_ascii, str): expected_message = type(message)() text_format.Merge( expected_message_maybe_ascii, expected_message, descriptor_pool=descriptor_pool.Default()) self._AssertProtoEquals(expected_message, message, msg=msg) else: assert False, ("Can't compare protos of type %s and %s. %s" % (type(expected_message_maybe_ascii), type(message), msg)) def assertProtoEqualsVersion( self, expected, actual, producer=versions.GRAPH_DEF_VERSION, min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER, msg=None): expected = "versions { producer: %d min_consumer: %d };\n%s" % ( producer, min_consumer, expected) self.assertProtoEquals(expected, actual, msg=msg) def assertStartsWith(self, actual, expected_start, msg=None): """Assert that actual.startswith(expected_start) is True. Args: actual: str expected_start: str msg: Optional message to report on failure. """ if not actual.startswith(expected_start): fail_msg = "%r does not start with %r" % (actual, expected_start) fail_msg += " : %r" % (msg) if msg else "" self.fail(fail_msg) def _eval_tensor(self, tensor): if tensor is None: return None elif callable(tensor): return self._eval_helper(tensor()) else: try: if sparse_tensor.is_sparse(tensor): return sparse_tensor.SparseTensorValue(tensor.indices.numpy(), tensor.values.numpy(), tensor.dense_shape.numpy()) elif isinstance(tensor, ops.IndexedSlices): return ops.IndexedSlicesValue(values=tensor.values.numpy(), indices=tensor.indices.numpy(), dense_shape=tensor.dense_shape.numpy()) return tensor.numpy() except AttributeError as e: six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e) def _eval_helper(self, tensors): if tensors is None: return None return nest.map_structure(self._eval_tensor, tensors) def evaluate(self, tensors): """Evaluates tensors and returns numpy values. Args: tensors: A Tensor or a nested list/tuple of Tensors. Returns: tensors numpy values. 
""" if context.executing_eagerly(): return self._eval_helper(tensors) else: sess = ops.get_default_session() if sess is None: with self.test_session() as sess: return sess.run(tensors) else: return sess.run(tensors) # pylint: disable=g-doc-return-or-yield @contextlib.contextmanager def session(self, graph=None, config=None, use_gpu=False, force_gpu=False): """Returns a TensorFlow Session for use in executing tests. Note that this will set this session and the graph as global defaults. Use the `use_gpu` and `force_gpu` options to control where ops are run. If `force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if `use_gpu` is True, TensorFlow tries to run as many ops on the GPU as possible. If both `force_gpu and `use_gpu` are False, all ops are pinned to the CPU. Example: ```python class MyOperatorTest(test_util.TensorFlowTestCase): def testMyOperator(self): with self.session(use_gpu=True): valid_input = [1.0, 2.0, 3.0, 4.0, 5.0] result = MyOperator(valid_input).eval() self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0] invalid_input = [-1.0, 2.0, 7.0] with self.assertRaisesOpError("negative input not supported"): MyOperator(invalid_input).eval() ``` Args: graph: Optional graph to use during the returned session. config: An optional config_pb2.ConfigProto to use to configure the session. use_gpu: If True, attempt to run as many ops as possible on GPU. force_gpu: If True, pin all ops to `/device:GPU:0`. Yields: A Session object that should be used as a context manager to surround the graph building and execution code in a test case. """ if context.executing_eagerly(): yield EagerSessionWarner() else: with self._create_session(graph, config, force_gpu) as sess: with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu): yield sess @contextlib.contextmanager def cached_session(self, graph=None, config=None, use_gpu=False, force_gpu=False): """Returns a TensorFlow Session for use in executing tests. This method behaves differently than self.session(): for performance reasons `cached_session` will by default reuse the same session within the same test. The session returned by this function will only be closed at the end of the test (in the TearDown function). Use the `use_gpu` and `force_gpu` options to control where ops are run. If `force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if `use_gpu` is True, TensorFlow tries to run as many ops on the GPU as possible. If both `force_gpu and `use_gpu` are False, all ops are pinned to the CPU. Example: ```python class MyOperatorTest(test_util.TensorFlowTestCase): def testMyOperator(self): with self.cached_session(use_gpu=True) as sess: valid_input = [1.0, 2.0, 3.0, 4.0, 5.0] result = MyOperator(valid_input).eval() self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0] invalid_input = [-1.0, 2.0, 7.0] with self.assertRaisesOpError("negative input not supported"): MyOperator(invalid_input).eval() ``` Args: graph: Optional graph to use during the returned session. config: An optional config_pb2.ConfigProto to use to configure the session. use_gpu: If True, attempt to run as many ops as possible on GPU. force_gpu: If True, pin all ops to `/device:GPU:0`. Yields: A Session object that should be used as a context manager to surround the graph building and execution code in a test case. 
""" if context.executing_eagerly(): yield FakeEagerSession(self) else: sess = self._get_cached_session( graph, config, force_gpu, crash_if_inconsistent_args=True) with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu) as cached: yield cached @contextlib.contextmanager @deprecation.deprecated(None, "Use `self.session()` or " "`self.cached_session()` instead.") def test_session(self, graph=None, config=None, use_gpu=False, force_gpu=False): """Use cached_session instead.""" if self.id().endswith(".test_session"): self.skipTest("Not a test.") if context.executing_eagerly(): yield None else: if graph is None: sess = self._get_cached_session( graph, config, force_gpu, crash_if_inconsistent_args=False) with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu) as cached: yield cached else: with self.session(graph, config, use_gpu, force_gpu) as sess: yield sess # pylint: enable=g-doc-return-or-yield class _CheckedThread(object): """A wrapper class for Thread that asserts successful completion. This class should be created using the TensorFlowTestCase.checkedThread() method. """ def __init__(self, testcase, target, args=None, kwargs=None): """Constructs a new instance of _CheckedThread. Args: testcase: The TensorFlowTestCase for which this thread is being created. target: A callable object representing the code to be executed in the thread. args: A tuple of positional arguments that will be passed to target. kwargs: A dictionary of keyword arguments that will be passed to target. """ self._testcase = testcase self._target = target self._args = () if args is None else args self._kwargs = {} if kwargs is None else kwargs self._thread = threading.Thread(target=self._protected_run) self._exception = None self._is_thread_joined = False def _protected_run(self): """Target for the wrapper thread. Sets self._exception on failure.""" try: self._target(*self._args, **self._kwargs) except Exception as e: # pylint: disable=broad-except self._exception = e def start(self): """Starts the thread's activity. This must be called at most once per _CheckedThread object. It arranges for the object's target to be invoked in a separate thread of control. """ self._thread.start() def join(self): """Blocks until the thread terminates. Raises: self._testcase.failureException: If the thread terminates with due to an exception. """ self._is_thread_joined = True self._thread.join() if self._exception is not None: self._testcase.fail("Error in checkedThread: %s" % str(self._exception)) def is_alive(self): """Returns whether the thread is alive. This method returns True just before the run() method starts until just after the run() method terminates. Returns: True if the thread is alive, otherwise False. """ return self._thread.is_alive() def check_termination(self): """Returns whether the checked thread was properly used and did terminate. Every checked thread should be "join"ed after starting, and before the test tears down. If it is not joined, it is possible the thread will hang and cause flaky failures in tests. Raises: self._testcase.failureException: If check_termination was called before thread was joined. RuntimeError: If the thread is not terminated. This means thread was not joined with the main thread. 
""" if self._is_thread_joined: if self.is_alive(): raise RuntimeError( "Thread was not joined with main thread, and is still running " "when the test finished.") else: self._testcase.fail("A checked thread was not joined.") def checkedThread(self, target, args=None, kwargs=None): """Returns a Thread wrapper that asserts 'target' completes successfully. This method should be used to create all threads in test cases, as otherwise there is a risk that a thread will silently fail, and/or assertions made in the thread will not be respected. Args: target: A callable object to be executed in the thread. args: The argument tuple for the target invocation. Defaults to (). kwargs: A dictionary of keyword arguments for the target invocation. Defaults to {}. Returns: A wrapper for threading.Thread that supports start() and join() methods. """ ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs) self._threads.append(ret) return ret # pylint: enable=invalid-name @py_func_if_in_function def assertNear(self, f1, f2, err, msg=None): """Asserts that two floats are near each other. Checks that |f1 - f2| < err and asserts a test failure if not. Args: f1: A float value. f2: A float value. err: A float value. msg: An optional string message to append to the failure message. """ # f1 == f2 is needed here as we might have: f1, f2 = inf, inf self.assertTrue( f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" % (f1, f2, err, " (%s)" % msg if msg is not None else "")) @py_func_if_in_function def assertArrayNear(self, farray1, farray2, err, msg=None): """Asserts that two float arrays are near each other. Checks that for all elements of farray1 and farray2 |f1 - f2| < err. Asserts a test failure if not. Args: farray1: a list of float values. farray2: a list of float values. err: a float value. msg: Optional message to report on failure. """ self.assertEqual(len(farray1), len(farray2), msg=msg) for f1, f2 in zip(farray1, farray2): self.assertNear(float(f1), float(f2), err, msg=msg) def _NDArrayNear(self, ndarray1, ndarray2, err): return np.linalg.norm(ndarray1 - ndarray2) < err @py_func_if_in_function def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None): """Asserts that two numpy arrays have near values. Args: ndarray1: a numpy ndarray. ndarray2: a numpy ndarray. err: a float. The maximum absolute difference allowed. msg: Optional message to report on failure. """ self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg) def _GetNdArray(self, a): # If a is a tensor then convert it to ndarray if isinstance(a, ops.Tensor): if isinstance(a, ops._EagerTensorBase): a = a.numpy() else: a = self.evaluate(a) if not isinstance(a, np.ndarray): return np.array(a) return a def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None): a = self._GetNdArray(a) b = self._GetNdArray(b) # When the array rank is small, print its contents. Numpy array printing is # implemented using inefficient recursion so prints can cause tests to # time out. if a.shape != b.shape and (b.ndim <= 3 or b.size < 500): shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents " "%s.") % (a.shape, b.shape, b) else: shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape, b.shape) self.assertEqual(a.shape, b.shape, shape_mismatch_msg) msgs = [msg] if not np.allclose(a, b, rtol=rtol, atol=atol): # Adds more details to np.testing.assert_allclose. 
# # NOTE: numpy.allclose (and numpy.testing.assert_allclose) # checks whether two arrays are element-wise equal within a # tolerance. The relative difference (rtol * abs(b)) and the # absolute difference atol are added together to compare against # the absolute difference between a and b. Here, we want to # tell user which elements violate such conditions. cond = np.logical_or( np.abs(a - b) > atol + rtol * np.abs(b), np.isnan(a) != np.isnan(b)) if a.ndim: x = a[np.where(cond)] y = b[np.where(cond)] msgs.append("not close where = {}".format(np.where(cond))) else: # np.where is broken for scalars x, y = a, b msgs.append("not close lhs = {}".format(x)) msgs.append("not close rhs = {}".format(y)) msgs.append("not close dif = {}".format(np.abs(x - y))) msgs.append("not close tol = {}".format(atol + rtol * np.abs(y))) msgs.append("dtype = {}, shape = {}".format(a.dtype, a.shape)) # TODO(xpan): There seems to be a bug: # tensorflow/compiler/tests:binary_ops_test pass with float32 # nan even though the equal_nan is False by default internally. np.testing.assert_allclose( a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True) def _assertAllCloseRecursive(self, a, b, rtol=1e-6, atol=1e-6, path=None, msg=None): path = path or [] path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "") msg = msg if msg else "" # Check if a and/or b are namedtuples. if hasattr(a, "_asdict"): a = a._asdict() if hasattr(b, "_asdict"): b = b._asdict() a_is_dict = isinstance(a, collections.Mapping) if a_is_dict != isinstance(b, collections.Mapping): raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" % (path_str, path_str, msg)) if a_is_dict: self.assertItemsEqual( a.keys(), b.keys(), msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" % (path_str, a.keys(), path_str, b.keys(), msg)) for k in a: path.append(k) self._assertAllCloseRecursive( a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg) del path[-1] elif isinstance(a, (list, tuple)): # Try to directly compare a, b as ndarrays; if not work, then traverse # through the sequence, which is more expensive. try: a_as_ndarray = self._GetNdArray(a) b_as_ndarray = self._GetNdArray(b) self._assertArrayLikeAllClose( a_as_ndarray, b_as_ndarray, rtol=rtol, atol=atol, msg="Mismatched value: a%s is different from b%s. %s" % (path_str, path_str, msg)) except (ValueError, TypeError) as e: if len(a) != len(b): raise ValueError( "Mismatched length: a%s has %d items, but b%s has %d items. %s" % (path_str, len(a), path_str, len(b), msg)) for idx, (a_ele, b_ele) in enumerate(zip(a, b)): path.append(str(idx)) self._assertAllCloseRecursive( a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg) del path[-1] # a and b are ndarray like objects else: try: self._assertArrayLikeAllClose( a, b, rtol=rtol, atol=atol, msg=("Mismatched value: a%s is different from b%s. %s" % (path_str, path_str, msg))) except TypeError as e: msg = ("Error: a%s has %s, but b%s has %s. %s" % (path_str, type(a), path_str, type(b), msg)) e.args = ((e.args[0] + " : " + msg,) + e.args[1:]) raise @py_func_if_in_function def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None): """Asserts that two structures of numpy arrays or Tensors, have near values. `a` and `b` can be arbitrarily nested structures. A layer of a nested structure can be a `dict`, `namedtuple`, `tuple` or `list`. Args: a: The expected numpy `ndarray`, or anything that can be converted into a numpy `ndarray` (including Tensor), or any arbitrarily nested of structure of these. 
b: The actual numpy `ndarray`, or anything that can be converted into a numpy `ndarray` (including Tensor), or any arbitrarily nested of structure of these. rtol: relative tolerance. atol: absolute tolerance. msg: Optional message to report on failure. Raises: ValueError: if only one of `a[p]` and `b[p]` is a dict or `a[p]` and `b[p]` have different length, where `[p]` denotes a path to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and `[p] = [1]['d']`, then `a[p] = (6, 7)`. """ self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg) @py_func_if_in_function def assertAllCloseAccordingToType(self, a, b, rtol=1e-6, atol=1e-6, float_rtol=1e-6, float_atol=1e-6, half_rtol=1e-3, half_atol=1e-3, bfloat16_rtol=1e-2, bfloat16_atol=1e-2, msg=None): """Like assertAllClose, but also suitable for comparing fp16 arrays. In particular, the tolerance is reduced to 1e-3 if at least one of the arguments is of type float16. Args: a: the expected numpy ndarray or anything can be converted to one. b: the actual numpy ndarray or anything can be converted to one. rtol: relative tolerance. atol: absolute tolerance. float_rtol: relative tolerance for float32. float_atol: absolute tolerance for float32. half_rtol: relative tolerance for float16. half_atol: absolute tolerance for float16. bfloat16_rtol: relative tolerance for bfloat16. bfloat16_atol: absolute tolerance for bfloat16. msg: Optional message to report on failure. """ a = self._GetNdArray(a) b = self._GetNdArray(b) # types with lower tol are put later to overwrite previous ones. if (a.dtype == np.float32 or b.dtype == np.float32 or a.dtype == np.complex64 or b.dtype == np.complex64): rtol = max(rtol, float_rtol) atol = max(atol, float_atol) if a.dtype == np.float16 or b.dtype == np.float16: rtol = max(rtol, half_rtol) atol = max(atol, half_atol) if (a.dtype == dtypes.bfloat16.as_numpy_dtype or b.dtype == dtypes.bfloat16.as_numpy_dtype): rtol = max(rtol, bfloat16_rtol) atol = max(atol, bfloat16_atol) self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg) @py_func_if_in_function def assertNotAllClose(self, a, b, **kwargs): """Assert that two numpy arrays, or Tensors, do not have near values. Args: a: the first value to compare. b: the second value to compare. **kwargs: additional keyword arguments to be passed to the underlying `assertAllClose` call. Raises: AssertionError: If `a` and `b` are unexpectedly close at all elements. """ try: self.assertAllClose(a, b, **kwargs) except AssertionError: return raise AssertionError("The two values are close at all elements") @py_func_if_in_function def assertAllEqual(self, a, b, msg=None): """Asserts that two numpy arrays or Tensors have the same values. Args: a: the expected numpy ndarray or anything can be converted to one. b: the actual numpy ndarray or anything can be converted to one. msg: Optional message to report on failure. """ msg = msg if msg else "" a = self._GetNdArray(a) b = self._GetNdArray(b) # Arbitrary bounds so that we don't print giant tensors. if (b.ndim <= 3 or b.size < 500): self.assertEqual( a.shape, b.shape, "Shape mismatch: expected %s, got %s." " Contents: %s. \n%s." % (a.shape, b.shape, b, msg)) else: self.assertEqual( a.shape, b.shape, "Shape mismatch: expected %s, got %s." 
" %s" % (a.shape, b.shape, msg)) same = (a == b) if (a.dtype in [ np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype ]): same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b))) msgs = [msg] if not np.all(same): # Adds more details to np.testing.assert_array_equal. diff = np.logical_not(same) if a.ndim: x = a[np.where(diff)] y = b[np.where(diff)] msgs.append("not equal where = {}".format(np.where(diff))) else: # np.where is broken for scalars x, y = a, b msgs.append("not equal lhs = {}".format(x)) msgs.append("not equal rhs = {}".format(y)) np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs)) @py_func_if_in_function def assertAllGreater(self, a, comparison_target): """Assert element values are all greater than a target value. Args: a: The numpy `ndarray`, or anything that can be converted into a numpy `ndarray` (including Tensor). comparison_target: The target value of comparison. """ a = self._GetNdArray(a) self.assertGreater(np.min(a), comparison_target) @py_func_if_in_function def assertAllLess(self, a, comparison_target): """Assert element values are all less than a target value. Args: a: The numpy `ndarray`, or anything that can be converted into a numpy `ndarray` (including Tensor). comparison_target: The target value of comparison. """ a = self._GetNdArray(a) self.assertLess(np.max(a), comparison_target) @py_func_if_in_function def assertAllGreaterEqual(self, a, comparison_target): """Assert element values are all greater than or equal to a target value. Args: a: The numpy `ndarray`, or anything that can be converted into a numpy `ndarray` (including Tensor). comparison_target: The target value of comparison. """ a = self._GetNdArray(a) self.assertGreaterEqual(np.min(a), comparison_target) @py_func_if_in_function def assertAllLessEqual(self, a, comparison_target): """Assert element values are all less than or equal to a target value. Args: a: The numpy `ndarray`, or anything that can be converted into a numpy `ndarray` (including Tensor). comparison_target: The target value of comparison. """ a = self._GetNdArray(a) self.assertLessEqual(np.max(a), comparison_target) def _format_subscripts(self, subscripts, value, limit=10, indent=2): """Generate a summary of ndarray subscripts as a list of str. If limit == N, this method will print up to the first N subscripts on separate lines. A line of ellipses (...) will be appended at the end if the number of subscripts exceeds N. Args: subscripts: The tensor (np.ndarray) subscripts, of the same format as np.where()'s return value, i.e., a tuple of arrays with each array corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])). value: (np.ndarray) value of the tensor. limit: (int) The maximum number of indices to print. indent: (int) Number of characters to indent at the beginning of each line. Returns: (list of str) the multi-line representation of the subscripts and values, potentially with omission at the end. """ lines = [] subscripts = np.transpose(subscripts) prefix = " " * indent for subscript in itertools.islice(subscripts, limit): lines.append(prefix + str(subscript) + " : " + str(value[tuple(subscript)])) if len(subscripts) > limit: lines.append(prefix + "...") return lines @py_func_if_in_function def assertAllInRange(self, target, lower_bound, upper_bound, open_lower_bound=False, open_upper_bound=False): """Assert that elements in a Tensor are all in a given range. Args: target: The numpy `ndarray`, or anything that can be converted into a numpy `ndarray` (including Tensor). 
lower_bound: lower bound of the range upper_bound: upper bound of the range open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather than the default >=) open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather than the default <=) Raises: AssertionError: if the value tensor does not have an ordered numeric type (float* or int*), or if there are nan values, or if any of the elements do not fall in the specified range. """ target = self._GetNdArray(target) if not (np.issubdtype(target.dtype, np.floating) or np.issubdtype(target.dtype, np.integer)): raise AssertionError( "The value of %s does not have an ordered numeric type, instead it " "has type: %s" % (target, target.dtype)) nan_subscripts = np.where(np.isnan(target)) if np.size(nan_subscripts): raise AssertionError( "%d of the %d element(s) are NaN. " "Subscripts(s) and value(s) of the NaN element(s):\n" % (len(nan_subscripts[0]), np.size(target)) + "\n".join(self._format_subscripts(nan_subscripts, target))) range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " + str(upper_bound) + (")" if open_upper_bound else "]")) violations = ( np.less_equal(target, lower_bound) if open_lower_bound else np.less( target, lower_bound)) violations = np.logical_or( violations, np.greater_equal(target, upper_bound) if open_upper_bound else np.greater(target, upper_bound)) violation_subscripts = np.where(violations) if np.size(violation_subscripts): raise AssertionError( "%d of the %d element(s) are outside the range %s. " % (len(violation_subscripts[0]), np.size(target), range_str) + "Subscript(s) and value(s) of the offending elements:\n" + "\n".join(self._format_subscripts(violation_subscripts, target))) @py_func_if_in_function def assertAllInSet(self, target, expected_set): """Assert that elements of a Tensor are all in a given closed set. Args: target: The numpy `ndarray`, or anything that can be converted into a numpy `ndarray` (including Tensor). expected_set: (`list`, `tuple` or `set`) The closed set that the elements of the value of `target` are expected to fall into. Raises: AssertionError: if any of the elements do not fall into `expected_set`. """ target = self._GetNdArray(target) # Elements in target that are not in expected_set. diff = np.setdiff1d(target.flatten(), list(expected_set)) if np.size(diff): raise AssertionError("%d unique element(s) are not in the set %s: %s" % (np.size(diff), expected_set, diff)) @py_func_if_in_function def assertDTypeEqual(self, target, expected_dtype): """Assert ndarray data type is equal to expected. Args: target: The numpy `ndarray`, or anything that can be converted into a numpy `ndarray` (including Tensor). expected_dtype: Expected data type. """ target = self._GetNdArray(target) if not isinstance(target, list): arrays = [target] for arr in arrays: self.assertEqual(arr.dtype, expected_dtype) # pylint: disable=g-doc-return-or-yield @contextlib.contextmanager def assertRaisesWithPredicateMatch(self, exception_type, expected_err_re_or_predicate): """Returns a context manager to enclose code expected to raise an exception. If the exception is an OpError, the op stack is also included in the message predicate search. Args: exception_type: The expected type of exception that should be raised. expected_err_re_or_predicate: If this is callable, it should be a function of one argument that inspects the passed-in exception and returns True (success) or False (please fail the test). 
Otherwise, the error message is expected to match this regular expression partially. Returns: A context manager to surround code that is expected to raise an exception. """ if callable(expected_err_re_or_predicate): predicate = expected_err_re_or_predicate else: def predicate(e): err_str = e.message if isinstance(e, errors.OpError) else str(e) op = e.op if isinstance(e, errors.OpError) else None while op is not None: err_str += "\nCaused by: " + op.name op = op._original_op # pylint: disable=protected-access logging.info("Searching within error strings: '%s' within '%s'", expected_err_re_or_predicate, err_str) return re.search(expected_err_re_or_predicate, err_str) try: yield self.fail(exception_type.__name__ + " not raised") except Exception as e: # pylint: disable=broad-except if not isinstance(e, exception_type) or not predicate(e): raise AssertionError( "Exception of type %s: %s" % (str(type(e)), str(e))) # pylint: enable=g-doc-return-or-yield def assertRaisesOpError(self, expected_err_re_or_predicate): return self.assertRaisesWithPredicateMatch(errors.OpError, expected_err_re_or_predicate) def assertShapeEqual(self, np_array, tf_tensor, msg=None): """Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape. Args: np_array: A Numpy ndarray or Numpy scalar. tf_tensor: A Tensor. msg: Optional message to report on failure. Raises: TypeError: If the arguments have the wrong type. """ if not isinstance(np_array, (np.ndarray, np.generic)): raise TypeError("np_array must be a Numpy ndarray or Numpy scalar") if not isinstance(tf_tensor, ops.Tensor): raise TypeError("tf_tensor must be a Tensor") self.assertAllEqual( np_array.shape, tf_tensor.get_shape().as_list(), msg=msg) def assertDeviceEqual(self, device1, device2, msg=None): """Asserts that the two given devices are the same. Args: device1: A string device name or TensorFlow `DeviceSpec` object. device2: A string device name or TensorFlow `DeviceSpec` object. msg: Optional message to report on failure. """ device1 = pydev.canonical_name(device1) device2 = pydev.canonical_name(device2) self.assertEqual( device1, device2, "Devices %s and %s are not equal. %s" % (device1, device2, msg)) # Fix Python 3 compatibility issues if six.PY3: # pylint: disable=invalid-name # Silence a deprecation warning assertRaisesRegexp = googletest.TestCase.assertRaisesRegex # assertItemsEqual is assertCountEqual as of 3.2. assertItemsEqual = googletest.TestCase.assertCountEqual # pylint: enable=invalid-name @contextlib.contextmanager def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu): """Set the session and its graph to global default and constrain devices.""" if context.executing_eagerly(): yield None else: with sess.graph.as_default(), sess.as_default(): if force_gpu: # Use the name of an actual device if one is detected, or # '/device:GPU:0' otherwise gpu_name = gpu_device_name() if not gpu_name: gpu_name = "/device:GPU:0" with sess.graph.device(gpu_name): yield sess elif use_gpu: yield sess else: with sess.graph.device("/device:CPU:0"): yield sess def _create_session(self, graph, config, force_gpu): """See session() for details.""" def prepare_config(config): """Returns a config for sessions. Args: config: An optional config_pb2.ConfigProto to use to configure the session. Returns: A config_pb2.ConfigProto object. """ # TODO(b/114333779): Enforce allow_soft_placement=False when # use_gpu=False. Currently many tests rely on the fact that any device # will be used even when a specific device is supposed to be used. 
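      # Soft placement lets TensorFlow silently fall back to another device
      # when the requested one is unavailable. When force_gpu is set we want a
      # hard failure instead, so soft placement is disabled in that case.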
allow_soft_placement = not force_gpu if config is None: config = config_pb2.ConfigProto() config.allow_soft_placement = allow_soft_placement config.gpu_options.per_process_gpu_memory_fraction = 0.3 elif not allow_soft_placement and config.allow_soft_placement: config_copy = config_pb2.ConfigProto() config_copy.CopyFrom(config) config = config_copy config.allow_soft_placement = False # Don't perform optimizations for tests so we don't inadvertently run # gpu ops on cpu config.graph_options.optimizer_options.opt_level = -1 # Disable Grappler constant folding since some tests & benchmarks # use constant input and become meaningless after constant folding. # DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE # GRAPPLER TEAM. config.graph_options.rewrite_options.constant_folding = ( rewriter_config_pb2.RewriterConfig.OFF) config.graph_options.rewrite_options.pin_to_host_optimization = ( rewriter_config_pb2.RewriterConfig.OFF) return config return ErrorLoggingSession(graph=graph, config=prepare_config(config)) def _get_cached_session(self, graph=None, config=None, force_gpu=False, crash_if_inconsistent_args=True): """See cached_session() for documentation.""" if self._cached_session is None: sess = self._create_session( graph=graph, config=config, force_gpu=force_gpu) self._cached_session = sess self._cached_graph = graph self._cached_config = config self._cached_force_gpu = force_gpu return sess else: if crash_if_inconsistent_args and self._cached_graph is not graph: raise ValueError("The graph used to get the cached session is " "different than the one that was used to create the " "session. Maybe create a new session with " "self.session()") if crash_if_inconsistent_args and self._cached_config is not config: raise ValueError("The config used to get the cached session is " "different than the one that was used to create the " "session. Maybe create a new session with " "self.session()") if crash_if_inconsistent_args and (self._cached_force_gpu is not force_gpu): raise ValueError( "The force_gpu value used to get the cached session is " "different than the one that was used to create the " "session. Maybe create a new session with " "self.session()") return self._cached_session @tf_export("test.create_local_cluster") def create_local_cluster(num_workers, num_ps, protocol="grpc", worker_config=None, ps_config=None): """Create and start local servers and return the associated `Server` objects. Example: ```python workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2) worker_sessions = [tf.Session(w.target) for w in workers] with tf.device("/job:ps/task:0"): ... with tf.device("/job:ps/task:1"): ... with tf.device("/job:worker/task:0"): ... with tf.device("/job:worker/task:1"): ... worker_sessions[0].run(...) ``` Args: num_workers: Number of worker servers to start. num_ps: Number of PS servers to start. protocol: Communication protocol. Allowed values are documented in the documentation of `tf.train.Server`. worker_config: (optional) ConfigProto to initialize workers. Can be used to instantiate multiple devices etc. ps_config: (optional) ConfigProto to initialize PS servers. Returns: A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list of `num_workers` objects of type `tf.train.Server` (all running locally); and `ps_servers` is a list of `num_ps` objects of similar type. 
  Raises:
    ImportError: if portpicker module was not found at load time
  """
  if _portpicker_import_error:
    raise _portpicker_import_error  # pylint: disable=raising-bad-type
  worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
  ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
  cluster_dict = {
      "worker": ["localhost:%s" % port for port in worker_ports],
      "ps": ["localhost:%s" % port for port in ps_ports]
  }
  cs = server_lib.ClusterSpec(cluster_dict)

  workers = [
      server_lib.Server(
          cs,
          job_name="worker",
          protocol=protocol,
          task_index=ix,
          config=worker_config,
          start=True) for ix in range(num_workers)
  ]
  ps_servers = [
      server_lib.Server(
          cs,
          job_name="ps",
          protocol=protocol,
          task_index=ix,
          config=ps_config,
          start=True) for ix in range(num_ps)
  ]

  return workers, ps_servers


def get_node_def_from_graph(node_name, graph_def):
  """Returns the `NodeDef` instance for given node name in the graph def.

  This method explores only the NodeDefs in `graph_def.node`.

  Args:
    node_name: Name of the NodeDef to search for.
    graph_def: An instance of `GraphDef` proto.

  Returns:
    the `NodeDef` instance whose name field matches the given node_name or
    None.
  """
  for node_def in graph_def.node:
    if node_def.name == node_name:
      return node_def
  return None


def set_producer_version(graph, producer_version):
  """Sets graph.graph_def_versions.producer to `producer_version`."""
  # The C API doesn't expose altering GraphDefVersions. We can indirectly set
  # it via import_graph_def though.
  graph_def = graph_pb2.GraphDef()
  graph_def.versions.producer = producer_version
  with graph.as_default():
    importer.import_graph_def(graph_def)
  # Note: `assert expr, msg` would always pass here; an equality check is what
  # the docstring promises.
  assert graph.graph_def_versions.producer == producer_version
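
# --- Illustrative sketch (not part of the original module) -----------------
# A minimal, self-contained rendering of the combination expansion performed
# by `_combine_named_parameters`/`generate_combinations_with_testcase_name`
# above: every keyword argument contributes one axis to a cartesian product,
# and each combination then gets a parameterized-test-style name. The helper
# name `_sketch_combine` is hypothetical, introduced only for illustration.
if __name__ == "__main__":
  from collections import OrderedDict as _OrderedDict
  import itertools as _itertools

  def _sketch_combine(**kwargs):
    # Normalize scalar values to single-element lists, then take the product
    # over the value lists in sorted key order.
    items = sorted(
        (k, v if isinstance(v, list) else [v]) for k, v in kwargs.items())
    keys = [k for k, _ in items]
    return [
        _OrderedDict(zip(keys, values))
        for values in _itertools.product(*(v for _, v in items))
    ]

  for combo in _sketch_combine(mode=["graph", "eager"], use_gpu=[True, False]):
    name = "_test" + "".join("_{}_{}".format(k, v) for k, v in combo.items())
    print(name, dict(combo))  # e.g. _test_mode_graph_use_gpu_True {...}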
main.py
from flask import Flask, request, jsonify
from utils.nodefinder import node_finder
from utils.simulator import car_mover, simulator
from entities.carpool import Carpool
from entities.user import User
from entities.car import Car
from threading import Thread
from utils.bluetooth.serial_connection import SerialConnection
import time as time
from utils import Coordinate
import pickle

app = Flask(__name__)
bluetooth = SerialConnection('bluetooth')
carpool = Carpool(bluetooth)
gui_loaded = [False]


@app.route('/api')
def get_all_data():
    # This endpoint is intended for use with the remote visualisation.
    # This is used to retrieve all information about all cars and users waiting for pickup.
    return carpool.json(), 200


@app.route('/api/pickup', methods=['POST'])
def pickup():
    global smart_car
    # Retrieves the cookie header and splits the string so only the value remains
    user_id = request.headers['Cookie'][3:]
    # Parses the JSON payload
    location = node_finder(carpool.graph, request.json["location"][0], request.json["location"][1])
    destination = node_finder(carpool.graph, request.json["destination"][0], request.json["destination"][1])
    # Finds or creates a user
    user = carpool.find_user(user_id)
    if user is None:
        user = User(user_id, location, destination)
        carpool.add_user(user)
    # Updates the user's location with the one sent in the JSON payload
    user.update_location(location)
    car = carpool.logic(location, destination)
    if car:
        # If a car was found for the customer, then this customer is added as a passenger.
        car.add_passenger(user)
        # This returns the location of the car that was found.
        return jsonify({"carLocation": car.location.json()}), 200
    # This return message should be improved.
    # The intention is to return a status indicating that no car was found.
    return "no car"


@app.route('/api/getlocation', methods=['GET'])
def get_location():
    user_id = request.headers['Cookie'][3:]
    # Check if the user_id is valid.
    user = carpool.find_user(user_id)
    if user:
        # If the user exists then check if the user is a passenger.
        for car in carpool.cars:
            for passenger in car.passengers:
                if passenger == user:
                    # Only if the user is a passenger should the location of the car be provided.
                    # This is to prevent unauthorised tracking of vehicles.
                    return jsonify({"carLocation": car.location.json()}), 200
    # This return message should be improved.
    # The intention is to signal that the user isn't a valid passenger.
    return "Not a user"


def load_map():
    try:
        # The `with` statement closes the file automatically.
        with open('utils/mapcreator/map.txt', 'rb') as infile:
            data = pickle.load(infile)
        return data
    except EOFError:
        print("No map loaded")


def start_flask():
    app.run(host='127.0.0.1', port=5000)


def run_car_mover():
    car_mover.run(carpool)


def run_simulator():
    simulator.Simulator(carpool, gui_loaded)


def update_coordinates():
    """
    Function used for reading the car's telemetry.

    Infinite loop that needs to be executed in its own thread.
""" # wait for the GUI to be loaded while not gui_loaded[0]: pass carpool.logic(node_finder(carpool.graph, 730, 1200), node_finder(carpool.graph, 2730, 100)) carpool.logic(node_finder(carpool.graph, 2070, 500), node_finder(carpool.graph, 30, 50)) while True: pos = bluetooth.read() # read all characters available via bluetooth if pos: for car in carpool.cars: if car.id == carpool.OUR_SMART_CAR: car.location = node_finder(carpool.graph, pos.x, pos.y) # set the ca car.visited.append(car.location) time.sleep(0.005) carpool.graph = load_map() # Create test cars to simulate and demonstrate the functionality of the server smart_car = Car(carpool.OUR_SMART_CAR, node_finder(carpool.graph, 0, 0)) b = Car("Car 2", node_finder(carpool.graph, 1500, 200)) c = Car("Car 3", node_finder(carpool.graph, 2000, 1000)) carpool.cars.extend([smart_car, b, c]) # Makes sure the following only gets executed once if __name__ == '__main__': t1 = Thread(target=start_flask) t2 = Thread(target=run_car_mover) t3 = Thread(target=run_simulator) t4 = Thread(target=update_coordinates) t1.start() t2.start() t3.start() t4.start()
afhmm.py
from collections import Counter, OrderedDict
import math
import pandas as pd
import numpy as np
from nilmtk.disaggregate import Disaggregator
import cvxpy as cvx
from hmmlearn import hmm
from multiprocessing import Process, Manager


class AFHMM(Disaggregator):

    def __init__(self, params):
        self.model = []
        self.MODEL_NAME = 'AFHMM'
        self.models = []
        self.num_appliances = 0
        self.appliances = []
        self.signal_aggregates = OrderedDict()
        self.time_period = params.get('time_period', 720)
        self.default_num_states = params.get('default_num_states', 2)
        self.save_model_path = params.get('save-model-path', None)
        self.load_model_path = params.get('pretrained-model-path', None)
        self.chunk_wise_training = False
        if self.load_model_path:
            self.load_model(self.load_model_path)

    def partial_fit(self, train_main, train_appliances, **load_kwargs):
        self.models = []
        self.num_appliances = 0
        self.appliances = []
        train_main = pd.concat(train_main, axis=0)
        train_app_tmp = []
        for app_name, df_list in train_appliances:
            df_list = pd.concat(df_list, axis=0)
            train_app_tmp.append((app_name, df_list))
        # All the initializations required by the model
        train_appliances = train_app_tmp
        learnt_model = OrderedDict()
        means_vector = []
        one_hot_states_vector = []
        pi_s_vector = []
        transmat_vector = []
        states_vector = []
        train_main = train_main.values.flatten().reshape((-1, 1))
        for appliance_name, power in train_appliances:
            #print (appliance_name)
            # Learning the pi's and transition probabilities for each appliance using a simple HMM
            self.appliances.append(appliance_name)
            X = power.values.reshape((-1, 1))
            learnt_model[appliance_name] = hmm.GaussianHMM(self.default_num_states, "full")
            # Fit
            learnt_model[appliance_name].fit(X)
            means = learnt_model[appliance_name].means_.flatten().reshape((-1, 1))
            states = learnt_model[appliance_name].predict(X)
            transmat = learnt_model[appliance_name].transmat_
            counter = Counter(states.flatten())
            total = 0
            keys = list(counter.keys())
            keys.sort()
            for i in keys:
                total += counter[i]
            pi = []
            for i in keys:
                pi.append(counter[i] / total)
            pi = np.array(pi)
            nb_classes = self.default_num_states
            targets = states.reshape(-1)
            means_vector.append(means)
            pi_s_vector.append(pi)
            transmat_vector.append(transmat.T)
            states_vector.append(states)
            self.num_appliances += 1
            self.signal_aggregates[appliance_name] = (np.mean(X) * self.time_period).reshape((-1,))
        self.means_vector = means_vector
        self.pi_s_vector = pi_s_vector
        self.transmat_vector = transmat_vector
        print("Finished Training")

    def disaggregate_thread(self, test_mains, index, d):
        # A worker that disaggregates one block of the test mains (run as a separate process)
        means_vector = self.means_vector
        pi_s_vector = self.pi_s_vector
        transmat_vector = self.transmat_vector
        sigma = 100 * np.ones((len(test_mains), 1))
        flag = 0
        for epoch in range(6):
            # The alternating minimization
            if epoch % 2 == 1:
                usage = np.zeros((len(test_mains)))
                for appliance_id in range(self.num_appliances):
                    app_usage = np.sum(s_[appliance_id] @ means_vector[appliance_id], axis=1)
                    usage += app_usage
                sigma = (test_mains.flatten() - usage.flatten()).reshape((-1, 1))
                sigma = np.where(sigma < 1, 1, sigma)
            else:
                if flag == 0:
                    constraints = []
                    cvx_state_vectors = []
                    cvx_variable_matrices = []
                    delta = cvx.Variable(shape=(len(test_mains), 1), name='delta_t')
                    for appliance_id in range(self.num_appliances):
                        state_vector = cvx.Variable(
                                shape=(len(test_mains), self.default_num_states),
                                name='state_vec-%s' % (appliance_id))
                        cvx_state_vectors.append(state_vector)
                        # Enforcing that their values are ranged
                        constraints += [cvx_state_vectors[appliance_id] >= 0]
                        constraints += [cvx_state_vectors[appliance_id] <= 1]
                        # Enforcing that the states sum to 1 (constraint 6c)
                        for t in range(len(test_mains)):
                            constraints += [cvx.sum(cvx_state_vectors[appliance_id][t]) == 1]
                        # Creating variable matrices for every appliance
                        appliance_variable_matrix = []
                        for t in range(len(test_mains)):
                            matrix = cvx.Variable(
                                    shape=(self.default_num_states, self.default_num_states),
                                    name='variable_matrix-%s-%d' % (appliance_id, t))
                            appliance_variable_matrix.append(matrix)
                        cvx_variable_matrices.append(appliance_variable_matrix)
                        # Enforcing that their values are ranged
                        for t in range(len(test_mains)):
                            constraints += [cvx_variable_matrices[appliance_id][t] >= 0]
                            constraints += [cvx_variable_matrices[appliance_id][t] <= 1]
                        # Constraint 6e
                        for t in range(0, len(test_mains)):
                            for i in range(self.default_num_states):
                                constraints += [cvx.sum(((cvx_variable_matrices[appliance_id][t]).T)[i])
                                                == cvx_state_vectors[appliance_id][t][i]]
                        # Constraint 6d
                        for t in range(1, len(test_mains)):
                            for i in range(self.default_num_states):
                                constraints += [cvx.sum(cvx_variable_matrices[appliance_id][t][i])
                                                == cvx_state_vectors[appliance_id][t - 1][i]]
                    total_observed_reading = np.zeros((test_mains.shape))
                    # Total observed reading equals the sum over all appliances
                    for appliance_id in range(self.num_appliances):
                        total_observed_reading += cvx_state_vectors[appliance_id] @ means_vector[appliance_id]
                    # Loss function to be minimized
                    term_1 = 0
                    term_2 = 0
                    for appliance_id in range(self.num_appliances):
                        # First loop is over appliances
                        variable_matrix = cvx_variable_matrices[appliance_id]
                        transmat = transmat_vector[appliance_id]
                        # Next loop is over different time-stamps
                        for matrix in variable_matrix:
                            term_1 -= cvx.sum(cvx.multiply(matrix, np.log(transmat)))
                        one_hot_states = cvx_state_vectors[appliance_id]
                        pi = pi_s_vector[appliance_id]
                        # The expression involving start states
                        first_one_hot_states = one_hot_states[0]
                        term_2 -= cvx.sum(cvx.multiply(first_one_hot_states, np.log(pi)))
                    flag = 1
                expression = 0
                term_3 = 0
                term_4 = 0
                for t in range(len(test_mains)):
                    term_4 += .5 * ((test_mains[t][0] - total_observed_reading[t][0]) ** 2 / (sigma[t] ** 2))
                    term_3 += .5 * (np.log(sigma[t] ** 2))
                expression = term_1 + term_2 + term_3 + term_4
                expression = cvx.Minimize(expression)
                prob = cvx.Problem(expression, constraints)
                prob.solve(solver=cvx.SCS, verbose=False, warm_start=True)
                s_ = [i.value for i in cvx_state_vectors]

        prediction_dict = {}
        for appliance_id in range(self.num_appliances):
            app_name = self.appliances[appliance_id]
            app_usage = np.sum(s_[appliance_id] @ means_vector[appliance_id], axis=1)
            prediction_dict[app_name] = app_usage.flatten()
        # Store the result at the index corresponding to this worker.
        d[index] = pd.DataFrame(prediction_dict, dtype='float32')

    def disaggregate_chunk(self, test_mains_list):
        # Distributes the test mains across multiple worker processes and runs them in parallel
        manager = Manager()
        d = manager.dict()
        predictions_lst = []
        for test_mains in test_mains_list:
            test_mains_big = test_mains.values.flatten().reshape((-1, 1))
            self.arr_of_results = []
            threads = []
            for test_block in range(int(math.ceil(len(test_mains_big) / self.time_period))):
                test_mains = test_mains_big[test_block * (self.time_period):(test_block + 1) * self.time_period]
                t = Process(target=self.disaggregate_thread, args=(test_mains, test_block, d))
                threads.append(t)
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            for i in range(len(threads)):
                self.arr_of_results.append(d[i])
            prediction = pd.concat(self.arr_of_results, axis=0)
            predictions_lst.append(prediction)
        return predictions_lst
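
A minimal end-to-end sketch of the class above on synthetic data. This is an editorial illustration under toy assumptions (two fabricated appliances, a short series, time_period lowered to keep the convex programs small); it requires cvxpy, hmmlearn, and nilmtk to be installed and is not from the original repository.

# --- usage sketch on toy data (hypothetical appliance names and values) ---
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
fridge = pd.DataFrame(rng.choice([0.0, 100.0], size=240))
kettle = pd.DataFrame(rng.choice([0.0, 2000.0], size=240))
mains = pd.DataFrame(fridge.values + kettle.values)

model = AFHMM({'time_period': 120, 'default_num_states': 2})
model.partial_fit([mains], [('fridge', [fridge]), ('kettle', [kettle])])
predictions = model.disaggregate_chunk([mains])
print(predictions[0].head())
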
qt.py
#!/usr/bin/env python # # Electrum - Lightweight Tidecoin Client # Copyright (C) 2015 Thomas Voegtlin # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from functools import partial import threading import sys import os from typing import TYPE_CHECKING from PyQt5.QtGui import QPixmap from PyQt5.QtCore import QObject, pyqtSignal from PyQt5.QtWidgets import (QTextEdit, QVBoxLayout, QLabel, QGridLayout, QHBoxLayout, QRadioButton, QCheckBox, QLineEdit) from electrum.gui.qt.util import (read_QIcon, WindowModalDialog, WaitingDialog, OkButton, CancelButton, Buttons, icon_path, WWLabel, CloseButton) from electrum.gui.qt.qrcodewidget import QRCodeWidget from electrum.gui.qt.amountedit import AmountEdit from electrum.gui.qt.main_window import StatusBarButton from electrum.gui.qt.installwizard import InstallWizard from electrum.i18n import _ from electrum.plugin import hook from electrum.util import is_valid_email from electrum.logging import Logger from electrum.base_wizard import GoBack, UserCancelled from .trustedcoin import TrustedCoinPlugin, server if TYPE_CHECKING: from electrum.gui.qt.main_window import ElectrumWindow from electrum.wallet import Abstract_Wallet class TOS(QTextEdit): tos_signal = pyqtSignal() error_signal = pyqtSignal(object) class HandlerTwoFactor(QObject, Logger): def __init__(self, plugin, window): QObject.__init__(self) self.plugin = plugin self.window = window Logger.__init__(self) def prompt_user_for_otp(self, wallet, tx, on_success, on_failure): if not isinstance(wallet, self.plugin.wallet_class): return if wallet.can_sign_without_server(): return if not wallet.keystores['x3/'].can_sign(tx, ignore_watching_only=True): self.logger.info("twofactor: xpub3 not needed") return window = self.window.top_level_window() auth_code = self.plugin.auth_dialog(window) WaitingDialog(parent=window, message=_('Waiting for TrustedCoin server to sign transaction...'), task=lambda: wallet.on_otp(tx, auth_code), on_success=lambda *args: on_success(tx), on_error=on_failure) class Plugin(TrustedCoinPlugin): def __init__(self, parent, config, name): super().__init__(parent, config, name) @hook def load_wallet(self, wallet: 'Abstract_Wallet', window: 'ElectrumWindow'): if not isinstance(wallet, self.wallet_class): return wallet.handler_2fa = HandlerTwoFactor(self, window) if wallet.can_sign_without_server(): msg = ' '.join([ _('This wallet was restored from seed, and it contains two master private keys.'), _('Therefore, two-factor authentication is disabled.') ]) action = lambda: window.show_message(msg) 
else: action = partial(self.settings_dialog, window) button = StatusBarButton(read_QIcon("trustedcoin-status.png"), _("TrustedCoin"), action) window.statusBar().addPermanentWidget(button) self.start_request_thread(window.wallet) def auth_dialog(self, window): d = WindowModalDialog(window, _("Authorization")) vbox = QVBoxLayout(d) pw = AmountEdit(None, is_int = True) msg = _('Please enter your Google Authenticator code') vbox.addWidget(QLabel(msg)) grid = QGridLayout() grid.setSpacing(8) grid.addWidget(QLabel(_('Code')), 1, 0) grid.addWidget(pw, 1, 1) vbox.addLayout(grid) msg = _('If you have lost your second factor, you need to restore your wallet from seed in order to request a new code.') label = QLabel(msg) label.setWordWrap(1) vbox.addWidget(label) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if not d.exec_(): return return pw.get_amount() def prompt_user_for_otp(self, wallet, tx, on_success, on_failure): wallet.handler_2fa.prompt_user_for_otp(wallet, tx, on_success, on_failure) def waiting_dialog_for_billing_info(self, window, *, on_finished=None): def task(): return self.request_billing_info(window.wallet, suppress_connection_error=False) def on_error(exc_info): e = exc_info[1] window.show_error("{header}\n{exc}\n\n{tor}" .format(header=_('Error getting TrustedCoin account info.'), exc=repr(e), tor=_('If you keep experiencing network problems, try using a Tor proxy.'))) return WaitingDialog(parent=window, message=_('Requesting account info from TrustedCoin server...'), task=task, on_success=on_finished, on_error=on_error) @hook def abort_send(self, window): wallet = window.wallet if not isinstance(wallet, self.wallet_class): return if wallet.can_sign_without_server(): return if wallet.billing_info is None: self.waiting_dialog_for_billing_info(window) return True return False def settings_dialog(self, window): self.waiting_dialog_for_billing_info(window, on_finished=partial(self.show_settings_dialog, window)) def show_settings_dialog(self, window, success): if not success: window.show_message(_('Server not reachable.')) return wallet = window.wallet d = WindowModalDialog(window, _("TrustedCoin Information")) d.setMinimumSize(500, 200) vbox = QVBoxLayout(d) hbox = QHBoxLayout() logo = QLabel() logo.setPixmap(QPixmap(icon_path("trustedcoin-status.png"))) msg = _('This wallet is protected by TrustedCoin\'s two-factor authentication.') + '<br/>'\ + _("For more information, visit") + " <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>" label = QLabel(msg) label.setOpenExternalLinks(1) hbox.addStretch(10) hbox.addWidget(logo) hbox.addStretch(10) hbox.addWidget(label) hbox.addStretch(10) vbox.addLayout(hbox) vbox.addStretch(10) msg = _('TrustedCoin charges a small fee to co-sign transactions. The fee depends on how many prepaid transactions you buy. 
An extra output is added to your transaction every time you run out of prepaid transactions.') + '<br/>' label = QLabel(msg) label.setWordWrap(1) vbox.addWidget(label) vbox.addStretch(10) grid = QGridLayout() vbox.addLayout(grid) price_per_tx = wallet.price_per_tx n_prepay = wallet.num_prepay() i = 0 for k, v in sorted(price_per_tx.items()): if k == 1: continue grid.addWidget(QLabel("Pay every %d transactions:"%k), i, 0) grid.addWidget(QLabel(window.format_amount(v/k) + ' ' + window.base_unit() + "/tx"), i, 1) b = QRadioButton() b.setChecked(k == n_prepay) b.clicked.connect(lambda b, k=k: self.config.set_key('trustedcoin_prepay', k, True)) grid.addWidget(b, i, 2) i += 1 n = wallet.billing_info.get('tx_remaining', 0) grid.addWidget(QLabel(_("Your wallet has {} prepaid transactions.").format(n)), i, 0) vbox.addLayout(Buttons(CloseButton(d))) d.exec_() def go_online_dialog(self, wizard: InstallWizard): msg = [ _("Your wallet file is: {}.").format(os.path.abspath(wizard.path)), _("You need to be online in order to complete the creation of " "your wallet. If you generated your seed on an offline " 'computer, click on "{}" to close this window, move your ' "wallet file to an online computer, and reopen it with " "Electrum.").format(_('Cancel')), _('If you are online, click on "{}" to continue.').format(_('Next')) ] msg = '\n\n'.join(msg) wizard.reset_stack() try: wizard.confirm_dialog(title='', message=msg, run_next = lambda x: wizard.run('accept_terms_of_use')) except (GoBack, UserCancelled): # user clicked 'Cancel' and decided to move wallet file manually storage, db = wizard.create_storage(wizard.path) raise def accept_terms_of_use(self, window): vbox = QVBoxLayout() vbox.addWidget(QLabel(_("Terms of Service"))) tos_e = TOS() tos_e.setReadOnly(True) vbox.addWidget(tos_e) tos_received = False vbox.addWidget(QLabel(_("Please enter your e-mail address"))) email_e = QLineEdit() vbox.addWidget(email_e) next_button = window.next_button prior_button_text = next_button.text() next_button.setText(_('Accept')) def request_TOS(): try: tos = server.get_terms_of_service() except Exception as e: self.logger.exception('Could not retrieve Terms of Service') tos_e.error_signal.emit(_('Could not retrieve Terms of Service:') + '\n' + repr(e)) return self.TOS = tos tos_e.tos_signal.emit() def on_result(): tos_e.setText(self.TOS) nonlocal tos_received tos_received = True set_enabled() def on_error(msg): window.show_error(str(msg)) window.terminate() def set_enabled(): next_button.setEnabled(tos_received and is_valid_email(email_e.text())) tos_e.tos_signal.connect(on_result) tos_e.error_signal.connect(on_error) t = threading.Thread(target=request_TOS) t.setDaemon(True) t.start() email_e.textChanged.connect(set_enabled) email_e.setFocus(True) window.exec_layout(vbox, next_enabled=False) next_button.setText(prior_button_text) email = str(email_e.text()) self.create_remote_key(email, window) def request_otp_dialog(self, window, short_id, otp_secret, xpub3): vbox = QVBoxLayout() if otp_secret is not None: uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret) l = QLabel("Please scan the following QR code in Google Authenticator. You may as well use the following key: %s"%otp_secret) l.setWordWrap(True) vbox.addWidget(l) qrw = QRCodeWidget(uri) vbox.addWidget(qrw, 1) msg = _('Then, enter your Google Authenticator code:') else: label = QLabel( "This wallet is already registered with TrustedCoin. " "To finalize wallet creation, please enter your Google Authenticator Code. 
" ) label.setWordWrap(1) vbox.addWidget(label) msg = _('Google Authenticator code:') hbox = QHBoxLayout() hbox.addWidget(WWLabel(msg)) pw = AmountEdit(None, is_int = True) pw.setFocus(True) pw.setMaximumWidth(50) hbox.addWidget(pw) vbox.addLayout(hbox) cb_lost = QCheckBox(_("I have lost my Google Authenticator account")) cb_lost.setToolTip(_("Check this box to request a new secret. You will need to retype your seed.")) vbox.addWidget(cb_lost) cb_lost.setVisible(otp_secret is None) def set_enabled(): b = True if cb_lost.isChecked() else len(pw.text()) == 6 window.next_button.setEnabled(b) pw.textChanged.connect(set_enabled) cb_lost.toggled.connect(set_enabled) window.exec_layout(vbox, next_enabled=False, raise_on_cancel=False) self.check_otp(window, short_id, otp_secret, xpub3, pw.get_amount(), cb_lost.isChecked())
autostep_node.py
#!/usr/bin/env python
from __future__ import print_function
import json
import threading

import roslib
import rospy
import std_msgs.msg

import scipy
import scipy.interpolate

from autostep import Autostep
from autostep import AutostepException

from autostep_ros.msg import MotionData
from autostep_ros.msg import TrackingData
from autostep_ros.srv import Command
from autostep_ros.srv import CommandResponse


class AutostepNode(object):

    def __init__(self, port='/dev/ttyACM0'):
        # Parameters
        self.port = rospy.get_param('port', '/dev/ttyACM0')
        self.step_mode = rospy.get_param('step_mode', 'STEP_FS_16')
        self.fullstep_per_rev = rospy.get_param('fullstep_per_rev', 200)
        self.gear_ratio = rospy.get_param('gear_ratio', 2.0)
        self.tracking_mode_gain = rospy.get_param('tracking_mode_gain', 5.0)
        self.tracking_mode_absolute = rospy.get_param('tracking_mode_absolute', True)
        self.max_mode_params = rospy.get_param('max_mode_params', {
            'speed': 1000,
            'accel': 10000,
            'decel': 10000
            })
        # Jog-mode defaults live under their own parameter key.
        self.jog_mode_params = rospy.get_param('jog_mode_params', {
            'speed': 400,
            'accel': 800,
            'decel': 800
            })

        # Tracking mode state data
        self.tracking_mode_is_first = False
        self.tracking_mode_first_update_t = 0.0
        self.tracking_mode_last_update_t = 0.0
        self.tracking_mode_position = 0.0
        self.tracking_mode_velocity = 0.0
        self.tracking_mode_position_start = 0.0

        self.initialize()
        self.enable()

        rospy.init_node('autostep')
        self.motion_pub = rospy.Publisher('motion_data', MotionData, queue_size=10)
        self.tracking_sub = rospy.Subscriber('tracking_data', TrackingData, self.on_tracking_data_callback)

        self.command_srv_table = {
                'run'                    : self.on_run_command,
                'enable'                 : self.on_enable_command,
                'release'                : self.on_release_command,
                'is_busy'                : self.on_is_busy_command,
                'was_stopped'            : self.on_was_stopped_command,
                'move_to'                : self.on_move_to_command,
                'move_by'                : self.on_move_by_command,
                'soft_stop'              : self.on_soft_stop_command,
                'set_position'           : self.on_set_position_command,
                'get_position'           : self.on_get_position_command,
                'set_move_mode'          : self.on_set_move_mode_command,
                'get_jog_mode_params'    : self.on_get_jog_mode_params,
                'set_jog_mode_params'    : self.on_set_jog_mode_params,
                'get_max_mode_params'    : self.on_get_max_mode_params,
                'set_max_mode_params'    : self.on_set_max_mode_params,
                'get_params'             : self.on_get_params_command,
                'print_params'           : self.on_print_params_command,
                'sinusoid'               : self.on_sinusoid_command,
                'move_to_sinusoid_start' : self.on_move_to_sinusoid_start_command,
                'run_trajectory'         : self.on_run_trajectory_command,
                'enable_tracking_mode'   : self.on_enable_tracking_mode_command,
                'disable_tracking_mode'  : self.on_disable_tracking_mode_command,
                }
        self.command_srv = rospy.Service('command', Command, self.command_srv_callback)

        self.lock = threading.Lock()
        self.tracking_mode_enabled = False
        self.running_motion_cmd = False
        self.stopped_flag = False

    def initialize(self):
        self.autostep = Autostep(self.port)
        self.autostep.set_step_mode(self.step_mode)
        self.autostep.set_fullstep_per_rev(self.fullstep_per_rev)
        self.autostep.set_gear_ratio(self.gear_ratio)
        self.autostep.set_move_mode_to_jog()
        self.autostep.set_jog_mode_params(self.jog_mode_params)
        self.autostep.set_max_mode_params(self.max_mode_params)
        self.have_sensor = False

    def enable(self):
        self.autostep.enable()
        self.enabled_flag = True

    def release(self):
        self.autostep.release()
        self.enabled_flag = False

    def command_srv_callback(self, req):
        args_dict = {}
        if req.args_json != '':
            args_json = req.args_json.replace('\{', '{')
            args_json = args_json.replace('\}', '}')
            args_dict = json.loads(args_json)
        ok = False
        try:
command_method = self.command_srv_table[req.command] ok = True except KeyError: rsp_dict = {'success': False,'message':'unknown command'} if ok: rsp_dict = command_method(args_dict) return CommandResponse(json.dumps(rsp_dict)) def on_run_command(self,args_dict): self.stopped_flag = False ok = False velocity = 0.0 rsp_dict = {} try: velocity = args_dict['velocity'] rsp_dict['success'] = True rsp_dict['message'] = '' ok = True except KeyError: rsp_dict['success'] = False rsp_dict['message'] = 'velocity argument missing' if ok: self.autostep.run(velocity) return rsp_dict def on_enable_command(self,args_dict): self.enable() return {'success': True, 'message': ''} def on_release_command(self,args_dict): self.release() return {'success': True, 'message': ''} def on_move_to_command(self,args_dict): self.stopped_flag = False ok = False position = 0.0 rsp_dict = {} try: position = args_dict['position'] rsp_dict['success'] = True rsp_dict['message'] = '' ok = True except KeyError: rsp_dict['success'] = False rsp_dict['message'] = 'position argument missing' if ok: self.autostep.move_to(position) return rsp_dict def on_move_by_command(self, args_dict): self.stopped_flag = False ok = False position = 0.0 rsp_dict = {} try: step = args_dict['step'] rsp_dict['success'] = True rsp_dict['message'] = '' ok = True except KeyError: rsp_dict['success'] = False rsp_dict['message'] = 'step argument missing' if ok: self.autostep.move_by(step) return rsp_dict def on_soft_stop_command(self,args_dict): self.stopped_flag = True with self.lock: self.tracking_mode_enabled = False self.running_motion_cmd = False self.autostep.soft_stop() return {'success': True, 'message': ''} def on_is_busy_command(self,args_dict): is_busy = self.autostep.is_busy() if is_busy or self.running_motion_cmd or self.tracking_mode_enabled: return {'success': True,'message': '','is_busy': True} else: return {'success': True,'message': '','is_busy': False} def on_was_stopped_command(self,args_dict): return {'success': True, 'message': '', 'was_stopped': self.stopped_flag} def on_get_position_command(self,args_dict): position = self.autostep.get_position() return {'success': True, 'message': '', 'position': position} def on_set_position_command(self,args_dict): ok = False position = 0.0 rsp_dict = {} try: position = args_dict['position'] rsp_dict['success'] = True rsp_dict['message'] = '' ok = True except KeyError: rsp_dict['success'] = False rsp_dict['message'] = 'position argument missing' if ok: self.autostep.set_position(position) return rsp_dict def on_sinusoid_command(self,args_dict): self.stopped_flag = False ok = True param = {} rsp_dict = {'message': ''} param_keys = ['amplitude', 'period', 'phase', 'offset', 'num_cycle'] for key in param_keys: try: param[key] = args_dict[key] except KeyError: ok = False if len(rsp_dict['message']) > 0: rsp_dict['message'] += ', ' rsp_dict['message'] += '{} argument missing'.format(key) if ok: def motion_data_callback(elapsed_time, position, setpoint, sensor): if not self.have_sensor: sensor = 0.0 header = std_msgs.msg.Header() header.stamp = rospy.Time.now() self.motion_pub.publish(MotionData(header, elapsed_time, position, setpoint, sensor)) def motion_done_callback(): with self.lock: self.autostep.set_move_mode_to_jog() self.autostep.run(0.0) self.running_motion_cmd = False # Launch sinusoid in separate thread thread_args = [param,motion_data_callback,motion_done_callback] motion_thread = threading.Thread(target=self.autostep.sinusoid,args=thread_args) with self.lock: self.running_motion_cmd = True 
            motion_thread.start()
            rsp_dict['success'] = True
        else:
            rsp_dict['success'] = False
        return rsp_dict

    def on_move_to_sinusoid_start_command(self, args_dict):
        ok = True
        param = {}
        rsp_dict = {'message': ''}
        param_keys = ['amplitude', 'period', 'phase', 'offset', 'num_cycle']
        for key in param_keys:
            try:
                param[key] = args_dict[key]
            except KeyError:
                ok = False
                if len(rsp_dict['message']) > 0:
                    rsp_dict['message'] += ', '
                rsp_dict['message'] += '{} argument missing'.format(key)
        if ok:
            self.autostep.move_to_sinusoid_start(param)
            rsp_dict['success'] = True
        else:
            rsp_dict['success'] = False
        return rsp_dict

    def on_set_move_mode_command(self, args_dict):
        ok = False
        mode = ''
        rsp_dict = {}
        try:
            mode = args_dict['mode']
            rsp_dict['success'] = True
            rsp_dict['message'] = ''
            ok = True
        except KeyError:
            rsp_dict['success'] = False
            rsp_dict['message'] = 'mode argument missing'
        if ok:
            if mode == 'max':
                self.autostep.set_move_mode_to_max()
            elif mode == 'jog':
                self.autostep.set_move_mode_to_jog()
            else:
                rsp_dict['success'] = False
                rsp_dict['message'] = "mode must be 'max' or 'jog'"
        return rsp_dict

    def on_get_jog_mode_params(self, args_dict):
        jog_mode_params = self.autostep.get_jog_mode_params()
        rsp_dict = {'success': True, 'message': '', 'params': jog_mode_params}
        return rsp_dict

    def on_set_jog_mode_params(self, args_dict):
        ok = False
        rsp_dict = {}
        try:
            jog_mode_params = args_dict['params']
            rsp_dict['success'] = True
            rsp_dict['message'] = ''
            ok = True
        except KeyError:
            rsp_dict['success'] = False
            rsp_dict['message'] = 'params argument missing'
        if ok:
            try:
                self.autostep.set_jog_mode_params(jog_mode_params)
                self.jog_mode_params = jog_mode_params
            except AutostepException:
                rsp_dict['success'] = False
                rsp_dict['message'] = 'setting jog mode parameters failed'
        return rsp_dict

    def on_get_max_mode_params(self, args_dict):
        max_mode_params = self.autostep.get_max_mode_params()
        rsp_dict = {'success': True, 'message': '', 'params': max_mode_params}
        return rsp_dict

    def on_set_max_mode_params(self, args_dict):
        ok = False
        rsp_dict = {}
        try:
            max_mode_params = args_dict['params']
            rsp_dict['success'] = True
            rsp_dict['message'] = ''
            ok = True
        except KeyError:
            rsp_dict['success'] = False
            rsp_dict['message'] = 'params argument missing'
        if ok:
            try:
                self.autostep.set_max_mode_params(max_mode_params)
                self.max_mode_params = max_mode_params
            except AutostepException:
                rsp_dict['success'] = False
                rsp_dict['message'] = 'setting max mode parameters failed'
        return rsp_dict

    def on_print_params_command(self, args_dict):
        self.autostep.print_params()
        return {'success': True, 'message': ''}

    def on_get_params_command(self, args_dict):
        params = self.autostep.get_params()
        return {'success': True, 'message': '', 'params': params}

    def on_run_trajectory_command(self, args_dict):
        self.stopped_flag = False
        rsp_dict = {}
        try:
            position = args_dict['position']
        except KeyError:
            rsp_dict['success'] = False
            rsp_dict['message'] = 'position (array) argument missing'
            return rsp_dict
        position = scipy.array(position)
        dt = Autostep.TrajectoryDt
        velocity = scipy.zeros(position.shape)
        velocity[1:] = (position[1:] - position[:-1]) / dt
        t = dt * scipy.arange(0, position.shape[0])
        t_done = t[-1]
        position_func = scipy.interpolate.interp1d(t, position, kind='linear')
        velocity_func = scipy.interpolate.interp1d(t, velocity, kind='linear')

        def motion_data_callback(elapsed_time, position, setpoint):
            header = std_msgs.msg.Header()
            header.stamp = rospy.Time.now()
            self.motion_pub.publish(MotionData(header, elapsed_time, position, setpoint, 0.0))

        def motion_done_callback():
            with self.lock:
                self.autostep.set_move_mode_to_jog()
                self.autostep.run(0.0)
                self.running_motion_cmd = False

        # Launch trajectory in separate thread
        thread_args = [t_done, position_func, velocity_func, False, motion_data_callback, motion_done_callback]
        motion_thread = threading.Thread(target=self.autostep.run_trajectory, args=thread_args)
        with self.lock:
            self.running_motion_cmd = True
        motion_thread.start()
        return {'success': True, 'message': ''}

    def on_enable_tracking_mode_command(self, args_dict):
        self.stopped_flag = False
        with self.lock:
            self.autostep.run(0.0)
            self.autostep.set_move_mode_to_max()
            self.tracking_mode_enabled = True
            self.tracking_mode_is_first = True
        return {'success': True, 'message': ''}

    def on_disable_tracking_mode_command(self, args_dict):
        with self.lock:
            self.tracking_mode_enabled = False
            self.autostep.set_move_mode_to_jog()
            self.autostep.run(0.0)
        return {'success': True, 'message': ''}

    def on_tracking_data_callback(self, msg):
        # Take a lock-protected snapshot of the enabled flag before using it.
        with self.lock:
            tracking_mode_enabled = self.tracking_mode_enabled
        if tracking_mode_enabled:
            if self.tracking_mode_is_first:
                self.tracking_mode_is_first = False
                self.tracking_mode_first_update_t = rospy.get_time()
                self.tracking_mode_last_update_t = self.tracking_mode_first_update_t
                self.tracking_mode_position_start = self.autostep.get_position()
                self.tracking_mode_position = self.tracking_mode_position_start
                predicted_position = self.tracking_mode_position
                self.tracking_mode_velocity = 0.0
            else:
                current_time = rospy.get_time()
                update_dt = current_time - self.tracking_mode_last_update_t
                predicted_position = self.tracking_mode_position + update_dt * self.tracking_mode_velocity
                if self.tracking_mode_absolute:
                    position_error = msg.position - predicted_position
                else:
                    position_error = msg.position - (predicted_position - self.tracking_mode_position_start)
                new_velocity = self.tracking_mode_gain * position_error + msg.velocity
                true_position = self.autostep.run_with_feedback(new_velocity)
                self.tracking_mode_position = true_position
                self.tracking_mode_velocity = new_velocity
                self.tracking_mode_last_update_t = current_time
                #rospy.logwarn(position_error)
            header = std_msgs.msg.Header()
            header.stamp = rospy.Time.now()
            elapsed_time = self.tracking_mode_last_update_t - self.tracking_mode_first_update_t
            self.motion_pub.publish(MotionData(header, elapsed_time, self.tracking_mode_position, predicted_position, 0.0))

    def run(self):
        while not rospy.is_shutdown():
            rospy.sleep(0.1)
        self.autostep.run(0.0)


# ---------------------------------------------------------------------------------------
if __name__ == '__main__':
    node = AutostepNode()
    node.run()
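
A hypothetical client for the 'command' service above. The request fields (command, args_json) mirror how command_srv_callback reads them; the response is a JSON string wrapped in CommandResponse, printed raw here rather than guessing the .srv response field name.

# --- hypothetical ROS client sketch for the 'command' service ---
import json
import rospy
from autostep_ros.srv import Command

rospy.init_node('autostep_client')
rospy.wait_for_service('command')
command_proxy = rospy.ServiceProxy('command', Command)

rsp = command_proxy('move_to', json.dumps({'position': 90.0}))
print(rsp)  # JSON-encoded {'success': ..., 'message': ...}
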
test_gc.py
import unittest from test.support import (verbose, refcount_test, run_unittest, strip_python_stderr, cpython_only, start_threads, temp_dir, requires_type_collecting, TESTFN, unlink, import_module) from test.support.script_helper import assert_python_ok, make_script import gc import sys import sysconfig import textwrap import threading import time import weakref try: from _testcapi import with_tp_del except ImportError: def with_tp_del(cls): class C(object): def __new__(cls, *args, **kwargs): raise TypeError('requires _testcapi.with_tp_del') return C ### Support code ############################################################################### # Bug 1055820 has several tests of longstanding bugs involving weakrefs and # cyclic gc. # An instance of C1055820 has a self-loop, so becomes cyclic trash when # unreachable. class C1055820(object): def __init__(self, i): self.i = i self.loop = self class GC_Detector(object): # Create an instance I. Then gc hasn't happened again so long as # I.gc_happened is false. def __init__(self): self.gc_happened = False def it_happened(ignored): self.gc_happened = True # Create a piece of cyclic trash that triggers it_happened when # gc collects it. self.wr = weakref.ref(C1055820(666), it_happened) @with_tp_del class Uncollectable(object): """Create a reference cycle with multiple __del__ methods. An object in a reference cycle will never have zero references, and so must be garbage collected. If one or more objects in the cycle have __del__ methods, the gc refuses to guess an order, and leaves the cycle uncollected.""" def __init__(self, partner=None): if partner is None: self.partner = Uncollectable(partner=self) else: self.partner = partner def __tp_del__(self): pass if sysconfig.get_config_vars().get('PY_CFLAGS', ''): BUILD_WITH_NDEBUG = ('-DNDEBUG' in sysconfig.get_config_vars()['PY_CFLAGS']) else: # Usually, sys.gettotalrefcount() is only present if Python has been # compiled in debug mode. If it's missing, expect that Python has # been released in release mode: with NDEBUG defined. BUILD_WITH_NDEBUG = (not hasattr(sys, 'gettotalrefcount')) ### Tests ############################################################################### class GCTests(unittest.TestCase): def test_list(self): l = [] l.append(l) gc.collect() del l self.assertEqual(gc.collect(), 1) def test_dict(self): d = {} d[1] = d gc.collect() del d self.assertEqual(gc.collect(), 1) def test_tuple(self): # since tuples are immutable we close the loop with a list l = [] t = (l,) l.append(t) gc.collect() del t del l self.assertEqual(gc.collect(), 2) def test_class(self): class A: pass A.a = A gc.collect() del A self.assertNotEqual(gc.collect(), 0) def test_newstyleclass(self): class A(object): pass gc.collect() del A self.assertNotEqual(gc.collect(), 0) def test_instance(self): class A: pass a = A() a.a = a gc.collect() del a self.assertNotEqual(gc.collect(), 0) @requires_type_collecting def test_newinstance(self): class A(object): pass a = A() a.a = a gc.collect() del a self.assertNotEqual(gc.collect(), 0) class B(list): pass class C(B, A): pass a = C() a.a = a gc.collect() del a self.assertNotEqual(gc.collect(), 0) del B, C self.assertNotEqual(gc.collect(), 0) A.a = A() del A self.assertNotEqual(gc.collect(), 0) self.assertEqual(gc.collect(), 0) def test_method(self): # Tricky: self.__init__ is a bound method, it references the instance. 
class A: def __init__(self): self.init = self.__init__ a = A() gc.collect() del a self.assertNotEqual(gc.collect(), 0) @cpython_only def test_legacy_finalizer(self): # A() is uncollectable if it is part of a cycle, make sure it shows up # in gc.garbage. @with_tp_del class A: def __tp_del__(self): pass class B: pass a = A() a.a = a id_a = id(a) b = B() b.b = b gc.collect() del a del b self.assertNotEqual(gc.collect(), 0) for obj in gc.garbage: if id(obj) == id_a: del obj.a break else: self.fail("didn't find obj in garbage (finalizer)") gc.garbage.remove(obj) @cpython_only def test_legacy_finalizer_newclass(self): # A() is uncollectable if it is part of a cycle, make sure it shows up # in gc.garbage. @with_tp_del class A(object): def __tp_del__(self): pass class B(object): pass a = A() a.a = a id_a = id(a) b = B() b.b = b gc.collect() del a del b self.assertNotEqual(gc.collect(), 0) for obj in gc.garbage: if id(obj) == id_a: del obj.a break else: self.fail("didn't find obj in garbage (finalizer)") gc.garbage.remove(obj) def test_function(self): # Tricky: f -> d -> f, code should call d.clear() after the exec to # break the cycle. d = {} exec("def f(): pass\n", d) gc.collect() del d self.assertEqual(gc.collect(), 2) @refcount_test def test_frame(self): def f(): frame = sys._getframe() gc.collect() f() self.assertEqual(gc.collect(), 1) def test_saveall(self): # Verify that cyclic garbage like lists show up in gc.garbage if the # SAVEALL option is enabled. # First make sure we don't save away other stuff that just happens to # be waiting for collection. gc.collect() # if this fails, someone else created immortal trash self.assertEqual(gc.garbage, []) L = [] L.append(L) id_L = id(L) debug = gc.get_debug() gc.set_debug(debug | gc.DEBUG_SAVEALL) del L gc.collect() gc.set_debug(debug) self.assertEqual(len(gc.garbage), 1) obj = gc.garbage.pop() self.assertEqual(id(obj), id_L) def test_del(self): # __del__ methods can trigger collection, make this to happen thresholds = gc.get_threshold() gc.enable() gc.set_threshold(1) class A: def __del__(self): dir(self) a = A() del a gc.disable() gc.set_threshold(*thresholds) def test_del_newclass(self): # __del__ methods can trigger collection, make this to happen thresholds = gc.get_threshold() gc.enable() gc.set_threshold(1) class A(object): def __del__(self): dir(self) a = A() del a gc.disable() gc.set_threshold(*thresholds) # The following two tests are fragile: # They precisely count the number of allocations, # which is highly implementation-dependent. # For example, disposed tuples are not freed, but reused. # To minimize variations, though, we first store the get_count() results # and check them at the end. @refcount_test def test_get_count(self): gc.collect() a, b, c = gc.get_count() x = [] d, e, f = gc.get_count() self.assertEqual((b, c), (0, 0)) self.assertEqual((e, f), (0, 0)) # This is less fragile than asserting that a equals 0. self.assertLess(a, 5) # Between the two calls to get_count(), at least one object was # created (the list). self.assertGreater(d, a) @refcount_test def test_collect_generations(self): gc.collect() # This object will "trickle" into generation N + 1 after # each call to collect(N) x = [] gc.collect(0) # x is now in gen 1 a, b, c = gc.get_count() gc.collect(1) # x is now in gen 2 d, e, f = gc.get_count() gc.collect(2) # x is now in gen 3 g, h, i = gc.get_count() # We don't check a, d, g since their exact values depends on # internal implementation details of the interpreter. 
self.assertEqual((b, c), (1, 0)) self.assertEqual((e, f), (0, 1)) self.assertEqual((h, i), (0, 0)) def test_trashcan(self): class Ouch: n = 0 def __del__(self): Ouch.n = Ouch.n + 1 if Ouch.n % 17 == 0: gc.collect() # "trashcan" is a hack to prevent stack overflow when deallocating # very deeply nested tuples etc. It works in part by abusing the # type pointer and refcount fields, and that can yield horrible # problems when gc tries to traverse the structures. # If this test fails (as it does in 2.0, 2.1 and 2.2), it will # most likely die via segfault. # Note: In 2.3 the possibility for compiling without cyclic gc was # removed, and that in turn allows the trashcan mechanism to work # via much simpler means (e.g., it never abuses the type pointer or # refcount fields anymore). Since it's much less likely to cause a # problem now, the various constants in this expensive (we force a lot # of full collections) test are cut back from the 2.2 version. gc.enable() N = 150 for count in range(2): t = [] for i in range(N): t = [t, Ouch()] u = [] for i in range(N): u = [u, Ouch()] v = {} for i in range(N): v = {1: v, 2: Ouch()} gc.disable() def test_trashcan_threads(self): # Issue #13992: trashcan mechanism should be thread-safe NESTING = 60 N_THREADS = 2 def sleeper_gen(): """A generator that releases the GIL when closed or dealloc'ed.""" try: yield finally: time.sleep(0.000001) class C(list): # Appending to a list is atomic, which avoids the use of a lock. inits = [] dels = [] def __init__(self, alist): self[:] = alist C.inits.append(None) def __del__(self): # This __del__ is called by subtype_dealloc(). C.dels.append(None) # `g` will release the GIL when garbage-collected. This # helps assert subtype_dealloc's behaviour when threads # switch in the middle of it. g = sleeper_gen() next(g) # Now that __del__ is finished, subtype_dealloc will proceed # to call list_dealloc, which also uses the trashcan mechanism. def make_nested(): """Create a sufficiently nested container object so that the trashcan mechanism is invoked when deallocating it.""" x = C([]) for i in range(NESTING): x = [C([x])] del x def run_thread(): """Exercise make_nested() in a loop.""" while not exit: make_nested() old_switchinterval = sys.getswitchinterval() sys.setswitchinterval(1e-5) try: exit = [] threads = [] for i in range(N_THREADS): t = threading.Thread(target=run_thread) threads.append(t) with start_threads(threads, lambda: exit.append(1)): time.sleep(1.0) finally: sys.setswitchinterval(old_switchinterval) gc.collect() self.assertEqual(len(C.inits), len(C.dels)) def test_boom(self): class Boom: def __getattr__(self, someattribute): del self.attr raise AttributeError a = Boom() b = Boom() a.attr = b b.attr = a gc.collect() garbagelen = len(gc.garbage) del a, b # a<->b are in a trash cycle now. Collection will invoke # Boom.__getattr__ (to see whether a and b have __del__ methods), and # __getattr__ deletes the internal "attr" attributes as a side effect. # That causes the trash cycle to get reclaimed via refcounts falling to # 0, thus mutating the trash graph as a side effect of merely asking # whether __del__ exists. This used to (before 2.3b1) crash Python. # Now __getattr__ isn't called. 
self.assertEqual(gc.collect(), 4) self.assertEqual(len(gc.garbage), garbagelen) def test_boom2(self): class Boom2: def __init__(self): self.x = 0 def __getattr__(self, someattribute): self.x += 1 if self.x > 1: del self.attr raise AttributeError a = Boom2() b = Boom2() a.attr = b b.attr = a gc.collect() garbagelen = len(gc.garbage) del a, b # Much like test_boom(), except that __getattr__ doesn't break the # cycle until the second time gc checks for __del__. As of 2.3b1, # there isn't a second time, so this simply cleans up the trash cycle. # We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get # reclaimed this way. self.assertEqual(gc.collect(), 4) self.assertEqual(len(gc.garbage), garbagelen) def test_boom_new(self): # boom__new and boom2_new are exactly like boom and boom2, except use # new-style classes. class Boom_New(object): def __getattr__(self, someattribute): del self.attr raise AttributeError a = Boom_New() b = Boom_New() a.attr = b b.attr = a gc.collect() garbagelen = len(gc.garbage) del a, b self.assertEqual(gc.collect(), 4) self.assertEqual(len(gc.garbage), garbagelen) def test_boom2_new(self): class Boom2_New(object): def __init__(self): self.x = 0 def __getattr__(self, someattribute): self.x += 1 if self.x > 1: del self.attr raise AttributeError a = Boom2_New() b = Boom2_New() a.attr = b b.attr = a gc.collect() garbagelen = len(gc.garbage) del a, b self.assertEqual(gc.collect(), 4) self.assertEqual(len(gc.garbage), garbagelen) def test_get_referents(self): alist = [1, 3, 5] got = gc.get_referents(alist) got.sort() self.assertEqual(got, alist) atuple = tuple(alist) got = gc.get_referents(atuple) got.sort() self.assertEqual(got, alist) adict = {1: 3, 5: 7} expected = [1, 3, 5, 7] got = gc.get_referents(adict) got.sort() self.assertEqual(got, expected) got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0)) got.sort() self.assertEqual(got, [0, 0] + list(range(5))) self.assertEqual(gc.get_referents(1, 'a', 4j), []) def test_is_tracked(self): # Atomic built-in types are not tracked, user-defined objects and # mutable containers are. # NOTE: types with special optimizations (e.g. tuple) have tests # in their own test files instead. self.assertFalse(gc.is_tracked(None)) self.assertFalse(gc.is_tracked(1)) self.assertFalse(gc.is_tracked(1.0)) self.assertFalse(gc.is_tracked(1.0 + 5.0j)) self.assertFalse(gc.is_tracked(True)) self.assertFalse(gc.is_tracked(False)) self.assertFalse(gc.is_tracked(b"a")) self.assertFalse(gc.is_tracked("a")) self.assertFalse(gc.is_tracked(bytearray(b"a"))) self.assertFalse(gc.is_tracked(type)) self.assertFalse(gc.is_tracked(int)) self.assertFalse(gc.is_tracked(object)) self.assertFalse(gc.is_tracked(object())) class UserClass: pass class UserInt(int): pass # Base class is object; no extra fields. class UserClassSlots: __slots__ = () # Base class is fixed size larger than object; no extra fields. class UserFloatSlots(float): __slots__ = () # Base class is variable size; no extra fields. class UserIntSlots(int): __slots__ = () self.assertTrue(gc.is_tracked(gc)) self.assertTrue(gc.is_tracked(UserClass)) self.assertTrue(gc.is_tracked(UserClass())) self.assertTrue(gc.is_tracked(UserInt())) self.assertTrue(gc.is_tracked([])) self.assertTrue(gc.is_tracked(set())) self.assertFalse(gc.is_tracked(UserClassSlots())) self.assertFalse(gc.is_tracked(UserFloatSlots())) self.assertFalse(gc.is_tracked(UserIntSlots())) def test_bug1055820b(self): # Corresponds to temp2b.py in the bug report. 
ouch = [] def callback(ignored): ouch[:] = [wr() for wr in WRs] Cs = [C1055820(i) for i in range(2)] WRs = [weakref.ref(c, callback) for c in Cs] c = None gc.collect() self.assertEqual(len(ouch), 0) # Make the two instances trash, and collect again. The bug was that # the callback materialized a strong reference to an instance, but gc # cleared the instance's dict anyway. Cs = None gc.collect() self.assertEqual(len(ouch), 2) # else the callbacks didn't run for x in ouch: # If the callback resurrected one of these guys, the instance # would be damaged, with an empty __dict__. self.assertEqual(x, None) def test_bug21435(self): # This is a poor test - its only virtue is that it happened to # segfault on Tim's Windows box before the patch for 21435 was # applied. That's a nasty bug relying on specific pieces of cyclic # trash appearing in exactly the right order in finalize_garbage()'s # input list. # But there's no reliable way to force that order from Python code, # so over time chances are good this test won't really be testing much # of anything anymore. Still, if it blows up, there's _some_ # problem ;-) gc.collect() class A: pass class B: def __init__(self, x): self.x = x def __del__(self): self.attr = None def do_work(): a = A() b = B(A()) a.attr = b b.attr = a do_work() gc.collect() # this blows up (bad C pointer) when it fails @cpython_only def test_garbage_at_shutdown(self): import subprocess code = """if 1: import gc import _testcapi @_testcapi.with_tp_del class X: def __init__(self, name): self.name = name def __repr__(self): return "<X %%r>" %% self.name def __tp_del__(self): pass x = X('first') x.x = x x.y = X('second') del x gc.set_debug(%s) """ def run_command(code): p = subprocess.Popen([sys.executable, "-Wd", "-c", code], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() p.stdout.close() p.stderr.close() self.assertEqual(p.returncode, 0) self.assertEqual(stdout.strip(), b"") return strip_python_stderr(stderr) stderr = run_command(code % "0") self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at " b"shutdown; use", stderr) self.assertNotIn(b"<X 'first'>", stderr) # With DEBUG_UNCOLLECTABLE, the garbage list gets printed stderr = run_command(code % "gc.DEBUG_UNCOLLECTABLE") self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at " b"shutdown", stderr) self.assertTrue( (b"[<X 'first'>, <X 'second'>]" in stderr) or (b"[<X 'second'>, <X 'first'>]" in stderr), stderr) # With DEBUG_SAVEALL, no additional message should get printed # (because gc.garbage also contains normally reclaimable cyclic # references, and its elements get printed at runtime anyway). stderr = run_command(code % "gc.DEBUG_SAVEALL") self.assertNotIn(b"uncollectable objects at shutdown", stderr) @requires_type_collecting def test_gc_main_module_at_shutdown(self): # Create a reference cycle through the __main__ module and check # it gets collected at interpreter shutdown. code = """if 1: class C: def __del__(self): print('__del__ called') l = [C()] l.append(l) """ rc, out, err = assert_python_ok('-c', code) self.assertEqual(out.strip(), b'__del__ called') @requires_type_collecting def test_gc_ordinary_module_at_shutdown(self): # Same as above, but with a non-__main__ module. 
with temp_dir() as script_dir: module = """if 1: class C: def __del__(self): print('__del__ called') l = [C()] l.append(l) """ code = """if 1: import sys sys.path.insert(0, %r) import gctest """ % (script_dir,) make_script(script_dir, 'gctest', module) rc, out, err = assert_python_ok('-c', code) self.assertEqual(out.strip(), b'__del__ called') @requires_type_collecting def test_global_del_SystemExit(self): code = """if 1: class ClassWithDel: def __del__(self): print('__del__ called') a = ClassWithDel() a.link = a raise SystemExit(0)""" self.addCleanup(unlink, TESTFN) with open(TESTFN, 'w') as script: script.write(code) rc, out, err = assert_python_ok(TESTFN) self.assertEqual(out.strip(), b'__del__ called') def test_get_stats(self): stats = gc.get_stats() self.assertEqual(len(stats), 3) for st in stats: self.assertIsInstance(st, dict) self.assertEqual(set(st), {"collected", "collections", "uncollectable"}) self.assertGreaterEqual(st["collected"], 0) self.assertGreaterEqual(st["collections"], 0) self.assertGreaterEqual(st["uncollectable"], 0) # Check that collection counts are incremented correctly if gc.isenabled(): self.addCleanup(gc.enable) gc.disable() old = gc.get_stats() gc.collect(0) new = gc.get_stats() self.assertEqual(new[0]["collections"], old[0]["collections"] + 1) self.assertEqual(new[1]["collections"], old[1]["collections"]) self.assertEqual(new[2]["collections"], old[2]["collections"]) gc.collect(2) new = gc.get_stats() self.assertEqual(new[0]["collections"], old[0]["collections"] + 1) self.assertEqual(new[1]["collections"], old[1]["collections"]) self.assertEqual(new[2]["collections"], old[2]["collections"] + 1) def test_freeze(self): gc.freeze() self.assertGreater(gc.get_freeze_count(), 0) gc.unfreeze() self.assertEqual(gc.get_freeze_count(), 0) def test_get_objects(self): gc.collect() l = [] l.append(l) self.assertTrue( any(l is element for element in gc.get_objects(generation=0)) ) self.assertFalse( any(l is element for element in gc.get_objects(generation=1)) ) self.assertFalse( any(l is element for element in gc.get_objects(generation=2)) ) gc.collect(generation=0) self.assertFalse( any(l is element for element in gc.get_objects(generation=0)) ) self.assertTrue( any(l is element for element in gc.get_objects(generation=1)) ) self.assertFalse( any(l is element for element in gc.get_objects(generation=2)) ) gc.collect(generation=1) self.assertFalse( any(l is element for element in gc.get_objects(generation=0)) ) self.assertFalse( any(l is element for element in gc.get_objects(generation=1)) ) self.assertTrue( any(l is element for element in gc.get_objects(generation=2)) ) gc.collect(generation=2) self.assertFalse( any(l is element for element in gc.get_objects(generation=0)) ) self.assertFalse( any(l is element for element in gc.get_objects(generation=1)) ) self.assertTrue( any(l is element for element in gc.get_objects(generation=2)) ) del l gc.collect() def test_get_objects_arguments(self): gc.collect() self.assertEqual(len(gc.get_objects()), len(gc.get_objects(generation=None))) self.assertRaises(ValueError, gc.get_objects, 1000) self.assertRaises(ValueError, gc.get_objects, -1000) self.assertRaises(TypeError, gc.get_objects, "1") self.assertRaises(TypeError, gc.get_objects, 1.234) def test_38379(self): # When a finalizer resurrects objects, stats were reporting them as # having been collected. This affected both collect()'s return # value and the dicts returned by get_stats(). 
N = 100 class A: # simple self-loop def __init__(self): self.me = self class Z(A): # resurrecting __del__ def __del__(self): zs.append(self) zs = [] def getstats(): d = gc.get_stats()[-1] return d['collected'], d['uncollectable'] gc.collect() gc.disable() # No problems if just collecting A() instances. oldc, oldnc = getstats() for i in range(N): A() t = gc.collect() c, nc = getstats() self.assertEqual(t, 2*N) # instance object & its dict self.assertEqual(c - oldc, 2*N) self.assertEqual(nc - oldnc, 0) # But Z() is not actually collected. oldc, oldnc = c, nc Z() # Nothing is collected - Z() is merely resurrected. t = gc.collect() c, nc = getstats() #self.assertEqual(t, 2) # before self.assertEqual(t, 0) # after #self.assertEqual(c - oldc, 2) # before self.assertEqual(c - oldc, 0) # after self.assertEqual(nc - oldnc, 0) # Unfortunately, a Z() prevents _anything_ from being collected. # It should be possible to collect the A instances anyway, but # that will require non-trivial code changes. oldc, oldnc = c, nc for i in range(N): A() Z() # Z() prevents anything from being collected. t = gc.collect() c, nc = getstats() #self.assertEqual(t, 2*N + 2) # before self.assertEqual(t, 0) # after #self.assertEqual(c - oldc, 2*N + 2) # before self.assertEqual(c - oldc, 0) # after self.assertEqual(nc - oldnc, 0) # But the A() trash is reclaimed on the next run. oldc, oldnc = c, nc t = gc.collect() c, nc = getstats() self.assertEqual(t, 2*N) self.assertEqual(c - oldc, 2*N) self.assertEqual(nc - oldnc, 0) gc.enable() class GCCallbackTests(unittest.TestCase): def setUp(self): # Save gc state and disable it. self.enabled = gc.isenabled() gc.disable() self.debug = gc.get_debug() gc.set_debug(0) gc.callbacks.append(self.cb1) gc.callbacks.append(self.cb2) self.othergarbage = [] def tearDown(self): # Restore gc state del self.visit gc.callbacks.remove(self.cb1) gc.callbacks.remove(self.cb2) gc.set_debug(self.debug) if self.enabled: gc.enable() # destroy any uncollectables gc.collect() for obj in gc.garbage: if isinstance(obj, Uncollectable): obj.partner = None del gc.garbage[:] del self.othergarbage gc.collect() def preclean(self): # Remove all fluff from the system. Invoke this function # manually rather than through self.setUp() for maximum # safety. self.visit = [] gc.collect() garbage, gc.garbage[:] = gc.garbage[:], [] self.othergarbage.append(garbage) self.visit = [] def cb1(self, phase, info): self.visit.append((1, phase, dict(info))) def cb2(self, phase, info): self.visit.append((2, phase, dict(info))) if phase == "stop" and hasattr(self, "cleanup"): # Clean Uncollectable from garbage uc = [e for e in gc.garbage if isinstance(e, Uncollectable)] gc.garbage[:] = [e for e in gc.garbage if not isinstance(e, Uncollectable)] for e in uc: e.partner = None def test_collect(self): self.preclean() gc.collect() # Algorithmically verify the contents of self.visit # because it is long and tortuous. # Count the number of visits to each callback n = [v[0] for v in self.visit] n1 = [i for i in n if i == 1] n2 = [i for i in n if i == 2] self.assertEqual(n1, [1]*2) self.assertEqual(n2, [2]*2) # Count that we got the right number of start and stop callbacks. 
n = [v[1] for v in self.visit] n1 = [i for i in n if i == "start"] n2 = [i for i in n if i == "stop"] self.assertEqual(n1, ["start"]*2) self.assertEqual(n2, ["stop"]*2) # Check that we got the right info dict for all callbacks for v in self.visit: info = v[2] self.assertTrue("generation" in info) self.assertTrue("collected" in info) self.assertTrue("uncollectable" in info) def test_collect_generation(self): self.preclean() gc.collect(2) for v in self.visit: info = v[2] self.assertEqual(info["generation"], 2) @cpython_only def test_collect_garbage(self): self.preclean() # Each of these cause four objects to be garbage: Two # Uncollectables and their instance dicts. Uncollectable() Uncollectable() C1055820(666) gc.collect() for v in self.visit: if v[1] != "stop": continue info = v[2] self.assertEqual(info["collected"], 2) self.assertEqual(info["uncollectable"], 8) # We should now have the Uncollectables in gc.garbage self.assertEqual(len(gc.garbage), 4) for e in gc.garbage: self.assertIsInstance(e, Uncollectable) # Now, let our callback handle the Uncollectable instances self.cleanup=True self.visit = [] gc.garbage[:] = [] gc.collect() for v in self.visit: if v[1] != "stop": continue info = v[2] self.assertEqual(info["collected"], 0) self.assertEqual(info["uncollectable"], 4) # Uncollectables should be gone self.assertEqual(len(gc.garbage), 0) @unittest.skipIf(BUILD_WITH_NDEBUG, 'built with -NDEBUG') def test_refcount_errors(self): self.preclean() # Verify the "handling" of objects with broken refcounts # Skip the test if ctypes is not available import_module("ctypes") import subprocess code = textwrap.dedent(''' from test.support import gc_collect, SuppressCrashReport a = [1, 2, 3] b = [a] # Avoid coredump when Py_FatalError() calls abort() SuppressCrashReport().__enter__() # Simulate the refcount of "a" being too low (compared to the # references held on it by live data), but keeping it above zero # (to avoid deallocating it): import ctypes ctypes.pythonapi.Py_DecRef(ctypes.py_object(a)) # The garbage collector should now have a fatal error # when it reaches the broken object gc_collect() ''') p = subprocess.Popen([sys.executable, "-c", code], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() p.stdout.close() p.stderr.close() # Verify that stderr has a useful error message: self.assertRegex(stderr, br'gcmodule\.c:[0-9]+: gc_decref: Assertion "gc_get_refs\(g\) > 0" failed.') self.assertRegex(stderr, br'refcount is too small') # "address : 0x7fb5062efc18" # "address : 7FB5062EFC18" address_regex = br'[0-9a-fA-Fx]+' self.assertRegex(stderr, br'object address : ' + address_regex) self.assertRegex(stderr, br'object refcount : 1') self.assertRegex(stderr, br'object type : ' + address_regex) self.assertRegex(stderr, br'object type name: list') self.assertRegex(stderr, br'object repr : \[1, 2, 3\]') class GCTogglingTests(unittest.TestCase): def setUp(self): gc.enable() def tearDown(self): gc.disable() def test_bug1055820c(self): # Corresponds to temp2c.py in the bug report. This is pretty # elaborate. c0 = C1055820(0) # Move c0 into generation 2. gc.collect() c1 = C1055820(1) c1.keep_c0_alive = c0 del c0.loop # now only c1 keeps c0 alive c2 = C1055820(2) c2wr = weakref.ref(c2) # no callback! ouch = [] def callback(ignored): ouch[:] = [c2wr()] # The callback gets associated with a wr on an object in generation 2. c0wr = weakref.ref(c0, callback) c0 = c1 = c2 = None # What we've set up: c0, c1, and c2 are all trash now. c0 is in # generation 2. 
The only thing keeping it alive is that c1 points to # it. c1 and c2 are in generation 0, and are in self-loops. There's a # global weakref to c2 (c2wr), but that weakref has no callback. # There's also a global weakref to c0 (c0wr), and that does have a # callback, and that callback references c2 via c2wr(). # # c0 has a wr with callback, which references c2wr # ^ # | # | Generation 2 above dots #. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . . # | Generation 0 below dots # | # | # ^->c1 ^->c2 has a wr but no callback # | | | | # <--v <--v # # So this is the nightmare: when generation 0 gets collected, we see # that c2 has a callback-free weakref, and c1 doesn't even have a # weakref. Collecting generation 0 doesn't see c0 at all, and c0 is # the only object that has a weakref with a callback. gc clears c1 # and c2. Clearing c1 has the side effect of dropping the refcount on # c0 to 0, so c0 goes away (despite that it's in an older generation) # and c0's wr callback triggers. That in turn materializes a reference # to c2 via c2wr(), but c2 gets cleared anyway by gc. # We want to let gc happen "naturally", to preserve the distinction # between generations. junk = [] i = 0 detector = GC_Detector() while not detector.gc_happened: i += 1 if i > 10000: self.fail("gc didn't happen after 10000 iterations") self.assertEqual(len(ouch), 0) junk.append([]) # this will eventually trigger gc self.assertEqual(len(ouch), 1) # else the callback wasn't invoked for x in ouch: # If the callback resurrected c2, the instance would be damaged, # with an empty __dict__. self.assertEqual(x, None) def test_bug1055820d(self): # Corresponds to temp2d.py in the bug report. This is very much like # test_bug1055820c, but uses a __del__ method instead of a weakref # callback to sneak in a resurrection of cyclic trash. ouch = [] class D(C1055820): def __del__(self): ouch[:] = [c2wr()] d0 = D(0) # Move all the above into generation 2. gc.collect() c1 = C1055820(1) c1.keep_d0_alive = d0 del d0.loop # now only c1 keeps d0 alive c2 = C1055820(2) c2wr = weakref.ref(c2) # no callback! d0 = c1 = c2 = None # What we've set up: d0, c1, and c2 are all trash now. d0 is in # generation 2. The only thing keeping it alive is that c1 points to # it. c1 and c2 are in generation 0, and are in self-loops. There's # a global weakref to c2 (c2wr), but that weakref has no callback. # There are no other weakrefs. # # d0 has a __del__ method that references c2wr # ^ # | # | Generation 2 above dots #. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . . # | Generation 0 below dots # | # | # ^->c1 ^->c2 has a wr but no callback # | | | | # <--v <--v # # So this is the nightmare: when generation 0 gets collected, we see # that c2 has a callback-free weakref, and c1 doesn't even have a # weakref. Collecting generation 0 doesn't see d0 at all. gc clears # c1 and c2. Clearing c1 has the side effect of dropping the refcount # on d0 to 0, so d0 goes away (despite that it's in an older # generation) and d0's __del__ triggers. That in turn materializes # a reference to c2 via c2wr(), but c2 gets cleared anyway by gc. # We want to let gc happen "naturally", to preserve the distinction # between generations. 
detector = GC_Detector() junk = [] i = 0 while not detector.gc_happened: i += 1 if i > 10000: self.fail("gc didn't happen after 10000 iterations") self.assertEqual(len(ouch), 0) junk.append([]) # this will eventually trigger gc self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked for x in ouch: # If __del__ resurrected c2, the instance would be damaged, with an # empty __dict__. self.assertEqual(x, None) def test_main(): enabled = gc.isenabled() gc.disable() assert not gc.isenabled() debug = gc.get_debug() gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak try: gc.collect() # Delete 2nd generation garbage run_unittest(GCTests, GCTogglingTests, GCCallbackTests) finally: gc.set_debug(debug) # test gc.enable() even if GC is disabled by default if verbose: print("restoring automatic collection") # make sure to always test gc.enable() gc.enable() assert gc.isenabled() if not enabled: gc.disable() if __name__ == "__main__": test_main()
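The callback protocol that GCCallbackTests exercises above can also be observed outside a test harness. A minimal standalone sketch (the report function and its printout are illustrative, not part of the suite):

import gc

def report(phase, info):
    # Each collection invokes every entry in gc.callbacks twice:
    # once with phase == "start" and once with phase == "stop".
    # The info dict always carries "generation", "collected" and
    # "uncollectable"; the two counts are only meaningful at "stop".
    print(phase, dict(info))

gc.callbacks.append(report)
try:
    gc.collect()
finally:
    gc.callbacks.remove(report)  # always unregister, as tearDown does above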
gui.py
from tkinter import * import time import tkinter.messagebox from bot import chat import pyttsx3 import threading saved_username = ["You"] #ans=["PyBot"] window_size="400x400" class ChatInterface(Frame): # Window def __init__(self, master=None): Frame.__init__(self, master) self.master = master # sets default bg for top level windows self.tl_bg = "#EEEEEE" self.tl_bg2 = "#EEEEEE" self.tl_fg = "#000000" self.font = "Verdana 10" menu = Menu(self.master) self.master.config(menu=menu, bd=5) # Menu bar # File file = Menu(menu, tearoff=0) menu.add_cascade(label="File", menu=file) # file.add_command(label="Save Chat Log", command=self.save_chat) file.add_command(label="Clear Chat", command=self.clear_chat) # file.add_separator() file.add_command(label="Exit",command=self.chatexit) # Options options = Menu(menu, tearoff=0) menu.add_cascade(label="Options", menu=options) # username # font font = Menu(options, tearoff=0) options.add_cascade(label="Font", menu=font) font.add_command(label="Default",command=self.font_change_default) font.add_command(label="Times",command=self.font_change_times) font.add_command(label="System",command=self.font_change_system) font.add_command(label="Helvetica",command=self.font_change_helvetica) font.add_command(label="Fixedsys",command=self.font_change_fixedsys) # color theme color_theme = Menu(options, tearoff=0) options.add_cascade(label="Color Theme", menu=color_theme) color_theme.add_command(label="Default",command=self.color_theme_default) # color_theme.add_command(label="Night",command=self.) color_theme.add_command(label="Grey",command=self.color_theme_grey) color_theme.add_command(label="Blue",command=self.color_theme_dark_blue) color_theme.add_command(label="Torque",command=self.color_theme_turquoise) color_theme.add_command(label="Hacker",command=self.color_theme_hacker) # color_theme.add_command(label='Mkbhd',command=self.MKBHD) self.text_frame = Frame(self.master, bd=6) self.text_frame.pack(expand=True, fill=BOTH) # scrollbar for text box self.text_box_scrollbar = Scrollbar(self.text_frame, bd=0) self.text_box_scrollbar.pack(fill=Y, side=RIGHT) # contains messages self.text_box = Text(self.text_frame, yscrollcommand=self.text_box_scrollbar.set, state=DISABLED, bd=1, padx=6, pady=6, spacing3=8, wrap=WORD, bg=None, font="Verdana 10", relief=GROOVE, width=10, height=1) self.text_box.pack(expand=True, fill=BOTH) self.text_box_scrollbar.config(command=self.text_box.yview) # frame containing user entry field self.entry_frame = Frame(self.master, bd=1) self.entry_frame.pack(side=LEFT, fill=BOTH, expand=True) # entry field self.entry_field = Entry(self.entry_frame, bd=1, justify=LEFT) self.entry_field.pack(fill=X, padx=6, pady=6, ipady=3) # self.users_message = self.entry_field.get() # frame containing send button and emoji button self.send_button_frame = Frame(self.master, bd=0) self.send_button_frame.pack(fill=BOTH) # send button self.send_button = Button(self.send_button_frame, text="Send", width=5, relief=GROOVE, bg='white', bd=1, command=lambda: self.send_message_insert(None), activebackground="#FFFFFF", activeforeground="#000000") self.send_button.pack(side=LEFT, ipady=8) self.master.bind("<Return>", self.send_message_insert) self.last_sent_label(date="No messages sent.") #t2 = threading.Thread(target=self.send_message_insert(, name='t1') #t2.start() # voice output def playResponce(self,responce): x=pyttsx3.init() voice_id="HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_ZIRA_11.0" li = [] if len(responce) > 100: if responce.find('--') 
== -1: b = responce.split('--') #print(b) x.setProperty('voice', voice_id) x.setProperty('rate', 120) x.setProperty('volume', 0.7) x.say(responce) x.runAndWait() def last_sent_label(self, date): try: self.sent_label.destroy() except AttributeError: pass self.sent_label = Label(self.entry_frame, font="Verdana 7", text=date, bg=self.tl_bg2, fg=self.tl_fg) self.sent_label.pack(side=LEFT, fill=X, padx=3) def clear_chat(self): self.text_box.config(state=NORMAL) self.last_sent_label(date="No messages sent.") self.text_box.delete(1.0, END) self.text_box.delete(1.0, END) self.text_box.config(state=DISABLED) def chatexit(self): exit() def send_message_insert(self, message): user_input = self.entry_field.get() pr1 = "You : " + user_input + "\n" self.text_box.configure(state=NORMAL) self.text_box.insert(END, pr1) self.text_box.configure(state=DISABLED) self.text_box.see(END) #t1 = threading.Thread(target=self.playResponce, args=(user_input,)) #t1.start() #time.sleep(1) ob=chat(user_input) pr="Bot : " + ob + "\n" self.text_box.configure(state=NORMAL) self.text_box.insert(END, pr) self.text_box.configure(state=DISABLED) self.text_box.see(END) self.last_sent_label(str(time.strftime( "Last message sent: " + '%B %d, %Y' + ' at ' + '%I:%M %p'))) self.entry_field.delete(0,END) time.sleep(0) t2 = threading.Thread(target=self.playResponce, args=(ob,)) t2.start() #return ob def font_change_default(self): self.text_box.config(font="Verdana 10") self.entry_field.config(font="Verdana 10") self.font = "Verdana 10" def font_change_times(self): self.text_box.config(font="Times") self.entry_field.config(font="Times") self.font = "Times" def font_change_system(self): self.text_box.config(font="System") self.entry_field.config(font="System") self.font = "System" def font_change_helvetica(self): self.text_box.config(font="helvetica 10") self.entry_field.config(font="helvetica 10") self.font = "helvetica 10" def font_change_fixedsys(self): self.text_box.config(font="fixedsys") self.entry_field.config(font="fixedsys") self.font = "fixedsys" def color_theme_default(self): self.master.config(bg="#EEEEEE") self.text_frame.config(bg="#EEEEEE") self.entry_frame.config(bg="#EEEEEE") self.text_box.config(bg="#FFFFFF", fg="#000000") self.entry_field.config(bg="#FFFFFF", fg="#000000", insertbackground="#000000") self.send_button_frame.config(bg="#EEEEEE") self.send_button.config(bg="#FFFFFF", fg="#000000", activebackground="#FFFFFF", activeforeground="#000000") #self.emoji_button.config(bg="#FFFFFF", fg="#000000", activebackground="#FFFFFF", activeforeground="#000000") self.sent_label.config(bg="#EEEEEE", fg="#000000") self.tl_bg = "#FFFFFF" self.tl_bg2 = "#EEEEEE" self.tl_fg = "#000000" # Dark def color_theme_dark(self): self.master.config(bg="#2a2b2d") self.text_frame.config(bg="#2a2b2d") self.text_box.config(bg="#212121", fg="#FFFFFF") self.entry_frame.config(bg="#2a2b2d") self.entry_field.config(bg="#212121", fg="#FFFFFF", insertbackground="#FFFFFF") self.send_button_frame.config(bg="#2a2b2d") self.send_button.config(bg="#212121", fg="#FFFFFF", activebackground="#212121", activeforeground="#FFFFFF") # self.emoji_button.config(bg="#212121", fg="#FFFFFF", activebackground="#212121", activeforeground="#FFFFFF") self.sent_label.config(bg="#2a2b2d", fg="#FFFFFF") self.tl_bg = "#212121" self.tl_bg2 = "#2a2b2d" self.tl_fg = "#FFFFFF" # Grey def color_theme_grey(self): self.master.config(bg="#444444") self.text_frame.config(bg="#444444") self.text_box.config(bg="#4f4f4f", fg="#ffffff") self.entry_frame.config(bg="#444444") 
self.entry_field.config(bg="#4f4f4f", fg="#ffffff", insertbackground="#ffffff") self.send_button_frame.config(bg="#444444") self.send_button.config(bg="#4f4f4f", fg="#ffffff", activebackground="#4f4f4f", activeforeground="#ffffff") #self.emoji_button.config(bg="#4f4f4f", fg="#ffffff", activebackground="#4f4f4f", activeforeground="#ffffff") self.sent_label.config(bg="#444444", fg="#ffffff") self.tl_bg = "#4f4f4f" self.tl_bg2 = "#444444" self.tl_fg = "#ffffff" # Turquoise def color_theme_turquoise(self): self.master.config(bg="#003333") self.text_frame.config(bg="#003333") self.text_box.config(bg="#669999", fg="#FFFFFF") self.entry_frame.config(bg="#003333") self.entry_field.config(bg="#669999", fg="#FFFFFF", insertbackground="#FFFFFF") self.send_button_frame.config(bg="#003333") self.send_button.config(bg="#669999", fg="#FFFFFF", activebackground="#669999", activeforeground="#FFFFFF") #self.emoji_button.config(bg="#669999", fg="#FFFFFF", activebackground="#669999", activeforeground="#FFFFFF") self.sent_label.config(bg="#003333", fg="#FFFFFF") self.tl_bg = "#669999" self.tl_bg2 = "#003333" self.tl_fg = "#FFFFFF" # Blue def color_theme_dark_blue(self): self.master.config(bg="#263b54") self.text_frame.config(bg="#263b54") self.text_box.config(bg="#1c2e44", fg="#FFFFFF") self.entry_frame.config(bg="#263b54") self.entry_field.config(bg="#1c2e44", fg="#FFFFFF", insertbackground="#FFFFFF") self.send_button_frame.config(bg="#263b54") self.send_button.config(bg="#1c2e44", fg="#FFFFFF", activebackground="#1c2e44", activeforeground="#FFFFFF") #self.emoji_button.config(bg="#1c2e44", fg="#FFFFFF", activebackground="#1c2e44", activeforeground="#FFFFFF") self.sent_label.config(bg="#263b54", fg="#FFFFFF") self.tl_bg = "#1c2e44" self.tl_bg2 = "#263b54" self.tl_fg = "#FFFFFF" # Hacker def color_theme_hacker(self): self.master.config(bg="#0F0F0F") self.text_frame.config(bg="#0F0F0F") self.entry_frame.config(bg="#0F0F0F") self.text_box.config(bg="#0F0F0F", fg="#33FF33") self.entry_field.config(bg="#0F0F0F", fg="#33FF33", insertbackground="#33FF33") self.send_button_frame.config(bg="#0F0F0F") self.send_button.config(bg="#0F0F0F", fg="#FFFFFF", activebackground="#0F0F0F", activeforeground="#FFFFFF") #self.emoji_button.config(bg="#0F0F0F", fg="#FFFFFF", activebackground="#0F0F0F", activeforeground="#FFFFFF") self.sent_label.config(bg="#0F0F0F", fg="#33FF33") self.tl_bg = "#0F0F0F" self.tl_bg2 = "#0F0F0F" self.tl_fg = "#33FF33" # Default font and color theme def default_format(self): self.font_change_default() self.color_theme_default() root=Tk() a = ChatInterface(root) root.geometry(window_size) root.title("Bot") root.iconbitmap('i.ico') root.mainloop()
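playResponce above re-creates the speech engine on every call and is handed to a worker thread so that speaking cannot freeze the Tk window. The pyttsx3 calls it relies on reduce to this standalone sketch (the rate and volume values are illustrative, not required):

import pyttsx3

engine = pyttsx3.init()            # selects the platform's default TTS driver
engine.setProperty('rate', 120)    # speaking rate in words per minute
engine.setProperty('volume', 0.7)  # volume from 0.0 to 1.0
engine.say("Hello from the bot")   # queue an utterance
engine.runAndWait()                # block until the queue has been spoken

Because runAndWait() blocks, gui.py runs it on a threading.Thread so the mainloop stays responsive.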
test.py
import subprocess, threading
#import os
import time

EXE_NAME = 'compiled'
EXIT_SUCCESS = 0
TIMEOUT = -15
SEGFAULT = 139

class Command(object):
    def __init__(self, cmd):
        self.cmd = cmd
        self.process = None

    def runTest(self, timeout):
        def target():
            self.process = subprocess.Popen(self.cmd, shell=True,
                                            stderr=subprocess.PIPE)
            self.process.communicate()
        thread = threading.Thread(target=target)
        thread.start()
        startTime = time.time()
        thread.join(timeout)
        if thread.is_alive():
            killRunning()
        endTime = time.time()
        if endTime - startTime > timeout:
            return TIMEOUT, timeout
        else:
            assert self.process.returncode is not None
            return self.process.returncode, endTime - startTime

    def runCommand(self):
        return subprocess.Popen(self.cmd, shell=True,
                                stderr=subprocess.PIPE).stderr.read()

def test(timeLimit, caseNum, fin, fout, casesPath):
    prepareInput(casesPath, caseNum, fin)
    returnCode, timeTaken = Command('./compiled').runTest(timeLimit)
    reason = 'crashed'
    if returnCode == EXIT_SUCCESS:
        reason = 'correct' if correctOutput(casesPath, caseNum, fout) else 'incorrect'
    elif returnCode == TIMEOUT:
        reason = 'timeout'
    elif returnCode == SEGFAULT:
        reason = 'crashed (segfault)'
    cleanup(fin, fout)
    return reason, timeTaken

def correctOutput(casesPath, caseNum, fout):
    return simplify(open(casesPath + ('/out/%d.out' % caseNum)).read()) == \
           simplify(open(fout).read())

def cleanup(fin, fout):
    Command('rm %s %s' % (fin, fout)).runCommand()

def killRunning():
    Command('killall compiled').runCommand()

def simplify(text):
    lines = []
    for line in text.strip().split('\n'):
        lines.append(line.strip().split())
    return lines

def prepareInput(casesPath, caseNum, fin):
    cmdStr = 'cp %s/in/%d.in ./%s' % (casesPath, caseNum, fin)
    Command(cmdStr).runCommand()
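runTest above implements its timeout with a watcher thread plus killall. For comparison, on Python 3.3+ subprocess.run has a built-in timeout; a sketch producing the same (returncode, elapsed) shape (note that with shell=True the timeout kills only the shell, so a killRunning-style sweep may still be needed for the compiled child):

import subprocess
import time

TIMEOUT = -15  # same sentinel as test.py above

def run_with_timeout(cmd, timeout):
    start = time.time()
    try:
        proc = subprocess.run(cmd, shell=True, stderr=subprocess.PIPE,
                              timeout=timeout)
    except subprocess.TimeoutExpired:
        return TIMEOUT, timeout  # mirror runTest's timeout result
    return proc.returncode, time.time() - start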
test_logging.py
# Copyright 2001-2017 by Vinay Sajip. All Rights Reserved. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose and without fee is hereby granted, # provided that the above copyright notice appear in all copies and that # both that copyright notice and this permission notice appear in # supporting documentation, and that the name of Vinay Sajip # not be used in advertising or publicity pertaining to distribution # of the software without specific, written prior permission. # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """Test harness for the logging module. Run all tests. Copyright (C) 2001-2017 Vinay Sajip. All Rights Reserved. """ import logging import logging.handlers import logging.config import codecs import configparser import datetime import pathlib import pickle import io import gc import json import os import queue import random import re import socket import struct import sys import tempfile from test.support.script_helper import assert_python_ok from test import support import textwrap import threading import time import unittest import warnings import weakref import asyncore from http.server import HTTPServer, BaseHTTPRequestHandler import smtpd from urllib.parse import urlparse, parse_qs from socketserver import (ThreadingUDPServer, DatagramRequestHandler, ThreadingTCPServer, StreamRequestHandler) try: import win32evtlog, win32evtlogutil, pywintypes except ImportError: win32evtlog = win32evtlogutil = pywintypes = None try: import zlib except ImportError: pass class BaseTest(unittest.TestCase): """Base class for logging tests.""" log_format = "%(name)s -> %(levelname)s: %(message)s" expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$" message_num = 0 def setUp(self): """Setup the default logging stream to an internal StringIO instance, so that we can examine log output as we want.""" self._threading_key = support.threading_setup() logger_dict = logging.getLogger().manager.loggerDict logging._acquireLock() try: self.saved_handlers = logging._handlers.copy() self.saved_handler_list = logging._handlerList[:] self.saved_loggers = saved_loggers = logger_dict.copy() self.saved_name_to_level = logging._nameToLevel.copy() self.saved_level_to_name = logging._levelToName.copy() self.logger_states = logger_states = {} for name in saved_loggers: logger_states[name] = getattr(saved_loggers[name], 'disabled', None) finally: logging._releaseLock() # Set two unused loggers self.logger1 = logging.getLogger("\xab\xd7\xbb") self.logger2 = logging.getLogger("\u013f\u00d6\u0047") self.root_logger = logging.getLogger("") self.original_logging_level = self.root_logger.getEffectiveLevel() self.stream = io.StringIO() self.root_logger.setLevel(logging.DEBUG) self.root_hdlr = logging.StreamHandler(self.stream) self.root_formatter = logging.Formatter(self.log_format) self.root_hdlr.setFormatter(self.root_formatter) if self.logger1.hasHandlers(): hlist = self.logger1.handlers + self.root_logger.handlers raise AssertionError('Unexpected handlers: %s' % hlist) if self.logger2.hasHandlers(): hlist = self.logger2.handlers + self.root_logger.handlers raise 
AssertionError('Unexpected handlers: %s' % hlist) self.root_logger.addHandler(self.root_hdlr) self.assertTrue(self.logger1.hasHandlers()) self.assertTrue(self.logger2.hasHandlers()) def tearDown(self): """Remove our logging stream, and restore the original logging level.""" self.stream.close() self.root_logger.removeHandler(self.root_hdlr) while self.root_logger.handlers: h = self.root_logger.handlers[0] self.root_logger.removeHandler(h) h.close() self.root_logger.setLevel(self.original_logging_level) logging._acquireLock() try: logging._levelToName.clear() logging._levelToName.update(self.saved_level_to_name) logging._nameToLevel.clear() logging._nameToLevel.update(self.saved_name_to_level) logging._handlers.clear() logging._handlers.update(self.saved_handlers) logging._handlerList[:] = self.saved_handler_list manager = logging.getLogger().manager manager.disable = 0 loggerDict = manager.loggerDict loggerDict.clear() loggerDict.update(self.saved_loggers) logger_states = self.logger_states for name in self.logger_states: if logger_states[name] is not None: self.saved_loggers[name].disabled = logger_states[name] finally: logging._releaseLock() self.doCleanups() support.threading_cleanup(*self._threading_key) def assert_log_lines(self, expected_values, stream=None, pat=None): """Match the collected log lines against the regular expression self.expected_log_pat, and compare the extracted group values to the expected_values list of tuples.""" stream = stream or self.stream pat = re.compile(pat or self.expected_log_pat) actual_lines = stream.getvalue().splitlines() self.assertEqual(len(actual_lines), len(expected_values)) for actual, expected in zip(actual_lines, expected_values): match = pat.search(actual) if not match: self.fail("Log line does not match expected pattern:\n" + actual) self.assertEqual(tuple(match.groups()), expected) s = stream.read() if s: self.fail("Remaining output at end of log stream:\n" + s) def next_message(self): """Generate a message consisting solely of an auto-incrementing integer.""" self.message_num += 1 return "%d" % self.message_num class BuiltinLevelsTest(BaseTest): """Test builtin levels and their inheritance.""" def test_flat(self): #Logging levels in a flat logger namespace. m = self.next_message ERR = logging.getLogger("ERR") ERR.setLevel(logging.ERROR) INF = logging.LoggerAdapter(logging.getLogger("INF"), {}) INF.setLevel(logging.INFO) DEB = logging.getLogger("DEB") DEB.setLevel(logging.DEBUG) # These should log. ERR.log(logging.CRITICAL, m()) ERR.error(m()) INF.log(logging.CRITICAL, m()) INF.error(m()) INF.warning(m()) INF.info(m()) DEB.log(logging.CRITICAL, m()) DEB.error(m()) DEB.warning(m()) DEB.info(m()) DEB.debug(m()) # These should not log. ERR.warning(m()) ERR.info(m()) ERR.debug(m()) INF.debug(m()) self.assert_log_lines([ ('ERR', 'CRITICAL', '1'), ('ERR', 'ERROR', '2'), ('INF', 'CRITICAL', '3'), ('INF', 'ERROR', '4'), ('INF', 'WARNING', '5'), ('INF', 'INFO', '6'), ('DEB', 'CRITICAL', '7'), ('DEB', 'ERROR', '8'), ('DEB', 'WARNING', '9'), ('DEB', 'INFO', '10'), ('DEB', 'DEBUG', '11'), ]) def test_nested_explicit(self): # Logging levels in a nested namespace, all explicitly set. m = self.next_message INF = logging.getLogger("INF") INF.setLevel(logging.INFO) INF_ERR = logging.getLogger("INF.ERR") INF_ERR.setLevel(logging.ERROR) # These should log. INF_ERR.log(logging.CRITICAL, m()) INF_ERR.error(m()) # These should not log. 
INF_ERR.warning(m()) INF_ERR.info(m()) INF_ERR.debug(m()) self.assert_log_lines([ ('INF.ERR', 'CRITICAL', '1'), ('INF.ERR', 'ERROR', '2'), ]) def test_nested_inherited(self): #Logging levels in a nested namespace, inherited from parent loggers. m = self.next_message INF = logging.getLogger("INF") INF.setLevel(logging.INFO) INF_ERR = logging.getLogger("INF.ERR") INF_ERR.setLevel(logging.ERROR) INF_UNDEF = logging.getLogger("INF.UNDEF") INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF") UNDEF = logging.getLogger("UNDEF") # These should log. INF_UNDEF.log(logging.CRITICAL, m()) INF_UNDEF.error(m()) INF_UNDEF.warning(m()) INF_UNDEF.info(m()) INF_ERR_UNDEF.log(logging.CRITICAL, m()) INF_ERR_UNDEF.error(m()) # These should not log. INF_UNDEF.debug(m()) INF_ERR_UNDEF.warning(m()) INF_ERR_UNDEF.info(m()) INF_ERR_UNDEF.debug(m()) self.assert_log_lines([ ('INF.UNDEF', 'CRITICAL', '1'), ('INF.UNDEF', 'ERROR', '2'), ('INF.UNDEF', 'WARNING', '3'), ('INF.UNDEF', 'INFO', '4'), ('INF.ERR.UNDEF', 'CRITICAL', '5'), ('INF.ERR.UNDEF', 'ERROR', '6'), ]) def test_nested_with_virtual_parent(self): # Logging levels when some parent does not exist yet. m = self.next_message INF = logging.getLogger("INF") GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF") CHILD = logging.getLogger("INF.BADPARENT") INF.setLevel(logging.INFO) # These should log. GRANDCHILD.log(logging.FATAL, m()) GRANDCHILD.info(m()) CHILD.log(logging.FATAL, m()) CHILD.info(m()) # These should not log. GRANDCHILD.debug(m()) CHILD.debug(m()) self.assert_log_lines([ ('INF.BADPARENT.UNDEF', 'CRITICAL', '1'), ('INF.BADPARENT.UNDEF', 'INFO', '2'), ('INF.BADPARENT', 'CRITICAL', '3'), ('INF.BADPARENT', 'INFO', '4'), ]) def test_regression_22386(self): """See issue #22386 for more information.""" self.assertEqual(logging.getLevelName('INFO'), logging.INFO) self.assertEqual(logging.getLevelName(logging.INFO), 'INFO') def test_issue27935(self): fatal = logging.getLevelName('FATAL') self.assertEqual(fatal, logging.FATAL) def test_regression_29220(self): """See issue #29220 for more information.""" logging.addLevelName(logging.INFO, '') self.addCleanup(logging.addLevelName, logging.INFO, 'INFO') self.assertEqual(logging.getLevelName(logging.INFO), '') self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET') self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET) class BasicFilterTest(BaseTest): """Test the bundled Filter class.""" def test_filter(self): # Only messages satisfying the specified criteria pass through the # filter. filter_ = logging.Filter("spam.eggs") handler = self.root_logger.handlers[0] try: handler.addFilter(filter_) spam = logging.getLogger("spam") spam_eggs = logging.getLogger("spam.eggs") spam_eggs_fish = logging.getLogger("spam.eggs.fish") spam_bakedbeans = logging.getLogger("spam.bakedbeans") spam.info(self.next_message()) spam_eggs.info(self.next_message()) # Good. spam_eggs_fish.info(self.next_message()) # Good. spam_bakedbeans.info(self.next_message()) self.assert_log_lines([ ('spam.eggs', 'INFO', '2'), ('spam.eggs.fish', 'INFO', '3'), ]) finally: handler.removeFilter(filter_) def test_callable_filter(self): # Only messages satisfying the specified criteria pass through the # filter.
def filterfunc(record): parts = record.name.split('.') prefix = '.'.join(parts[:2]) return prefix == 'spam.eggs' handler = self.root_logger.handlers[0] try: handler.addFilter(filterfunc) spam = logging.getLogger("spam") spam_eggs = logging.getLogger("spam.eggs") spam_eggs_fish = logging.getLogger("spam.eggs.fish") spam_bakedbeans = logging.getLogger("spam.bakedbeans") spam.info(self.next_message()) spam_eggs.info(self.next_message()) # Good. spam_eggs_fish.info(self.next_message()) # Good. spam_bakedbeans.info(self.next_message()) self.assert_log_lines([ ('spam.eggs', 'INFO', '2'), ('spam.eggs.fish', 'INFO', '3'), ]) finally: handler.removeFilter(filterfunc) def test_empty_filter(self): f = logging.Filter() r = logging.makeLogRecord({'name': 'spam.eggs'}) self.assertTrue(f.filter(r)) # # First, we define our levels. There can be as many as you want - the only # limitations are that they should be integers, the lowest should be > 0 and # larger values mean less information being logged. If you need specific # level values which do not fit into these limitations, you can use a # mapping dictionary to convert between your application levels and the # logging system. # SILENT = 120 TACITURN = 119 TERSE = 118 EFFUSIVE = 117 SOCIABLE = 116 VERBOSE = 115 TALKATIVE = 114 GARRULOUS = 113 CHATTERBOX = 112 BORING = 111 LEVEL_RANGE = range(BORING, SILENT + 1) # # Next, we define names for our levels. You don't need to do this - in which # case the system will use "Level n" to denote the text for the level. # my_logging_levels = { SILENT : 'Silent', TACITURN : 'Taciturn', TERSE : 'Terse', EFFUSIVE : 'Effusive', SOCIABLE : 'Sociable', VERBOSE : 'Verbose', TALKATIVE : 'Talkative', GARRULOUS : 'Garrulous', CHATTERBOX : 'Chatterbox', BORING : 'Boring', } class GarrulousFilter(logging.Filter): """A filter which blocks garrulous messages.""" def filter(self, record): return record.levelno != GARRULOUS class VerySpecificFilter(logging.Filter): """A filter which blocks sociable and taciturn messages.""" def filter(self, record): return record.levelno not in [SOCIABLE, TACITURN] class CustomLevelsAndFiltersTest(BaseTest): """Test various filtering possibilities with custom logging levels.""" # Skip the logger name group. expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$" def setUp(self): BaseTest.setUp(self) for k, v in my_logging_levels.items(): logging.addLevelName(k, v) def log_at_all_levels(self, logger): for lvl in LEVEL_RANGE: logger.log(lvl, self.next_message()) def test_logger_filter(self): # Filter at logger level. self.root_logger.setLevel(VERBOSE) # Levels >= 'Verbose' are good. self.log_at_all_levels(self.root_logger) self.assert_log_lines([ ('Verbose', '5'), ('Sociable', '6'), ('Effusive', '7'), ('Terse', '8'), ('Taciturn', '9'), ('Silent', '10'), ]) def test_handler_filter(self): # Filter at handler level. self.root_logger.handlers[0].setLevel(SOCIABLE) try: # Levels >= 'Sociable' are good. self.log_at_all_levels(self.root_logger) self.assert_log_lines([ ('Sociable', '6'), ('Effusive', '7'), ('Terse', '8'), ('Taciturn', '9'), ('Silent', '10'), ]) finally: self.root_logger.handlers[0].setLevel(logging.NOTSET) def test_specific_filters(self): # Set a specific filter object on the handler, and then add another # filter object on the logger itself. 
handler = self.root_logger.handlers[0] specific_filter = None garr = GarrulousFilter() handler.addFilter(garr) try: self.log_at_all_levels(self.root_logger) first_lines = [ # Notice how 'Garrulous' is missing ('Boring', '1'), ('Chatterbox', '2'), ('Talkative', '4'), ('Verbose', '5'), ('Sociable', '6'), ('Effusive', '7'), ('Terse', '8'), ('Taciturn', '9'), ('Silent', '10'), ] self.assert_log_lines(first_lines) specific_filter = VerySpecificFilter() self.root_logger.addFilter(specific_filter) self.log_at_all_levels(self.root_logger) self.assert_log_lines(first_lines + [ # Not only 'Garrulous' is still missing, but also 'Sociable' # and 'Taciturn' ('Boring', '11'), ('Chatterbox', '12'), ('Talkative', '14'), ('Verbose', '15'), ('Effusive', '17'), ('Terse', '18'), ('Silent', '20'), ]) finally: if specific_filter: self.root_logger.removeFilter(specific_filter) handler.removeFilter(garr) class HandlerTest(BaseTest): def test_name(self): h = logging.Handler() h.name = 'generic' self.assertEqual(h.name, 'generic') h.name = 'anothergeneric' self.assertEqual(h.name, 'anothergeneric') self.assertRaises(NotImplementedError, h.emit, None) def test_builtin_handlers(self): # We can't actually *use* too many handlers in the tests, # but we can try instantiating them with various options if sys.platform in ('linux', 'darwin'): for existing in (True, False): fd, fn = tempfile.mkstemp() os.close(fd) if not existing: os.unlink(fn) h = logging.handlers.WatchedFileHandler(fn, delay=True) if existing: dev, ino = h.dev, h.ino self.assertEqual(dev, -1) self.assertEqual(ino, -1) r = logging.makeLogRecord({'msg': 'Test'}) h.handle(r) # Now remove the file. os.unlink(fn) self.assertFalse(os.path.exists(fn)) # The next call should recreate the file. h.handle(r) self.assertTrue(os.path.exists(fn)) else: self.assertEqual(h.dev, -1) self.assertEqual(h.ino, -1) h.close() if existing: os.unlink(fn) if sys.platform == 'darwin': sockname = '/var/run/syslog' else: sockname = '/dev/log' try: h = logging.handlers.SysLogHandler(sockname) self.assertEqual(h.facility, h.LOG_USER) self.assertTrue(h.unixsocket) h.close() except OSError: # syslogd might not be available pass for method in ('GET', 'POST', 'PUT'): if method == 'PUT': self.assertRaises(ValueError, logging.handlers.HTTPHandler, 'localhost', '/log', method) else: h = logging.handlers.HTTPHandler('localhost', '/log', method) h.close() h = logging.handlers.BufferingHandler(0) r = logging.makeLogRecord({}) self.assertTrue(h.shouldFlush(r)) h.close() h = logging.handlers.BufferingHandler(1) self.assertFalse(h.shouldFlush(r)) h.close() def test_path_objects(self): """ Test that Path objects are accepted as filename arguments to handlers. See Issue #27493. """ fd, fn = tempfile.mkstemp() os.close(fd) os.unlink(fn) pfn = pathlib.Path(fn) cases = ( (logging.FileHandler, (pfn, 'w')), (logging.handlers.RotatingFileHandler, (pfn, 'a')), (logging.handlers.TimedRotatingFileHandler, (pfn, 'h')), ) if sys.platform in ('linux', 'darwin'): cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),) for cls, args in cases: h = cls(*args) self.assertTrue(os.path.exists(fn)) h.close() os.unlink(fn) @unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.') def test_race(self): # Issue #14632 refers. 
def remove_loop(fname, tries): for _ in range(tries): try: os.unlink(fname) self.deletion_time = time.time() except OSError: pass time.sleep(0.004 * random.randint(0, 4)) del_count = 500 log_count = 500 self.handle_time = None self.deletion_time = None for delay in (False, True): fd, fn = tempfile.mkstemp('.log', 'test_logging-3-') os.close(fd) remover = threading.Thread(target=remove_loop, args=(fn, del_count)) remover.daemon = True remover.start() h = logging.handlers.WatchedFileHandler(fn, delay=delay) f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s') h.setFormatter(f) try: for _ in range(log_count): time.sleep(0.005) r = logging.makeLogRecord({'msg': 'testing' }) try: self.handle_time = time.time() h.handle(r) except Exception: print('Deleted at %s, ' 'opened at %s' % (self.deletion_time, self.handle_time)) raise finally: remover.join() h.close() if os.path.exists(fn): os.unlink(fn) class BadStream(object): def write(self, data): raise RuntimeError('deliberate mistake') class TestStreamHandler(logging.StreamHandler): def handleError(self, record): self.error_record = record class StreamHandlerTest(BaseTest): def test_error_handling(self): h = TestStreamHandler(BadStream()) r = logging.makeLogRecord({}) old_raise = logging.raiseExceptions try: h.handle(r) self.assertIs(h.error_record, r) h = logging.StreamHandler(BadStream()) with support.captured_stderr() as stderr: h.handle(r) msg = '\nRuntimeError: deliberate mistake\n' self.assertIn(msg, stderr.getvalue()) logging.raiseExceptions = False with support.captured_stderr() as stderr: h.handle(r) self.assertEqual('', stderr.getvalue()) finally: logging.raiseExceptions = old_raise def test_stream_setting(self): """ Test setting the handler's stream """ h = logging.StreamHandler() stream = io.StringIO() old = h.setStream(stream) self.assertIs(old, sys.stderr) actual = h.setStream(old) self.assertIs(actual, stream) # test that setting to existing value returns None actual = h.setStream(old) self.assertIsNone(actual) # -- The following section could be moved into a server_helper.py module # -- if it proves to be of wider utility than just test_logging class TestSMTPServer(smtpd.SMTPServer): """ This class implements a test SMTP server. :param addr: A (host, port) tuple which the server listens on. You can specify a port value of zero: the server's *port* attribute will hold the actual port number used, which can be used in client connections. :param handler: A callable which will be called to process incoming messages. The handler will be passed the client address tuple, who the message is from, a list of recipients and the message data. :param poll_interval: The interval, in seconds, used in the underlying :func:`select` or :func:`poll` call by :func:`asyncore.loop`. :param sockmap: A dictionary which will be used to hold :class:`asyncore.dispatcher` instances used by :func:`asyncore.loop`. This avoids changing the :mod:`asyncore` module's global state. """ def __init__(self, addr, handler, poll_interval, sockmap): smtpd.SMTPServer.__init__(self, addr, None, map=sockmap, decode_data=True) self.port = self.socket.getsockname()[1] self._handler = handler self._thread = None self.poll_interval = poll_interval def process_message(self, peer, mailfrom, rcpttos, data): """ Delegates to the handler passed in to the server's constructor. Typically, this will be a test case method. :param peer: The client (host, port) tuple. :param mailfrom: The address of the sender. :param rcpttos: The addresses of the recipients. 
:param data: The message. """ self._handler(peer, mailfrom, rcpttos, data) def start(self): """ Start the server running on a separate daemon thread. """ self._thread = t = threading.Thread(target=self.serve_forever, args=(self.poll_interval,)) t.setDaemon(True) t.start() def serve_forever(self, poll_interval): """ Run the :mod:`asyncore` loop until normal termination conditions arise. :param poll_interval: The interval, in seconds, used in the underlying :func:`select` or :func:`poll` call by :func:`asyncore.loop`. """ try: asyncore.loop(poll_interval, map=self._map) except OSError: # On FreeBSD 8, closing the server repeatably # raises this error. We swallow it if the # server has been closed. if self.connected or self.accepting: raise def stop(self, timeout=None): """ Stop the thread by closing the server instance. Wait for the server thread to terminate. :param timeout: How long to wait for the server thread to terminate. """ self.close() support.join_thread(self._thread, timeout) self._thread = None asyncore.close_all(map=self._map, ignore_all=True) class ControlMixin(object): """ This mixin is used to start a server on a separate thread, and shut it down programmatically. Request handling is simplified - instead of needing to derive a suitable RequestHandler subclass, you just provide a callable which will be passed each received request to be processed. :param handler: A handler callable which will be called with a single parameter - the request - in order to process the request. This handler is called on the server thread, effectively meaning that requests are processed serially. While not quite Web scale ;-), this should be fine for testing applications. :param poll_interval: The polling interval in seconds. """ def __init__(self, handler, poll_interval): self._thread = None self.poll_interval = poll_interval self._handler = handler self.ready = threading.Event() def start(self): """ Create a daemon thread to run the server, and start it. """ self._thread = t = threading.Thread(target=self.serve_forever, args=(self.poll_interval,)) t.setDaemon(True) t.start() def serve_forever(self, poll_interval): """ Run the server. Set the ready flag before entering the service loop. """ self.ready.set() super(ControlMixin, self).serve_forever(poll_interval) def stop(self, timeout=None): """ Tell the server thread to stop, and wait for it to do so. :param timeout: How long to wait for the server thread to terminate. """ self.shutdown() if self._thread is not None: support.join_thread(self._thread, timeout) self._thread = None self.server_close() self.ready.clear() class TestHTTPServer(ControlMixin, HTTPServer): """ An HTTP server which is controllable using :class:`ControlMixin`. :param addr: A tuple with the IP address and port to listen on. :param handler: A handler callable which will be called with a single parameter - the request - in order to process the request. :param poll_interval: The polling interval in seconds. :param log: Pass ``True`` to enable log messages. 
""" def __init__(self, addr, handler, poll_interval=0.5, log=False, sslctx=None): class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler): def __getattr__(self, name, default=None): if name.startswith('do_'): return self.process_request raise AttributeError(name) def process_request(self): self.server._handler(self) def log_message(self, format, *args): if log: super(DelegatingHTTPRequestHandler, self).log_message(format, *args) HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler) ControlMixin.__init__(self, handler, poll_interval) self.sslctx = sslctx def get_request(self): try: sock, addr = self.socket.accept() if self.sslctx: sock = self.sslctx.wrap_socket(sock, server_side=True) except OSError as e: # socket errors are silenced by the caller, print them here sys.stderr.write("Got an error:\n%s\n" % e) raise return sock, addr class TestTCPServer(ControlMixin, ThreadingTCPServer): """ A TCP server which is controllable using :class:`ControlMixin`. :param addr: A tuple with the IP address and port to listen on. :param handler: A handler callable which will be called with a single parameter - the request - in order to process the request. :param poll_interval: The polling interval in seconds. :bind_and_activate: If True (the default), binds the server and starts it listening. If False, you need to call :meth:`server_bind` and :meth:`server_activate` at some later time before calling :meth:`start`, so that the server will set up the socket and listen on it. """ allow_reuse_address = True def __init__(self, addr, handler, poll_interval=0.5, bind_and_activate=True): class DelegatingTCPRequestHandler(StreamRequestHandler): def handle(self): self.server._handler(self) ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler, bind_and_activate) ControlMixin.__init__(self, handler, poll_interval) def server_bind(self): super(TestTCPServer, self).server_bind() self.port = self.socket.getsockname()[1] class TestUDPServer(ControlMixin, ThreadingUDPServer): """ A UDP server which is controllable using :class:`ControlMixin`. :param addr: A tuple with the IP address and port to listen on. :param handler: A handler callable which will be called with a single parameter - the request - in order to process the request. :param poll_interval: The polling interval for shutdown requests, in seconds. :bind_and_activate: If True (the default), binds the server and starts it listening. If False, you need to call :meth:`server_bind` and :meth:`server_activate` at some later time before calling :meth:`start`, so that the server will set up the socket and listen on it. 
""" def __init__(self, addr, handler, poll_interval=0.5, bind_and_activate=True): class DelegatingUDPRequestHandler(DatagramRequestHandler): def handle(self): self.server._handler(self) def finish(self): data = self.wfile.getvalue() if data: try: super(DelegatingUDPRequestHandler, self).finish() except OSError: if not self.server._closed: raise ThreadingUDPServer.__init__(self, addr, DelegatingUDPRequestHandler, bind_and_activate) ControlMixin.__init__(self, handler, poll_interval) self._closed = False def server_bind(self): super(TestUDPServer, self).server_bind() self.port = self.socket.getsockname()[1] def server_close(self): super(TestUDPServer, self).server_close() self._closed = True if hasattr(socket, "AF_UNIX"): class TestUnixStreamServer(TestTCPServer): address_family = socket.AF_UNIX class TestUnixDatagramServer(TestUDPServer): address_family = socket.AF_UNIX # - end of server_helper section class SMTPHandlerTest(BaseTest): TIMEOUT = 8.0 def test_basic(self): sockmap = {} server = TestSMTPServer((support.HOST, 0), self.process_message, 0.001, sockmap) server.start() addr = (support.HOST, server.port) h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log', timeout=self.TIMEOUT) self.assertEqual(h.toaddrs, ['you']) self.messages = [] r = logging.makeLogRecord({'msg': 'Hello \u2713'}) self.handled = threading.Event() h.handle(r) self.handled.wait(self.TIMEOUT) # 14314: don't wait forever server.stop() self.assertTrue(self.handled.is_set()) self.assertEqual(len(self.messages), 1) peer, mailfrom, rcpttos, data = self.messages[0] self.assertEqual(mailfrom, 'me') self.assertEqual(rcpttos, ['you']) self.assertIn('\nSubject: Log\n', data) self.assertTrue(data.endswith('\n\nHello \u2713')) h.close() def process_message(self, *args): self.messages.append(args) self.handled.set() class MemoryHandlerTest(BaseTest): """Tests for the MemoryHandler.""" # Do not bother with a logger name group. expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$" def setUp(self): BaseTest.setUp(self) self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING, self.root_hdlr) self.mem_logger = logging.getLogger('mem') self.mem_logger.propagate = 0 self.mem_logger.addHandler(self.mem_hdlr) def tearDown(self): self.mem_hdlr.close() BaseTest.tearDown(self) def test_flush(self): # The memory handler flushes to its target handler based on specific # criteria (message count and message level). self.mem_logger.debug(self.next_message()) self.assert_log_lines([]) self.mem_logger.info(self.next_message()) self.assert_log_lines([]) # This will flush because the level is >= logging.WARNING self.mem_logger.warning(self.next_message()) lines = [ ('DEBUG', '1'), ('INFO', '2'), ('WARNING', '3'), ] self.assert_log_lines(lines) for n in (4, 14): for i in range(9): self.mem_logger.debug(self.next_message()) self.assert_log_lines(lines) # This will flush because it's the 10th message since the last # flush. self.mem_logger.debug(self.next_message()) lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)] self.assert_log_lines(lines) self.mem_logger.debug(self.next_message()) self.assert_log_lines(lines) def test_flush_on_close(self): """ Test that the flush-on-close configuration works as expected. """ self.mem_logger.debug(self.next_message()) self.assert_log_lines([]) self.mem_logger.info(self.next_message()) self.assert_log_lines([]) self.mem_logger.removeHandler(self.mem_hdlr) # Default behaviour is to flush on close. Check that it happens. 
self.mem_hdlr.close() lines = [ ('DEBUG', '1'), ('INFO', '2'), ] self.assert_log_lines(lines) # Now configure for flushing not to be done on close. self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING, self.root_hdlr, False) self.mem_logger.addHandler(self.mem_hdlr) self.mem_logger.debug(self.next_message()) self.assert_log_lines(lines) # no change self.mem_logger.info(self.next_message()) self.assert_log_lines(lines) # no change self.mem_logger.removeHandler(self.mem_hdlr) self.mem_hdlr.close() # assert that no new lines have been added self.assert_log_lines(lines) # no change class ExceptionFormatter(logging.Formatter): """A special exception formatter.""" def formatException(self, ei): return "Got a [%s]" % ei[0].__name__ class ConfigFileTest(BaseTest): """Reading logging config from a .ini-style config file.""" expected_log_pat = r"^(\w+) \+\+ (\w+)$" # config0 is a standard configuration. config0 = """ [loggers] keys=root [handlers] keys=hand1 [formatters] keys=form1 [logger_root] level=WARNING handlers=hand1 [handler_hand1] class=StreamHandler level=NOTSET formatter=form1 args=(sys.stdout,) [formatter_form1] format=%(levelname)s ++ %(message)s datefmt= """ # config1 adds a little to the standard configuration. config1 = """ [loggers] keys=root,parser [handlers] keys=hand1 [formatters] keys=form1 [logger_root] level=WARNING handlers= [logger_parser] level=DEBUG handlers=hand1 propagate=1 qualname=compiler.parser [handler_hand1] class=StreamHandler level=NOTSET formatter=form1 args=(sys.stdout,) [formatter_form1] format=%(levelname)s ++ %(message)s datefmt= """ # config1a moves the handler to the root. config1a = """ [loggers] keys=root,parser [handlers] keys=hand1 [formatters] keys=form1 [logger_root] level=WARNING handlers=hand1 [logger_parser] level=DEBUG handlers= propagate=1 qualname=compiler.parser [handler_hand1] class=StreamHandler level=NOTSET formatter=form1 args=(sys.stdout,) [formatter_form1] format=%(levelname)s ++ %(message)s datefmt= """ # config2 has a subtle configuration error that should be reported config2 = config1.replace("sys.stdout", "sys.stbout") # config3 has a less subtle configuration error config3 = config1.replace("formatter=form1", "formatter=misspelled_name") # config4 specifies a custom formatter class to be loaded config4 = """ [loggers] keys=root [handlers] keys=hand1 [formatters] keys=form1 [logger_root] level=NOTSET handlers=hand1 [handler_hand1] class=StreamHandler level=NOTSET formatter=form1 args=(sys.stdout,) [formatter_form1] class=""" + __name__ + """.ExceptionFormatter format=%(levelname)s:%(name)s:%(message)s datefmt= """ # config5 specifies a custom handler class to be loaded config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler') # config6 uses ', ' delimiters in the handlers and formatters sections config6 = """ [loggers] keys=root,parser [handlers] keys=hand1, hand2 [formatters] keys=form1, form2 [logger_root] level=WARNING handlers= [logger_parser] level=DEBUG handlers=hand1 propagate=1 qualname=compiler.parser [handler_hand1] class=StreamHandler level=NOTSET formatter=form1 args=(sys.stdout,) [handler_hand2] class=StreamHandler level=NOTSET formatter=form1 args=(sys.stderr,) [formatter_form1] format=%(levelname)s ++ %(message)s datefmt= [formatter_form2] format=%(message)s datefmt= """ # config7 adds a compiler logger, and uses kwargs instead of args. 
config7 = """ [loggers] keys=root,parser,compiler [handlers] keys=hand1 [formatters] keys=form1 [logger_root] level=WARNING handlers=hand1 [logger_compiler] level=DEBUG handlers= propagate=1 qualname=compiler [logger_parser] level=DEBUG handlers= propagate=1 qualname=compiler.parser [handler_hand1] class=StreamHandler level=NOTSET formatter=form1 kwargs={'stream': sys.stdout,} [formatter_form1] format=%(levelname)s ++ %(message)s datefmt= """ disable_test = """ [loggers] keys=root [handlers] keys=screen [formatters] keys= [logger_root] level=DEBUG handlers=screen [handler_screen] level=DEBUG class=StreamHandler args=(sys.stdout,) formatter= """ def apply_config(self, conf, **kwargs): file = io.StringIO(textwrap.dedent(conf)) logging.config.fileConfig(file, **kwargs) def test_config0_ok(self): # A simple config file which overrides the default settings. with support.captured_stdout() as output: self.apply_config(self.config0) logger = logging.getLogger() # Won't output anything logger.info(self.next_message()) # Outputs a message logger.error(self.next_message()) self.assert_log_lines([ ('ERROR', '2'), ], stream=output) # Original logger output is empty. self.assert_log_lines([]) def test_config0_using_cp_ok(self): # A simple config file which overrides the default settings. with support.captured_stdout() as output: file = io.StringIO(textwrap.dedent(self.config0)) cp = configparser.ConfigParser() cp.read_file(file) logging.config.fileConfig(cp) logger = logging.getLogger() # Won't output anything logger.info(self.next_message()) # Outputs a message logger.error(self.next_message()) self.assert_log_lines([ ('ERROR', '2'), ], stream=output) # Original logger output is empty. self.assert_log_lines([]) def test_config1_ok(self, config=config1): # A config file defining a sub-parser as well. with support.captured_stdout() as output: self.apply_config(config) logger = logging.getLogger("compiler.parser") # Both will output a message logger.info(self.next_message()) logger.error(self.next_message()) self.assert_log_lines([ ('INFO', '1'), ('ERROR', '2'), ], stream=output) # Original logger output is empty. self.assert_log_lines([]) def test_config2_failure(self): # A simple config file which overrides the default settings. self.assertRaises(Exception, self.apply_config, self.config2) def test_config3_failure(self): # A simple config file which overrides the default settings. self.assertRaises(Exception, self.apply_config, self.config3) def test_config4_ok(self): # A config file specifying a custom formatter class. with support.captured_stdout() as output: self.apply_config(self.config4) logger = logging.getLogger() try: raise RuntimeError() except RuntimeError: logging.exception("just testing") sys.stdout.seek(0) self.assertEqual(output.getvalue(), "ERROR:root:just testing\nGot a [RuntimeError]\n") # Original logger output is empty self.assert_log_lines([]) def test_config5_ok(self): self.test_config1_ok(config=self.config5) def test_config6_ok(self): self.test_config1_ok(config=self.config6) def test_config7_ok(self): with support.captured_stdout() as output: self.apply_config(self.config1a) logger = logging.getLogger("compiler.parser") # See issue #11424. compiler-hyphenated sorts # between compiler and compiler.xyz and this # was preventing compiler.xyz from being included # in the child loggers of compiler because of an # overzealous loop termination condition. 
hyphenated = logging.getLogger('compiler-hyphenated') # All will output a message logger.info(self.next_message()) logger.error(self.next_message()) hyphenated.critical(self.next_message()) self.assert_log_lines([ ('INFO', '1'), ('ERROR', '2'), ('CRITICAL', '3'), ], stream=output) # Original logger output is empty. self.assert_log_lines([]) with support.captured_stdout() as output: self.apply_config(self.config7) logger = logging.getLogger("compiler.parser") self.assertFalse(logger.disabled) # Both will output a message logger.info(self.next_message()) logger.error(self.next_message()) logger = logging.getLogger("compiler.lexer") # Both will output a message logger.info(self.next_message()) logger.error(self.next_message()) # Will not appear hyphenated.critical(self.next_message()) self.assert_log_lines([ ('INFO', '4'), ('ERROR', '5'), ('INFO', '6'), ('ERROR', '7'), ], stream=output) # Original logger output is empty. self.assert_log_lines([]) def test_logger_disabling(self): self.apply_config(self.disable_test) logger = logging.getLogger('some_pristine_logger') self.assertFalse(logger.disabled) self.apply_config(self.disable_test) self.assertTrue(logger.disabled) self.apply_config(self.disable_test, disable_existing_loggers=False) self.assertFalse(logger.disabled) class SocketHandlerTest(BaseTest): """Test for SocketHandler objects.""" server_class = TestTCPServer address = ('localhost', 0) def setUp(self): """Set up a TCP server to receive log messages, and a SocketHandler pointing to that server's address and port.""" BaseTest.setUp(self) # Issue #29177: deal with errors that happen during setup self.server = self.sock_hdlr = self.server_exception = None try: self.server = server = self.server_class(self.address, self.handle_socket, 0.01) server.start() # Uncomment next line to test error recovery in setUp() # raise OSError('dummy error raised') except OSError as e: self.server_exception = e return server.ready.wait() hcls = logging.handlers.SocketHandler if isinstance(server.server_address, tuple): self.sock_hdlr = hcls('localhost', server.port) else: self.sock_hdlr = hcls(server.server_address, None) self.log_output = '' self.root_logger.removeHandler(self.root_logger.handlers[0]) self.root_logger.addHandler(self.sock_hdlr) self.handled = threading.Semaphore(0) def tearDown(self): """Shutdown the TCP server.""" try: if self.sock_hdlr: self.root_logger.removeHandler(self.sock_hdlr) self.sock_hdlr.close() if self.server: self.server.stop(2.0) finally: BaseTest.tearDown(self) def handle_socket(self, request): conn = request.connection while True: chunk = conn.recv(4) if len(chunk) < 4: break slen = struct.unpack(">L", chunk)[0] chunk = conn.recv(slen) while len(chunk) < slen: chunk = chunk + conn.recv(slen - len(chunk)) obj = pickle.loads(chunk) record = logging.makeLogRecord(obj) self.log_output += record.msg + '\n' self.handled.release() def test_output(self): # The log message sent to the SocketHandler is properly received. if self.server_exception: self.skipTest(self.server_exception) logger = logging.getLogger("tcp") logger.error("spam") self.handled.acquire() logger.debug("eggs") self.handled.acquire() self.assertEqual(self.log_output, "spam\neggs\n") def test_noserver(self): if self.server_exception: self.skipTest(self.server_exception) # Avoid timing-related failures due to SocketHandler's own hard-wired # one-second timeout on socket.create_connection() (issue #16264). 
self.sock_hdlr.retryStart = 2.5 # Kill the server self.server.stop(2.0) # The logging call should try to connect, which should fail try: raise RuntimeError('Deliberate mistake') except RuntimeError: self.root_logger.exception('Never sent') self.root_logger.error('Never sent, either') now = time.time() self.assertGreater(self.sock_hdlr.retryTime, now) time.sleep(self.sock_hdlr.retryTime - now + 0.001) self.root_logger.error('Nor this') def _get_temp_domain_socket(): fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock') os.close(fd) # just need a name - file can't be present, or we'll get an # 'address already in use' error. os.remove(fn) return fn @unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required") class UnixSocketHandlerTest(SocketHandlerTest): """Test for SocketHandler with unix sockets.""" if hasattr(socket, "AF_UNIX"): server_class = TestUnixStreamServer def setUp(self): # override the definition in the base class self.address = _get_temp_domain_socket() SocketHandlerTest.setUp(self) def tearDown(self): SocketHandlerTest.tearDown(self) support.unlink(self.address) class DatagramHandlerTest(BaseTest): """Test for DatagramHandler.""" server_class = TestUDPServer address = ('localhost', 0) def setUp(self): """Set up a UDP server to receive log messages, and a DatagramHandler pointing to that server's address and port.""" BaseTest.setUp(self) # Issue #29177: deal with errors that happen during setup self.server = self.sock_hdlr = self.server_exception = None try: self.server = server = self.server_class(self.address, self.handle_datagram, 0.01) server.start() # Uncomment next line to test error recovery in setUp() # raise OSError('dummy error raised') except OSError as e: self.server_exception = e return server.ready.wait() hcls = logging.handlers.DatagramHandler if isinstance(server.server_address, tuple): self.sock_hdlr = hcls('localhost', server.port) else: self.sock_hdlr = hcls(server.server_address, None) self.log_output = '' self.root_logger.removeHandler(self.root_logger.handlers[0]) self.root_logger.addHandler(self.sock_hdlr) self.handled = threading.Event() def tearDown(self): """Shutdown the UDP server.""" try: if self.server: self.server.stop(2.0) if self.sock_hdlr: self.root_logger.removeHandler(self.sock_hdlr) self.sock_hdlr.close() finally: BaseTest.tearDown(self) def handle_datagram(self, request): slen = struct.pack('>L', 0) # length of prefix packet = request.packet[len(slen):] obj = pickle.loads(packet) record = logging.makeLogRecord(obj) self.log_output += record.msg + '\n' self.handled.set() def test_output(self): # The log message sent to the DatagramHandler is properly received. 
class DatagramHandlerTest(BaseTest):

    """Test for DatagramHandler."""

    server_class = TestUDPServer
    address = ('localhost', 0)

    def setUp(self):
        """Set up a UDP server to receive log messages, and a DatagramHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # Issue #29177: deal with errors that happen during setup
        self.server = self.sock_hdlr = self.server_exception = None
        try:
            self.server = server = self.server_class(self.address,
                                                     self.handle_datagram, 0.01)
            server.start()
            # Uncomment next line to test error recovery in setUp()
            # raise OSError('dummy error raised')
        except OSError as e:
            self.server_exception = e
            return
        server.ready.wait()
        hcls = logging.handlers.DatagramHandler
        if isinstance(server.server_address, tuple):
            self.sock_hdlr = hcls('localhost', server.port)
        else:
            self.sock_hdlr = hcls(server.server_address, None)
        self.log_output = ''
        self.root_logger.removeHandler(self.root_logger.handlers[0])
        self.root_logger.addHandler(self.sock_hdlr)
        self.handled = threading.Event()

    def tearDown(self):
        """Shutdown the UDP server."""
        try:
            if self.server:
                self.server.stop(2.0)
            if self.sock_hdlr:
                self.root_logger.removeHandler(self.sock_hdlr)
                self.sock_hdlr.close()
        finally:
            BaseTest.tearDown(self)

    def handle_datagram(self, request):
        slen = struct.pack('>L', 0)  # length of prefix
        packet = request.packet[len(slen):]
        obj = pickle.loads(packet)
        record = logging.makeLogRecord(obj)
        self.log_output += record.msg + '\n'
        self.handled.set()

    def test_output(self):
        # The log message sent to the DatagramHandler is properly received.
        if self.server_exception:
            self.skipTest(self.server_exception)
        logger = logging.getLogger("udp")
        logger.error("spam")
        self.handled.wait()
        self.handled.clear()
        logger.error("eggs")
        self.handled.wait()
        self.assertEqual(self.log_output, "spam\neggs\n")

@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):

    """Test for DatagramHandler using Unix sockets."""

    if hasattr(socket, "AF_UNIX"):
        server_class = TestUnixDatagramServer

    def setUp(self):
        # override the definition in the base class
        self.address = _get_temp_domain_socket()
        DatagramHandlerTest.setUp(self)

    def tearDown(self):
        DatagramHandlerTest.tearDown(self)
        support.unlink(self.address)

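# SysLogHandler prefixes each message with an RFC 3164 priority '<PRI>',
# where PRI = facility * 8 + severity.  The tests below log at ERROR with
# the default LOG_USER facility, hence '<11>' (1 * 8 + 3), plus a
# trailing NUL byte unless append_nul is turned off.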
logger = logging.getLogger("slh") logger.error("sp\xe4m") self.handled.wait() self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00') self.handled.clear() self.sl_hdlr.append_nul = False logger.error("sp\xe4m") self.handled.wait() self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m') self.handled.clear() self.sl_hdlr.ident = "h\xe4m-" logger.error("sp\xe4m") self.handled.wait() self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m') @unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required") class UnixSysLogHandlerTest(SysLogHandlerTest): """Test for SysLogHandler with Unix sockets.""" if hasattr(socket, "AF_UNIX"): server_class = TestUnixDatagramServer def setUp(self): # override the definition in the base class self.address = _get_temp_domain_socket() SysLogHandlerTest.setUp(self) def tearDown(self): SysLogHandlerTest.tearDown(self) support.unlink(self.address) @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 support required for this test.') class IPv6SysLogHandlerTest(SysLogHandlerTest): """Test for SysLogHandler with IPv6 host.""" server_class = TestUDPServer address = ('::1', 0) def setUp(self): self.server_class.address_family = socket.AF_INET6 super(IPv6SysLogHandlerTest, self).setUp() def tearDown(self): self.server_class.address_family = socket.AF_INET super(IPv6SysLogHandlerTest, self).tearDown() class HTTPHandlerTest(BaseTest): """Test for HTTPHandler.""" def setUp(self): """Set up an HTTP server to receive log messages, and a HTTPHandler pointing to that server's address and port.""" BaseTest.setUp(self) self.handled = threading.Event() def handle_request(self, request): self.command = request.command self.log_data = urlparse(request.path) if self.command == 'POST': try: rlen = int(request.headers['Content-Length']) self.post_data = request.rfile.read(rlen) except: self.post_data = None request.send_response(200) request.end_headers() self.handled.set() def test_output(self): # The log message sent to the HTTPHandler is properly received. 
logger = logging.getLogger("http") root_logger = self.root_logger root_logger.removeHandler(self.root_logger.handlers[0]) for secure in (False, True): addr = ('localhost', 0) if secure: try: import ssl except ImportError: sslctx = None else: here = os.path.dirname(__file__) localhost_cert = os.path.join(here, "keycert.pem") sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) sslctx.load_cert_chain(localhost_cert) context = ssl.create_default_context(cafile=localhost_cert) else: sslctx = None context = None self.server = server = TestHTTPServer(addr, self.handle_request, 0.01, sslctx=sslctx) server.start() server.ready.wait() host = 'localhost:%d' % server.server_port secure_client = secure and sslctx self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob', secure=secure_client, context=context, credentials=('foo', 'bar')) self.log_data = None root_logger.addHandler(self.h_hdlr) for method in ('GET', 'POST'): self.h_hdlr.method = method self.handled.clear() msg = "sp\xe4m" logger.error(msg) self.handled.wait() self.assertEqual(self.log_data.path, '/frob') self.assertEqual(self.command, method) if method == 'GET': d = parse_qs(self.log_data.query) else: d = parse_qs(self.post_data.decode('utf-8')) self.assertEqual(d['name'], ['http']) self.assertEqual(d['funcName'], ['test_output']) self.assertEqual(d['msg'], [msg]) self.server.stop(2.0) self.root_logger.removeHandler(self.h_hdlr) self.h_hdlr.close() class MemoryTest(BaseTest): """Test memory persistence of logger objects.""" def setUp(self): """Create a dict to remember potentially destroyed objects.""" BaseTest.setUp(self) self._survivors = {} def _watch_for_survival(self, *args): """Watch the given objects for survival, by creating weakrefs to them.""" for obj in args: key = id(obj), repr(obj) self._survivors[key] = weakref.ref(obj) def _assertTruesurvival(self): """Assert that all objects watched for survival have survived.""" # Trigger cycle breaking. gc.collect() dead = [] for (id_, repr_), ref in self._survivors.items(): if ref() is None: dead.append(repr_) if dead: self.fail("%d objects should have survived " "but have been destroyed: %s" % (len(dead), ", ".join(dead))) def test_persistent_loggers(self): # Logger objects are persistent and retain their configuration, even # if visible references are destroyed. self.root_logger.setLevel(logging.INFO) foo = logging.getLogger("foo") self._watch_for_survival(foo) foo.setLevel(logging.DEBUG) self.root_logger.debug(self.next_message()) foo.debug(self.next_message()) self.assert_log_lines([ ('foo', 'DEBUG', '2'), ]) del foo # foo has survived. self._assertTruesurvival() # foo has retained its settings. bar = logging.getLogger("foo") bar.debug(self.next_message()) self.assert_log_lines([ ('foo', 'DEBUG', '2'), ('foo', 'DEBUG', '3'), ]) class EncodingTest(BaseTest): def test_encoding_plain_file(self): # In Python 2.x, a plain file object is treated as having no encoding. log = logging.getLogger("test") fd, fn = tempfile.mkstemp(".log", "test_logging-1-") os.close(fd) # the non-ascii data we write to the log. data = "foo\x80" try: handler = logging.FileHandler(fn, encoding="utf-8") log.addHandler(handler) try: # write non-ascii data to the log. 
class EncodingTest(BaseTest):
    def test_encoding_plain_file(self):
        # In Python 2.x, a plain file object is treated as having no encoding.
        log = logging.getLogger("test")
        fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
        os.close(fd)
        # the non-ascii data we write to the log.
        data = "foo\x80"
        try:
            handler = logging.FileHandler(fn, encoding="utf-8")
            log.addHandler(handler)
            try:
                # write non-ascii data to the log.
                log.warning(data)
            finally:
                log.removeHandler(handler)
                handler.close()
            # check we wrote exactly those bytes, ignoring trailing \n etc
            f = open(fn, encoding="utf-8")
            try:
                self.assertEqual(f.read().rstrip(), data)
            finally:
                f.close()
        finally:
            if os.path.isfile(fn):
                os.remove(fn)

    def test_encoding_cyrillic_unicode(self):
        log = logging.getLogger("test")
        # Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
        message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
        # Ensure it's written in a Cyrillic encoding
        writer_class = codecs.getwriter('cp1251')
        writer_class.encoding = 'cp1251'
        stream = io.BytesIO()
        writer = writer_class(stream, 'strict')
        handler = logging.StreamHandler(writer)
        log.addHandler(handler)
        try:
            log.warning(message)
        finally:
            log.removeHandler(handler)
            handler.close()
        # check we wrote exactly those bytes, ignoring trailing \n etc
        s = stream.getvalue()
        # Compare against what the data should be when encoded in CP-1251
        self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')

class WarningsTest(BaseTest):

    def test_warnings(self):
        with warnings.catch_warnings():
            logging.captureWarnings(True)
            self.addCleanup(logging.captureWarnings, False)
            warnings.filterwarnings("always", category=UserWarning)
            stream = io.StringIO()
            h = logging.StreamHandler(stream)
            logger = logging.getLogger("py.warnings")
            logger.addHandler(h)
            warnings.warn("I'm warning you...")
            logger.removeHandler(h)
            s = stream.getvalue()
            h.close()
            self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)

            # See if an explicit file uses the original implementation
            a_file = io.StringIO()
            warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
                                 a_file, "Dummy line")
            s = a_file.getvalue()
            a_file.close()
            self.assertEqual(s,
                             "dummy.py:42: UserWarning: Explicit\n  Dummy line\n")

    def test_warnings_no_handlers(self):
        with warnings.catch_warnings():
            logging.captureWarnings(True)
            self.addCleanup(logging.captureWarnings, False)

            # confirm our assumption: no loggers are set
            logger = logging.getLogger("py.warnings")
            self.assertEqual(logger.handlers, [])

            warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
            self.assertEqual(len(logger.handlers), 1)
            self.assertIsInstance(logger.handlers[0], logging.NullHandler)

def formatFunc(format, datefmt=None):
    return logging.Formatter(format, datefmt)

def handlerFunc():
    return logging.StreamHandler()

class CustomHandler(logging.StreamHandler):
    pass

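# ConfigDictTest feeds dicts straight to logging.config.dictConfig().
# Every config below uses schema version 1; the top-level keys
# ('formatters', 'handlers', 'loggers', 'root') parallel the sections of
# the fileConfig format exercised earlier in this module.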
class ConfigDictTest(BaseTest):

    """Reading logging config from a dictionary."""

    expected_log_pat = r"^(\w+) \+\+ (\w+)$"

    # config0 is a standard configuration.
    config0 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'root' : {
            'level' : 'WARNING',
            'handlers' : ['hand1'],
        },
    }

    # config1 adds a little to the standard configuration.
    config1 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # config1a moves the handler to the root. Used with config8a
    config1a = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
            },
        },
        'root' : {
            'level' : 'WARNING',
            'handlers' : ['hand1'],
        },
    }

    # config2 has a subtle configuration error that should be reported
    config2 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdbout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # As config1 but with a misspelt level on a handler
    config2a = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NTOSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # As config1 but with a misspelt level on a logger
    config2b = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WRANING',
        },
    }

    # config3 has a less subtle configuration error
    config3 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'misspelled_name',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # config4 specifies a custom formatter class to be loaded
    config4 = {
        'version': 1,
        'formatters': {
            'form1' : {
                '()' : __name__ + '.ExceptionFormatter',
                'format' : '%(levelname)s:%(name)s:%(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'root' : {
            'level' : 'NOTSET',
            'handlers' : ['hand1'],
        },
    }

    # As config4 but using an actual callable rather than a string
    config4a = {
        'version': 1,
        'formatters': {
            'form1' : {
                '()' : ExceptionFormatter,
                'format' : '%(levelname)s:%(name)s:%(message)s',
            },
            'form2' : {
                '()' : __name__ + '.formatFunc',
                'format' : '%(levelname)s:%(name)s:%(message)s',
            },
            'form3' : {
                '()' : formatFunc,
                'format' : '%(levelname)s:%(name)s:%(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
            'hand2' : {
                '()' : handlerFunc,
            },
        },
        'root' : {
            'level' : 'NOTSET',
            'handlers' : ['hand1'],
        },
    }

    # config5 specifies a custom handler class to be loaded
    config5 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : __name__ + '.CustomHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # config6 specifies a custom handler class to be loaded
    # but has bad arguments
    config6 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : __name__ + '.CustomHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
                '9' : 'invalid parameter name',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # config7 does not define compiler.parser but defines compiler.lexer
    # so compiler.parser should be disabled after applying it
    config7 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.lexer' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # config8 defines both compiler and compiler.lexer
    # so compiler.parser should not be disabled (since
    # compiler is defined)
    config8 = {
        'version': 1,
        'disable_existing_loggers' : False,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
            'compiler.lexer' : {
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # config8a disables existing loggers
    config8a = {
        'version': 1,
        'disable_existing_loggers' : True,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
            'compiler.lexer' : {
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    config9 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'WARNING',
                'stream' : 'ext://sys.stdout',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'WARNING',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'NOTSET',
        },
    }

    config9a = {
        'version': 1,
        'incremental' : True,
        'handlers' : {
            'hand1' : {
                'level' : 'WARNING',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'INFO',
            },
        },
    }

    config9b = {
        'version': 1,
        'incremental' : True,
        'handlers' : {
            'hand1' : {
                'level' : 'INFO',
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'INFO',
            },
        },
    }

    # As config1 but with a filter added
    config10 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'filters' : {
            'filt1' : {
                'name' : 'compiler.parser',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
                'filters' : ['filt1'],
            },
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'filters' : ['filt1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
            'handlers' : ['hand1'],
        },
    }

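    # cfg:// URLs let one part of the config refer to another: the path
    # after the scheme is resolved against this same dict, with dotted or
    # [bracketed] access for nested items, e.g.
    # 'cfg://handler_configs[hand1]' picks the 'hand1' entry out of
    # 'handler_configs'.  config11-config13 below rely on this
    # indirection; test_baseconfig() covers the resolver itself.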
    # As config1 but using cfg:// references
    config11 = {
        'version': 1,
        'true_formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handler_configs': {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'formatters' : 'cfg://true_formatters',
        'handlers' : {
            'hand1' : 'cfg://handler_configs[hand1]',
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # As config11 but missing the version key
    config12 = {
        'true_formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handler_configs': {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'formatters' : 'cfg://true_formatters',
        'handlers' : {
            'hand1' : 'cfg://handler_configs[hand1]',
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # As config11 but using an unsupported version
    config13 = {
        'version': 2,
        'true_formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handler_configs': {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
            },
        },
        'formatters' : 'cfg://true_formatters',
        'handlers' : {
            'hand1' : 'cfg://handler_configs[hand1]',
        },
        'loggers' : {
            'compiler.parser' : {
                'level' : 'DEBUG',
                'handlers' : ['hand1'],
            },
        },
        'root' : {
            'level' : 'WARNING',
        },
    }

    # As config0, but with properties
    config14 = {
        'version': 1,
        'formatters': {
            'form1' : {
                'format' : '%(levelname)s ++ %(message)s',
            },
        },
        'handlers' : {
            'hand1' : {
                'class' : 'logging.StreamHandler',
                'formatter' : 'form1',
                'level' : 'NOTSET',
                'stream' : 'ext://sys.stdout',
                '.': {
                    'foo': 'bar',
                    'terminator': '!\n',
                }
            },
        },
        'root' : {
            'level' : 'WARNING',
            'handlers' : ['hand1'],
        },
    }

    out_of_order = {
        "version": 1,
        "formatters": {
            "mySimpleFormatter": {
                "format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
                "style": "$"
            }
        },
        "handlers": {
            "fileGlobal": {
                "class": "logging.StreamHandler",
                "level": "DEBUG",
                "formatter": "mySimpleFormatter"
            },
            "bufferGlobal": {
                "class": "logging.handlers.MemoryHandler",
                "capacity": 5,
                "formatter": "mySimpleFormatter",
                "target": "fileGlobal",
                "level": "DEBUG"
            }
        },
        "loggers": {
            "mymodule": {
                "level": "DEBUG",
                "handlers": ["bufferGlobal"],
                "propagate": "true"
            }
        }
    }

    def apply_config(self, conf):
        logging.config.dictConfig(conf)

    def test_config0_ok(self):
        # A simple config which overrides the default settings.
        with support.captured_stdout() as output:
            self.apply_config(self.config0)
            logger = logging.getLogger()
            # Won't output anything
            logger.info(self.next_message())
            # Outputs a message
            logger.error(self.next_message())
            self.assert_log_lines([
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config1_ok(self, config=config1):
        # A config defining a sub-parser as well.
        with support.captured_stdout() as output:
            self.apply_config(config)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config2_failure(self):
        # A simple config which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config2)

    def test_config2a_failure(self):
        # A simple config which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config2a)

    def test_config2b_failure(self):
        # A simple config which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config2b)

    def test_config3_failure(self):
        # A simple config which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config3)

    def test_config4_ok(self):
        # A config specifying a custom formatter class.
        with support.captured_stdout() as output:
            self.apply_config(self.config4)
            #logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([])

    def test_config4a_ok(self):
        # A config specifying a custom formatter class.
        with support.captured_stdout() as output:
            self.apply_config(self.config4a)
            #logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([])

    def test_config5_ok(self):
        self.test_config1_ok(config=self.config5)

    def test_config6_failure(self):
        self.assertRaises(Exception, self.apply_config, self.config6)

    def test_config7_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config1)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with support.captured_stdout() as output:
            self.apply_config(self.config7)
            logger = logging.getLogger("compiler.parser")
            self.assertTrue(logger.disabled)
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '3'),
                ('ERROR', '4'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    # Same as test_config_7_ok but don't disable old loggers.
    def test_config_8_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config1)
            logger = logging.getLogger("compiler.parser")
            # All will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with support.captured_stdout() as output:
            self.apply_config(self.config8)
            logger = logging.getLogger("compiler.parser")
            self.assertFalse(logger.disabled)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '3'),
                ('ERROR', '4'),
                ('INFO', '5'),
                ('ERROR', '6'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config_8a_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config1a)
            logger = logging.getLogger("compiler.parser")
            # See issue #11424. compiler-hyphenated sorts
            # between compiler and compiler.xyz and this
            # was preventing compiler.xyz from being included
            # in the child loggers of compiler because of an
            # overzealous loop termination condition.
            hyphenated = logging.getLogger('compiler-hyphenated')
            # All will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
                ('CRITICAL', '3'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with support.captured_stdout() as output:
            self.apply_config(self.config8a)
            logger = logging.getLogger("compiler.parser")
            self.assertFalse(logger.disabled)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            # Will not appear
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '4'),
                ('ERROR', '5'),
                ('INFO', '6'),
                ('ERROR', '7'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config_9_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config9)
            logger = logging.getLogger("compiler.parser")
            # Nothing will be output since both handler and logger are
            # set to WARNING
            logger.info(self.next_message())
            self.assert_log_lines([], stream=output)
            self.apply_config(self.config9a)
            # Nothing will be output since the handler is still set to WARNING
            logger.info(self.next_message())
            self.assert_log_lines([], stream=output)
            self.apply_config(self.config9b)
            # Message should now be output
            logger.info(self.next_message())
            self.assert_log_lines([
                ('INFO', '3'),
            ], stream=output)

    def test_config_10_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config10)
            logger = logging.getLogger("compiler.parser")
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler')
            # Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler.lexer')
            # Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger("compiler.parser.codegen")
            # Output, as not filtered
            logger.error(self.next_message())
            self.assert_log_lines([
                ('WARNING', '1'),
                ('ERROR', '4'),
            ], stream=output)

    def test_config11_ok(self):
        self.test_config1_ok(self.config11)

    def test_config12_failure(self):
        self.assertRaises(Exception, self.apply_config, self.config12)

    def test_config13_failure(self):
        self.assertRaises(Exception, self.apply_config, self.config13)

    def test_config14_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config14)
            h = logging._handlers['hand1']
            self.assertEqual(h.foo, 'bar')
            self.assertEqual(h.terminator, '!\n')
            logging.warning('Exclamation')
            self.assertTrue(output.getvalue().endswith('Exclamation!\n'))

    def setup_via_listener(self, text, verify=None):
        text = text.encode("utf-8")
        # Ask for a randomly assigned port (by using port 0)
        t = logging.config.listen(0, verify)
        t.start()
        t.ready.wait()
        # Now get the port allocated
        port = t.port
        t.ready.clear()
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(2.0)
            sock.connect(('localhost', port))

            slen = struct.pack('>L', len(text))
            s = slen + text
            sentsofar = 0
            left = len(s)
            while left > 0:
                sent = sock.send(s[sentsofar:])
                sentsofar += sent
                left -= sent
            sock.close()
        finally:
            t.ready.wait(2.0)
            logging.config.stopListening()
            support.join_thread(t, 2.0)

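    # logging.config.listen() accepts the same length-prefixed frames that
    # setup_via_listener() sends above: a 4-byte big-endian byte count
    # followed by the configuration itself, which may be JSON for
    # dictConfig (first test below) or INI text for fileConfig.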
    def test_listen_config_10_ok(self):
        with support.captured_stdout() as output:
            self.setup_via_listener(json.dumps(self.config10))
            logger = logging.getLogger("compiler.parser")
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler')
            # Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler.lexer')
            # Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger("compiler.parser.codegen")
            # Output, as not filtered
            logger.error(self.next_message())
            self.assert_log_lines([
                ('WARNING', '1'),
                ('ERROR', '4'),
            ], stream=output)

    def test_listen_config_1_ok(self):
        with support.captured_stdout() as output:
            self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
        # Original logger output is empty.
        self.assert_log_lines([])

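    # The optional verify callable given to listen() sees the raw bytes
    # before they are applied: returning None rejects the payload, while
    # returning (possibly transformed) bytes substitutes them - which is
    # how the reversed payload below gets straightened out again.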
    def test_listen_verify(self):

        def verify_fail(stuff):
            return None

        def verify_reverse(stuff):
            return stuff[::-1]

        logger = logging.getLogger("compiler.parser")
        to_send = textwrap.dedent(ConfigFileTest.config1)

        # First, specify a verification function that will fail.
        # We expect to see no output, since our configuration
        # never took effect.
        with support.captured_stdout() as output:
            self.setup_via_listener(to_send, verify_fail)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
        self.assert_log_lines([], stream=output)
        # Original logger output has the stuff we logged.
        self.assert_log_lines([
            ('INFO', '1'),
            ('ERROR', '2'),
        ], pat=r"^[\w.]+ -> (\w+): (\d+)$")

        # Now, perform no verification. Our configuration
        # should take effect.
        with support.captured_stdout() as output:
            self.setup_via_listener(to_send)    # no verify callable specified
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
        self.assert_log_lines([
            ('INFO', '3'),
            ('ERROR', '4'),
        ], stream=output)
        # Original logger output still has the stuff we logged before.
        self.assert_log_lines([
            ('INFO', '1'),
            ('ERROR', '2'),
        ], pat=r"^[\w.]+ -> (\w+): (\d+)$")

        # Now, perform verification which transforms the bytes.
        with support.captured_stdout() as output:
            self.setup_via_listener(to_send[::-1], verify_reverse)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
        self.assert_log_lines([
            ('INFO', '5'),
            ('ERROR', '6'),
        ], stream=output)
        # Original logger output still has the stuff we logged before.
        self.assert_log_lines([
            ('INFO', '1'),
            ('ERROR', '2'),
        ], pat=r"^[\w.]+ -> (\w+): (\d+)$")

    def test_out_of_order(self):
        self.apply_config(self.out_of_order)
        handler = logging.getLogger('mymodule').handlers[0]
        self.assertIsInstance(handler.target, logging.Handler)
        self.assertIsInstance(handler.formatter._style,
                              logging.StringTemplateStyle)

    def test_baseconfig(self):
        d = {
            'atuple': (1, 2, 3),
            'alist': ['a', 'b', 'c'],
            'adict': {'d': 'e', 'f': 3},
            'nest1': ('g', ('h', 'i'), 'j'),
            'nest2': ['k', ['l', 'm'], 'n'],
            'nest3': ['o', 'cfg://alist', 'p'],
        }
        bc = logging.config.BaseConfigurator(d)
        self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
        self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
        self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
        self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
        self.assertEqual(bc.convert('cfg://adict.d'), 'e')
        self.assertEqual(bc.convert('cfg://adict[f]'), 3)
        v = bc.convert('cfg://nest3')
        self.assertEqual(v.pop(1), ['a', 'b', 'c'])
        self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
        self.assertRaises(ValueError, bc.convert, 'cfg://!')
        self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')

class ManagerTest(BaseTest):
    def test_manager_loggerclass(self):
        logged = []

        class MyLogger(logging.Logger):
            def _log(self, level, msg, args, exc_info=None, extra=None):
                logged.append(msg)

        man = logging.Manager(None)
        self.assertRaises(TypeError, man.setLoggerClass, int)
        man.setLoggerClass(MyLogger)
        logger = man.getLogger('test')
        logger.warning('should appear in logged')
        logging.warning('should not appear in logged')

        self.assertEqual(logged, ['should appear in logged'])

    def test_set_log_record_factory(self):
        man = logging.Manager(None)
        expected = object()
        man.setLogRecordFactory(expected)
        self.assertEqual(man.logRecordFactory, expected)

class ChildLoggerTest(BaseTest):
    def test_child_loggers(self):
        r = logging.getLogger()
        l1 = logging.getLogger('abc')
        l2 = logging.getLogger('def.ghi')
        c1 = r.getChild('xyz')
        c2 = r.getChild('uvw.xyz')
        self.assertIs(c1, logging.getLogger('xyz'))
        self.assertIs(c2, logging.getLogger('uvw.xyz'))
        c1 = l1.getChild('def')
        c2 = c1.getChild('ghi')
        c3 = l1.getChild('def.ghi')
        self.assertIs(c1, logging.getLogger('abc.def'))
        self.assertIs(c2, logging.getLogger('abc.def.ghi'))
        self.assertIs(c2, c3)

class DerivedLogRecord(logging.LogRecord):
    pass

class LogRecordFactoryTest(BaseTest):

    def setUp(self):
        class CheckingFilter(logging.Filter):
            def __init__(self, cls):
                self.cls = cls

            def filter(self, record):
                t = type(record)
                if t is not self.cls:
                    msg = 'Unexpected LogRecord type %s, expected %s' % (t,
                            self.cls)
                    raise TypeError(msg)
                return True

        BaseTest.setUp(self)
        self.filter = CheckingFilter(DerivedLogRecord)
        self.root_logger.addFilter(self.filter)
        self.orig_factory = logging.getLogRecordFactory()

    def tearDown(self):
        self.root_logger.removeFilter(self.filter)
        BaseTest.tearDown(self)
        logging.setLogRecordFactory(self.orig_factory)

    def test_logrecord_class(self):
        self.assertRaises(TypeError, self.root_logger.warning,
                          self.next_message())
        logging.setLogRecordFactory(DerivedLogRecord)
        self.root_logger.error(self.next_message())
        self.assert_log_lines([
            ('root', 'ERROR', '2'),
        ])

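# QueueHandler.prepare() merges the message and formats the record before
# enqueueing it, so what comes out of the queue is self-contained;
# test_formatting() below checks that both msg and message carry the
# formatted text.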
class QueueHandlerTest(BaseTest):
    # Do not bother with a logger name group.
    expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"

    def setUp(self):
        BaseTest.setUp(self)
        self.queue = queue.Queue(-1)
        self.que_hdlr = logging.handlers.QueueHandler(self.queue)
        self.name = 'que'
        self.que_logger = logging.getLogger('que')
        self.que_logger.propagate = False
        self.que_logger.setLevel(logging.WARNING)
        self.que_logger.addHandler(self.que_hdlr)

    def tearDown(self):
        self.que_hdlr.close()
        BaseTest.tearDown(self)

    def test_queue_handler(self):
        self.que_logger.debug(self.next_message())
        self.assertRaises(queue.Empty, self.queue.get_nowait)
        self.que_logger.info(self.next_message())
        self.assertRaises(queue.Empty, self.queue.get_nowait)
        msg = self.next_message()
        self.que_logger.warning(msg)
        data = self.queue.get_nowait()
        self.assertTrue(isinstance(data, logging.LogRecord))
        self.assertEqual(data.name, self.que_logger.name)
        self.assertEqual((data.msg, data.args), (msg, None))

    def test_formatting(self):
        msg = self.next_message()
        levelname = logging.getLevelName(logging.WARNING)
        log_format_str = '{name} -> {levelname}: {message}'
        formatted_msg = log_format_str.format(name=self.name,
                                              levelname=levelname, message=msg)
        formatter = logging.Formatter(self.log_format)
        self.que_hdlr.setFormatter(formatter)
        self.que_logger.warning(msg)
        log_record = self.queue.get_nowait()
        self.assertEqual(formatted_msg, log_record.msg)
        self.assertEqual(formatted_msg, log_record.message)

    @unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
                         'logging.handlers.QueueListener required for this test')
    def test_queue_listener(self):
        handler = support.TestHandler(support.Matcher())
        listener = logging.handlers.QueueListener(self.queue, handler)
        listener.start()
        try:
            self.que_logger.warning(self.next_message())
            self.que_logger.error(self.next_message())
            self.que_logger.critical(self.next_message())
        finally:
            listener.stop()
        self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
        self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
        self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
        handler.close()

        # Now test with respect_handler_level set
        handler = support.TestHandler(support.Matcher())
        handler.setLevel(logging.CRITICAL)
        listener = logging.handlers.QueueListener(self.queue, handler,
                                                  respect_handler_level=True)
        listener.start()
        try:
            self.que_logger.warning(self.next_message())
            self.que_logger.error(self.next_message())
            self.que_logger.critical(self.next_message())
        finally:
            listener.stop()
        self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
        self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
        self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))

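# QueueListener runs in a background thread and drains the queue until
# stop() posts its sentinel; test_no_messages_in_queue_after_stop()
# asserts that at most that sentinel is left behind.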
""" logger = logging.getLogger('test_logger_with_id_%s' % ident) logger.setLevel(logging.DEBUG) handler = logging.handlers.QueueHandler(log_queue) logger.addHandler(handler) listener = logging.handlers.QueueListener(log_queue) listener.start() logger.info('one') logger.info('two') logger.info('three') logger.info('four') logger.info('five') listener.stop() logger.removeHandler(handler) handler.close() @patch.object(logging.handlers.QueueListener, 'handle') def test_handle_called_with_queue_queue(self, mock_handle): for i in range(self.repeat): log_queue = queue.Queue() self.setup_and_log(log_queue, '%s_%s' % (self.id(), i)) self.assertEqual(mock_handle.call_count, 5 * self.repeat, 'correct number of handled log messages') @support.requires_multiprocessing_queue @patch.object(logging.handlers.QueueListener, 'handle') def test_handle_called_with_mp_queue(self, mock_handle): for i in range(self.repeat): log_queue = multiprocessing.Queue() self.setup_and_log(log_queue, '%s_%s' % (self.id(), i)) log_queue.close() log_queue.join_thread() self.assertEqual(mock_handle.call_count, 5 * self.repeat, 'correct number of handled log messages') @staticmethod def get_all_from_queue(log_queue): try: while True: yield log_queue.get_nowait() except queue.Empty: return [] @support.requires_multiprocessing_queue def test_no_messages_in_queue_after_stop(self): """ Five messages are logged then the QueueListener is stopped. This test then gets everything off the queue. Failure of this test indicates that messages were not registered on the queue until _after_ the QueueListener stopped. """ for i in range(self.repeat): queue = multiprocessing.Queue() self.setup_and_log(queue, '%s_%s' %(self.id(), i)) # time.sleep(1) items = list(self.get_all_from_queue(queue)) queue.close() queue.join_thread() expected = [[], [logging.handlers.QueueListener._sentinel]] self.assertIn(items, expected, 'Found unexpected messages in queue: %s' % ( [m.msg if isinstance(m, logging.LogRecord) else m for m in items])) ZERO = datetime.timedelta(0) class UTC(datetime.tzinfo): def utcoffset(self, dt): return ZERO dst = utcoffset def tzname(self, dt): return 'UTC' utc = UTC() class FormatterTest(unittest.TestCase): def setUp(self): self.common = { 'name': 'formatter.test', 'level': logging.DEBUG, 'pathname': os.path.join('path', 'to', 'dummy.ext'), 'lineno': 42, 'exc_info': None, 'func': None, 'msg': 'Message with %d %s', 'args': (2, 'placeholders'), } self.variants = { } def get_record(self, name=None): result = dict(self.common) if name is not None: result.update(self.variants[name]) return logging.makeLogRecord(result) def test_percent(self): # Test %-formatting r = self.get_record() f = logging.Formatter('${%(message)s}') self.assertEqual(f.format(r), '${Message with 2 placeholders}') f = logging.Formatter('%(random)s') self.assertRaises(KeyError, f.format, r) self.assertFalse(f.usesTime()) f = logging.Formatter('%(asctime)s') self.assertTrue(f.usesTime()) f = logging.Formatter('%(asctime)-15s') self.assertTrue(f.usesTime()) f = logging.Formatter('asctime') self.assertFalse(f.usesTime()) def test_braces(self): # Test {}-formatting r = self.get_record() f = logging.Formatter('$%{message}%$', style='{') self.assertEqual(f.format(r), '$%Message with 2 placeholders%$') f = logging.Formatter('{random}', style='{') self.assertRaises(KeyError, f.format, r) self.assertFalse(f.usesTime()) f = logging.Formatter('{asctime}', style='{') self.assertTrue(f.usesTime()) f = logging.Formatter('{asctime!s:15}', style='{') self.assertTrue(f.usesTime()) f = 
class FormatterTest(unittest.TestCase):
    def setUp(self):
        self.common = {
            'name': 'formatter.test',
            'level': logging.DEBUG,
            'pathname': os.path.join('path', 'to', 'dummy.ext'),
            'lineno': 42,
            'exc_info': None,
            'func': None,
            'msg': 'Message with %d %s',
            'args': (2, 'placeholders'),
        }
        self.variants = {
        }

    def get_record(self, name=None):
        result = dict(self.common)
        if name is not None:
            result.update(self.variants[name])
        return logging.makeLogRecord(result)

    def test_percent(self):
        # Test %-formatting
        r = self.get_record()
        f = logging.Formatter('${%(message)s}')
        self.assertEqual(f.format(r), '${Message with 2 placeholders}')
        f = logging.Formatter('%(random)s')
        self.assertRaises(KeyError, f.format, r)
        self.assertFalse(f.usesTime())
        f = logging.Formatter('%(asctime)s')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('%(asctime)-15s')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('asctime')
        self.assertFalse(f.usesTime())

    def test_braces(self):
        # Test {}-formatting
        r = self.get_record()
        f = logging.Formatter('$%{message}%$', style='{')
        self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
        f = logging.Formatter('{random}', style='{')
        self.assertRaises(KeyError, f.format, r)
        self.assertFalse(f.usesTime())
        f = logging.Formatter('{asctime}', style='{')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('{asctime!s:15}', style='{')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('{asctime:15}', style='{')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('asctime', style='{')
        self.assertFalse(f.usesTime())

    def test_dollars(self):
        # Test $-formatting
        r = self.get_record()
        f = logging.Formatter('$message', style='$')
        self.assertEqual(f.format(r), 'Message with 2 placeholders')
        f = logging.Formatter('$$%${message}%$$', style='$')
        self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
        f = logging.Formatter('${random}', style='$')
        self.assertRaises(KeyError, f.format, r)
        self.assertFalse(f.usesTime())
        f = logging.Formatter('${asctime}', style='$')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('${asctime', style='$')
        self.assertFalse(f.usesTime())
        f = logging.Formatter('$asctime', style='$')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('asctime', style='$')
        self.assertFalse(f.usesTime())

    def test_invalid_style(self):
        self.assertRaises(ValueError, logging.Formatter, None, None, 'x')

    def test_time(self):
        r = self.get_record()
        dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
        # We use None to indicate we want the local timezone
        # We're essentially converting a UTC time to local time
        r.created = time.mktime(dt.astimezone(None).timetuple())
        r.msecs = 123
        f = logging.Formatter('%(asctime)s %(message)s')
        f.converter = time.gmtime
        self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
        self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
        f.format(r)
        self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')

class TestBufferingFormatter(logging.BufferingFormatter):
    def formatHeader(self, records):
        return '[(%d)' % len(records)

    def formatFooter(self, records):
        return '(%d)]' % len(records)

class BufferingFormatterTest(unittest.TestCase):
    def setUp(self):
        self.records = [
            logging.makeLogRecord({'msg': 'one'}),
            logging.makeLogRecord({'msg': 'two'}),
        ]

    def test_default(self):
        f = logging.BufferingFormatter()
        self.assertEqual('', f.format([]))
        self.assertEqual('onetwo', f.format(self.records))

    def test_custom(self):
        f = TestBufferingFormatter()
        self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
        lf = logging.Formatter('<%(message)s>')
        f = TestBufferingFormatter(lf)
        self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))

class ExceptionTest(BaseTest):
    def test_formatting(self):
        r = self.root_logger
        h = RecordingHandler()
        r.addHandler(h)
        try:
            raise RuntimeError('deliberate mistake')
        except:
            logging.exception('failed', stack_info=True)
        r.removeHandler(h)
        h.close()
        r = h.records[0]
        self.assertTrue(r.exc_text.startswith('Traceback (most recent '
                                              'call last):\n'))
        self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
                                            'deliberate mistake'))
        self.assertTrue(r.stack_info.startswith('Stack (most recent '
                                                'call last):\n'))
        self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
                                              'stack_info=True)'))

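# logging.lastResort is a stderr handler of level WARNING that is used
# when a record reaches the root of the hierarchy without finding any
# handler, which is why the debug() call below prints nothing.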
class LastResortTest(BaseTest):
    def test_last_resort(self):
        # Test the last resort handler
        root = self.root_logger
        root.removeHandler(self.root_hdlr)
        old_lastresort = logging.lastResort
        old_raise_exceptions = logging.raiseExceptions

        try:
            with support.captured_stderr() as stderr:
                root.debug('This should not appear')
                self.assertEqual(stderr.getvalue(), '')
                root.warning('Final chance!')
                self.assertEqual(stderr.getvalue(), 'Final chance!\n')

            # No handlers and no last resort, so 'No handlers' message
            logging.lastResort = None
            with support.captured_stderr() as stderr:
                root.warning('Final chance!')
                msg = 'No handlers could be found for logger "root"\n'
                self.assertEqual(stderr.getvalue(), msg)

            # 'No handlers' message only printed once
            with support.captured_stderr() as stderr:
                root.warning('Final chance!')
                self.assertEqual(stderr.getvalue(), '')

            # If raiseExceptions is False, no message is printed
            root.manager.emittedNoHandlerWarning = False
            logging.raiseExceptions = False
            with support.captured_stderr() as stderr:
                root.warning('Final chance!')
                self.assertEqual(stderr.getvalue(), '')
        finally:
            root.addHandler(self.root_hdlr)
            logging.lastResort = old_lastresort
            logging.raiseExceptions = old_raise_exceptions

class FakeHandler:

    def __init__(self, identifier, called):
        for method in ('acquire', 'flush', 'close', 'release'):
            setattr(self, method, self.record_call(identifier, method, called))

    def record_call(self, identifier, method_name, called):
        def inner():
            called.append('{} - {}'.format(identifier, method_name))
        return inner

class RecordingHandler(logging.NullHandler):

    def __init__(self, *args, **kwargs):
        super(RecordingHandler, self).__init__(*args, **kwargs)
        self.records = []

    def handle(self, record):
        """Keep track of all the emitted records."""
        self.records.append(record)

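# logging.shutdown() walks its weakref handler list in reverse, calling
# acquire/flush/close/release on each handler; the expected call sequence
# in test_no_failure() (2, then 1, then 0) documents that ordering.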
class ShutdownTest(BaseTest):

    """Test suite for the shutdown method."""

    def setUp(self):
        super(ShutdownTest, self).setUp()
        self.called = []

        raise_exceptions = logging.raiseExceptions
        self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)

    def raise_error(self, error):
        def inner():
            raise error()
        return inner

    def test_no_failure(self):
        # create some fake handlers
        handler0 = FakeHandler(0, self.called)
        handler1 = FakeHandler(1, self.called)
        handler2 = FakeHandler(2, self.called)

        # create live weakref to those handlers
        handlers = map(logging.weakref.ref, [handler0, handler1, handler2])

        logging.shutdown(handlerList=list(handlers))

        expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
                    '1 - acquire', '1 - flush', '1 - close', '1 - release',
                    '0 - acquire', '0 - flush', '0 - close', '0 - release']
        self.assertEqual(expected, self.called)

    def _test_with_failure_in_method(self, method, error):
        handler = FakeHandler(0, self.called)
        setattr(handler, method, self.raise_error(error))
        handlers = [logging.weakref.ref(handler)]

        logging.shutdown(handlerList=list(handlers))

        self.assertEqual('0 - release', self.called[-1])

    def test_with_ioerror_in_acquire(self):
        self._test_with_failure_in_method('acquire', OSError)

    def test_with_ioerror_in_flush(self):
        self._test_with_failure_in_method('flush', OSError)

    def test_with_ioerror_in_close(self):
        self._test_with_failure_in_method('close', OSError)

    def test_with_valueerror_in_acquire(self):
        self._test_with_failure_in_method('acquire', ValueError)

    def test_with_valueerror_in_flush(self):
        self._test_with_failure_in_method('flush', ValueError)

    def test_with_valueerror_in_close(self):
        self._test_with_failure_in_method('close', ValueError)

    def test_with_other_error_in_acquire_without_raise(self):
        logging.raiseExceptions = False
        self._test_with_failure_in_method('acquire', IndexError)

    def test_with_other_error_in_flush_without_raise(self):
        logging.raiseExceptions = False
        self._test_with_failure_in_method('flush', IndexError)

    def test_with_other_error_in_close_without_raise(self):
        logging.raiseExceptions = False
        self._test_with_failure_in_method('close', IndexError)

    def test_with_other_error_in_acquire_with_raise(self):
        logging.raiseExceptions = True
        self.assertRaises(IndexError, self._test_with_failure_in_method,
                          'acquire', IndexError)

    def test_with_other_error_in_flush_with_raise(self):
        logging.raiseExceptions = True
        self.assertRaises(IndexError, self._test_with_failure_in_method,
                          'flush', IndexError)

    def test_with_other_error_in_close_with_raise(self):
        logging.raiseExceptions = True
        self.assertRaises(IndexError, self._test_with_failure_in_method,
                          'close', IndexError)

class ModuleLevelMiscTest(BaseTest):

    """Test suite for some module level methods."""

    def test_disable(self):
        old_disable = logging.root.manager.disable
        # confirm our assumptions are correct
        self.assertEqual(old_disable, 0)
        self.addCleanup(logging.disable, old_disable)

        logging.disable(83)
        self.assertEqual(logging.root.manager.disable, 83)

        # test the default value introduced in 3.7
        # (Issue #28524)
        logging.disable()
        self.assertEqual(logging.root.manager.disable, logging.CRITICAL)

    def _test_log(self, method, level=None):
        called = []
        support.patch(self, logging, 'basicConfig',
                      lambda *a, **kw: called.append((a, kw)))

        recording = RecordingHandler()
        logging.root.addHandler(recording)

        log_method = getattr(logging, method)
        if level is not None:
            log_method(level, "test me: %r", recording)
        else:
            log_method("test me: %r", recording)

        self.assertEqual(len(recording.records), 1)
        record = recording.records[0]
        self.assertEqual(record.getMessage(), "test me: %r" % recording)

        expected_level = level if level is not None else getattr(logging,
                                                                 method.upper())
        self.assertEqual(record.levelno, expected_level)

        # basicConfig was not called!
        self.assertEqual(called, [])

    def test_log(self):
        self._test_log('log', logging.ERROR)

    def test_debug(self):
        self._test_log('debug')

    def test_info(self):
        self._test_log('info')

    def test_warning(self):
        self._test_log('warning')

    def test_error(self):
        self._test_log('error')

    def test_critical(self):
        self._test_log('critical')

    def test_set_logger_class(self):
        self.assertRaises(TypeError, logging.setLoggerClass, object)

        class MyLogger(logging.Logger):
            pass

        logging.setLoggerClass(MyLogger)
        self.assertEqual(logging.getLoggerClass(), MyLogger)

        logging.setLoggerClass(logging.Logger)
        self.assertEqual(logging.getLoggerClass(), logging.Logger)

    @support.requires_type_collecting
    def test_logging_at_shutdown(self):
        # Issue #20037
        code = """if 1:
            import logging

            class A:
                def __del__(self):
                    try:
                        raise ValueError("some error")
                    except Exception:
                        logging.exception("exception in __del__")

            a = A()"""
        rc, out, err = assert_python_ok("-c", code)
        err = err.decode()
        self.assertIn("exception in __del__", err)
        self.assertIn("ValueError: some error", err)

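# The module-level flags logging.logThreads, logProcesses and
# logMultiprocessing control whether LogRecord captures thread/process
# details; with all three off, those attributes come back as None, as
# test_optional() below verifies.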
class LogRecordTest(BaseTest):
    def test_str_rep(self):
        r = logging.makeLogRecord({})
        s = str(r)
        self.assertTrue(s.startswith('<LogRecord: '))
        self.assertTrue(s.endswith('>'))

    def test_dict_arg(self):
        h = RecordingHandler()
        r = logging.getLogger()
        r.addHandler(h)
        d = {'less' : 'more' }
        logging.warning('less is %(less)s', d)
        self.assertIs(h.records[0].args, d)
        self.assertEqual(h.records[0].message, 'less is more')
        r.removeHandler(h)
        h.close()

    def test_multiprocessing(self):
        r = logging.makeLogRecord({})
        self.assertEqual(r.processName, 'MainProcess')
        try:
            import multiprocessing as mp
            r = logging.makeLogRecord({})
            self.assertEqual(r.processName, mp.current_process().name)
        except ImportError:
            pass

    def test_optional(self):
        r = logging.makeLogRecord({})
        NOT_NONE = self.assertIsNotNone
        NOT_NONE(r.thread)
        NOT_NONE(r.threadName)
        NOT_NONE(r.process)
        NOT_NONE(r.processName)
        log_threads = logging.logThreads
        log_processes = logging.logProcesses
        log_multiprocessing = logging.logMultiprocessing
        try:
            logging.logThreads = False
            logging.logProcesses = False
            logging.logMultiprocessing = False
            r = logging.makeLogRecord({})
            NONE = self.assertIsNone
            NONE(r.thread)
            NONE(r.threadName)
            NONE(r.process)
            NONE(r.processName)
        finally:
            logging.logThreads = log_threads
            logging.logProcesses = log_processes
            logging.logMultiprocessing = log_multiprocessing

class BasicConfigTest(unittest.TestCase):

    """Test suite for logging.basicConfig."""

    def setUp(self):
        super(BasicConfigTest, self).setUp()
        self.handlers = logging.root.handlers
        self.saved_handlers = logging._handlers.copy()
        self.saved_handler_list = logging._handlerList[:]
        self.original_logging_level = logging.root.level
        self.addCleanup(self.cleanup)
        logging.root.handlers = []

    def tearDown(self):
        for h in logging.root.handlers[:]:
            logging.root.removeHandler(h)
            h.close()
        super(BasicConfigTest, self).tearDown()

    def cleanup(self):
        setattr(logging.root, 'handlers', self.handlers)
        logging._handlers.clear()
        logging._handlers.update(self.saved_handlers)
        logging._handlerList[:] = self.saved_handler_list
        logging.root.level = self.original_logging_level

    def test_no_kwargs(self):
        logging.basicConfig()

        # handler defaults to a StreamHandler to sys.stderr
        self.assertEqual(len(logging.root.handlers), 1)
        handler = logging.root.handlers[0]
        self.assertIsInstance(handler, logging.StreamHandler)
        self.assertEqual(handler.stream, sys.stderr)

        formatter = handler.formatter
        # format defaults to logging.BASIC_FORMAT
        self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
        # datefmt defaults to None
        self.assertIsNone(formatter.datefmt)
        # style defaults to %
        self.assertIsInstance(formatter._style, logging.PercentStyle)

        # level is not explicitly set
        self.assertEqual(logging.root.level, self.original_logging_level)

    def test_strformatstyle(self):
        with support.captured_stdout() as output:
            logging.basicConfig(stream=sys.stdout, style="{")
            logging.error("Log an error")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue().strip(),
                "ERROR:root:Log an error")

    def test_stringtemplatestyle(self):
        with support.captured_stdout() as output:
            logging.basicConfig(stream=sys.stdout, style="$")
            logging.error("Log an error")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue().strip(),
                "ERROR:root:Log an error")

    def test_filename(self):

        def cleanup(h1, h2, fn):
            h1.close()
            h2.close()
            os.remove(fn)

        logging.basicConfig(filename='test.log')

        self.assertEqual(len(logging.root.handlers), 1)
        handler = logging.root.handlers[0]
        self.assertIsInstance(handler, logging.FileHandler)

        expected = logging.FileHandler('test.log', 'a')
        self.assertEqual(handler.stream.mode, expected.stream.mode)
        self.assertEqual(handler.stream.name, expected.stream.name)
        self.addCleanup(cleanup, handler, expected, 'test.log')

    def test_filemode(self):

        def cleanup(h1, h2, fn):
            h1.close()
            h2.close()
            os.remove(fn)

        logging.basicConfig(filename='test.log', filemode='wb')

        handler = logging.root.handlers[0]
        expected = logging.FileHandler('test.log', 'wb')
        self.assertEqual(handler.stream.mode, expected.stream.mode)
        self.addCleanup(cleanup, handler, expected, 'test.log')

    def test_stream(self):
        stream = io.StringIO()
        self.addCleanup(stream.close)
        logging.basicConfig(stream=stream)

        self.assertEqual(len(logging.root.handlers), 1)
        handler = logging.root.handlers[0]
        self.assertIsInstance(handler, logging.StreamHandler)
        self.assertEqual(handler.stream, stream)

    def test_format(self):
        logging.basicConfig(format='foo')

        formatter = logging.root.handlers[0].formatter
        self.assertEqual(formatter._style._fmt, 'foo')

    def test_datefmt(self):
        logging.basicConfig(datefmt='bar')

        formatter = logging.root.handlers[0].formatter
        self.assertEqual(formatter.datefmt, 'bar')

    def test_style(self):
        logging.basicConfig(style='$')

        formatter = logging.root.handlers[0].formatter
        self.assertIsInstance(formatter._style, logging.StringTemplateStyle)

    def test_level(self):
        old_level = logging.root.level
        self.addCleanup(logging.root.setLevel, old_level)

        logging.basicConfig(level=57)
        self.assertEqual(logging.root.level, 57)
        # Test that second call has no effect
        logging.basicConfig(level=58)
        self.assertEqual(logging.root.level, 57)

    def test_incompatible(self):
        assertRaises = self.assertRaises
        handlers = [logging.StreamHandler()]
        stream = sys.stderr
        assertRaises(ValueError, logging.basicConfig, filename='test.log',
                     stream=stream)
        assertRaises(ValueError, logging.basicConfig, filename='test.log',
                     handlers=handlers)
        assertRaises(ValueError, logging.basicConfig, stream=stream,
                     handlers=handlers)
        # Issue 23207: test for invalid kwargs
        assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
        # Should pop both filename and filemode even if filename is None
        logging.basicConfig(filename=None, filemode='a')

    def test_handlers(self):
        handlers = [
            logging.StreamHandler(),
            logging.StreamHandler(sys.stdout),
            logging.StreamHandler(),
        ]
        f = logging.Formatter()
        handlers[2].setFormatter(f)
        logging.basicConfig(handlers=handlers)
        self.assertIs(handlers[0], logging.root.handlers[0])
        self.assertIs(handlers[1], logging.root.handlers[1])
        self.assertIs(handlers[2], logging.root.handlers[2])
        self.assertIsNotNone(handlers[0].formatter)
        self.assertIsNotNone(handlers[1].formatter)
        self.assertIs(handlers[2].formatter, f)
        self.assertIs(handlers[0].formatter, handlers[1].formatter)

    def _test_log(self, method, level=None):
        # logging.root has no handlers so basicConfig should be called
        called = []

        old_basic_config = logging.basicConfig
        def my_basic_config(*a, **kw):
            old_basic_config()
            old_level = logging.root.level
            logging.root.setLevel(100)  # avoid having messages in stderr
            self.addCleanup(logging.root.setLevel, old_level)
            called.append((a, kw))

        support.patch(self, logging, 'basicConfig', my_basic_config)

        log_method = getattr(logging, method)
        if level is not None:
            log_method(level, "test me")
        else:
            log_method("test me")

        # basicConfig was called with no arguments
        self.assertEqual(called, [((), {})])

    def test_log(self):
        self._test_log('log', logging.WARNING)

    def test_debug(self):
        self._test_log('debug')

    def test_info(self):
        self._test_log('info')

    def test_warning(self):
        self._test_log('warning')

    def test_error(self):
        self._test_log('error')

    def test_critical(self):
        self._test_log('critical')

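# LoggerAdapter wraps a logger plus contextual 'extra' data and routes
# every call through process(), which may rewrite the message and kwargs;
# test_nested() below shows adapters stacking, each adding its own prefix.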
self.adapter.exception('exc_info test', exc_info=exc) self.assertEqual(len(self.recording.records), 1) record = self.recording.records[0] self.assertEqual(record.exc_info, (exc.__class__, exc, exc.__traceback__)) def test_critical(self): msg = 'critical test! %r' self.adapter.critical(msg, self.recording) self.assertEqual(len(self.recording.records), 1) record = self.recording.records[0] self.assertEqual(record.levelno, logging.CRITICAL) self.assertEqual(record.msg, msg) self.assertEqual(record.args, (self.recording,)) def test_is_enabled_for(self): old_disable = self.adapter.logger.manager.disable self.adapter.logger.manager.disable = 33 self.addCleanup(setattr, self.adapter.logger.manager, 'disable', old_disable) self.assertFalse(self.adapter.isEnabledFor(32)) def test_has_handlers(self): self.assertTrue(self.adapter.hasHandlers()) for handler in self.logger.handlers: self.logger.removeHandler(handler) self.assertFalse(self.logger.hasHandlers()) self.assertFalse(self.adapter.hasHandlers()) def test_nested(self): class Adapter(logging.LoggerAdapter): prefix = 'Adapter' def process(self, msg, kwargs): return f"{self.prefix} {msg}", kwargs msg = 'Adapters can be nested, yo.' adapter = Adapter(logger=self.logger, extra=None) adapter_adapter = Adapter(logger=adapter, extra=None) adapter_adapter.prefix = 'AdapterAdapter' self.assertEqual(repr(adapter), repr(adapter_adapter)) adapter_adapter.log(logging.CRITICAL, msg, self.recording) self.assertEqual(len(self.recording.records), 1) record = self.recording.records[0] self.assertEqual(record.levelno, logging.CRITICAL) self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}") self.assertEqual(record.args, (self.recording,)) orig_manager = adapter_adapter.manager self.assertIs(adapter.manager, orig_manager) self.assertIs(self.logger.manager, orig_manager) temp_manager = object() try: adapter_adapter.manager = temp_manager self.assertIs(adapter_adapter.manager, temp_manager) self.assertIs(adapter.manager, temp_manager) self.assertIs(self.logger.manager, temp_manager) finally: adapter_adapter.manager = orig_manager self.assertIs(adapter_adapter.manager, orig_manager) self.assertIs(adapter.manager, orig_manager) self.assertIs(self.logger.manager, orig_manager) class LoggerTest(BaseTest): def setUp(self): super(LoggerTest, self).setUp() self.recording = RecordingHandler() self.logger = logging.Logger(name='blah') self.logger.addHandler(self.recording) self.addCleanup(self.logger.removeHandler, self.recording) self.addCleanup(self.recording.close) self.addCleanup(logging.shutdown) def test_set_invalid_level(self): self.assertRaises(TypeError, self.logger.setLevel, object()) def test_exception(self): msg = 'testing exception: %r' exc = None try: 1 / 0 except ZeroDivisionError as e: exc = e self.logger.exception(msg, self.recording) self.assertEqual(len(self.recording.records), 1) record = self.recording.records[0] self.assertEqual(record.levelno, logging.ERROR) self.assertEqual(record.msg, msg) self.assertEqual(record.args, (self.recording,)) self.assertEqual(record.exc_info, (exc.__class__, exc, exc.__traceback__)) def test_log_invalid_level_with_raise(self): with support.swap_attr(logging, 'raiseExceptions', True): self.assertRaises(TypeError, self.logger.log, '10', 'test message') def test_log_invalid_level_no_raise(self): with support.swap_attr(logging, 'raiseExceptions', False): self.logger.log('10', 'test message') # no exception happens def test_find_caller_with_stack_info(self): called = [] support.patch(self, logging.traceback, 
'print_stack', lambda f, file: called.append(file.getvalue())) self.logger.findCaller(stack_info=True) self.assertEqual(len(called), 1) self.assertEqual('Stack (most recent call last):\n', called[0]) def test_make_record_with_extra_overwrite(self): name = 'my record' level = 13 fn = lno = msg = args = exc_info = func = sinfo = None rv = logging._logRecordFactory(name, level, fn, lno, msg, args, exc_info, func, sinfo) for key in ('message', 'asctime') + tuple(rv.__dict__.keys()): extra = {key: 'some value'} self.assertRaises(KeyError, self.logger.makeRecord, name, level, fn, lno, msg, args, exc_info, extra=extra, sinfo=sinfo) def test_make_record_with_extra_no_overwrite(self): name = 'my record' level = 13 fn = lno = msg = args = exc_info = func = sinfo = None extra = {'valid_key': 'some value'} result = self.logger.makeRecord(name, level, fn, lno, msg, args, exc_info, extra=extra, sinfo=sinfo) self.assertIn('valid_key', result.__dict__) def test_has_handlers(self): self.assertTrue(self.logger.hasHandlers()) for handler in self.logger.handlers: self.logger.removeHandler(handler) self.assertFalse(self.logger.hasHandlers()) def test_has_handlers_no_propagate(self): child_logger = logging.getLogger('blah.child') child_logger.propagate = False self.assertFalse(child_logger.hasHandlers()) def test_is_enabled_for(self): old_disable = self.logger.manager.disable self.logger.manager.disable = 23 self.addCleanup(setattr, self.logger.manager, 'disable', old_disable) self.assertFalse(self.logger.isEnabledFor(22)) def test_root_logger_aliases(self): root = logging.getLogger() self.assertIs(root, logging.root) self.assertIs(root, logging.getLogger(None)) self.assertIs(root, logging.getLogger('')) self.assertIs(root, logging.getLogger('foo').root) self.assertIs(root, logging.getLogger('foo.bar').root) self.assertIs(root, logging.getLogger('foo').parent) self.assertIsNot(root, logging.getLogger('\0')) self.assertIsNot(root, logging.getLogger('foo.bar').parent) def test_invalid_names(self): self.assertRaises(TypeError, logging.getLogger, any) self.assertRaises(TypeError, logging.getLogger, b'foo') def test_pickling(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'): logger = logging.getLogger(name) s = pickle.dumps(logger, proto) unpickled = pickle.loads(s) self.assertIs(unpickled, logger) def test_caching(self): root = self.root_logger logger1 = logging.getLogger("abc") logger2 = logging.getLogger("abc.def") # Set root logger level and ensure cache is empty root.setLevel(logging.ERROR) self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR) self.assertEqual(logger2._cache, {}) # Ensure cache is populated and calls are consistent self.assertTrue(logger2.isEnabledFor(logging.ERROR)) self.assertFalse(logger2.isEnabledFor(logging.DEBUG)) self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False}) self.assertEqual(root._cache, {}) self.assertTrue(logger2.isEnabledFor(logging.ERROR)) # Ensure root cache gets populated self.assertEqual(root._cache, {}) self.assertTrue(root.isEnabledFor(logging.ERROR)) self.assertEqual(root._cache, {logging.ERROR: True}) # Set parent logger level and ensure caches are emptied logger1.setLevel(logging.CRITICAL) self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL) self.assertEqual(logger2._cache, {}) # Ensure logger2 uses parent logger's effective level self.assertFalse(logger2.isEnabledFor(logging.ERROR)) # Set level to NOTSET and ensure caches are empty logger2.setLevel(logging.NOTSET) 
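        # With level NOTSET, logger2 defers to its nearest configured ancestor
        # (logger1, set to CRITICAL above) rather than to the root logger.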
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL) self.assertEqual(logger2._cache, {}) self.assertEqual(logger1._cache, {}) self.assertEqual(root._cache, {}) # Verify logger2 follows parent and not root self.assertFalse(logger2.isEnabledFor(logging.ERROR)) self.assertTrue(logger2.isEnabledFor(logging.CRITICAL)) self.assertFalse(logger1.isEnabledFor(logging.ERROR)) self.assertTrue(logger1.isEnabledFor(logging.CRITICAL)) self.assertTrue(root.isEnabledFor(logging.ERROR)) # Disable logging in manager and ensure caches are clear logging.disable() self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL) self.assertEqual(logger2._cache, {}) self.assertEqual(logger1._cache, {}) self.assertEqual(root._cache, {}) # Ensure no loggers are enabled self.assertFalse(logger1.isEnabledFor(logging.CRITICAL)) self.assertFalse(logger2.isEnabledFor(logging.CRITICAL)) self.assertFalse(root.isEnabledFor(logging.CRITICAL)) class BaseFileTest(BaseTest): "Base class for handler tests that write log files" def setUp(self): BaseTest.setUp(self) fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-") os.close(fd) self.rmfiles = [] def tearDown(self): for fn in self.rmfiles: os.unlink(fn) if os.path.exists(self.fn): os.unlink(self.fn) BaseTest.tearDown(self) def assertLogFile(self, filename): "Assert a log file is there and register it for deletion" self.assertTrue(os.path.exists(filename), msg="Log file %r does not exist" % filename) self.rmfiles.append(filename) class FileHandlerTest(BaseFileTest): def test_delay(self): os.unlink(self.fn) fh = logging.FileHandler(self.fn, delay=True) self.assertIsNone(fh.stream) self.assertFalse(os.path.exists(self.fn)) fh.handle(logging.makeLogRecord({})) self.assertIsNotNone(fh.stream) self.assertTrue(os.path.exists(self.fn)) fh.close() class RotatingFileHandlerTest(BaseFileTest): def next_rec(self): return logging.LogRecord('n', logging.DEBUG, 'p', 1, self.next_message(), None, None, None) def test_should_not_rollover(self): # If maxbytes is zero rollover never occurs rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0) self.assertFalse(rh.shouldRollover(None)) rh.close() def test_should_rollover(self): rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1) self.assertTrue(rh.shouldRollover(self.next_rec())) rh.close() def test_file_created(self): # checks that the file is created and assumes it was created # by us rh = logging.handlers.RotatingFileHandler(self.fn) rh.emit(self.next_rec()) self.assertLogFile(self.fn) rh.close() def test_rollover_filenames(self): def namer(name): return name + ".test" rh = logging.handlers.RotatingFileHandler( self.fn, backupCount=2, maxBytes=1) rh.namer = namer rh.emit(self.next_rec()) self.assertLogFile(self.fn) rh.emit(self.next_rec()) self.assertLogFile(namer(self.fn + ".1")) rh.emit(self.next_rec()) self.assertLogFile(namer(self.fn + ".2")) self.assertFalse(os.path.exists(namer(self.fn + ".3"))) rh.close() @support.requires_zlib def test_rotator(self): def namer(name): return name + ".gz" def rotator(source, dest): with open(source, "rb") as sf: data = sf.read() compressed = zlib.compress(data, 9) with open(dest, "wb") as df: df.write(compressed) os.remove(source) rh = logging.handlers.RotatingFileHandler( self.fn, backupCount=2, maxBytes=1) rh.rotator = rotator rh.namer = namer m1 = self.next_rec() rh.emit(m1) self.assertLogFile(self.fn) m2 = self.next_rec() rh.emit(m2) fn = namer(self.fn + ".1") self.assertLogFile(fn) newline = os.linesep with open(fn, "rb") as f: compressed = f.read() data = 
zlib.decompress(compressed) self.assertEqual(data.decode("ascii"), m1.msg + newline) rh.emit(self.next_rec()) fn = namer(self.fn + ".2") self.assertLogFile(fn) with open(fn, "rb") as f: compressed = f.read() data = zlib.decompress(compressed) self.assertEqual(data.decode("ascii"), m1.msg + newline) rh.emit(self.next_rec()) fn = namer(self.fn + ".2") with open(fn, "rb") as f: compressed = f.read() data = zlib.decompress(compressed) self.assertEqual(data.decode("ascii"), m2.msg + newline) self.assertFalse(os.path.exists(namer(self.fn + ".3"))) rh.close() class TimedRotatingFileHandlerTest(BaseFileTest): # other test methods added below def test_rollover(self): fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S', backupCount=1) fmt = logging.Formatter('%(asctime)s %(message)s') fh.setFormatter(fmt) r1 = logging.makeLogRecord({'msg': 'testing - initial'}) fh.emit(r1) self.assertLogFile(self.fn) time.sleep(1.1) # a little over a second ... r2 = logging.makeLogRecord({'msg': 'testing - after delay'}) fh.emit(r2) fh.close() # At this point, we should have a recent rotated file which we # can test for the existence of. However, in practice, on some # machines which run really slowly, we don't know how far back # in time to go to look for the log file. So, we go back a fair # bit, and stop as soon as we see a rotated file. In theory this # could of course still fail, but the chances are lower. found = False now = datetime.datetime.now() GO_BACK = 5 * 60 # seconds for secs in range(GO_BACK): prev = now - datetime.timedelta(seconds=secs) fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S") found = os.path.exists(fn) if found: self.rmfiles.append(fn) break msg = 'No rotated files found, went back %d seconds' % GO_BACK if not found: #print additional diagnostics dn, fn = os.path.split(self.fn) files = [f for f in os.listdir(dn) if f.startswith(fn)] print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr) print('The only matching files are: %s' % files, file=sys.stderr) for f in files: print('Contents of %s:' % f) path = os.path.join(dn, f) with open(path, 'r') as tf: print(tf.read()) self.assertTrue(found, msg=msg) def test_invalid(self): assertRaises = self.assertRaises assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler, self.fn, 'X', delay=True) assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler, self.fn, 'W', delay=True) assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler, self.fn, 'W7', delay=True) def test_compute_rollover_daily_attime(self): currentTime = 0 atTime = datetime.time(12, 0, 0) rh = logging.handlers.TimedRotatingFileHandler( self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True, atTime=atTime) try: actual = rh.computeRollover(currentTime) self.assertEqual(actual, currentTime + 12 * 60 * 60) actual = rh.computeRollover(currentTime + 13 * 60 * 60) self.assertEqual(actual, currentTime + 36 * 60 * 60) finally: rh.close() #@unittest.skipIf(True, 'Temporarily skipped while failures investigated.') def test_compute_rollover_weekly_attime(self): currentTime = int(time.time()) today = currentTime - currentTime % 86400 atTime = datetime.time(12, 0, 0) wday = time.gmtime(today).tm_wday for day in range(7): rh = logging.handlers.TimedRotatingFileHandler( self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True, atTime=atTime) try: if wday > day: # The rollover day has already passed this week, so we # go over into next week expected = (7 - wday + day) else: expected = (day - wday) # At this point expected is in 
days from now, convert to seconds expected *= 24 * 60 * 60 # Add in the rollover time expected += 12 * 60 * 60 # Add in adjustment for today expected += today actual = rh.computeRollover(today) if actual != expected: print('failed in timezone: %d' % time.timezone) print('local vars: %s' % locals()) self.assertEqual(actual, expected) if day == wday: # goes into following week expected += 7 * 24 * 60 * 60 actual = rh.computeRollover(today + 13 * 60 * 60) if actual != expected: print('failed in timezone: %d' % time.timezone) print('local vars: %s' % locals()) self.assertEqual(actual, expected) finally: rh.close() def secs(**kw): return datetime.timedelta(**kw) // datetime.timedelta(seconds=1) for when, exp in (('S', 1), ('M', 60), ('H', 60 * 60), ('D', 60 * 60 * 24), ('MIDNIGHT', 60 * 60 * 24), # current time (epoch start) is a Thursday, W0 means Monday ('W0', secs(days=4, hours=24)), ): def test_compute_rollover(self, when=when, exp=exp): rh = logging.handlers.TimedRotatingFileHandler( self.fn, when=when, interval=1, backupCount=0, utc=True) currentTime = 0.0 actual = rh.computeRollover(currentTime) if exp != actual: # Failures occur on some systems for MIDNIGHT and W0. # Print detailed calculation for MIDNIGHT so we can try to see # what's going on if when == 'MIDNIGHT': try: if rh.utc: t = time.gmtime(currentTime) else: t = time.localtime(currentTime) currentHour = t[3] currentMinute = t[4] currentSecond = t[5] # r is the number of seconds left between now and midnight r = logging.handlers._MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 + currentSecond) result = currentTime + r print('t: %s (%s)' % (t, rh.utc), file=sys.stderr) print('currentHour: %s' % currentHour, file=sys.stderr) print('currentMinute: %s' % currentMinute, file=sys.stderr) print('currentSecond: %s' % currentSecond, file=sys.stderr) print('r: %s' % r, file=sys.stderr) print('result: %s' % result, file=sys.stderr) except Exception: print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr) self.assertEqual(exp, actual) rh.close() setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover) @unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.') class NTEventLogHandlerTest(BaseTest): def test_basic(self): logtype = 'Application' elh = win32evtlog.OpenEventLog(None, logtype) num_recs = win32evtlog.GetNumberOfEventLogRecords(elh) try: h = logging.handlers.NTEventLogHandler('test_logging') except pywintypes.error as e: if e.winerror == 5: # access denied raise unittest.SkipTest('Insufficient privileges to run test') raise r = logging.makeLogRecord({'msg': 'Test Log Message'}) h.handle(r) h.close() # Now see if the event is recorded self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh)) flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \ win32evtlog.EVENTLOG_SEQUENTIAL_READ found = False GO_BACK = 100 events = win32evtlog.ReadEventLog(elh, flags, GO_BACK) for e in events: if e.SourceName != 'test_logging': continue msg = win32evtlogutil.SafeFormatMessage(e, logtype) if msg != 'Test Log Message\r\n': continue found = True break msg = 'Record not found in event log, went back %d records' % GO_BACK self.assertTrue(found, msg=msg) class MiscTestCase(unittest.TestCase): def test__all__(self): blacklist = {'logThreads', 'logMultiprocessing', 'logProcesses', 'currentframe', 'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle', 'Filterer', 'PlaceHolder', 'Manager', 'RootLogger', 'root', 'threading'} 
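        # check__all__ asserts that every public name in the module appears in
        # logging.__all__, apart from the internal names blacklisted above.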
support.check__all__(self, logging, blacklist=blacklist) # Set the locale to the platform-dependent default. I have no idea # why the test does this, but in any case we save the current locale # first and restore it at the end. @support.run_with_locale('LC_ALL', '') def test_main(): tests = [ BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest, HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest, DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest, ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest, StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest, QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest, LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest, RotatingFileHandlerTest, LastResortTest, LogRecordTest, ExceptionTest, SysLogHandlerTest, IPv6SysLogHandlerTest, HTTPHandlerTest, NTEventLogHandlerTest, TimedRotatingFileHandlerTest, UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest, MiscTestCase ] if hasattr(logging.handlers, 'QueueListener'): tests.append(QueueListenerTest) support.run_unittest(*tests) if __name__ == "__main__": test_main()
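# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original test file): the
# TimedRotatingFileHandlerTest block above manufactures one
# test_compute_rollover_<when> method per (when, exp) pair and attaches it
# with setattr, binding the loop variables as default arguments. The same
# pattern in isolation, using a hypothetical PlusTest case:

import unittest

class PlusTest(unittest.TestCase):
    pass

for a, b, expected in ((1, 2, 3), (2, 5, 7)):
    def test_plus(self, a=a, b=b, expected=expected):
        # Default arguments capture the loop values at definition time; a
        # plain closure would be late-binding and only see the last pair.
        self.assertEqual(a + b, expected)
    setattr(PlusTest, 'test_plus_%d_%d' % (a, b), test_plus)
# ---------------------------------------------------------------------------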
test_realworld_ros_final.py
#!/usr/bin/env python
# --------------------------------------------------------
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
# for testing
import argparse
import datetime
import numpy as np
import itertools
from core.bc import BC
from core.ddpg import DDPG
from tensorboardX import SummaryWriter
from experiments.config import *
from core.replay_memory import BaseMemory as ReplayMemory
from core import networks
from core.utils import *
import IPython
import matplotlib.pyplot as plt

import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.utils.data
import cv2
import torch.nn as nn
import threading
import pprint
import time, os, sys
import os.path as osp
import copy
from core.env_planner import EnvPlanner
from OMG.omg.config import cfg as planner_cfg

# ros
import tf
import tf2_ros
import rosnode
import message_filters
import _init_paths
import rospy
import tf.transformations as tra
import std_msgs.msg
from sensor_msgs.msg import Image, CameraInfo
from sensor_msgs.msg import PointCloud2, PointField
from visualization_msgs.msg import MarkerArray, Marker
from geometry_msgs.msg import Pose, PoseArray, Point, Quaternion
from sensor_msgs import point_cloud2
from cv_bridge import CvBridge, CvBridgeError
lock = threading.Lock()

# for real robot
from lula_franka.franka import Franka
from joint_listener import JointListener
from moveit import MoveitBridge

# use posecnn layer for backprojection
import posecnn_cuda

# graspnet
import tensorflow
sys.path.insert(0, '6dof-graspnet')

# set policy mode
GA_DDPG_ONLY = True
GRASPNET_ONLY = False
COMBINED = False
RANDOM_TARGET = False
USE_LOOK_AT = False
CONTACT_GRASPNET = False
PUT_BIN = False

# contact graspnet
from grasp_estimator import GraspEstimator, get_graspnet_config, joint_config
if CONTACT_GRASPNET:
    sys.path.insert(0, 'contact_graspnet')
    sys.path.insert(0, 'contact_graspnet/contact_graspnet')
    from inference_edit import get_graspnet_config as get_graspnet_config_contact
    from contact_grasp_estimator import GraspEstimator as GraspEstimatorContact
    import config_utils


# compute look at pose according to object pose
def compute_look_at_pose(pose_listener, center_object, angle, distance, psi=0):

    # find the hand camera to hand transformation
    try:
        tf_pose = pose_listener.lookupTransform('measured/camera_color_optical_frame',
                                                'measured/right_gripper', rospy.Time(0))
        pose_camera = make_pose(tf_pose)
    except (tf2_ros.LookupException,
            tf2_ros.ConnectivityException,
            tf2_ros.ExtrapolationException):
        pose_camera = None
    if pose_camera is not None:
        pose_camera[:3, :3] = np.eye(3)
        pose_camera[:3, 3] *= -1
    else:
        print('cannot find camera to hand transformation')

    # convert degrees to radians (~57.3 degrees per radian)
    psi /= 57.3
    theta = angle / 57.3
    r = distance
    position_robot = center_object + np.array([-r * np.cos(theta) * np.cos(psi),
                                               -r * np.cos(theta) * np.sin(psi),
                                               r * np.sin(theta)], dtype=np.float32)
    Z_BG = center_object - position_robot
    Z_BG /= np.linalg.norm(Z_BG)
    Y_BG = np.array([-np.sin(psi), np.cos(psi), 0], dtype=np.float32)
    X_BG = np.cross(Y_BG, Z_BG)
    R_BG = np.zeros((3, 3), dtype=np.float32)
    R_BG[:, 0] = X_BG
    R_BG[:, 1] = Y_BG
    R_BG[:, 2] = Z_BG
    pose_robot = np.eye(4, dtype=np.float32)
    pose_robot[:3, 3] = position_robot
    pose_robot[:3, :3] = R_BG[:3, :3]

    # adjust for camera offset
    if pose_camera is not None:
        pose_robot = np.dot(pose_camera, pose_robot)
    return pose_robot


class ImageListener:

    def __init__(self, agent, graspnet, graspnet_contact):

        franka = Franka(is_physical_robot=True)
        self.moveit = MoveitBridge(franka)
        self.moveit.retract()
        # self.moveit.close_gripper()
        self.moveit.open_gripper()

        self.joint_listener = JointListener()
        self.pose_listener = tf.TransformListener()
        print('sleep a short time')
        rospy.sleep(2.0)
        print('current robot joints')
        print(self.joint_listener.joint_position)

        tf_pose = self.pose_listener.lookupTransform('measured/panda_hand',
                                                     'measured/right_gripper', rospy.Time(0))
        self.grasp_offset = make_pose(tf_pose)
        print('grasp offset', self.grasp_offset)

        self.agent = agent
        self.graspnet = graspnet
        self.graspnet_contact = graspnet_contact
        self.cv_bridge = CvBridge()

        self.im = None
        self.depth = None
        self.rgb_frame_id = None
        self.rgb_frame_stamp = None
        self.im_ef_pose = None
        self.acc_points = np.zeros([4, 0])
        self.depth_threshold = 1.2
        self.table_height = 0.0
        self.initial_joints = initial_joints
        self.num_initial_joints = initial_joints.shape[0]
        self.index_joints = 0
        self.target_obj_id = 1  # target object ID

        # publish object points for visualization
        self.empty_msg = PointCloud2()
        self.object_points2_target_pub = rospy.Publisher('/gaddpg_object_points2_target', PointCloud2, queue_size=10)
        self.object_points2_obstacle_pub = rospy.Publisher('/gaddpg_object_points2_obstacle', PointCloud2, queue_size=10)

        # initialize a node
        self.label_sub = message_filters.Subscriber('seg_label', Image, queue_size=1)

        self.hand_finger_point = np.array([
            [ 0., 0., 0.   , -0.   , 0.   , -0.   ],
            [ 0., 0., 0.053, -0.053, 0.053, -0.053],
            [ 0., 0., 0.075,  0.075, 0.105,  0.105]])

        self.bin_conf_1 = np.array([0.7074745589850109, 0.361727706885124, 0.38521270434333,
                                    -1.1754794559646125, -0.4169872830046795, 1.7096866963969337,
                                    1.654512471818922]).astype(np.float32)
        self.bin_conf_2 = np.array([0.5919747534674433, 0.7818432665691674, 0.557417382701195,
                                    -1.1647884021323738, -0.39191044586242046, 1.837464805311654,
                                    1.9150514982533562]).astype(np.float32)

        if cfg.ROS_CAMERA == 'D415':
            # use RealSense D415
            self.base_frame = 'measured/base_link'
            camera_name = 'cam_2'
            rgb_sub = message_filters.Subscriber('/%s/color/image_raw' % camera_name, Image, queue_size=1)
            depth_sub = message_filters.Subscriber('/%s/aligned_depth_to_color/image_raw' % camera_name, Image, queue_size=1)
            msg = rospy.wait_for_message('/%s/color/camera_info' % camera_name, CameraInfo)
            self.camera_frame = 'measured/camera_color_optical_frame'
            self.target_frame = self.base_frame
        elif cfg.ROS_CAMERA == 'Azure':
            self.base_frame = 'measured/base_link'
            rgb_sub = message_filters.Subscriber('/k4a/rgb/image_raw', Image, queue_size=1)
            depth_sub = message_filters.Subscriber('/k4a/depth_to_rgb/image_raw', Image, queue_size=1)
            msg = rospy.wait_for_message('/k4a/rgb/camera_info', CameraInfo)
            self.camera_frame = 'rgb_camera_link'
            self.target_frame = self.base_frame
        else:
            # use kinect
            self.base_frame = '%s_rgb_optical_frame' % (cfg.ROS_CAMERA)
            rgb_sub = message_filters.Subscriber('/%s/rgb/image_color' % (cfg.ROS_CAMERA), Image, queue_size=1)
            depth_sub = message_filters.Subscriber('/%s/depth_registered/image' % (cfg.ROS_CAMERA), Image, queue_size=1)
            msg = rospy.wait_for_message('/%s/rgb/camera_info' % (cfg.ROS_CAMERA), CameraInfo)
            self.camera_frame = '%s_rgb_optical_frame' % (cfg.ROS_CAMERA)
            self.target_frame = self.base_frame

        # update camera intrinsics
        intrinsics = np.array(msg.K).reshape(3, 3)
        self.fx = intrinsics[0, 0]
        self.fy = intrinsics[1, 1]
        self.px = intrinsics[0, 2]
        self.py = intrinsics[1, 2]
        print(intrinsics)

        queue_size = 1
        slop_seconds = 0.4
        ts = message_filters.ApproximateTimeSynchronizer([rgb_sub, depth_sub, self.label_sub],
                                                         queue_size, slop_seconds)
        ts.registerCallback(self.callback_rgbdm)

        # set global intrinsics and extrinsics
        global INTRINSICS, EXTRINSICS
        INTRINSICS = intrinsics
        EXTRINSICS = np.zeros([4, 4])  # from camera to end effector
        EXTRINSICS[:3, 3] = (np.array([0.05253322227958818, -0.05414890498307623, 0.06035263861136299]))  # camera offset
        EXTRINSICS[:3, :3] = quat2mat([0.7182116422267757, 0.016333297635292354, 0.010996322012974747, 0.6955460741463947])

        self.remaining_step = cfg.RL_MAX_STEP

        # start publishing thread
        self.start_publishing_tf()

        self.planner = EnvPlanner()
        self.expert_plan = []
        self.standoff_idx = -1
        self.has_plan = False
        self.num_trial = 0

        # threshold to close gripper
        self.grasp_score_threshold = 0.4

    def compute_plan_with_gaddpg(self, state, ef_pose, vis=False):
        """ generate initial expert plan """
        joints = get_joints(self.joint_listener)
        gaddpg_grasps_from_simulate_view(self.agent, state, cfg.RL_MAX_STEP, ef_pose)
        print('finish simulate views')
        # can use remaining timesteps to replan. Set vis to visualize collision and traj
        self.expert_plan, self.standoff_idx = self.planner.expert_plan(cfg.RL_MAX_STEP, joints, ef_pose, state[0][0], vis=vis)
        print('expert plan', self.expert_plan.shape)
        print('standoff idx', self.standoff_idx)

    def start_publishing_tf(self):
        self.stop_event = threading.Event()
        self.tf_thread = threading.Thread(target=self.publish_point_cloud)
        self.tf_thread.start()

    def publish_point_cloud(self):
        rate = rospy.Rate(30.)
        fields = [
            PointField('x', 0, PointField.FLOAT32, 1),
            PointField('y', 4, PointField.FLOAT32, 1),
            PointField('z', 8, PointField.FLOAT32, 1)]
        while not self.stop_event.is_set() and not rospy.is_shutdown():
            header = std_msgs.msg.Header()
            header.stamp = rospy.Time.now()
            header.frame_id = self.base_frame

            out_xyz = self.acc_points[:3, :].T
            label = self.acc_points[3, :].flatten()
            target_xyz = out_xyz[label == 0, :]
            obj_pc2_target = point_cloud2.create_cloud(header, fields, target_xyz)
            self.object_points2_target_pub.publish(obj_pc2_target)

            obstacle_xyz = out_xyz[label == 1, :]
            obj_pc2_obstacle = point_cloud2.create_cloud(header, fields, obstacle_xyz)
            self.object_points2_obstacle_pub.publish(obj_pc2_obstacle)
            # if out_xyz.shape[0] > 0:
            #     print('publish points')
            #     print(out_xyz.shape)
            rate.sleep()

    def callback_rgbdm(self, rgb, depth, mask):

        ef_pose = get_ef_pose(self.pose_listener)

        if depth.encoding == '32FC1':
            depth_cv = self.cv_bridge.imgmsg_to_cv2(depth)
        elif depth.encoding == '16UC1':
            depth_cv = self.cv_bridge.imgmsg_to_cv2(depth).copy().astype(np.float32)
            depth_cv /= 1000.0
        else:
            rospy.logerr_throttle(
                1, 'Unsupported depth type. Expected 16UC1 or 32FC1, got {}'.format(
                    depth.encoding))
            return

        im = self.cv_bridge.imgmsg_to_cv2(rgb, 'bgr8')
        mask = self.cv_bridge.imgmsg_to_cv2(mask, 'mono8')

        # rescale image if necessary
        # Lirui: consider rescaling to 112 x 112 which is used in training (probably not necessary)
        if cfg.SCALES_BASE[0] != 1:
            im_scale = cfg.SCALES_BASE[0]
            im = pad_im(cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR), 16)
            depth_cv = pad_im(cv2.resize(depth_cv, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_NEAREST), 16)
            mask = pad_im(cv2.resize(mask, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_NEAREST), 16)

        with lock:
            self.im = im.copy()
            self.im_ef_pose = ef_pose.copy()
            self.mask = mask.copy()
            self.depth = depth_cv.copy()
            self.rgb_frame_id = rgb.header.frame_id
            self.rgb_frame_stamp = rgb.header.stamp

    def show_segmentation_result(self, color, mask, mask_ids):
        image = color.copy()
        for i in range(len(mask_ids)):
            mask_id = mask_ids[i]
            index = np.where(mask == mask_id)
            x = int(np.mean(index[1]))
            y = int(np.mean(index[0]))
            image = cv2.putText(image, str(i+1), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 255, 0), 2, cv2.LINE_AA)
        cv2.namedWindow("Display 1")
        cv2.imshow("Display 1", image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        value = input('Please enter which object to pick up: ')
        return int(value)

    def find_target_object(self, depth, mask, mask_ids, ef_pose, remaining_step, vis=False):

        # select target points
        target_mask = get_target_mask(self.acc_points)
        points = self.acc_points[:3, target_mask]

        # sample points
        points = regularize_pc_point_count(points.T, 1024, use_farthest_point=True).T

        # base to hand
        points = se3_transform_pc(se3_inverse(ef_pose), points)

        # hand to camera
        offset_pose = se3_inverse(EXTRINSICS)
        xyz_points = offset_pose[:3, :3].dot(points) + offset_pose[:3, [3]]

        # projection to image
        p_xyz = INTRINSICS.dot(xyz_points)
        index = p_xyz[2] > 0.03
        p_xyz = p_xyz[:, index]
        xyz_points = xyz_points[:, index]
        x, y = (p_xyz[0] / p_xyz[2]).astype(np.int), (p_xyz[1] / p_xyz[2]).astype(np.int)

        # bounding box
        x1 = np.min(x)
        x2 = np.max(x)
        y1 = np.min(y)
        y2 = np.max(y)
        area = (x2 - x1 + 1) * (y2 - y1 + 1)

        # check labels
        valid_idx_mask = (x > 0) * (x < mask.shape[1] - 1) * (y > 0) * (y < mask.shape[0] - 1)
        labels = mask[y[valid_idx_mask], x[valid_idx_mask]]
        labels_nonzero = labels[labels > 0]
        xyz_points = xyz_points[:, valid_idx_mask]

        # find the majority label
        if float(len(labels_nonzero)) / float((len(labels) + 1)) < 0.5:
            print('overlap to background')
            target_id = -1
        else:
            target_id = np.bincount(labels_nonzero).argmax()

            # check bounding box overlap
            I = np.where(mask == target_id)
            x11 = np.min(I[1])
            x22 = np.max(I[1])
            y11 = np.min(I[0])
            y22 = np.max(I[0])
            area1 = (x22 - x11 + 1) * (y22 - y11 + 1)

            xx1 = np.maximum(x1, x11)
            yy1 = np.maximum(y1, y11)
            xx2 = np.minimum(x2, x22)
            yy2 = np.minimum(y2, y22)
            w = np.maximum(0.0, xx2 - xx1 + 1)
            h = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w * h
            ovr = inter / (area + area1 - inter)
            print('overlap', ovr)
            if ovr < 0.3:
                target_id = -1

            # projected depth
            depths = depth[y[valid_idx_mask], x[valid_idx_mask]]
            # computed depth
            z = xyz_points[2, :]
            diff = np.mean(np.absolute(depths - z))
            print('mean depth diff', diff)
            if diff > 0.15:
                target_id = -1

        # if remaining_step == cfg.RL_MAX_STEP - 1 and target_id != -1:
        #     self.acc_points = np.zeros([4, 0])

        if vis:
            # show image
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
            plt.imshow(mask)
            plt.scatter(x[valid_idx_mask], y[valid_idx_mask], s=10)
            # plt.show()
            plt.show(block=False)
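            # non-blocking show: keep the debug overlay visible for roughly a
            # second (the pause below), then close it so the loop continues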
plt.pause(1) plt.close() return target_id def print_joint(self, joint): num = len(joint) s = '' for i in range(num): s += '%.6f, ' % rad2deg(joint[i]) print(s) def process_label(self, foreground_labels): """ Process foreground_labels - Map the foreground_labels to {0, 1, ..., K-1} @param foreground_labels: a [H x W] numpy array of labels @return: foreground_labels """ # Find the unique (nonnegative) foreground_labels, map them to {0, ..., K-1} unique_nonnegative_indices = np.unique(foreground_labels) mapped_labels = foreground_labels.copy() for k in range(unique_nonnegative_indices.shape[0]): mapped_labels[foreground_labels == unique_nonnegative_indices[k]] = k foreground_labels = mapped_labels return foreground_labels def compute_grasp_object_distance(self, RT_grasp): T = RT_grasp[:3, 3].reshape((3, 1)) # target points index = self.acc_points[3, :] == 0 points = self.acc_points[:3, index] n = points.shape[1] hand = np.repeat(T, n, axis=1) distances = np.linalg.norm(hand - points, axis=0) return np.min(distances) def run_network(self): # sample an initial joint if self.remaining_step == cfg.RL_MAX_STEP: print('use initial joint %d' % (self.index_joints)) initial_joints = self.initial_joints[self.index_joints, :] self.moveit.go_local(q=initial_joints, wait=True) rospy.sleep(1.0) with lock: if listener.im is None: print('no image') return color = self.im.copy() depth = self.depth.copy() mask = self.mask.copy() im_ef_pose = self.im_ef_pose.copy() rgb_frame_id = self.rgb_frame_id rgb_frame_stamp = self.rgb_frame_stamp print('===========================================') # process mask mask = self.process_label(mask) mask_ids = np.unique(mask) if mask_ids[0] == 0: mask_ids = mask_ids[1:] num = mask_ids.shape[0] mask_failure = (num == 0) # no mask for the first frame if mask_failure and self.remaining_step == cfg.RL_MAX_STEP: print('no object segmented') raw_input('put objects in the scene?') return count = np.zeros((num, ), dtype=np.int32) for i in range(num): count[i] = len(np.where(mask == mask_ids[i])[0]) # show the segmentation start_time = time.time() if self.remaining_step == cfg.RL_MAX_STEP: print('%d objects segmented' % num) print(mask_ids) if not RANDOM_TARGET: label_max = np.argmax(count) target_id = mask_ids[label_max] else: target_id = self.show_segmentation_result(color, mask, mask_ids) ''' while True: target_id = np.random.choice(mask_ids) # check number of pixels for the target num_pixels = np.sum(mask == target_id) if num_pixels > 500: print('%d target pixels' % num_pixels) break ''' elif num > 0: # data association to find the target id for the current frame target_id = self.find_target_object(depth, mask, mask_ids, im_ef_pose, self.remaining_step, vis=False) else: target_id = -1 self.target_obj_id = target_id print('target id is %d' % target_id) print("---select target time %s seconds ---" % (time.time() - start_time)) if self.remaining_step == cfg.RL_MAX_STEP and not args.fix_initial_state: self.index_joints += 1 if self.index_joints >= self.num_initial_joints: self.index_joints = 0 # process target mask start_time = time.time() mask_background = np.zeros_like(mask) mask_background[mask == 0] = 1 if num > 0: # update this for 0 background and 1-N for other target mask_target = np.zeros_like(mask) mask_target[mask == target_id] = 1 # erode target mask mask_target = cv2.erode(mask_target, np.ones((7, 7), np.uint8), iterations=3) num_pixels = np.sum(mask_target) print('finish mask, %d foreground pixels' % (num_pixels)) # build the final mask mask[(mask == target_id) & 
(mask_target == 0)] = 0 mask_final = mask.copy() else: mask_final = np.zeros_like(mask) print("---process mask time %s seconds ---" % (time.time() - start_time)) # compute state start_time = time.time() depth = depth[...,None] agg = (not mask_failure) and (self.remaining_step >= cfg.RL_MAX_STEP - 1) state, point_background = self.camera_image_to_state( color, depth, mask_final, mask_background, im_ef_pose, cfg.RL_MAX_STEP - self.remaining_step, agg=agg, vis=False) print('after camera image to state', state[0].shape) print('background point shape', point_background.shape) print("---compute state time %s seconds ---" % (time.time() - start_time)) # compute action state = [state, None, None, None] # look at target if self.remaining_step == cfg.RL_MAX_STEP and USE_LOOK_AT: index = self.acc_points[3, :] == 0 points = self.acc_points[:3, index] center = np.mean(points, axis=1) angle = 60 T_lookat = compute_look_at_pose(self.pose_listener, center, angle=angle, distance=0.45) self.moveit.go_local(T_lookat, wait=True) self.remaining_step = max(self.remaining_step-1, 1) rospy.sleep(0.5) return # GRASPNET + OMG + GA-DDPG # run graspnet if (not self.has_plan and COMBINED) or (GRASPNET_ONLY and not GA_DDPG_ONLY): point_state = state[0][0].copy() # avoid aggregation print('point_state', point_state.shape) target_mask = point_state[3, :] == 0 target_pt = point_state[:3, target_mask].T print('target_pt', target_pt.shape) if CONTACT_GRASPNET: # only for target # pc_full: (493949, 3), pc_colors: (493949, 3), pc_segments: dict (idx: (13481, 3)), local_regions True filter_grasps True forward_passes 1 pc_segments = {'0': target_pt} point_full = point_state[:3,6:-500].T print('point_full', point_full.shape) # all points. You need to add table point here pred_grasps_cam, scores, contact_pts, _ = self.graspnet_contact.predict_scene_grasps(sess_contact, point_full, pc_segments=pc_segments, local_regions=True, filter_grasps=True, forward_passes=1) # pred_grasps_cam: dict (idx: (N, 4, 4)), scores: dict (idx: (N, 1)), contact_pts: dict (idx: (N, 3)) generated_grasps = pred_grasps_cam['0'] generated_scores = scores['0'] print('generated contact grasps', generated_grasps.shape) else: latents = self.graspnet.sample_latents() generated_grasps, generated_scores, _ = self.graspnet.predict_grasps( sess, target_pt.copy(), latents, num_refine_steps=10, ) # select grasps top_num = 100 # grasp num sorted_idx = list(np.argsort(generated_scores))[::-1] select_grasp = [generated_grasps[idx] for idx in sorted_idx[:top_num]] select_grasp_score = [generated_scores[idx] for idx in sorted_idx[:top_num]] print('mean select grasp score: {:.3f}'.format(np.mean(np.round(select_grasp_score, 3)))) goal_states = np.array([im_ef_pose.dot(g.dot(rotZ(np.pi / 2))) for g in select_grasp]) # might not need rotate print(goal_states.shape) if goal_states.shape[0] == 0: return # use OMG in this repo planner_cfg.use_external_grasp = True planner_cfg.external_grasps = goal_states # this sets the grasps in base coordinate joints = get_joints(self.joint_listener) # construct scene points num = point_state.shape[1] + point_background.shape[1] scene_points = np.ones((4, num), dtype=np.float32) scene_points[:, :point_state.shape[1]] = point_state.copy() scene_points[:3, point_state.shape[1]:] = point_background.copy() step = 30 plan, standoff_idx = self.planner.expert_plan(step, joints, im_ef_pose, scene_points, vis=False) self.has_plan = True print('expert plan', plan.shape) # execute plan to standoff if COMBINED: self.moveit.execute(plan[:standoff_idx-5]) 
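                    # COMBINED mode: follow the OMG plan up to a few waypoints
                    # before the standoff pose, then let GA-DDPG close the loop
                    # for the final approach (hence resetting remaining_step)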
self.remaining_step = 10 print('*****************switch to gaddpg****************') rospy.sleep(1.0) else: self.moveit.execute(plan[:standoff_idx]) self.moveit.execute(plan[standoff_idx:]) rospy.sleep(1.0) if PUT_BIN: self.put_bin() else: self.retract() self.acc_points = np.zeros([4, 0]) self.remaining_step = cfg.RL_MAX_STEP else: if self.termination_heuristics(state) or self.num_trial >= 5: if self.num_trial >= 5: print('********************trial exceed********************') if PUT_BIN: self.put_bin() else: self.retract() # reset self.acc_points = np.zeros([4, 0]) self.remaining_step = cfg.RL_MAX_STEP self.has_plan = False self.num_trial = 0 return # run ga-ddpg print('use ga-ddpg') target_state = select_target_point(state) # only target points action, _, _, aux_pred = self.agent.select_action(target_state, remain_timestep=self.remaining_step) print('finish network') pose_delta = unpack_action(action) ef_pose = get_ef_pose(self.pose_listener) ef_pose = ef_pose.dot(pose_delta) RT_grasp = ef_pose.dot(self.grasp_offset) vis_pose = ef_pose.copy() # send_transform(RT_grasp, vis_pose, 'GADDPG_action') self.moveit.go_local(RT_grasp, wait=True) print('remaining step: {} aggr. point: {}'.format(self.remaining_step, self.acc_points.shape[1])) # raw_input('next step?') self.remaining_step = max(self.remaining_step-1, 1) if self.remaining_step == 1: self.remaining_step += 5 self.num_trial += 1 def retract(self): """ close finger and lift """ # close the gripper self.moveit.close_gripper(force=60) rospy.sleep(1.0) # lift object delta = 0.20 joints = get_joints(self.joint_listener) T = self.moveit.forward_kinematics(joints[:-2]) print('T in retract', T) T_lift = T.copy() T_lift[2, 3] += delta self.moveit.go_local(T_lift, wait=True) # wait a few seconds rospy.sleep(2.0) # put object down T_put = T.copy() T_put[2, 3] += 0.01 self.moveit.go_local(T_put, wait=True) self.moveit.open_gripper() self.moveit.go_local(T_lift, wait=True) if GA_DDPG_ONLY: self.moveit.retract() else: step = 20 joint_position = get_joints(self.joint_listener) end_conf = np.append(self.moveit.home_q, joint_position[7:]) traj = self.planner.plan_to_conf(step, joint_position, end_conf, vis=False)[::2, :] self.moveit.execute(traj) raw_input('finished. 
Try again?') # grasp object and put object into a bin with goal conf def put_bin(self): force_before = self.joint_listener.robot_force print('force before grasping', force_before) # close the gripper self.moveit.close_gripper(force=60) rospy.sleep(0.5) # lift object a bit delta = 0.05 joints = get_joints(self.joint_listener) T = self.moveit.forward_kinematics(joints[:-2]) print('T in retract', T) T_lift = T.copy() T_lift[2, 3] += delta self.moveit.go_local(T_lift, wait=True) force_after = self.joint_listener.robot_force print('force after grasping', force_after) force_diff = np.linalg.norm(force_before - force_after) print('force diff norm', force_diff) # lift object more delta = 0.30 joints = get_joints(self.joint_listener) T = self.moveit.forward_kinematics(joints[:-2]) print('T in retract', T) T_lift = T.copy() T_lift[2, 3] += delta self.moveit.go_local(T_lift, wait=True) # check grasp success joint_position = self.joint_listener.joint_position print('check success', joint_position) if joint_position[-1] > 0.002 or force_diff > 0.5 or force_diff == 0: success = True print('grasp success') else: success = False print('grasp fail') # plan to goal conf step = 20 if success: joint_position = get_joints(self.joint_listener) end_conf = np.append(self.bin_conf_1, joint_position[7:]) traj = self.planner.plan_to_conf(step, joint_position, end_conf, vis=False)[::2, :] self.moveit.execute(traj) joint_position = get_joints(self.joint_listener) end_conf = np.append(self.bin_conf_2, joint_position[7:]) traj = self.planner.plan_to_conf(step, joint_position, end_conf, vis=False)[::2, :] self.moveit.execute(traj) self.moveit.open_gripper() joint_position = get_joints(self.joint_listener) end_conf = np.append(self.moveit.home_q, joint_position[7:]) traj = self.planner.plan_to_conf(step, joint_position, end_conf, vis=False)[::2, :] self.moveit.execute(traj) self.moveit.open_gripper() def bias_target_pc_regularize(self, point_state, total_point_num, target_pt_num=1024, use_farthest_point=True): target_mask = point_state[3, :] == 0 target_pt = point_state[:, target_mask] nontarget_pt = point_state[:, ~target_mask] print(target_pt.shape, nontarget_pt.shape) if target_pt.shape[1] > 0: target_pt = regularize_pc_point_count(target_pt.T, target_pt_num, use_farthest_point).T if nontarget_pt.shape[1] > 0: effective_target_pt_num = min(target_pt_num, target_pt.shape[1]) nontarget_pt = regularize_pc_point_count(nontarget_pt.T, total_point_num - effective_target_pt_num, use_farthest_point).T return np.concatenate((target_pt, nontarget_pt), axis=1) # new_points is in hand coordinate # ACC_POINTS is in base def update_curr_acc_points(self, new_points, ef_pose, step): """ Update accumulated points in world coordinate """ new_points = se3_transform_pc(ef_pose, new_points) # the number below can be adjusted for efficiency and robustness aggr_sample_point_num = min(int(CONFIG.pt_accumulate_ratio**step * CONFIG.uniform_num_pts), new_points.shape[1]) index = np.random.choice(range(new_points.shape[1]), size=aggr_sample_point_num, replace=False).astype(np.int) new_points = new_points[:,index] print('new points before filtering with table height', new_points.shape) index = new_points[2, :] > self.table_height new_points = new_points[:, index] print('new points {} total point {}'.format(new_points.shape, self.acc_points.shape)) self.acc_points = np.concatenate((new_points, self.acc_points), axis=1) # self.acc_points = regularize_pc_point_count(self.acc_points.T, 4096, use_farthest_point=True).T # if it still grows too much, can 
limit points by call regularize pc point count # self.planner.expert_plan can also be called with these dense points directly def goal_closure(self, action, goal): action_2 = np.zeros(7) action_2[-3:] = action[:3] action_2[:-3] = mat2quat(euler2mat(action[3], action[4], action[5])) # euler to quat point_dist = float(agent.goal_pred_loss(torch.from_numpy(goal)[None].float().cuda(), torch.from_numpy(action_2)[None].float().cuda())) print('point dist: {:.3f}'.format(point_dist)) return point_dist < 0.008 def graspnet_closure(self, point_state): """ Compute grasp quality from tf grasp net. """ score = self.graspnet.compute_grasps_score(sess, point_state) print('grasp closure score:', score) return score > self.grasp_score_threshold # tuned threshold # point_state is in hand coordinate def process_pointcloud(self, point_state, im_ef_pose, step, agg=True, use_farthest_point=False): """ Process the cluttered scene point_state [0 - 6]: random or gripper points with mask -1 [6 - 1030]: target point with mask 0 [1030 - 5002]: obstacle point with mask 1 [5002 - 5502]: robot points with mask 2 can be random or generated with get_collision_points and transform with joint """ # accumulate all point state in base # set the mask 0 as target, 1 as other objects index_target = point_state[3, :] == self.target_obj_id index_other = point_state[3, :] != self.target_obj_id point_state[3, index_target] = 0. point_state[3, index_other] = 1. if agg: self.update_curr_acc_points(point_state, im_ef_pose, step) # base to hand inv_ef_pose = se3_inverse(im_ef_pose) point_state = se3_transform_pc(inv_ef_pose, self.acc_points) point_state = self.bias_target_pc_regularize(point_state, CONFIG.uniform_num_pts) hand_finger_point = np.concatenate([self.hand_finger_point, np.ones((1, self.hand_finger_point.shape[1]), dtype=np.float32)], axis=0) point_state = np.concatenate([hand_finger_point, point_state], axis=1) point_state_ = point_state.copy() point_state_[3, :hand_finger_point.shape[1]] = -1 # ignore robot points make sure it's 6 + 4096 + 500 point_state_ = np.concatenate((point_state_, np.zeros((4, 500))), axis=1) point_state_[3, -500:] = 2 return point_state_ def camera_image_to_state(self, rgb, depth, mask, mask_background, im_ef_pose, step, agg=True, vis=False): """ map from camera images and segmentations to object point cloud in robot coordinate mask: 0 represents target, 1 everything else mask: w x h x 1 rgb: w x h x 3 depth:w x h x 1 """ if vis: fig = plt.figure(figsize=(14.4, 4.8)) ax = fig.add_subplot(1, 3, 1) plt.imshow(rgb[:, :, (2, 1, 0)]) ax = fig.add_subplot(1, 3, 2) plt.imshow(depth[...,0]) ax = fig.add_subplot(1, 3, 3) plt.imshow(mask) plt.show() mask_target = np.zeros_like(mask) mask_target[mask == self.target_obj_id] = 1 mask_state = 1 - mask_target[...,None] image_state = np.concatenate([rgb, depth, mask_state], axis=-1) image_state = image_state.T # depth to camera, all the points on foreground objects # backproject depth depth_cuda = torch.from_numpy(depth).cuda() fx = INTRINSICS[0, 0] fy = INTRINSICS[1, 1] px = INTRINSICS[0, 2] py = INTRINSICS[1, 2] im_pcloud = posecnn_cuda.backproject_forward(fx, fy, px, py, depth_cuda)[0].cpu().numpy() # select points valid = (depth[...,0] != 0) * (mask > 0) point_xyz = im_pcloud[valid, :].reshape(-1, 3) label = mask[valid][...,None] point_state = np.concatenate((point_xyz, label), axis=1).T # point_state = backproject_camera_target_realworld_clutter(depth, INTRINSICS, mask) print('%d foreground points' % point_state.shape[1]) # filter depth index = point_state[2, 
:] < self.depth_threshold
        point_state = point_state[:, index]

        # camera to hand
        point_state = se3_transform_pc(EXTRINSICS, point_state)

        # background points
        valid = (depth[...,0] != 0) * (mask_background > 0)
        point_background = im_pcloud[valid, :].reshape(-1, 3)
        index = point_background[:, 2] < self.depth_threshold
        point_background = point_background[index, :]
        if point_background.shape[0] > 0:
            point_background = regularize_pc_point_count(point_background, 1024, use_farthest_point=False)
        point_background = se3_transform_pc(EXTRINSICS, point_background.T)

        # accumulate points in base, and transform to hand again
        point_state = self.process_pointcloud(point_state, im_ef_pose, step, agg)
        obs = (point_state, image_state)
        return obs, point_background

    # state points and grasp are in hand coordinate
    def vis_realworld(self, state, rgb, grasp, local_view=True, curr_joint=None):
        """ visualize grasp and current observation
        local view (hand camera view with projected points)
        global view (with robot and accumulated points)
        this can be converted to ros
        """
        ef_pose = get_ef_pose(self.pose_listener)
        if local_view:
            print('in vis realworld local view')
            # base to hand
            points = se3_transform_pc(se3_inverse(ef_pose), self.acc_points)
            rgb = rgb[:,:,::-1]
            rgb = proj_point_img(rgb, INTRINSICS, se3_inverse(EXTRINSICS), points[:3], real_world=True)
            grasp = unpack_pose_rot_first(grasp)  # .dot(rotZ(np.pi/2))
            rgb = draw_grasp_img(rgb, grasp, INTRINSICS, se3_inverse(EXTRINSICS), vis=True, real_world=True)
            # show image
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
            plt.imshow(rgb)
            plt.show()
        else:
            print('in vis realworld global view')
            # global view
            point_color = [255, 255, 0]
            if curr_joint is None:
                curr_joint = get_joints(self.joint_listener)
                point_color = [0, 255, 0]
            poses_ = robot.forward_kinematics_parallel(
                wrap_value(curr_joint)[None], offset=True)[0]
            grasp = poses_[7].dot(unpack_pose_rot_first(grasp))
            poses = [pack_pose(pose) for pose in poses_]
            line_starts, line_ends = grasp_gripper_lines(grasp[None])
            # green: observation, yellow: simulation, red: cage point
            cage_points_mask, depth_heuristics = self.compute_cage_point_mask()
            noncage_points = self.acc_points[:3, ~cage_points_mask]
            cage_points = self.acc_points[:3, cage_points_mask]
            rgb = self.planner.planner_scene.renderer.vis(
                poses, list(range(10)),
                shifted_pose=np.eye(4),
                interact=2,
                V=np.array(V),
                visualize_context={
                    "white_bg": True,
                    "project_point": [noncage_points, cage_points],
                    "project_color": [[0, 255, 0], [255, 0, 0]],
                    "static_buffer": True,
                    "reset_line_point": True,
                    "thickness": [2],
                    "line": [(line_starts[0], line_ends[0])],
                    "line_color": [[255, 0, 0]],
                }
            )
        return rgb

    def compute_cage_point_mask(self):
        # points in global coordinate
        index = self.acc_points[3, :] == 0
        points = self.acc_points[:3, index]
        # base to hand
        ef_pose = get_ef_pose(self.pose_listener)
        inv_ef_pose = se3_inverse(ef_pose)
        point_state = se3_transform_pc(inv_ef_pose, points)
        # 0.11
        cage_points_mask = (point_state[2] > 0.06) * (point_state[2] < 0.09) * \
                           (point_state[1] > -0.05) * (point_state[1] < 0.05) * \
                           (point_state[0] > -0.02) * (point_state[0] < 0.02)
        terminate = cage_points_mask.sum() > CAGE_POINT_THRESHOLD
        # maybe this is more robust (use_farthest_point)?
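        # the box above is expressed in the end-effector frame (metres) and
        # roughly covers the volume between the fingertips; termination fires
        # once enough target points fall inside it (CAGE_POINT_THRESHOLD)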
        cage_points_mask_reg = regularize_pc_point_count(cage_points_mask[:,None], CONFIG.uniform_num_pts, use_farthest_point=False)
        print('number of cage points %d' % cage_points_mask_reg.sum())
        terminate = cage_points_mask_reg.sum() > CAGE_POINT_THRESHOLD
        return cage_points_mask, terminate

    def termination_heuristics(self, state):
        """
        Depth heuristics on the target for deciding whether the grasp can be
        executed: the test looks at target depth around the middle of the hand
        camera image, with the fingers near the bottom two sides of the view.
        """
        point_state = state[0][0]
        target_mask = get_target_mask(point_state)
        point_state = point_state[:3, target_mask].T
        depth_heuristics = self.graspnet_closure(point_state)
        if depth_heuristics:
            print('object inside gripper? start retracting...')
        return depth_heuristics

    def preview_trajectory(self, state, remain_timestep, vis=False):
        """
        use the current point cloud to simulate observation and action for a
        trajectory; this can be used to check a trajectory before execution
        """
        print('in preview trajectory')
        state_origin = copy.deepcopy(state)
        sim_state = [state[0][0].copy(), state[0][1]]
        joints = get_joints(self.joint_listener)
        ef_pose = get_ef_pose(self.pose_listener)
        ef_pose_origin = ef_pose.copy()
        joint_plan = [joints]
        ef_pose_plan = [ef_pose]

        for episode_steps in range(remain_timestep):
            state[0] = sim_state
            gaddpg_input_state = select_target_point(state)
            step = min(max(remain_timestep - episode_steps, 1), 25)
            action, _, _, aux_pred = agent.select_action(gaddpg_input_state, remain_timestep=step)
            action_pose = unpack_action(action)
            ef_pose = ef_pose.dot(action_pose)
            joints = solve_ik(joints, pack_pose(ef_pose))
            joint_plan.append(joints)
            ef_pose_plan.append(ef_pose)
            sim_next_point_state = se3_transform_pc(se3_inverse(action_pose), sim_state[0])
            sim_state[0] = sim_next_point_state

        if vis:
            # vis entire traj. Might be useful
Might be useful poses_ = robot.forward_kinematics_parallel( wrap_value(joint_plan[0])[None], offset=True)[0] poses = [pack_pose(pose) for pose in poses_] line_starts, line_ends = grasp_gripper_lines(np.array(ef_pose_plan)) points = state_origin[0][0] points = se3_transform_pc(ef_pose_origin, points) point_color = get_point_color(points) rgb = self.planner.planner_scene.renderer.vis(poses, list(range(10)), shifted_pose=np.eye(4), interact=2, V=np.array(V), visualize_context={ "white_bg": True, "project_point": [points], "project_color": [point_color], "static_buffer": True, "reset_line_point": True, "thickness": [2], "line": [(line_starts[0], line_ends[0])], "line_color": [[255, 0, 0]], } ) num = len(joint_plan) traj = np.zeros((num, 9), dtype=np.float32) for i in range(num): traj[i, :] = joint_plan[i] return traj # for debuging def send_transform(T, ef_pose, name, base_frame='measured/base_link'): broadcaster = tf.TransformBroadcaster() marker_pub = rospy.Publisher(name, Marker, queue_size = 10) for i in range(100): print('sending transformation {}'.format(name)) qt = mat2quat(T[:3, :3]) broadcaster.sendTransform(T[:3, 3], [qt[1], qt[2], qt[3], qt[0]], rospy.Time.now(), name, base_frame) GRASP_FRAME_OFFSET = tra.quaternion_matrix([0, 0, -0.707, 0.707]) GRASP_FRAME_OFFSET[:3, 3] = [0, 0, 0.0] vis_pose = np.matmul(ef_pose, GRASP_FRAME_OFFSET) publish_grasps(marker_pub, base_frame, vis_pose) rospy.sleep(0.1) def show_grasps(ef_poses, name, base_frame='measured/base_link'): marker_pub = rospy.Publisher(name, MarkerArray, queue_size = 10) GRASP_FRAME_OFFSET = tra.quaternion_matrix([0, 0, -0.707, 0.707]) GRASP_FRAME_OFFSET[:3, 3] = [0, 0, 0.0] color = [0, 1, 0, 1] while not rospy.is_shutdown(): markerArray = MarkerArray() for i in range(ef_poses.shape[0]): ef_pose = ef_poses[i] vis_pose = np.matmul(ef_pose, GRASP_FRAME_OFFSET) marker = create_gripper_marker_message ( frame_id = base_frame, namespace = 'hand', mesh_resource = 'package://grasping_vae/panda_gripper.obj', color = color, marker_id = i, ) pos = tra.translation_from_matrix(vis_pose) quat = tra.quaternion_from_matrix(vis_pose) marker.pose = Pose(position=Point(*pos), orientation=Quaternion(*quat)) markerArray.markers.append(marker) # Renumber the marker IDs id = 0 for m in markerArray.markers: m.id = id id += 1 marker_pub.publish(markerArray) print('publishing grasps') rospy.sleep(0.1) def create_gripper_marker_message( frame_id, namespace, mesh_resource, color, lifetime=True, mesh_use_embedded_materials=True, marker_id=0, frame_locked=False,): marker = Marker() marker.action = Marker.ADD marker.id = marker_id marker.ns = namespace if lifetime: marker.lifetime = rospy.Duration(0.2) marker.frame_locked = frame_locked marker.header.frame_id = frame_id marker.header.stamp = rospy.Time.now() marker.scale.x = marker.scale.y = marker.scale.z = 0.5 marker.color.r = color[0] marker.color.g = color[1] marker.color.b = color[2] marker.color.a = color[3] marker.type = Marker.MESH_RESOURCE marker.mesh_resource = mesh_resource marker.mesh_use_embedded_materials = mesh_use_embedded_materials return marker def publish_grasps(publisher, frame_id, grasp): color = [0, 1, 0, 1] marker = create_gripper_marker_message ( frame_id=frame_id, namespace='hand', mesh_resource='package://grasping_vae/panda_gripper.obj', color=color, marker_id=0, ) pos = tra.translation_from_matrix(grasp) quat = tra.quaternion_from_matrix(grasp) marker.pose = Pose(position=Point(*pos), orientation=Quaternion(*quat)) publisher.publish(marker) def make_pose(tf_pose): """ Helper 
function to get a full matrix out of this pose """ trans, rot = tf_pose pose = tra.quaternion_matrix(rot) pose[:3, 3] = trans return pose def gaddpg_grasps_from_simulate_view(gaddpg, state, time, ef_pose): """ simulate views for gaddpg """ n = 30 mask = get_target_mask(state[0][0]) point_state = state[0][0][:, mask] # hand to base point_state = se3_transform_pc(ef_pose, point_state) print('target point shape', point_state.shape) # target center is in base coordinate now target_center = point_state.mean(1)[:3] print('target center', target_center) # set up gaddpg img_state = state[0][1] gaddpg.policy.eval() gaddpg.state_feature_extractor.eval() # sample view (simulated hand) in base view_poses = np.array(sample_ef_view_transform(n, 0.2, 0.5, target_center, linspace=True, anchor=True)) # base to view (simulated hand) inv_view_poses = se3_inverse_batch(view_poses) transform_view_points = np.matmul(inv_view_poses[:,:3,:3], point_state[:3]) + inv_view_poses[:,:3,[3]] # gaddpg generate grasps point_state_batch = torch.from_numpy(transform_view_points).cuda().float() time = torch.ones(len(point_state_batch)).float().cuda() * 10. # time point_state_batch = torch.cat((point_state_batch, torch.zeros_like(point_state_batch)[:, [0]]), dim=1) policy_feat = gaddpg.extract_feature(img_state, point_state_batch, value=False, time_batch=time) _,_,_,gaddpg_aux = gaddpg.policy.sample(policy_feat) # compose with ef poses gaddpg_aux = gaddpg_aux.detach().cpu().numpy() unpacked_poses = [unpack_pose_rot_first(pose) for pose in gaddpg_aux] goal_pose_ws = np.matmul(view_poses, np.array(unpacked_poses)) # grasp to ef planner_cfg.external_grasps = goal_pose_ws # show_grasps(view_poses, 'grasps') # planner_cfg.external_grasps = view_poses # planner_cfg.external_grasps = np.concatenate((goal_pose_ws, view_poses), axis=0) # also visualize view planner_cfg.use_external_grasp = True def select_target_point(state, target_pt_num=1024): """ get target point cloud for gaddpg input """ point_state = state[0][0] target_mask = get_target_mask(point_state) # removing gripper point later point_state = point_state[:4, target_mask] gripper_pc = point_state[:4, :6] point_num = min(point_state.shape[1], target_pt_num) obj_pc = regularize_pc_point_count(point_state.T, point_num, False).T point_state = np.concatenate((gripper_pc, obj_pc), axis=1) return [(point_state, state[0][1])] + state[1:] def setup(): """ Set up networks with pretrained models and config as well as data migration """ load_from_pretrain = args.pretrained is not None and os.path.exists(args.pretrained) if load_from_pretrain and not args.finetune: cfg_folder = args.pretrained cfg_from_file(os.path.join(cfg_folder, "config.yaml"), reset_model_spec=False) cfg.RL_MODEL_SPEC = os.path.join(cfg_folder, cfg.RL_MODEL_SPEC.split("/")[-1]) dt_string = args.pretrained.split("/")[-1] else: if args.fix_output_time is None: dt_string = datetime.datetime.now().strftime("%d_%m_%Y_%H:%M:%S") else: dt_string = args.fix_output_time model_output_dir = os.path.join(cfg.OUTPUT_DIR, dt_string) print("Output will be saved to `{:s}`".format(model_output_dir)) new_output_dir = not os.path.exists(model_output_dir) and not args.test print("Using config:") pprint.pprint(cfg) net_dict = make_nets_opts_schedulers(cfg.RL_MODEL_SPEC, cfg.RL_TRAIN) return net_dict, dt_string def solve_ik(joints, pose): """ For simulating trajectory """ ik = robot.inverse_kinematics(pose[:3], ros_quat(pose[3:]), seed=joints[:7]) if ik is not None:
joints = np.append(np.array(ik), [0.04, 0.04]) return joints def parse_args(): """ Parse input arguments """ parser = argparse.ArgumentParser(description='') parser.add_argument('--env-name', default="PandaYCBEnv") parser.add_argument('--policy', default="DDPG") parser.add_argument('--seed', type=int, default=123456, metavar='N') parser.add_argument('--save_model', action="store_true") parser.add_argument('--pretrained', type=str, default=None, help='test one model') parser.add_argument('--test', action="store_true", help='test one model') parser.add_argument('--log', action="store_true", help='log') parser.add_argument('--render', action="store_true", help='rendering') parser.add_argument('--record', action="store_true", help='record video') parser.add_argument('--test_episode_num', type=int, default=10, help='number of episodes to test') parser.add_argument('--finetune', action="store_true", help='deprecated') parser.add_argument('--expert', action="store_true", help='generate expert rollout') parser.add_argument('--num_runs', type=int, default=1) parser.add_argument('--max_cnt_per_obj', type=int, default=10) parser.add_argument('--model_surfix', type=str, default='latest', help='suffix for the loaded model') parser.add_argument('--rand_objs', action="store_true", help='random objects in Shapenet') parser.add_argument('--load_test_scene', action="store_true", help='load pregenerated random scenes') parser.add_argument('--change_dynamics', action="store_true", help='change dynamics of the object') parser.add_argument('--egl', action="store_true", help='use egl plugin in bullet') parser.add_argument('--config_file', type=str, default=None) parser.add_argument('--output_file', type=str, default='rollout_success.txt') parser.add_argument('--fix_output_time', type=str, default=None) parser.add_argument('--use_external_grasp', action="store_true") parser.add_argument('--vis_grasp_net', action="store_true") parser.add_argument('--start_idx', type=int, default=1) parser.add_argument('--real_world', action="store_true") parser.add_argument('--preview_traj', action="store_true") parser.add_argument('--fix_initial_state', action="store_true") args = parser.parse_args() return args, parser ### TODO def get_joints(joint_listener): """ (9,) robot joint positions in radians, used only for rendering and simulation """ if LOCAL_TEST: # dummy return np.array([-0.5596, 0.5123, 0.5575, -1.6929, 0.2937, 1.6097, -1.237, 0.04, 0.04]) else: joints = joint_listener.joint_position print('robot joints', joints) return joints def get_ef_pose(pose_listener): """ (4, 4) end-effector pose matrix in the base frame """ if LOCAL_TEST: # dummy return np.array([[-0.1915, 0.8724, -0.4498, 0.6041], [ 0.7355, 0.4309, 0.5228, -0.0031], [ 0.6499, -0.2307, -0.7242, 0.3213], [ 0., 0., 0., 1.
]]) else: base_frame = 'measured/base_link' target_frame = 'measured/panda_hand' try: tf_pose = pose_listener.lookupTransform(base_frame, target_frame, rospy.Time(0)) pose = make_pose(tf_pose) except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException): pose = None print('cannot find end-effector pose') sys.exit(1) return pose initial_joints = np.array([[-0.02535421982428639, -1.1120411124179306, 0.07915425984753728, -2.574433677700231, 0.0012470895074533914, 1.926161096378418, 0.9002216220876491], [0.5805350207739269, -0.8111362388758844, -1.1146667134109263, -2.2735199081247064, -0.18589086490010281, 2.2351670468606946, -0.36534494081830765], [-0.4345369377943954, -1.05069044781103, 1.119439285721959, -2.421638742837782, -0.02910207191286081, 2.0685257700621205, 1.5517931027048162], [0.6299110230284048, -1.2067977417344766, -1.3116628687477672, -2.0905629379711166, -0.32998541843294193, 1.8464060782205653, -0.45038227560404887], [-0.7665819353096028, -1.0393133004705655, 1.322218198802843, -2.0935060303990145, 0.33048455105753755, 1.8427947370070838, 1.746254150224718]]) if __name__ == '__main__': # Lirui: Replacing setup code # take a look at test_realworld for execution in ycb if necessary args, parser = parse_args() print('Called with args:') print(args) # create robot rospy.init_node("gaddpg") from OMG.ycb_render.robotPose import robot_pykdl robot = robot_pykdl.robot_kinematics(None, data_path='../../../') ############################# DEFINE RENDERER ''' from OMG.ycb_render.ycb_renderer import YCBRenderer width, height = 640, 480 renderer = YCBRenderer(width=width, height=height, offset=False) renderer.set_projection_matrix(width, height, width * 0.8, width * 0.8, width / 2, height / 2, 0.1, 6) renderer.set_camera_default() models = ["link1", "link2", "link3", "link4", "link5", "link6", "link7", "hand", "finger", "finger"] obj_paths = ["data/robots/{}.DAE".format(item) for item in models] renderer.load_objects(obj_paths) ''' V = [ [-0.9351, 0.3518, 0.0428, 0.3037], [0.2065, 0.639, -0.741, 0.132], [-0.2881, -0.684, -0.6702, 1.8803], [0.0, 0.0, 0.0, 1.0], ] CAGE_POINT_THRESHOLD = 25 ############################# SETUP MODEL root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..") torch.manual_seed(args.seed) np.random.seed(args.seed) torch.backends.cudnn.benchmark = True torch.backends.cudnn.deterministic = True net_dict, output_time = setup() CONFIG = cfg.RL_TRAIN LOCAL_TEST = False # not using actual robot # Args ## updated if GA_DDPG_ONLY: cfg.RL_MAX_STEP = 20 else: cfg.RL_MAX_STEP = 50 CONFIG.uniform_num_pts = 4096 CONFIG.output_time = output_time CONFIG.off_policy = True POLICY = 'DDPG' if CONFIG.RL else 'BC' CONFIG.index_file = 'ycb_large.json' # The default config? cfg.ROS_CAMERA = 'D415' cfg.SCALES_BASE = [1.0] # Metrics input_dim = CONFIG.feature_input_dim cnt = 0. 
object_performance = {} model_output_dir = os.path.join(cfg.OUTPUT_DIR, output_time) pretrained_path = model_output_dir # graspnet graspnet_cfg = get_graspnet_config(parser) graspnet_cfg = joint_config( graspnet_cfg.vae_checkpoint_folder, graspnet_cfg.evaluator_checkpoint_folder, ) graspnet_cfg['threshold'] = 0.8 graspnet_cfg['sample_based_improvement'] = False graspnet_cfg['num_refine_steps'] = 5 # 20 graspnet_cfg['num_samples'] = 200 config = tensorflow.compat.v1.ConfigProto() config.gpu_options.allow_growth = True config.allow_soft_placement = True g1 = tensorflow.compat.v1.Graph() with g1.as_default(): sess = tensorflow.compat.v1.Session(config=config) with sess.as_default(): grasp_estimator = GraspEstimator(graspnet_cfg) grasp_estimator.build_network() grasp_estimator.load_weights(sess) if CONTACT_GRASPNET: graspnet_cfg_contact = get_graspnet_config_contact() global_config = config_utils.load_config(graspnet_cfg_contact.ckpt_dir, batch_size=graspnet_cfg_contact.forward_passes, arg_configs=graspnet_cfg_contact.arg_configs) # Create a session g2 = tensorflow.compat.v1.Graph() config = tensorflow.compat.v1.ConfigProto() config.gpu_options.allow_growth = True config.allow_soft_placement = True with g2.as_default(): sess_contact = tensorflow.compat.v1.Session(config=config) with sess_contact.as_default(): grasp_estimator_contact = GraspEstimatorContact(global_config) grasp_estimator_contact.build_network() saver = tensorflow.compat.v1.train.Saver(save_relative_paths=True) grasp_estimator_contact.load_weights(sess_contact, saver, graspnet_cfg_contact.ckpt_dir, mode='test') else: grasp_estimator_contact = None # GA-DDPG action_space = PandaTaskSpace6D() agent = globals()[POLICY](input_dim, action_space, CONFIG) # 138 agent.setup_feature_extractor(net_dict, args.test) agent.load_model(pretrained_path, surfix=args.model_surfix, set_init_step=True) ############################# DEFINE ROS INTERFACE listener = ImageListener(agent, grasp_estimator, grasp_estimator_contact) while not rospy.is_shutdown(): listener.run_network()
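# --- Illustrative sketch (hypothetical helper names) ---
# The preview and view-simulation code above relies on SE(3) helpers such as
# se3_inverse and se3_transform_pc that are imported from elsewhere. The
# *_sketch functions below are stand-ins showing what those helpers are
# assumed to compute.
import numpy as np

def se3_inverse_sketch(T):
    # Invert a 4x4 rigid transform [R t; 0 1] as [R^T  -R^T t; 0 1],
    # avoiding a general-purpose matrix inverse.
    R, t = T[:3, :3], T[:3, 3]
    T_inv = np.eye(4)
    T_inv[:3, :3] = R.T
    T_inv[:3, 3] = -R.T.dot(t)
    return T_inv

def se3_transform_pc_sketch(T, points):
    # Apply a 4x4 transform to a (3+, N) point cloud; rows beyond xyz
    # (e.g., a target-mask channel) pass through unchanged.
    out = points.copy()
    out[:3] = T[:3, :3].dot(points[:3]) + T[:3, [3]]
    return out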
utils.py
import itertools import re import subprocess from threading import Thread from six.moves import queue def run_spark_subprocess(cmd, logger): """See https://bit.ly/2OpksJC for source of the subprocess stdout/stderr capture pattern in this function. """ # Spark sometimes logs in log4j format. In those cases, we detect and parse. # Example log line from Spark that this is intended to match: # 2019-03-27 16:00:19 INFO ContextHandler:781 - Started o.s.j.s.ServletContextHandler... log4j_regex = r'^(\d{4}\-\d{2}\-\d{2} \d{2}:\d{2}:\d{2}) ([A-Z]{3,5})(.*?)$' def reader(pipe, pipe_name, p, msg_queue): try: with pipe: while p.poll() is None: for line in pipe.readlines(): match = re.match(log4j_regex, line) if match: line = match.groups()[2] msg_queue.put((pipe_name, line)) finally: # Use None as sentinel for done state, detected by iter() below msg_queue.put(None) p = subprocess.Popen( ' '.join(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0, universal_newlines=True, shell=True, ) q = queue.Queue() Thread(target=reader, args=[p.stdout, 'stdout', p, q]).start() Thread(target=reader, args=[p.stderr, 'stderr', p, q]).start() for _ in range(2): # There will be two None sentinels, one for each stream for pipe_name, line in iter(q.get, None): if pipe_name == 'stdout': logger.info(line) elif pipe_name == 'stderr': logger.error(line) p.wait() return p.returncode def flatten_dict(d): def _flatten_dict(d, result, key_path=None): '''Walks an arbitrarily nested dictionary and collects dot-notation (key, value) tuples. {'foo': {'bar': 3, 'baz': 1}, 'other': {'key': 1}} => [('foo.bar', 3), ('foo.baz', 1), ('other.key', 1)] ''' for k, v in d.items(): new_key_path = (key_path or []) + [k] if isinstance(v, dict): _flatten_dict(v, result, new_key_path) else: result.append(('.'.join(new_key_path), v)) result = [] _flatten_dict(d, result) return result def parse_spark_config(spark_conf): '''For each key-value pair in the Spark conf, we need to pass it to the CLI in the format: --conf "key=value" ''' spark_conf_list = flatten_dict(spark_conf) return list( itertools.chain.from_iterable([('--conf', '{}={}'.format(*c)) for c in spark_conf_list]) )
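# --- Illustrative usage sketch (hypothetical values) ---
# How the helpers above compose: flatten_dict turns a nested Spark conf into
# dot-notation pairs, and parse_spark_config renders them as spark-submit
# "--conf key=value" CLI arguments. Pair order follows dict iteration order,
# so it may vary on older Python versions.
if __name__ == '__main__':
    conf = {'spark': {'executor': {'memory': '4g'}, 'app': {'name': 'demo'}}}
    print(flatten_dict(conf))
    # [('spark.executor.memory', '4g'), ('spark.app.name', 'demo')]
    print(parse_spark_config(conf))
    # ['--conf', 'spark.executor.memory=4g', '--conf', 'spark.app.name=demo']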
_a4c_create.py
from cloudify import ctx from cloudify import utils from cloudify.exceptions import NonRecoverableError from StringIO import StringIO import base64 import os import platform import re import subprocess import sys import time import threading import platform import json def convert_env_value_to_string(envDict): for key, value in envDict.items(): envDict[str(key)] = str(envDict.pop(key)) def get_attribute_user(ctx): if get_attribute_from_top_host(ctx, 'user'): return get_attribute_from_top_host(ctx, 'user') if get_attribute(ctx, 'cloudify_agent'): return get_attribute(ctx, 'cloudify_agent').get('user', None) if get_attribute(ctx, 'agent_config'): return get_attribute(ctx, 'agent_config').get('user', None) return None def get_attribute_key(ctx): if get_attribute_from_top_host(ctx, 'key'): return get_attribute_from_top_host(ctx, 'key') if get_attribute(ctx, 'cloudify_agent'): return get_attribute(ctx, 'cloudify_agent').get('key', None) if get_attribute(ctx, 'agent_config'): return get_attribute(ctx, 'agent_config').get('key', None) return None def get_host(entity): if entity.instance.relationships: for relationship in entity.instance.relationships: if 'cloudify.relationships.contained_in' in relationship.type_hierarchy: return relationship.target return None def has_attribute_mapping(entity, attribute_name): # ctx.logger.debug('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name,json.dumps(entity.node.properties))) mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None) if mapping_configuration is not None: if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name: return False else: return True return False def process_attribute_mapping(entity, attribute_name, data_retriever_function): # This is where attribute mapping is defined in the cloudify type mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name] # ctx.logger.debug('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, json.dumps(mapping_configuration))) # If the mapping configuration exist and if it concerns SELF then just get attribute of the mapped attribute name # Else if it concerns TARGET then follow the relationship and retrieved the mapped attribute name from the TARGET if mapping_configuration['parameters'][0] == 'SELF': return data_retriever_function(entity, mapping_configuration['parameters'][1]) elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships: for relationship in entity.instance.relationships: if mapping_configuration['parameters'][1] in relationship.type_hierarchy: return data_retriever_function(relationship.target, mapping_configuration['parameters'][2]) return "" def get_nested_attribute(entity, attribute_names): deep_properties = get_attribute(entity, attribute_names[0]) attribute_names_iter = iter(attribute_names) next(attribute_names_iter) for attribute_name in attribute_names_iter: if deep_properties is None: return "" else: deep_properties = deep_properties.get(attribute_name, None) return deep_properties def _all_instances_get_nested_attribute(entity, attribute_names): return None def get_attribute(entity, attribute_name): if has_attribute_mapping(entity, attribute_name): # First check if any mapping exist for attribute mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute) # ctx.logger.debug('Mapping exists for attribute {0} with value {1}'.format(attribute_name, json.dumps(mapped_value))) return 
mapped_value # No mapping exist, try to get directly the attribute from the entity attribute_value = entity.instance.runtime_properties.get(attribute_name, None) if attribute_value is not None: # ctx.logger.debug('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, json.dumps(attribute_value), entity.node.id)) return attribute_value # Attribute retrieval fails, fall back to property property_value = entity.node.properties.get(attribute_name, None) if property_value is not None: return property_value # Property retrieval fails, fall back to host instance host = get_host(entity) if host is not None: # ctx.logger.debug('Attribute not found {0} go up to the parent node {1}'.format(attribute_name, host.node.id)) return get_attribute(host, attribute_name) # Nothing is found return "" def get_target_capa_or_node_attribute(entity, capability_attribute_name, attribute_name): attribute_value = entity.instance.runtime_properties.get(capability_attribute_name, None) if attribute_value is not None: # ctx.logger.debug('Found the capability attribute {0} with value {1} on the node {2}'.format(attribute_name, json.dumps(attribute_value), entity.node.id)) return attribute_value return get_attribute(entity, attribute_name) def _all_instances_get_attribute(entity, attribute_name): result_map = {} # get all instances data using cfy rest client # we have to get the node using the rest client with node_instance.node_id # then we will have the relationships node = client.nodes.get(ctx.deployment.id, entity.node.id) all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id) for node_instance in all_node_instances: prop_value = __recursively_get_instance_data(node, node_instance, attribute_name) if prop_value is not None: # ctx.logger.debug('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, json.dumps(prop_value), entity.node.id, # node_instance.id)) result_map[node_instance.id + '_'] = prop_value return result_map # Same as previous method but will first try to find the attribute on the capability. 
def _all_instances_get_target_capa_or_node_attribute(entity, capability_attribute_name, attribute_name): result_map = {} node = client.nodes.get(ctx.deployment.id, entity.node.id) all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id) for node_instance in all_node_instances: attribute_value = node_instance.runtime_properties.get(capability_attribute_name, None) if attribute_value is not None: prop_value = attribute_value else: prop_value = __recursively_get_instance_data(node, node_instance, attribute_name) if prop_value is not None: # ctx.logger.debug('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, json.dumps(prop_value), entity.node.id, # node_instance.id)) result_map[node_instance.id + '_'] = prop_value return result_map def get_property(entity, property_name): # Try to get the property value on the node property_value = entity.node.properties.get(property_name, None) if property_value is not None: # ctx.logger.debug('Found the property {0} with value {1} on the node {2}'.format(property_name, json.dumps(property_value), entity.node.id)) return property_value # No property found on the node, fall back to the host host = get_host(entity) if host is not None: # ctx.logger.debug('Property not found {0} go up to the parent node {1}'.format(property_name, host.node.id)) return get_property(host, property_name) return "" def get_instance_list(node_id): result = '' all_node_instances = client.node_instances.list(ctx.deployment.id, node_id) for node_instance in all_node_instances: if len(result) > 0: result += ',' result += node_instance.id return result def get_host_node_name(instance): for relationship in instance.relationships: if 'cloudify.relationships.contained_in' in relationship.type_hierarchy: return relationship.target.node.id return None def __get_relationship(node, target_name, relationship_type): for relationship in node.relationships: if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'): return relationship return None def __has_attribute_mapping(node, attribute_name): # ctx.logger.debug('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, json.dumps(node.properties))) mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None) if mapping_configuration is not None: if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name: return False else: return True return False def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function): # This is where attribute mapping is defined in the cloudify type mapping_configuration = node.properties['_a4c_att_' + attribute_name] # ctx.logger.debug('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, json.dumps(mapping_configuration))) # If the mapping configuration exist and if it concerns SELF then just get attribute of the mapped attribute name # Else if it concerns TARGET then follow the relationship and retrieved the mapped attribute name from the TARGET if mapping_configuration['parameters'][0] == 'SELF': return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1]) elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships: for rel in node_instance.relationships: relationship = __get_relationship(node, rel.get('target_name'), rel.get('type')) if mapping_configuration['parameters'][1] in 
relationship.get('type_hierarchy'): target_instance = client.node_instances.get(rel.get('target_id')) target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id) return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2]) return None def __recursively_get_instance_data(node, node_instance, attribute_name): if __has_attribute_mapping(node, attribute_name): return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data) attribute_value = node_instance.runtime_properties.get(attribute_name, None) if attribute_value is not None: return attribute_value elif node_instance.relationships: for rel in node_instance.relationships: # on rel we have target_name, target_id (instanceId), type relationship = __get_relationship(node, rel.get('target_name'), rel.get('type')) if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'): parent_instance = client.node_instances.get(rel.get('target_id')) parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id) return __recursively_get_instance_data(parent_node, parent_instance, attribute_name) return None else: return None def get_public_or_private_ip(entity): public_ip = get_attribute(entity, 'public_ip_address') if not public_ip: return get_attribute(entity, 'ip_address') return public_ip def get_attribute_from_top_host(entity, attribute_name): host = get_host(entity) while host is not None: entity = host host = get_host(entity) return get_attribute(entity, attribute_name) from cloudify import utils from cloudify_rest_client import CloudifyClient from cloudify.state import ctx_parameters as inputs import os client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', cert=utils.get_local_rest_certificate(), token= utils.get_rest_token(), tenant= utils.get_tenant_name()) def download(child_rel_path, child_abs_path, download_dir): artifact_downloaded_path = ctx.download_resource(child_abs_path) new_file = os.path.join(download_dir, child_rel_path) new_file_dir = os.path.dirname(new_file) if not os.path.exists(new_file_dir): os.makedirs(new_file_dir) os.rename(artifact_downloaded_path, new_file) ctx.logger.debug('Downloaded artifact from path ' + child_abs_path + ', it\'s available now at ' + new_file) return new_file def download_artifacts(artifacts, download_dir): downloaded_artifacts = {} if not os.path.exists(download_dir): os.makedirs(download_dir) for artifact_name, artifact_ref in artifacts.items(): ctx.logger.debug('Download artifact ' + artifact_name) if isinstance(artifact_ref, basestring): downloaded_artifacts[artifact_name] = download(os.path.basename(artifact_ref), artifact_ref, download_dir) else: child_download_dir = os.path.join(download_dir, artifact_name) for child_path in artifact_ref: download(child_path['relative_path'], child_path['absolute_path'], child_download_dir) downloaded_artifacts[artifact_name] = child_download_dir return downloaded_artifacts from __future__ import unicode_literals import json try: import hcl has_hcl_parser = True except ImportError: has_hcl_parser = False import requests try: from urlparse import urljoin except ImportError: from urllib.parse import urljoin class VaultError(Exception): def __init__(self, message=None, errors=None): if errors: message = ', '.join(errors) self.errors = errors super(VaultError, self).__init__(message) class InvalidRequest(VaultError): pass class Unauthorized(VaultError): pass class 
Forbidden(VaultError): pass class InvalidPath(VaultError): pass class RateLimitExceeded(VaultError): pass class InternalServerError(VaultError): pass class VaultNotInitialized(VaultError): pass class VaultDown(VaultError): pass class UnexpectedError(VaultError): pass class HashiCorpVaultClient(object): def __init__(self, url='http://localhost:8200', token=None, cert=None, verify=True, timeout=30, proxies=None, allow_redirects=True, session=None): if not session: session = requests.Session() self.allow_redirects = allow_redirects self.session = session self.token = token self._url = url self._kwargs = { 'cert': cert, 'verify': verify, 'timeout': timeout, 'proxies': proxies, } def read(self, path, wrap_ttl=None): """ GET /<path> """ try: return self._get('/v1/{0}'.format(path), wrap_ttl=wrap_ttl).json() except InvalidPath: return None def list(self, path): """ GET /<path>?list=true """ try: payload = { 'list': True } return self._get('/v1/{}'.format(path), params=payload).json() except InvalidPath: return None def write(self, path, wrap_ttl=None, **kwargs): """ PUT /<path> """ response = self._put('/v1/{0}'.format(path), json=kwargs, wrap_ttl=wrap_ttl) if response.status_code == 200: return response.json() def delete(self, path): """ DELETE /<path> """ self._delete('/v1/{0}'.format(path)) def unwrap(self, token): """ GET /cubbyhole/response X-Vault-Token: <token> """ path = "cubbyhole/response" _token = self.token try: self.token = token return json.loads(self.read(path)['data']['response']) finally: self.token = _token def is_initialized(self): """ GET /sys/init """ return self._get('/v1/sys/init').json()['initialized'] def initialize(self, secret_shares=5, secret_threshold=3, pgp_keys=None): """ PUT /sys/init """ params = { 'secret_shares': secret_shares, 'secret_threshold': secret_threshold, } if pgp_keys: if len(pgp_keys) != secret_shares: raise ValueError('Length of pgp_keys must equal secret shares') params['pgp_keys'] = pgp_keys return self._put('/v1/sys/init', json=params).json() @property def seal_status(self): """ GET /sys/seal-status """ return self._get('/v1/sys/seal-status').json() def is_sealed(self): return self.seal_status['sealed'] def seal(self): """ PUT /sys/seal """ self._put('/v1/sys/seal') def unseal_reset(self): """ PUT /sys/unseal """ params = { 'reset': True, } return self._put('/v1/sys/unseal', json=params).json() def unseal(self, key): """ PUT /sys/unseal """ params = { 'key': key, } return self._put('/v1/sys/unseal', json=params).json() def unseal_multi(self, keys): result = None for key in keys: result = self.unseal(key) if not result['sealed']: break return result @property def key_status(self): """ GET /sys/key-status """ return self._get('/v1/sys/key-status').json() def rotate(self): """ PUT /sys/rotate """ self._put('/v1/sys/rotate') @property def rekey_status(self): """ GET /sys/rekey/init """ return self._get('/v1/sys/rekey/init').json() def start_rekey(self, secret_shares=5, secret_threshold=3, pgp_keys=None, backup=False): """ PUT /sys/rekey/init """ params = { 'secret_shares': secret_shares, 'secret_threshold': secret_threshold, } if pgp_keys: if len(pgp_keys) != secret_shares: raise ValueError('Length of pgp_keys must equal secret shares') params['pgp_keys'] = pgp_keys params['backup'] = backup resp = self._put('/v1/sys/rekey/init', json=params) if resp.text: return resp.json() def cancel_rekey(self): """ DELETE /sys/rekey/init """ self._delete('/v1/sys/rekey/init') def rekey(self, key, nonce=None): """ PUT /sys/rekey/update """ params = { 'key': key, } 
if nonce: params['nonce'] = nonce return self._put('/v1/sys/rekey/update', json=params).json() def rekey_multi(self, keys, nonce=None): result = None for key in keys: result = self.rekey(key, nonce=nonce) if 'complete' in result and result['complete']: break return result def get_backed_up_keys(self): """ GET /sys/rekey/backup """ return self._get('/v1/sys/rekey/backup').json() @property def ha_status(self): """ GET /sys/leader """ return self._get('/v1/sys/leader').json() def renew_secret(self, lease_id, increment=None): """ PUT /sys/leases/renew """ params = { 'lease_id': lease_id, 'increment': increment, } return self._put('/v1/sys/leases/renew', json=params).json() def revoke_secret(self, lease_id): """ PUT /sys/revoke/<lease id> """ self._put('/v1/sys/revoke/{0}'.format(lease_id)) def revoke_secret_prefix(self, path_prefix): """ PUT /sys/revoke-prefix/<path prefix> """ self._put('/v1/sys/revoke-prefix/{0}'.format(path_prefix)) def revoke_self_token(self): """ PUT /auth/token/revoke-self """ self._put('/v1/auth/token/revoke-self') def list_secret_backends(self): """ GET /sys/mounts """ return self._get('/v1/sys/mounts').json() def enable_secret_backend(self, backend_type, description=None, mount_point=None, config=None): """ POST /sys/auth/<mount point> """ if not mount_point: mount_point = backend_type params = { 'type': backend_type, 'description': description, 'config': config, } self._post('/v1/sys/mounts/{0}'.format(mount_point), json=params) def tune_secret_backend(self, backend_type, mount_point=None, default_lease_ttl=None, max_lease_ttl=None): """ POST /sys/mounts/<mount point>/tune """ if not mount_point: mount_point = backend_type params = { 'default_lease_ttl': default_lease_ttl, 'max_lease_ttl': max_lease_ttl } self._post('/v1/sys/mounts/{0}/tune'.format(mount_point), json=params) def get_secret_backend_tuning(self, backend_type, mount_point=None): """ GET /sys/mounts/<mount point>/tune """ if not mount_point: mount_point = backend_type return self._get('/v1/sys/mounts/{0}/tune'.format(mount_point)).json() def disable_secret_backend(self, mount_point): """ DELETE /sys/mounts/<mount point> """ self._delete('/v1/sys/mounts/{0}'.format(mount_point)) def remount_secret_backend(self, from_mount_point, to_mount_point): """ POST /sys/remount """ params = { 'from': from_mount_point, 'to': to_mount_point, } self._post('/v1/sys/remount', json=params) def list_policies(self): """ GET /sys/policy """ return self._get('/v1/sys/policy').json()['policies'] def get_policy(self, name, parse=False): """ GET /sys/policy/<name> """ try: policy = self._get('/v1/sys/policy/{0}'.format(name)).json()['rules'] if parse: if not has_hcl_parser: raise ImportError('pyhcl is required for policy parsing') policy = hcl.loads(policy) return policy except InvalidPath: return None def set_policy(self, name, rules): """ PUT /sys/policy/<name> """ if isinstance(rules, dict): rules = json.dumps(rules) params = { 'rules': rules, } self._put('/v1/sys/policy/{0}'.format(name), json=params) def delete_policy(self, name): """ DELETE /sys/policy/<name> """ self._delete('/v1/sys/policy/{0}'.format(name)) def list_audit_backends(self): """ GET /sys/audit """ return self._get('/v1/sys/audit').json() def enable_audit_backend(self, backend_type, description=None, options=None, name=None): """ POST /sys/audit/<name> """ if not name: name = backend_type params = { 'type': backend_type, 'description': description, 'options': options, } self._post('/v1/sys/audit/{0}'.format(name), json=params) def disable_audit_backend(self, 
name): """ DELETE /sys/audit/<name> """ self._delete('/v1/sys/audit/{0}'.format(name)) def audit_hash(self, name, input): """ POST /sys/audit-hash """ params = { 'input': input, } return self._post('/v1/sys/audit-hash/{0}'.format(name), json=params).json() def create_token(self, role=None, token_id=None, policies=None, meta=None, no_parent=False, lease=None, display_name=None, num_uses=None, no_default_policy=False, ttl=None, orphan=False, wrap_ttl=None, renewable=None, explicit_max_ttl=None): """ POST /auth/token/create POST /auth/token/create/<role> POST /auth/token/create-orphan """ params = { 'id': token_id, 'policies': policies, 'meta': meta, 'no_parent': no_parent, 'display_name': display_name, 'num_uses': num_uses, 'no_default_policy': no_default_policy, 'renewable': renewable } if lease: params['lease'] = lease else: params['ttl'] = ttl params['explicit_max_ttl'] = explicit_max_ttl if explicit_max_ttl: params['explicit_max_ttl'] = explicit_max_ttl if orphan: return self._post('/v1/auth/token/create-orphan', json=params, wrap_ttl=wrap_ttl).json() elif role: return self._post('/v1/auth/token/create/{0}'.format(role), json=params, wrap_ttl=wrap_ttl).json() else: return self._post('/v1/auth/token/create', json=params, wrap_ttl=wrap_ttl).json() def lookup_token(self, token=None, accessor=False, wrap_ttl=None): """ GET /auth/token/lookup/<token> GET /auth/token/lookup-accessor/<token-accessor> GET /auth/token/lookup-self """ if token: if accessor: path = '/v1/auth/token/lookup-accessor/{0}'.format(token) return self._post(path, wrap_ttl=wrap_ttl).json() else: return self._get('/v1/auth/token/lookup/{0}'.format(token)).json() else: return self._get('/v1/auth/token/lookup-self', wrap_ttl=wrap_ttl).json() def revoke_token(self, token, orphan=False, accessor=False): """ POST /auth/token/revoke/<token> POST /auth/token/revoke-orphan/<token> POST /auth/token/revoke-accessor/<token-accessor> """ if accessor and orphan: msg = "revoke_token does not support 'orphan' and 'accessor' flags together" raise InvalidRequest(msg) elif accessor: self._post('/v1/auth/token/revoke-accessor/{0}'.format(token)) elif orphan: self._post('/v1/auth/token/revoke-orphan/{0}'.format(token)) else: self._post('/v1/auth/token/revoke/{0}'.format(token)) def revoke_token_prefix(self, prefix): """ POST /auth/token/revoke-prefix/<prefix> """ self._post('/v1/auth/token/revoke-prefix/{0}'.format(prefix)) def renew_token(self, token=None, increment=None, wrap_ttl=None): """ POST /auth/token/renew/<token> POST /auth/token/renew-self """ params = { 'increment': increment, } if token: path = '/v1/auth/token/renew/{0}'.format(token) return self._post(path, json=params, wrap_ttl=wrap_ttl).json() else: return self._post('/v1/auth/token/renew-self', json=params, wrap_ttl=wrap_ttl).json() def create_token_role(self, role, allowed_policies=None, orphan=None, period=None, renewable=None, path_suffix=None, explicit_max_ttl=None): """ POST /auth/token/roles/<role> """ params = { 'allowed_policies': allowed_policies, 'orphan': orphan, 'period': period, 'renewable': renewable, 'path_suffix': path_suffix, 'explicit_max_ttl': explicit_max_ttl } return self._post('/v1/auth/token/roles/{0}'.format(role), json=params) def token_role(self, role): """ Returns the named token role. """ return self.read('auth/token/roles/{0}'.format(role)) def delete_token_role(self, role): """ Deletes the named token role. 
""" return self.delete('auth/token/roles/{0}'.format(role)) def list_token_roles(self): """ GET /auth/token/roles?list=true """ return self.list('auth/token/roles') def logout(self, revoke_token=False): """ Clears the token used for authentication, optionally revoking it before doing so """ if revoke_token: self.revoke_self_token() self.token = None def is_authenticated(self): """ Helper method which returns the authentication status of the client """ if not self.token: return False try: self.lookup_token() return True except Forbidden: return False except InvalidPath: return False except InvalidRequest: return False def auth_app_id(self, app_id, user_id, mount_point='app-id', use_token=True): """ POST /auth/<mount point>/login """ params = { 'app_id': app_id, 'user_id': user_id, } return self.auth('/v1/auth/{0}/login'.format(mount_point), json=params, use_token=use_token) def auth_tls(self, mount_point='cert', use_token=True): """ POST /auth/<mount point>/login """ return self.auth('/v1/auth/{0}/login'.format(mount_point), use_token=use_token) def auth_userpass(self, username, password, mount_point='userpass', use_token=True, **kwargs): """ POST /auth/<mount point>/login/<username> """ params = { 'password': password, } params.update(kwargs) return self.auth('/v1/auth/{0}/login/{1}'.format(mount_point, username), json=params, use_token=use_token) def auth_ec2(self, pkcs7, nonce=None, role=None, use_token=True): """ POST /auth/aws-ec2/login """ params = {'pkcs7': pkcs7} if nonce: params['nonce'] = nonce if role: params['role'] = role return self.auth('/v1/auth/aws-ec2/login', json=params, use_token=use_token).json() def create_userpass(self, username, password, policies, mount_point='userpass', **kwargs): """ POST /auth/<mount point>/users/<username> """ # Users can have more than 1 policy. It is easier for the user to pass in the # policies as a list so if they do, we need to convert to a , delimited string. if isinstance(policies, (list, set, tuple)): policies = ','.join(policies) params = { 'password': password, 'policies': policies } params.update(kwargs) return self._post('/v1/auth/{}/users/{}'.format(mount_point, username), json=params) def delete_userpass(self, username, mount_point='userpass'): """ DELETE /auth/<mount point>/users/<username> """ return self._delete('/v1/auth/{}/users/{}'.format(mount_point, username)) def create_app_id(self, app_id, policies, display_name=None, mount_point='app-id', **kwargs): """ POST /auth/<mount point>/map/app-id/<app_id> """ # app-id can have more than 1 policy. It is easier for the user to pass in the # policies as a list so if they do, we need to convert to a , delimited string. if isinstance(policies, (list, set, tuple)): policies = ','.join(policies) params = { 'value': policies } # Only use the display_name if it has a value. 
Made it a named param for user # convienence instead of leaving it as part of the kwargs if display_name: params['display_name'] = display_name params.update(kwargs) return self._post('/v1/auth/{}/map/app-id/{}'.format(mount_point, app_id), json=params) def get_app_id(self, app_id, mount_point='app-id', wrap_ttl=None): """ GET /auth/<mount_point>/map/app-id/<app_id> """ path = '/v1/auth/{0}/map/app-id/{1}'.format(mount_point, app_id) return self._get(path, wrap_ttl=wrap_ttl).json() def delete_app_id(self, app_id, mount_point='app-id'): """ DELETE /auth/<mount_point>/map/app-id/<app_id> """ return self._delete('/v1/auth/{0}/map/app-id/{1}'.format(mount_point, app_id)) def create_user_id(self, user_id, app_id, cidr_block=None, mount_point='app-id', **kwargs): """ POST /auth/<mount point>/map/user-id/<user_id> """ # user-id can be associated to more than 1 app-id (aka policy). It is easier for the user to # pass in the policies as a list so if they do, we need to convert to a , delimited string. if isinstance(app_id, (list, set, tuple)): app_id = ','.join(app_id) params = { 'value': app_id } # Only use the cidr_block if it has a value. Made it a named param for user # convienence instead of leaving it as part of the kwargs if cidr_block: params['cidr_block'] = cidr_block params.update(kwargs) return self._post('/v1/auth/{}/map/user-id/{}'.format(mount_point, user_id), json=params) def get_user_id(self, user_id, mount_point='app-id', wrap_ttl=None): """ GET /auth/<mount_point>/map/user-id/<user_id> """ path = '/v1/auth/{0}/map/user-id/{1}'.format(mount_point, user_id) return self._get(path, wrap_ttl=wrap_ttl).json() def delete_user_id(self, user_id, mount_point='app-id'): """ DELETE /auth/<mount_point>/map/user-id/<user_id> """ return self._delete('/v1/auth/{0}/map/user-id/{1}'.format(mount_point, user_id)) def create_vault_ec2_client_configuration(self, access_key, secret_key, endpoint=None): """ POST /auth/aws-ec2/config/client """ params = { 'access_key': access_key, 'secret_key': secret_key } if endpoint is not None: params['endpoint'] = endpoint return self._post('/v1/auth/aws-ec2/config/client', json=params) def get_vault_ec2_client_configuration(self): """ GET /auth/aws-ec2/config/client """ return self._get('/v1/auth/aws-ec2/config/client').json() def delete_vault_ec2_client_configuration(self): """ DELETE /auth/aws-ec2/config/client """ return self._delete('/v1/auth/aws-ec2/config/client') def create_vault_ec2_certificate_configuration(self, cert_name, aws_public_cert): """ POST /auth/aws-ec2/config/certificate/<cert_name> """ params = { 'cert_name': cert_name, 'aws_public_cert': aws_public_cert } return self._post('/v1/auth/aws-ec2/config/certificate/{0}'.format(cert_name), json=params) def get_vault_ec2_certificate_configuration(self, cert_name): """ GET /auth/aws-ec2/config/certificate/<cert_name> """ return self._get('/v1/auth/aws-ec2/config/certificate/{0}'.format(cert_name)).json() def list_vault_ec2_certificate_configurations(self): """ GET /auth/aws-ec2/config/certificates?list=true """ params = {'list': True} return self._get('/v1/auth/aws-ec2/config/certificates', params=params).json() def create_ec2_role(self, role, bound_ami_id=None, bound_account_id=None, bound_iam_role_arn=None, bound_iam_instance_profile_arn=None, role_tag=None, max_ttl=None, policies=None, allow_instance_migration=False, disallow_reauthentication=False, **kwargs): """ POST /auth/aws-ec2/role/<role> """ params = { 'role': role, 'disallow_reauthentication': disallow_reauthentication, 
'allow_instance_migration': allow_instance_migration } if bound_ami_id is not None: params['bound_ami_id'] = bound_ami_id if bound_account_id is not None: params['bound_account_id'] = bound_account_id if bound_iam_role_arn is not None: params['bound_iam_role_arn'] = bound_iam_role_arn if bound_iam_instance_profile_arn is not None: params['bound_iam_instance_profile_arn'] = bound_iam_instance_profile_arn if role_tag is not None: params['role_tag'] = role_tag if max_ttl is not None: params['max_ttl'] = max_ttl if policies is not None: params['policies'] = policies params.update(**kwargs) return self._post('/v1/auth/aws-ec2/role/{0}'.format(role), json=params) def get_ec2_role(self, role): """ GET /auth/aws-ec2/role/<role> """ return self._get('/v1/auth/aws-ec2/role/{0}'.format(role)).json() def delete_ec2_role(self, role): """ DELETE /auth/aws-ec2/role/<role> """ return self._delete('/v1/auth/aws-ec2/role/{0}'.format(role)) def list_ec2_roles(self): """ GET /auth/aws-ec2/roles?list=true """ try: return self._get('/v1/auth/aws-ec2/roles', params={'list': True}).json() except InvalidPath: return None def create_ec2_role_tag(self, role, policies=None, max_ttl=None, instance_id=None, disallow_reauthentication=False, allow_instance_migration=False): """ POST /auth/aws-ec2/role/<role>/tag """ params = { 'role': role, 'disallow_reauthentication': disallow_reauthentication, 'allow_instance_migration': allow_instance_migration } if max_ttl is not None: params['max_ttl'] = max_ttl if policies is not None: params['policies'] = policies if instance_id is not None: params['instance_id'] = instance_id return self._post('/v1/auth/aws-ec2/role/{0}/tag'.format(role), json=params).json() def auth_ldap(self, username, password, mount_point='ldap', use_token=True, **kwargs): """ POST /auth/<mount point>/login/<username> """ params = { 'password': password, } params.update(kwargs) return self.auth('/v1/auth/{0}/login/{1}'.format(mount_point, username), json=params, use_token=use_token) def auth_github(self, token, mount_point='github', use_token=True): """ POST /auth/<mount point>/login """ params = { 'token': token, } return self.auth('/v1/auth/{0}/login'.format(mount_point), json=params, use_token=use_token) def auth(self, url, use_token=True, **kwargs): response = self._post(url, **kwargs).json() if use_token: self.token = response['auth']['client_token'] return response def list_auth_backends(self): """ GET /sys/auth """ return self._get('/v1/sys/auth').json() def enable_auth_backend(self, backend_type, description=None, mount_point=None): """ POST /sys/auth/<mount point> """ if not mount_point: mount_point = backend_type params = { 'type': backend_type, 'description': description, } self._post('/v1/sys/auth/{0}'.format(mount_point), json=params) def disable_auth_backend(self, mount_point): """ DELETE /sys/auth/<mount point> """ self._delete('/v1/sys/auth/{0}'.format(mount_point)) def create_role(self, role_name, **kwargs): """ POST /auth/approle/role/<role name> """ self._post('/v1/auth/approle/role/{0}'.format(role_name), json=kwargs) def list_roles(self): """ GET /auth/approle/role """ return self._get('/v1/auth/approle/role?list=true').json() def get_role_id(self, role_name): """ GET /auth/approle/role/<role name>/role-id """ url = '/v1/auth/approle/role/{0}/role-id'.format(role_name) return self._get(url).json()['data']['role_id'] def set_role_id(self, role_name, role_id): """ POST /auth/approle/role/<role name>/role-id """ url = '/v1/auth/approle/role/{0}/role-id'.format(role_name) params = { 
'role_id': role_id } self._post(url, json=params) def get_role(self, role_name): """ GET /auth/approle/role/<role name> """ return self._get('/v1/auth/approle/role/{0}'.format(role_name)).json() def create_role_secret_id(self, role_name, meta=None): """ POST /auth/approle/role/<role name>/secret-id """ url = '/v1/auth/approle/role/{0}/secret-id'.format(role_name) params = {} if meta is not None: params['metadata'] = json.dumps(meta) return self._post(url, json=params).json() def get_role_secret_id(self, role_name, secret_id): """ POST /auth/approle/role/<role name>/secret-id/lookup """ url = '/v1/auth/approle/role/{0}/secret-id/lookup'.format(role_name) params = { 'secret_id': secret_id } return self._post(url, json=params).json() def list_role_secrets(self, role_name): """ GET /auth/approle/role/<role name>/secret-id?list=true """ url = '/v1/auth/approle/role/{0}/secret-id?list=true'.format(role_name) return self._get(url).json() def get_role_secret_id_accessor(self, role_name, secret_id_accessor): """ GET /auth/approle/role/<role name>/secret-id-accessor/<secret_id_accessor> """ url = '/v1/auth/approle/role/{0}/secret-id-accessor/{1}'.format(role_name, secret_id_accessor) return self._get(url).json() def delete_role_secret_id(self, role_name, secret_id): """ POST /auth/approle/role/<role name>/secret-id/destroy """ url = '/v1/auth/approle/role/{0}/secret-id/destroy'.format(role_name) params = { 'secret_id': secret_id } self._post(url, json=params) def delete_role_secret_id_accessor(self, role_name, secret_id_accessor): """ DELETE /auth/approle/role/<role name>/secret-id/<secret_id_accessor> """ url = '/v1/auth/approle/role/{0}/secret-id-accessor/{1}'.format(role_name, secret_id_accessor) self._delete(url) def create_role_custom_secret_id(self, role_name, secret_id, meta=None): """ POST /auth/approle/role/<role name>/custom-secret-id """ url = '/v1/auth/approle/role/{0}/custom-secret-id'.format(role_name) params = { 'secret_id': secret_id } if meta is not None: params['meta'] = meta return self._post(url, json=params).json() def auth_approle(self, role_id, secret_id=None, mount_point='approle', use_token=True): """ POST /auth/approle/login """ params = { 'role_id': role_id } if secret_id is not None: params['secret_id'] = secret_id return self.auth('/v1/auth/{0}/login'.format(mount_point), json=params, use_token=use_token) def close(self): """ Close the underlying Requests session """ self.session.close() def _get(self, url, **kwargs): return self.__request('get', url, **kwargs) def _post(self, url, **kwargs): return self.__request('post', url, **kwargs) def _put(self, url, **kwargs): return self.__request('put', url, **kwargs) def _delete(self, url, **kwargs): return self.__request('delete', url, **kwargs) def __request(self, method, url, headers=None, **kwargs): url = urljoin(self._url, url) if not headers: headers = {} if self.token: headers['X-Vault-Token'] = self.token wrap_ttl = kwargs.pop('wrap_ttl', None) if wrap_ttl: headers['X-Vault-Wrap-TTL'] = str(wrap_ttl) _kwargs = self._kwargs.copy() _kwargs.update(kwargs) response = self.session.request(method, url, headers=headers, allow_redirects=False, **_kwargs) # NOTE(ianunruh): workaround for https://github.com/ianunruh/hvac/issues/51 while response.is_redirect and self.allow_redirects: url = urljoin(self._url, response.headers['Location']) response = self.session.request(method, url, headers=headers, allow_redirects=False, **_kwargs) if response.status_code >= 400 and response.status_code < 600: text = errors = None if 
response.headers.get('Content-Type') == 'application/json': errors = response.json().get('errors') if errors is None: text = response.text self.__raise_error(response.status_code, text, errors=errors) return response def __raise_error(self, status_code, message=None, errors=None): if status_code == 400: raise InvalidRequest(message, errors=errors) elif status_code == 401: raise Unauthorized(message, errors=errors) elif status_code == 403: raise Forbidden(message, errors=errors) elif status_code == 404: raise InvalidPath(message, errors=errors) elif status_code == 429: raise RateLimitExceeded(message, errors=errors) elif status_code == 500: raise InternalServerError(message, errors=errors) elif status_code == 501: raise VaultNotInitialized(message, errors=errors) elif status_code == 503: raise VaultDown(message, errors=errors) else: raise UnexpectedError(message) def connect_to_vault_by_token(url, token): return HashiCorpVaultClient(url=url, token=token) def connect_to_vault_by_ldap(url, user, password): client = HashiCorpVaultClient(url=url) client.auth_ldap(user, password) return client credentials = json.loads(client.secrets.get('vault-credentials-testGenerateLamp')) vault_client = connect_to_vault_by_ldap(url = 'https://localhost', user = credentials.user, password = credentials.password) def get_secret(secret_path): return vault_client.read(secret_path) env_map = {} env_map['NODE'] = ctx.node.id env_map['INSTANCE'] = ctx.instance.id env_map['INSTANCES'] = get_instance_list(ctx.node.id) env_map['HOST'] = get_host_node_name(ctx.instance) env_map['A4C_EXECUTION_HOST'] = get_attribute(ctx, 'ip_address') env_map['A4C_EXECUTION_USER'] = get_attribute_user(ctx) env_map['A4C_EXECUTION_KEY'] = get_attribute_key(ctx) env_map['SELF_db_password'] = get_secret('/my/password') env_map['SELF_user'] = '' env_map['SELF_CAPABILITIES_database_endpoint_protocol'] = r'tcp' env_map['SELF_name'] = r'wordpress' env_map['SELF_CAPABILITIES_database_endpoint_url_path'] = '' env_map['SELF_port'] = r'3306' env_map['SELF_password'] = '' env_map['SELF_bind_address'] = r'true' env_map['SELF_storage_path'] = r'/mountedStorage' env_map['SELF_CAPABILITIES_database_endpoint_secure'] = r'false' env_map['SELF_db_user'] = r'pass' env_map['SELF_CAPABILITIES_database_endpoint_initiator'] = r'source' env_map['SELF_CAPABILITIES_database_endpoint_network_name'] = r'PRIVATE' env_map['SELF_CAPABILITIES_database_endpoint_port'] = '' env_map['SELF_CAPABILITIES_database_endpoint_port_name'] = '' node_artifacts = { "configs": [ { "relative_path": "mysqld_charset.cnf", "absolute_path": "_a4c_artifact/Mysql/configs/configs/mysqld_charset.cnf" } ] } relationship_artifacts = { } artifacts = node_artifacts.copy() artifacts.update(relationship_artifacts) download_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'downloads') env_map.update(download_artifacts(artifacts, download_dir)) if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None: ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env'])) env_map.update(inputs['process']['env']) def convert_env_value_to_string(envDict): for key, value in envDict.items(): envDict[str(key)] = str(envDict.pop(key)) def parse_output(output): # by convention, the last output is the result of the operation last_output = None outputs = {} pattern = re.compile('EXPECTED_OUTPUT_(\w+)=(.*)') for line in output.splitlines(): match = pattern.match(line) if match is None: last_output = line else: output_name = 
match.group(1) output_value = match.group(2) outputs[output_name] = output_value return {'last_output': last_output, 'outputs': outputs} def execute(script_path, process, outputNames, command_prefix=None, cwd=None, raiseException=True): os.chmod(script_path, 0755) on_posix = 'posix' in sys.builtin_module_names env = os.environ.copy() process_env = process.get('env', {}) env.update(process_env) if outputNames is not None: env['EXPECTED_OUTPUTS'] = outputNames if platform.system() == 'Windows': wrapper_path = ctx.download_resource("scriptWrapper.bat") else: wrapper_path = ctx.download_resource("scriptWrapper.sh") os.chmod(wrapper_path, 0755) command = '{0} {1}'.format(wrapper_path, script_path) else: command = script_path if command_prefix is not None: command = "{0} {1}".format(command_prefix, command) ctx.logger.info('Executing: {0} in env {1}'.format(command, env)) process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, cwd=cwd, bufsize=1, close_fds=on_posix) return_code = None stdout_consumer = OutputConsumer(process.stdout) stderr_consumer = OutputConsumer(process.stderr) while True: return_code = process.poll() if return_code is not None: break time.sleep(0.1) stdout_consumer.join() stderr_consumer.join() parsed_output = parse_output(stdout_consumer.buffer.getvalue()) if outputNames is not None: outputNameList = outputNames.split(';') for outputName in outputNameList: ctx.logger.info('Output name: {0} value: {1}'.format(outputName, parsed_output['outputs'].get(outputName, None))) if return_code != 0: error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code, stdout_consumer.buffer.getvalue(), stderr_consumer.buffer.getvalue()) error_message = str(unicode(error_message, errors='ignore')) ctx.logger.error(error_message) if raiseException: ctx.logger.debug("Script {0} will raise an exception".format(command)) raise NonRecoverableError(error_message) else: ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(), stderr_consumer.buffer.getvalue()) ok_message = str(unicode(ok_message, errors='ignore')) ctx.logger.info(ok_message) return parsed_output def executePy(script_path, tosca_env_map): tosca_params={'tosca': {'inputs': tosca_env_map, 'outputs': {}}} execfile(script_path, globals().copy(), tosca_params) return tosca_params['tosca'] class OutputConsumer(object): def __init__(self, out): self.out = out self.buffer = StringIO() self.consumer = threading.Thread(target=self.consume_output) self.consumer.daemon = True self.consumer.start() def consume_output(self): for line in iter(self.out.readline, b''): self.buffer.write(line) self.out.close() def join(self): self.consumer.join() new_script_process = {'env': env_map} operationOutputNames = None convert_env_value_to_string(new_script_process['env']) raiseExceptionOnFailure = True parsed_output = execute(ctx.download_resource('_a4c_impl_artifact/Mysql/tosca.interfaces.node.lifecycle.Standard/create/install_mysql.sh'), new_script_process, operationOutputNames, raiseException=raiseExceptionOnFailure) outputs = parsed_output['outputs'].items() for k,v in outputs: ctx.logger.info('Output name: {0} value: {1}'.format(k, v)) ctx.instance.runtime_properties['_a4c_OO:tosca.interfaces.node.lifecycle.Standard:create:{0}'.format(k)] = v ctx.instance.update()
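# --- Illustrative worked example (comment-only, with hypothetical values,
# since this script executes when loaded by the Cloudify agent) ---
# The wrapper script echoes lines of the form EXPECTED_OUTPUT_<NAME>=<value>;
# parse_output collects those into 'outputs' and keeps the last non-matching
# line as 'last_output':
#
#     parse_output('installing mysql...\nEXPECTED_OUTPUT_DB_PORT=3306\ndone')
#     # => {'last_output': 'done', 'outputs': {'DB_PORT': '3306'}}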
train_a3c.py
import argparse
import os
import re

import numpy as np
import torch
import torch.multiprocessing as _mp
from torch import nn
from torch.nn import functional as F
from torch.utils.tensorboard import SummaryWriter

import environment
import tpu
from models import A3C
from optim import SharedAdam
from utils.helper import evaluate_A3C_lstm


def get_args():
    ap = argparse.ArgumentParser()
    ap.add_argument("-e", "--environment", default="BreakoutDeterministic-v4",
                    help="Environment to play")
    ap.add_argument("-l", "--log_dir", default="logs",
                    help="Logs dir for tensorboard")
    ap.add_argument("-t", "--train_dir", default="train_dir",
                    help="Checkpoint directory")
    ap.add_argument("-c", "--checkpoint", default=None,
                    help="Checkpoint for agent")
    ap.add_argument("--tpu", action='store_true', help="Enable TPU")
    ap.add_argument("--lr", default=1e-5, type=float, help="Learning Rate")
    ap.add_argument("--max_grad_norm", default=50.0, type=float,
                    help="Gradient clipping")
    ap.add_argument("--num_processes", default=4, type=int,
                    help="Number of parallel environments")
    ap.add_argument("--value_loss_coef", default=0.5, type=float,
                    help="value loss coef")
    ap.add_argument("--gae_lambda", default=1.0, type=float, help="gae lambda")
    ap.add_argument("--entropy_coef", default=0.01, type=float,
                    help="entropy coef")
    ap.add_argument("--gamma", default=0.99, type=float,
                    help="Discounting factor")
    ap.add_argument("--total_steps", default=int(10e6), type=int,
                    help="Training steps")
    ap.add_argument("--num_steps", default=20, type=int,
                    help="Number of steps per update")
    ap.add_argument("--loss_freq", default=50, type=int, help="loss frequency")
    ap.add_argument("--eval_freq", default=2500, type=int,
                    help="Evaluation frequency")
    ap.add_argument("--lstm", action='store_true', help="Enable LSTM")
    ap.add_argument("--device",
                    default=torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
                    help="Device for training")
    opt = ap.parse_args()
    return opt


def train(make_env, shared_agent, optim, device, opt, process_number):
    total_steps = opt.total_steps
    num_steps = opt.num_steps
    loss_freq = opt.loss_freq
    eval_freq = opt.eval_freq
    max_grad_norm = opt.max_grad_norm
    gamma = opt.gamma
    checkpoint_path = opt.train_dir
    if process_number == 0 and not os.path.exists(checkpoint_path):
        os.mkdir(checkpoint_path)

    step = 0
    evaluate = evaluate_A3C_lstm
    env = make_env(clip_rewards=False, lstm=opt.lstm)
    state = env.reset()
    grad_norm = 0
    hidden_unit = None
    n_actions = env.action_space.n

    agent = A3C(n_actions=n_actions, lstm=opt.lstm)
    agent = agent.to(device)
    shared_agent = shared_agent.to(device)
    agent.train()
    shared_agent.train()

    if opt.checkpoint:
        agent.load_state_dict(torch.load(opt.checkpoint, map_location=torch.device(device)))
        step = int(re.findall(r'\d+', opt.checkpoint)[-1])

    if process_number == 0:
        writer = SummaryWriter(opt.log_dir)

    episode_length = 0
    episode_reward = 0
    for step in range(step, total_steps + 1):
        agent.load_state_dict(shared_agent.state_dict())
        log_policies = []
        values = []
        rewards = []
        entropies = []

        for _ in range(num_steps):
            (logits, value), hidden_unit = agent([state], hidden_unit)
            policy = F.softmax(logits, dim=1)
            log_policy = F.log_softmax(logits, dim=1)
            entropy = -(policy * log_policy).sum(1, keepdim=True)

            action = agent.sample_actions((logits, value))
            state, reward, done, _ = env.step(action.squeeze())
            episode_reward += reward
            episode_length += 1

            if done:
                state = env.reset()
                hidden_unit = None
                if process_number == 0:
                    writer.add_scalar("Episode/Length", episode_length, step)
                    writer.add_scalar("Episode/Reward", episode_reward, step)
                episode_length = 0
                episode_reward = 0

            values.append(value)
            log_policies.append(log_policy.gather(1, action))
            rewards.append(np.sign(reward))
            entropies.append(entropy)

            if done:
                break

        R = torch.zeros((1, 1), dtype=torch.float).to(device)
        if not done:
            (_, R), _ = agent([state], hidden_unit)

        gae = torch.zeros((1, 1), dtype=torch.float).to(device)
        actor_loss = 0
        critic_loss = 0
        entropy_loss = 0
        next_value = R

        for value, log_policy, reward, entropy in list(zip(values, log_policies, rewards, entropies))[::-1]:
            gae = gae * gamma * opt.gae_lambda
            gae = gae + reward + gamma * next_value - value
            next_value = value
            actor_loss = actor_loss + log_policy * gae.detach()
            R = R * gamma + reward
            critic_loss = critic_loss + (R - value) ** 2 / 2
            entropy_loss = entropy_loss + entropy

        policy_loss = -actor_loss - opt.entropy_coef * entropy_loss
        total_loss = policy_loss + opt.value_loss_coef * critic_loss

        optim.zero_grad()
        total_loss.backward()
        grad_norm = nn.utils.clip_grad_norm_(agent.parameters(), max_grad_norm)

        # Share the local gradients with the global model. Once the shared
        # parameters already reference a gradient tensor they stay wired to
        # the local ones, so we can stop early.
        for local_param, global_param in zip(agent.parameters(), shared_agent.parameters()):
            if global_param.grad is not None:
                break
            global_param._grad = local_param.grad

        optim.step()

        if process_number == 0 and step % loss_freq == 0:
            loss = total_loss.data.cpu().item()
            print("[{}] Loss: {} process: {}".format(step, loss, process_number + 1))
            writer.add_scalar("Training/Loss", loss, step)
            writer.add_scalar("Training/Grad norm", grad_norm, step)
            writer.add_scalar("Training/Policy entropy", entropy_loss, step)

        if process_number == 0 and step % eval_freq == 0:
            mean_rw = evaluate(make_env(clip_rewards=False, lstm=opt.lstm), agent)
            writer.add_scalar("Mean reward", mean_rw, step)
            writer.close()
            torch.save(agent.state_dict(), os.path.join(checkpoint_path, "agent_{}.pth".format(step)))

    if process_number == 0:
        torch.save(agent.state_dict(), os.path.join(checkpoint_path, "agent_{}.pth".format(total_steps)))


def main():
    opt = get_args()
    assert opt.environment in environment.ENV_DICT.keys(), \
        "Unsupported environment: {} \nSupported environments: {}".format(opt.environment, environment.ENV_DICT.keys())

    if opt.tpu:
        device = tpu.get_TPU()
    else:
        device = opt.device

    mp = _mp.get_context("spawn")
    ENV = environment.ENV_DICT[opt.environment]
    env = ENV.make_env(lstm=opt.lstm)
    state_shape = env.observation_space.shape
    n_actions = env.action_space.n

    shared_agent = A3C(n_actions=n_actions, lstm=opt.lstm).to(device)
    shared_agent.share_memory()
    optim = SharedAdam(shared_agent.parameters(), lr=opt.lr)
    optim.share_memory()

    processes = []
    for rank in range(0, opt.num_processes):
        p = mp.Process(target=train, args=(ENV.make_env, shared_agent, optim, device, opt, rank))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()


if __name__ == '__main__':
    main()
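# Illustrative sketch (not used by the training script): the backward loop in
# train() implements Generalized Advantage Estimation,
#     delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
#     gae_t   = delta_t + gamma * lambda * gae_{t+1}
# Isolated as a standalone function over plain floats:
def gae_advantages(rewards, values, bootstrap_value, gamma=0.99, lam=1.0):
    """Compute GAE advantages for one rollout (oldest item first)."""
    advantages = []
    gae = 0.0
    next_value = bootstrap_value  # V of the state after the last step, 0 if terminal
    for reward, value in zip(reversed(rewards), reversed(values)):
        delta = reward + gamma * next_value - value
        gae = delta + gamma * lam * gae
        advantages.append(gae)
        next_value = value
    return advantages[::-1]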
test_submit_handlers.py
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------

from threading import Thread
from time import time, sleep

import sawtooth_validator.state.client_handlers as handlers
from sawtooth_validator.protobuf import client_batch_submit_pb2
from sawtooth_validator.protobuf.client_batch_submit_pb2 \
    import ClientBatchStatus
from test_client_request_handlers.base_case import ClientHandlerTestCase
from test_client_request_handlers.mocks import make_mock_batch
from test_client_request_handlers.mocks import make_store_and_tracker

A_0 = 'a' * 127 + '0'
A_1 = 'a' * 127 + '1'
A_2 = 'a' * 127 + '2'


class TestBatchSubmitFinisher(ClientHandlerTestCase):
    def setUp(self):
        store, tracker = make_store_and_tracker()

        self.initialize(
            handlers.BatchSubmitFinisher(tracker),
            client_batch_submit_pb2.ClientBatchSubmitRequest,
            client_batch_submit_pb2.ClientBatchSubmitResponse,
            store=store,
            tracker=tracker)

    def test_batch_submit_without_wait(self):
        """Verifies finisher simply returns OK when not set to wait.

        Expects to find:
            - a response status of OK
            - no batch_statuses
        """
        response = self.make_request(batches=[make_mock_batch('new')])

        self.assertEqual(self.status.OK, response.status)

    def test_batch_submit_bad_request(self):
        """Verifies finisher breaks properly when sent a bad request.

        Expects to find:
            - a response status of INTERNAL_ERROR
        """
        response = self.make_bad_request(batches=[make_mock_batch('new')])

        self.assertEqual(self.status.INTERNAL_ERROR, response.status)


class TestBatchStatusRequests(ClientHandlerTestCase):
    def setUp(self):
        store, tracker = make_store_and_tracker()

        self.initialize(
            handlers.BatchStatusRequest(tracker),
            client_batch_submit_pb2.ClientBatchStatusRequest,
            client_batch_submit_pb2.ClientBatchStatusResponse,
            store=store,
            tracker=tracker)

    def test_batch_statuses_in_store(self):
        """Verifies requests for status of a batch in the block store work.

        Queries the default mock block store with three blocks/batches:
            {header: {batch_ids: ['aaa...2'] ...}, header_signature: 'bbb...2' ...},
            {header: {batch_ids: ['aaa...1'] ...}, header_signature: 'bbb...1' ...},
            {header: {batch_ids: ['aaa...0'] ...}, header_signature: 'bbb...0' ...}

        Expects to find:
            - a response status of OK
            - a status of COMMITTED at key 'aaa...0' in batch_statuses
        """
        response = self.make_request(batch_ids=[A_0])

        self.assertEqual(self.status.OK, response.status)
        self.assertEqual(response.batch_statuses[0].batch_id, A_0)
        self.assertEqual(response.batch_statuses[0].status,
                         ClientBatchStatus.COMMITTED)

    def test_batch_statuses_bad_request(self):
        """Verifies bad requests for status of a batch break properly.

        Expects to find:
            - a response status of INTERNAL_ERROR
        """
        response = self.make_bad_request(batch_ids=[A_0])

        self.assertEqual(self.status.INTERNAL_ERROR, response.status)

    def test_batch_statuses_when_empty(self):
        """Verifies requests for batch statuses with no ids break properly.

        Expects to find:
            - a response status of NO_RESOURCE
            - that batch_statuses is empty
        """
        response = self.make_request(batch_ids=[])

        self.assertEqual(self.status.NO_RESOURCE, response.status)
        self.assertFalse(response.batch_statuses)

    def test_invalid_batch_statuses(self):
        """Verifies batch status requests marked INVALID by the tracker work.

        Queries the default mock batch tracker with invalid batch ids of:
            - 'aaa...f'

        Expects to find:
            - a response status of OK
            - a status of INVALID at key 'aaa...f' in batch_statuses
            - an invalid_transaction with
                * an 'id' of 'ccc...f'
                * a message of 'error message'
                * extended_data of b'error data'
        """
        response = self.make_request(batch_ids=['a' * 127 + 'f'])

        self.assertEqual(self.status.OK, response.status)

        status = response.batch_statuses[0]
        self.assertEqual(status.batch_id, 'a' * 127 + 'f')
        self.assertEqual(status.status, ClientBatchStatus.INVALID)
        self.assertEqual(1, len(status.invalid_transactions))

        invalid_txn = status.invalid_transactions[0]
        self.assertEqual(invalid_txn.transaction_id, 'c' * 127 + 'f')
        self.assertEqual(invalid_txn.message, 'error message')
        self.assertEqual(invalid_txn.extended_data, b'error data')

    def test_pending_batch_statuses(self):
        """Verifies batch status requests marked PENDING by the tracker work.

        Queries the default mock batch tracker with pending batch ids of:
            - 'aaa...d'

        Expects to find:
            - a response status of OK
            - a status of PENDING at key 'aaa...d' in batch_statuses
        """
        response = self.make_request(batch_ids=['a' * 127 + 'd'])

        self.assertEqual(self.status.OK, response.status)
        self.assertEqual(response.batch_statuses[0].batch_id, 'a' * 127 + 'd')
        self.assertEqual(response.batch_statuses[0].status,
                         ClientBatchStatus.PENDING)

    def test_batch_statuses_when_missing(self):
        """Verifies requests for status of a batch that is not found work.

        Expects to find:
            - a response status of OK
            - a status of UNKNOWN at key 'fff...' in batch_statuses
        """
        response = self.make_request(batch_ids=['f' * 128])

        self.assertEqual(self.status.OK, response.status)
        self.assertEqual(response.batch_statuses[0].batch_id, 'f' * 128)
        self.assertEqual(response.batch_statuses[0].status,
                         ClientBatchStatus.UNKNOWN)

    def test_batch_statuses_when_invalid(self):
        """Verifies requests for status of a batch break with invalid ids.

        Expects to find:
            - a response status of INVALID_ID
            - that the batch_statuses are missing
        """
        response = self.make_request(batch_ids=['not', 'valid'])

        self.assertEqual(self.status.INVALID_ID, response.status)
        self.assertFalse(response.batch_statuses)

    def test_batch_statuses_for_many_batches(self):
        """Verifies requests for status of many batches work properly.

        Queries the default mock block store with three blocks/batches:
            {header: {batch_ids: ['aaa...2'] ...}, header_signature: 'bbb...2' ...},
            {header: {batch_ids: ['aaa...1'] ...}, header_signature: 'bbb...1' ...},
            {header: {batch_ids: ['aaa...0'] ...}, header_signature: 'bbb...0' ...}

        ...and the default mock batch tracker with pending batch ids of:
            - 'aaa...d'

        Expects to find:
            - a response status of OK
            - a status of COMMITTED at key 'aaa...1' in batch_statuses
            - a status of COMMITTED at key 'aaa...2' in batch_statuses
            - a status of PENDING at key 'aaa...d' in batch_statuses
            - a status of UNKNOWN at key 'fff...f' in batch_statuses
        """
        response = self.make_request(
            batch_ids=[A_1, A_2, 'a' * 127 + 'd', 'f' * 128])

        self.assertEqual(self.status.OK, response.status)
        self.assertEqual(response.batch_statuses[0].status,
                         ClientBatchStatus.COMMITTED)
        self.assertEqual(response.batch_statuses[1].status,
                         ClientBatchStatus.COMMITTED)
        self.assertEqual(response.batch_statuses[2].status,
                         ClientBatchStatus.PENDING)
        self.assertEqual(response.batch_statuses[3].status,
                         ClientBatchStatus.UNKNOWN)

    def test_batch_statuses_with_wait(self):
        """Verifies requests for status that wait for commit work properly.

        Queries the default mock block store which will have no block with
        the id 'aaa...e' until added by a separate thread.

        Expects to find:
            - less than 8 seconds to have passed (i.e. did not wait for timeout)
            - a response status of OK
            - a status of COMMITTED at key 'aaa...e' in batch_statuses
        """
        self._tracker.notify_batch_pending(make_mock_batch('e'))
        start_time = time()

        def delayed_add():
            sleep(1)
            self._store.add_block('e')
            self._tracker.chain_update(None, [])

        Thread(target=delayed_add).start()

        response = self.make_request(
            batch_ids=['a' * 127 + 'e'],
            wait=True,
            timeout=10)

        self.assertGreater(8, time() - start_time)
        self.assertEqual(self.status.OK, response.status)
        self.assertEqual(response.batch_statuses[0].status,
                         ClientBatchStatus.COMMITTED)

    def test_batch_statuses_with_committed_wait(self):
        """Verifies requests for status that wait for commit work properly,
        when the batch is already committed.

        Expects to find:
            - less than 8 seconds to have passed (i.e. did not wait for timeout)
            - a response status of OK
            - a status of COMMITTED at key 'aaa...0' in batch_statuses
        """
        start_time = time()

        response = self.make_request(
            batch_ids=[A_0],
            wait=True,
            timeout=10)

        self.assertGreater(8, time() - start_time)
        self.assertEqual(self.status.OK, response.status)
        self.assertEqual(response.batch_statuses[0].status,
                         ClientBatchStatus.COMMITTED)
heartbeat.py
from threading import Thread, Timer

from requests import get, post
from requests.exceptions import ConnectionError, ConnectTimeout

from document.agent.catalog import AgentCatalogDocument
from document.exec_env import ExecEnvDocument
from lib.http import HTTP_Status
from lib.token import create_token
from reader.arg import ArgReader
from utils.log import Log


def heartbeat():
    """Heartbeat procedure with the LCPs."""
    search = ExecEnvDocument.search()
    res = search[:search.count()].execute()
    threads = []
    for exec_env in res:
        if exec_env.lcp:
            thread = Thread(target=heartbeat_exec_env, args=(exec_env,))
            threads.append(thread)
            thread.start()
    for thread in threads:
        thread.join()
    thread = Timer(ArgReader.db.hb_period, heartbeat)
    thread.daemon = True
    thread.start()


def heartbeat_exec_env(exec_env):
    log = Log.get('heartbeat')
    try:
        exec_env_id = exec_env.meta.id
        lcp = exec_env.lcp
        lbl = f'{exec_env_id} (LCP at {exec_env.hostname}:{lcp.port})'
        if exec_env.enabled:
            schema = 'https' if lcp.https else 'http'
            endpoint_lcp = exec_env.lcp.endpoint
            endpoint_lcp = '/' + endpoint_lcp if endpoint_lcp else ''
            req_uri = f'{schema}://{exec_env.hostname}:{lcp.port}{endpoint_lcp}/status'  # noqa F401
            resp = post(req_uri,
                        timeout=ArgReader.db.hb_timeout,
                        headers={'Authorization': create_token()},
                        json={'id': exec_env_id})
            if resp.status_code == HTTP_Status.OK:
                data = resp.json()
                exec_env_id = data.pop('id', None)
                lcp.started = data.get('started', None)
                lcp.last_heartbeat = data.get('last_heartbeat', None)
                log.success(f'Connection established with exec-env {lbl}')
            else:
                lcp.last_heartbeat = None
                log.warning(f'Connection reset with exec-env {lbl}')
                log.notice(f'Response: {resp.content}')
                if not lcp.https:
                    lcp.https = False

            resp = get(f'{schema}://{exec_env.hostname}:{lcp.port}{endpoint_lcp}/poll',  # noqa F401
                       timeout=ArgReader.db.hb_timeout,
                       headers={'Authorization': create_token()})
            if resp.status_code == HTTP_Status.OK:
                data = resp.json()
                for agent_cat_data in data.get('agentType', []):
                    AgentCatalogDocument.from_agent_type(agent_cat_data)
                exec_env_data = data.get('exec_env', {})
                exec_env.meta.id = exec_env_data.pop('id')
                for field, lcp_data in exec_env_data.pop('lcp', {}).items():
                    setattr(exec_env.lcp, field, lcp_data)
                for field, ee_data in exec_env_data.items():
                    setattr(exec_env, field, ee_data)
                log.success(f'Polling established with exec-env {lbl}')
            else:
                log.warning(f'Polling not possible with exec-env {lbl}')

            exec_env.save()
        else:
            log.notice(f'Exec-env {lbl} not enabled')
    except ConnectTimeout:
        log.error(f'Connection timeout with exec-env {lbl}')
    except ConnectionError:
        log.error(f'Connection refused with exec-env {lbl}')
    except Exception as exception:
        log.exception(f'Exception during connection with exec-env {lbl}', exception)
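# Illustrative note (derived from the code above, not an additional spec):
# heartbeat_exec_env() expects the LCP's /status endpoint to answer with a
# JSON body shaped roughly like
#
#     {"id": "<exec-env id>",
#      "started": "<timestamp>",
#      "last_heartbeat": "<timestamp>"}
#
# which it copies onto the ExecEnvDocument's lcp fields before saving.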
main.py
# -*- coding: utf-8 -*-
# Pomito - Pomodoro timer on steroids
"""Main interaction class."""

import logging
import os
import sys
import threading
from queue import Queue

from peewee import SqliteDatabase

import pomito.plugins
from pomito.config import Configuration

PACKAGE_NAME = "pomito"

DATA_HOME = CONFIG_HOME = os.path.expanduser("~")
if sys.platform.startswith("linux"):
    home_dir = os.getenv("HOME")
    DATA_HOME = os.getenv("XDG_DATA_HOME") or os.path.join(home_dir, ".local/share")
    CONFIG_HOME = os.getenv("XDG_CONFIG_HOME") or os.path.join(home_dir, ".config")
DATA_DIR = os.path.join(DATA_HOME, PACKAGE_NAME)
CONFIG_DIR = os.path.join(CONFIG_HOME, PACKAGE_NAME)

logger = logging.getLogger(PACKAGE_NAME)


class Message(object):
    """A wrapper for signals/parameters to be sent across plugins via the dispatcher."""

    def __init__(self, signal, **kwargs):
        self.signal = signal
        self.kwargs = kwargs

    def send(self):
        self.signal.send(**self.kwargs)


class MessageDispatcher(threading.Thread):
    """Simple queue based message dispatcher."""

    def __init__(self):
        threading.Thread.__init__(self)
        self._message_queue = Queue()
        self._stop_event = threading.Event()

    def start(self):
        """Start the dispatcher."""
        if threading.currentThread() == self:
            raise RuntimeError("Cannot call start on the thread itself.")
        threading.Thread.start(self)

    def stop(self):
        """Stop processing the queue.

        Doesn't ensure that the queue is empty before stopping; pending
        messages are thrown away. Look at _message_queue.join() in future.
        """
        if threading.currentThread() == self:
            raise RuntimeError("Cannot call stop on the thread itself.")
        self._stop_event.set()

    def queue_message(self, message):
        """Queue a Message to be dispatched.

        Args:
            message: message to be dispatched. Type: Message.
        """
        if type(message) is not Message:
            raise TypeError("Only objects of type Message can be queued.")
        if message.signal.receivers:
            logger.info("MessageDispatcher: added message: " + message.kwargs.__str__())
            logger.debug("MessageDispatcher: receivers: " + message.signal.receivers.__str__())
            self._message_queue.put(message)
        else:
            logger.info("MessageDispatcher: skipped message: " + message.kwargs.__str__())

    def run(self):
        """Worker for the message dispatcher thread."""
        while self._stop_event.is_set() is False:
            while self._message_queue.empty() is False:
                message = self._message_queue.get()
                # It is possible that a signal is dispatched to a receiver
                # that was not present when the message was enqueued but is
                # present now. YAGNI call for the moment.
                logger.debug("MessageDispatcher: dispatch message: " + message.kwargs.__str__())
                logger.debug("MessageDispatcher: receivers: " + message.signal.receivers.__str__())
                message.send()
                logger.debug("MessageDispatcher: message dispatched!")
                self._message_queue.task_done()
            self._stop_event.wait(0.01)


class Pomito(object):
    """Controls the application lifetime.

    Responsibilities:
        - Read and initialize the configuration
        - Choose the run mode
        - Handover execution to UI plugin
    """

    def __init__(self, config=None, database=None, message_dispatcher=None):
        """Create a Pomito object.

        Arguments:
            config              Configuration           Path to the configuration file
            database            peewee.SqliteDatabase   database to use for tasks etc.
            message_dispatcher  MessageDispatcher       message dispatcher instance
        """
        from pomito import pomodoro
        self._config = config
        self._database = database
        self._message_dispatcher = message_dispatcher
        self._threads = {}
        self._hooks = []

        if self._message_dispatcher is None:
            self._message_dispatcher = MessageDispatcher()
        if self._config is None:
            self._config_file = os.path.join(CONFIG_DIR, "config.ini")
            self._config = Configuration(self._config_file)
            self._config.load()

        # Pomodoro service instance. Order of initializations are important
        self.pomodoro_service = pomodoro.Pomodoro(self)

        # Default plugins
        pomito.plugins.initialize(self.pomodoro_service)
        self.ui_plugin = pomito.plugins.get_plugin(self._config.ui_plugin)
        self.task_plugin = pomito.plugins.get_plugin(self._config.task_plugin)

        # Add the plugins to threads list
        self._threads['task_plugin'] = threading.Thread(target=self.task_plugin)

        # Default hooks
        from pomito.hooks import activity
        self._hooks.append(activity.ActivityHook(self.pomodoro_service))
        return

    def initialize(self):
        """Initialize configuration, database and starts worker threads."""
        os.makedirs(DATA_DIR, exist_ok=True)
        database_path = os.path.join(DATA_DIR, "pomito.db")

        if self._database is None:
            self._database = SqliteDatabase(None)
            self._database.init(database_path)
            self._database.connect()

        # Initialize the plugins
        self.ui_plugin.initialize()
        self.task_plugin.initialize()

        # Initialize the hooks
        for hook in self._hooks:
            hook.initialize()
        return

    def run(self):
        """Start the application."""
        if not self._validate_state():
            logger.critical("Pomito.Run: Invalid state. Exiting.")
            return
        self.initialize()
        self._message_dispatcher.start()
        self.ui_plugin.run()
        self.exit()

    def exit(self):
        """Clean up and save any configuration data. Prepare for exiting the application."""
        if self._message_dispatcher.is_alive():
            self._message_dispatcher.stop()
            self._message_dispatcher.join()

        for hook in self._hooks:
            hook.close()

        if self._database is not None:
            self._database.close()

    def get_db(self):
        """Get the database object.

        Returns:
            database peewee.SqliteDatabase object
        """
        return self._database

    def get_configuration(self):
        return self._config

    def queue_signal(self, message):
        self._message_dispatcher.queue_message(message)

    def _validate_state(self):
        """Validate configuration and plugins."""
        import pomito.plugins
        _retval = True
        if not issubclass(type(self.ui_plugin), pomito.plugins.ui.UIPlugin):
            logger.error("Invalid UIPlugin object = {0}".format(self.ui_plugin))
            _retval = False
        if not issubclass(type(self.task_plugin), pomito.plugins.task.TaskPlugin):
            logger.error("Invalid TaskPlugin object = {0}".format(self.task_plugin))
            _retval = False
        return _retval


def main():
    p = Pomito()
    p.run()
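# Illustrative sketch (not part of pomito): driving a MessageDispatcher by
# hand. The dispatcher only needs a signal exposing `.receivers` and
# `.send(**kwargs)`; blinker's Signal satisfies this, but blinker here is an
# assumption for the demo, not a stated pomito dependency.
def _dispatcher_demo():
    import time
    from blinker import Signal

    timer_tick = Signal("timer_tick")
    timer_tick.connect(lambda sender, **kw: print(kw), weak=False)

    dispatcher = MessageDispatcher()
    dispatcher.start()
    dispatcher.queue_message(Message(timer_tick, elapsed=1))
    time.sleep(0.1)  # stop() does not drain the queue, so let it dispatch
    dispatcher.stop()
    dispatcher.join()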
catchFrog.py
#!/usr/bin/env python
# coding=utf-8
from __future__ import division

import math
import threading
import time

import Adafruit_PCA9685
import cv2
import numpy as np

pwm = Adafruit_PCA9685.PCA9685()
pwm.set_pwm_freq(60)

cap = cv2.VideoCapture(0)

hsv_min = np.array([45, 83, 86])
hsv_max = np.array([77, 255, 255])

zero_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / 2    # 640 / 2
zero_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) / 2  # 480 / 2


class Point(object):
    def __init__(self, x, y, step):
        self.__x = x
        self.__y = y
        self.__step = step

    def xAdd(self, step=None):
        if None is not step:
            self.__x += step
        else:
            self.__x += self.__step

    def yAdd(self, step=None):
        if None is not step:
            self.__y += step
        else:
            self.__y += self.__step

    def xSub(self, step=None):
        if None is not step:
            self.__x -= step
        else:
            self.__x -= self.__step

    def ySub(self, step=None):
        if None is not step:
            self.__y -= step
        else:
            self.__y -= self.__step

    def getAddr(self):
        return self.__x, self.__y


def calculationLogic(x, y):
    a = math.pow((x - zero_width), 2)
    b = math.pow((y - zero_height), 2)
    return math.sqrt(a + b)


def driveReverse():
    pwm.set_pwm(12, 0, 550)
    pwm.set_pwm(14, 0, 550)


def driveBraking():
    pwm.set_pwm(12, 0, 350)
    pwm.set_pwm(14, 0, 350)


def driveAhead():
    pwm.set_pwm(12, 0, 150)
    pwm.set_pwm(14, 0, 150)


def driveLeft():
    pwm.set_pwm(12, 0, 350)
    pwm.set_pwm(14, 0, 150)


def driveRight():
    pwm.set_pwm(12, 0, 150)
    pwm.set_pwm(14, 0, 350)


def moveSteering(X_P, Y_P):
    # Clamp the servo pulse values to their safe ranges.
    if 600 < X_P:
        X_P = 600
    if 100 > X_P:
        X_P = 100
    if 590 < Y_P:
        Y_P = 590
    if 200 > Y_P:
        Y_P = 200
    pwm.set_pwm(0, 0, X_P)
    pwm.set_pwm(1, 0, Y_P)


def findTarget(frame):
    frame = cv2.GaussianBlur(frame, (5, 5), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, hsv_min, hsv_max)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    mask = cv2.GaussianBlur(mask, (3, 3), 0)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    if 0 >= len(cnts):
        return False, cnts
    else:
        return True, cnts


def targetTracking(cnts, p, frame=None):
    cnt = max(cnts, key=cv2.contourArea)
    (x, y), radius = cv2.minEnclosingCircle(cnt)
    if 10 > radius:
        return
    x, y, radius = int(x), int(y), int(radius)
    if None is not frame:
        cv2.rectangle(frame, (x - radius, y - radius), (x + radius, y + radius), (0, 255, 0), 2)
    if calculationLogic(x, y) < 50:
        return
    if x > zero_width:
        print("right")
        p.xSub()
    else:
        print("left")
        p.xAdd()
    if y > zero_height:
        print("down")
        p.ySub()
    else:
        print("up")
        p.yAdd()
    print("move to %d, %d" % (p.getAddr()))
    # getAddr() already returns the (x, y) tuple expected as Thread args.
    tid = threading.Thread(target=moveSteering, args=p.getAddr())
    tid.setDaemon(True)
    tid.start()


p = Point(250, 390, 1)
x, y = p.getAddr()
moveSteering(x, y)

while True:
    ret, frame = cap.read()
    if False is ret:
        print("read image from video failed!")
        break
    find, cnts = findTarget(frame)
    if find:
        targetTracking(cnts, p, frame)
    cv2.imshow("catch frog", frame)
    if 119 == cv2.waitKey(5):  # the 'w' key quits
        break

cap.release()
cv2.destroyAllWindows()
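# Illustrative worked example (comment only): with a 640x480 capture,
# calculationLogic measures distance from the frame centre (320, 240), e.g.
# calculationLogic(400, 300) = sqrt(80**2 + 60**2) = 100.0. A target closer
# than 50 px to the centre is treated as locked and no servo move is made.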
create_gtsdb_tf_records.py
# Copyright 2018 Changan Wang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from datetime import datetime
import os
import random
import sys
import threading
import xml.etree.ElementTree as xml_tree
import re

import numpy as np
import six
import tensorflow as tf

import dataset_common

'''How to organize your dataset folder:
  VOCROOT/
       |->VOC2007/
       |    |->Annotations/
       |    |->ImageSets/
       |    |->...
       |->VOC2012/
       |    |->Annotations/
       |    |->ImageSets/
       |    |->...
       |->VOC2007TEST/
       |    |->Annotations/
       |    |->...
'''
tf.app.flags.DEFINE_string('dataset_directory', './',
                           'Root directory of all data')
#tf.app.flags.DEFINE_string('train_splits', 'VOC2007, VOC2012',
tf.app.flags.DEFINE_string('train_splits', 'train',
                           'Comma-separated list of the training data sub-directories')
tf.app.flags.DEFINE_string('validation_splits', 'test',
                           'Comma-separated list of the validation data sub-directories')
tf.app.flags.DEFINE_string('output_directory', './tfrecords',
                           'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 16,
                            'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 16,
                            'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
                            'Number of threads to preprocess the images.')

RANDOM_SEED = 180428

FLAGS = tf.app.flags.FLAGS


def _int64_feature(value):
    """Wrapper for inserting int64 features into Example proto."""
    if not isinstance(value, list):
        value = [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))


def _float_feature(value):
    """Wrapper for inserting float features into Example proto."""
    if not isinstance(value, list):
        value = [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))


def _bytes_list_feature(value):
    """Wrapper for inserting a list of bytes features into Example proto."""
    if not isinstance(value, list):
        value = [value]
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))


def _bytes_feature(value):
    """Wrapper for inserting bytes features into Example proto."""
    if isinstance(value, six.string_types):
        value = six.binary_type(value, encoding='utf-8')
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


def _convert_to_example(filename, image_name, image_buffer, bboxes, labels, labels_text,
                        difficult, truncated, height, width):
    """Build an Example proto for an example.

    Args:
        filename: string, path to an image file, e.g., '/path/to/example.JPG'
        image_buffer: string, JPEG encoding of RGB image
        bboxes: List of bounding boxes for each image
        labels: List of labels for bounding box
        labels_text: List of labels' name for bounding box
        difficult: List of ints indicate the difficulty of that bounding box
        truncated: List of ints indicate the truncation of that bounding box
        height: integer, image height in pixels
        width: integer, image width in pixels
    Returns:
        Example proto
    """
    ymin = []
    xmin = []
    ymax = []
    xmax = []
    for b in bboxes:
        assert len(b) == 4
        # pylint: disable=expression-not-assigned
        [l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]
        # pylint: enable=expression-not-assigned

    channels = 3
    image_format = 'JPEG'

    example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': _int64_feature(height),
        'image/width': _int64_feature(width),
        'image/channels': _int64_feature(channels),
        'image/shape': _int64_feature([height, width, channels]),
        'image/object/bbox/xmin': _float_feature(xmin),
        'image/object/bbox/xmax': _float_feature(xmax),
        'image/object/bbox/ymin': _float_feature(ymin),
        'image/object/bbox/ymax': _float_feature(ymax),
        'image/object/bbox/label': _int64_feature(labels),
        'image/object/bbox/label_text': _bytes_list_feature(labels_text),
        'image/object/bbox/difficult': _int64_feature(difficult),
        'image/object/bbox/truncated': _int64_feature(truncated),
        'image/format': _bytes_feature(image_format),
        'image/filename': _bytes_feature(image_name.encode('utf8')),
        'image/encoded': _bytes_feature(image_buffer)}))
    return example


class ImageCoder(object):
    """Helper class that provides TensorFlow image coding utilities."""

    def __init__(self):
        # Create a single Session to run all image coding calls.
        self._sess = tf.Session()

        # Initializes function that converts PNG to JPEG data.
        self._png_data = tf.placeholder(dtype=tf.string)
        image = tf.image.decode_png(self._png_data, channels=3)
        self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)

        # Initializes function that converts CMYK JPEG data to RGB JPEG data.
        self._cmyk_data = tf.placeholder(dtype=tf.string)
        image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
        self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)

        # Initializes function that decodes RGB JPEG data.
        self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)

    def png_to_jpeg(self, image_data):
        return self._sess.run(self._png_to_jpeg,
                              feed_dict={self._png_data: image_data})

    def cmyk_to_rgb(self, image_data):
        return self._sess.run(self._cmyk_to_rgb,
                              feed_dict={self._cmyk_data: image_data})

    def decode_jpeg(self, image_data):
        image = self._sess.run(self._decode_jpeg,
                               feed_dict={self._decode_jpeg_data: image_data})
        assert len(image.shape) == 3
        assert image.shape[2] == 3
        return image


def _process_image(filename, coder):
    """Process a single image file.

    Args:
        filename: string, path to an image file e.g., '/path/to/example.JPG'.
        coder: instance of ImageCoder to provide TensorFlow image coding utils.
    Returns:
        image_buffer: string, JPEG encoding of RGB image.
        height: integer, image height in pixels.
        width: integer, image width in pixels.
    """
    # Read the image file.
    with tf.gfile.FastGFile(filename, 'rb') as f:
        image_data = f.read()

    # Decode the RGB JPEG.
    image = coder.decode_jpeg(image_data)

    # Check that image converted to RGB
    assert len(image.shape) == 3
    height = image.shape[0]
    width = image.shape[1]
    assert image.shape[2] == 3

    return image_data, height, width


def _find_image_bounding_boxes(directory, cur_record):
    """Find the bounding boxes for a given image file.

    Args:
        directory: string; the path of all data.
        cur_record: list of strings; the first of which is the sub-directory
            of cur_record, the second is the image filename.
    Returns:
        bboxes: List of bounding boxes for each image.
        labels: List of labels for bounding box.
        labels_text: List of labels' name for bounding box.
        difficult: List of ints indicate the difficulty of that bounding box.
        truncated: List of ints indicate the truncation of that bounding box.
    """
    #anna_file = os.path.join(directory, cur_record[0], 'Annotations', cur_record[1].replace('jpg', 'xml'))
    #tree = xml_tree.parse(anna_file)
    #root = tree.getroot()

    # Image shape.
    #size = root.find('size')
    shape = [int(800), int(1360), int(3)]

    # Find annotations.
    bboxes = []
    labels = []
    labels_text = []
    difficult = []
    truncated = []
    gt_path = os.path.join(directory, "../gt.txt")
    gt = open(gt_path, "r")
    for line in gt:
        if re.match(cur_record[1][0:5] + "(.*)", line):
            obj = re.findall(r'\d+', line)
            labels.append(int(dataset_common.GTSDB_LABELS_MAIN[str(int(obj[5]) + 1)][0]))  # let 0 be background class
            labels_text.append(dataset_common.GTSDB_LABELS_MAIN[str(int(obj[5]) + 1)][1].encode('ascii'))
            difficult.append(0)
            truncated.append(0)
            bboxes.append((float(obj[2]) / shape[0],
                           float(obj[1]) / shape[1],
                           float(obj[4]) / shape[0],
                           float(obj[3]) / shape[1]))
    # print(cur_record[1][0:5])
    # print(shape)
    # print(bboxes)
    # print(labels)
    # print(labels_text)
    # print(difficult)
    # print(truncated)
    return bboxes, labels, labels_text, difficult, truncated


def _process_image_files_batch(coder, thread_index, ranges, name, directory, all_records, num_shards):
    """Processes and saves list of images as TFRecord in 1 thread.

    Args:
        coder: instance of ImageCoder to provide TensorFlow image coding utils.
        thread_index: integer, unique batch to run index is within [0, len(ranges)).
        ranges: list of pairs of integers specifying ranges of each batches to
            analyze in parallel.
        name: string, unique identifier specifying the data set
        directory: string; the path of all data
        all_records: list of string tuples; the first of each tuple is the
            sub-directory of the record, the second is the image filename.
        num_shards: integer number of shards for this data set.
    """
    # Each thread produces N shards where N = int(num_shards / num_threads).
    # For instance, if num_shards = 128, and the num_threads = 2, then the first
    # thread would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)

    shard_ranges = np.linspace(ranges[thread_index][0],
                               ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]

    counter = 0
    for s in range(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
        output_file = os.path.join(FLAGS.output_directory, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)

        shard_counter = 0
        files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        print(directory)
        for i in files_in_shard:
            cur_record = all_records[i]
            filename = os.path.join(directory, cur_record[0], '', cur_record[1])
            bboxes, labels, labels_text, difficult, truncated = _find_image_bounding_boxes(directory, cur_record)
            image_buffer, height, width = _process_image(filename, coder)

            example = _convert_to_example(filename, cur_record[1], image_buffer, bboxes,
                                          labels, labels_text, difficult, truncated, height, width)
            writer.write(example.SerializeToString())
            shard_counter += 1
            counter += 1

            if not counter % 1000:
                print('%s [thread %d]: Processed %d of %d images in thread batch.' %
                      (datetime.now(), thread_index, counter, num_files_in_thread))
                sys.stdout.flush()

        writer.close()
        print('%s [thread %d]: Wrote %d images to %s' %
              (datetime.now(), thread_index, shard_counter, output_file))
        sys.stdout.flush()
        shard_counter = 0
    print('%s [thread %d]: Wrote %d images to %d shards.' %
          (datetime.now(), thread_index, counter, num_files_in_thread))
    sys.stdout.flush()


def _process_image_files(name, directory, all_records, num_shards):
    """Process and save list of images as TFRecord of Example protos.

    Args:
        name: string, unique identifier specifying the data set
        directory: string; the path of all data
        all_records: list of string tuples; the first of each tuple is the
            sub-directory of the record, the second is the image filename.
        num_shards: integer number of shards for this data set.
    """
    # Break all images into batches with a [ranges[i][0], ranges[i][1]].
    spacing = np.linspace(0, len(all_records), FLAGS.num_threads + 1).astype(np.int)
    ranges = []
    threads = []
    for i in range(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i + 1]])

    # Launch a thread for each batch.
    print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
    sys.stdout.flush()

    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()

    # Create a generic TensorFlow-based utility for converting all image codings.
    coder = ImageCoder()

    threads = []
    for thread_index in range(len(ranges)):
        args = (coder, thread_index, ranges, name, directory, all_records, num_shards)
        t = threading.Thread(target=_process_image_files_batch, args=args)
        t.start()
        threads.append(t)

    # Wait for all the threads to terminate.
    coord.join(threads)
    print('%s: Finished writing all %d images in data set.' %
          (datetime.now(), len(all_records)))
    sys.stdout.flush()


def _process_dataset(name, directory, all_splits, num_shards):
    """Process a complete data set and save it as a TFRecord.

    Args:
        name: string, unique identifier specifying the data set.
        directory: string, root path to the data set.
        all_splits: list of strings, sub-path to the data set.
        num_shards: integer number of shards for this data set.
    """
    all_records = []
    # print(name)
    # print(directory)
    # print(all_splits)
    # print(num_shards)
    for split in all_splits:
        jpeg_file_path = os.path.join(directory, split, '')
        images = tf.gfile.ListDirectory(jpeg_file_path)
        jpegs = [im_name for im_name in images if im_name.strip()[-3:] == 'jpg']
        all_records.extend(list(zip([split] * len(jpegs), jpegs)))

    shuffled_index = list(range(len(all_records)))
    random.seed(RANDOM_SEED)
    random.shuffle(shuffled_index)
    all_records = [all_records[i] for i in shuffled_index]
    _process_image_files(name, directory, all_records, num_shards)


def parse_comma_list(args):
    return [s.strip() for s in args.split(',')]


def main(unused_argv):
    assert not FLAGS.train_shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
    assert not FLAGS.validation_shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with '
        'FLAGS.validation_shards')
    print('Saving results to %s' % FLAGS.output_directory)

    # Run it!
    _process_dataset('val', FLAGS.dataset_directory,
                     parse_comma_list(FLAGS.validation_splits), FLAGS.validation_shards)
    _process_dataset('train', FLAGS.dataset_directory,
                     parse_comma_list(FLAGS.train_splits), FLAGS.train_shards)


if __name__ == '__main__':
    tf.app.run()
save_events.py
#!/usr/bin/env python
"""Extracts messages from CAN Bus interface and saves them to file."""
import os
import time
import threading
import queue
import zipfile

import can
from can import Message

# List of OBD-II parameter IDs to query
PIDS = {
    "vehicle_speed": Message(
        arbitration_id=0x7DF,
        extended_id=False,
        data=[0x2, 0x1, 0xD, 0x55, 0x55, 0x55, 0x55, 0x55],
    ),
    "engine_load": Message(
        arbitration_id=0x7DF,
        extended_id=False,
        data=[0x2, 0x1, 0x4, 0x55, 0x55, 0x55, 0x55, 0x55],
    ),
    "coolant_temp": Message(
        arbitration_id=0x7DF,
        extended_id=False,
        data=[0x2, 0x1, 0x5, 0x55, 0x55, 0x55, 0x55, 0x55],
    ),
    "engine_rpm": Message(
        arbitration_id=0x7DF,
        extended_id=False,
        data=[0x2, 0x1, 0xC, 0x55, 0x55, 0x55, 0x55, 0x55],
    ),
    "throttle_position": Message(
        arbitration_id=0x7DF,
        extended_id=False,
        data=[0x2, 0x1, 0x11, 0x55, 0x55, 0x55, 0x55, 0x55],
    ),
    "ambient_air_temperature": Message(
        arbitration_id=0x7DF,
        extended_id=False,
        data=[0x2, 0x1, 0x46, 0x55, 0x55, 0x55, 0x55, 0x55],
    ),
}

# Intermediate format is: "timestamp.nnnn ID DATA"
CANBUS_DATA_FORMAT = "{} {:02X} {}"


def bus_request(bus, pids, run_event):
    """Request parameters of interest on the bus, pausing 50ms between
    rounds; bus_response reads the responses."""
    while run_event.is_set():
        for i in pids:
            try:
                bus.send(pids[i], timeout=0.02)
            except can.interfaces.kvaser.canlib.CANLIBError:
                bus.flush_tx_buffer()
                print("error")
        # Pause 50ms between queries
        time.sleep(0.05)


def bus_response(bus, q):
    """Continuously read the CAN Bus and queue entries of interest,
    filtered to OBD-II class messages."""
    for msg in bus:
        # Only log common OBD-II parameters:
        if msg.arbitration_id == 0x7E8:
            q.put(
                CANBUS_DATA_FORMAT.format(
                    time.time(), msg.arbitration_id, msg.data.hex().upper()
                )
            )


def persist_data(q, run_event):
    """Read data from queue and persist to local file."""
    total_events = 0
    f = open("events.txt", "a", buffering=512)
    while run_event.is_set():
        try:
            event = q.get(False)
            f.write(f"{event}\n")
            total_events += 1
            if total_events % 200 == 0:
                print(f"read and written {total_events} events")
        except queue.Empty:
            # No work to process, continue
            pass
    f.close()


# Common elements used by all extract methods
message_queue = queue.Queue()

# Connect to data source
# This is specific to the Kvaser Leaf Light v2 data logger,
# replace with specifics for your CAN Bus device
bus = can.Bus(interface="kvaser", channel=0, receive_own_messages=True)

# Setup threads to interact with CAN Bus, read data, and persist to data store
run_event = threading.Event()
run_event.set()
worker_canbus_request = threading.Thread(
    target=bus_request, args=[bus, PIDS, run_event]
)
worker_canbus_response = threading.Thread(
    target=bus_response, args=[bus, message_queue]
)
worker_persist = threading.Thread(target=persist_data, args=[message_queue, run_event])

# Start workers in reverse order, so messages aren't missed
worker_persist.start()
worker_canbus_response.start()
worker_canbus_request.start()

try:
    while True:
        # Until keyboard interrupt
        pass
except KeyboardInterrupt:
    print("Closing threads")
    run_event.clear()
    worker_canbus_request.join()
    worker_persist.join()
    print("Threads successfully closed")
    os._exit(0)
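# Illustrative sketch (not part of the logger): decoding the persisted
# "timestamp ID DATA" lines uses the standard OBD-II scaling formulas. A
# response to the engine_rpm query above (mode 0x41, PID 0x0C) carries the
# RPM in two data bytes A and B as (256 * A + B) / 4.
def _decode_engine_rpm(data_hex):
    """Decode engine RPM from the hex payload of a 0x7E8 response."""
    data = bytes.fromhex(data_hex)
    assert data[1] == 0x41 and data[2] == 0x0C, "not an engine-RPM response"
    return (256 * data[3] + data[4]) / 4
# e.g. _decode_engine_rpm("04410C1AF8") == (256 * 0x1A + 0xF8) / 4 == 1726.0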
streaming.py
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.

# Appengine users: https://developers.google.com/appengine/docs/python/sockets/#making_httplib_use_sockets

from __future__ import absolute_import, print_function

import logging
import re
import requests
import sys
from requests.exceptions import Timeout
from threading import Thread
from time import sleep

import six
import ssl

from tweepy.models import Status
from tweepy.api import API
from tweepy.error import TweepError

from tweepy.utils import import_simplejson
json = import_simplejson()

STREAM_VERSION = '1.1'


class StreamListener(object):

    def __init__(self, api=None):
        self.api = api or API()

    def on_connect(self):
        """Called once connected to streaming server.

        This will be invoked once a successful response
        is received from the server. Allows the listener
        to perform some work prior to entering the read loop.
        """
        pass

    def on_data(self, raw_data):
        """Called when raw data is received from connection.

        Override this method if you
        wish to manually handle the stream data. Return False to stop stream
        and close connection.
        """
        data = json.loads(raw_data)

        if 'in_reply_to_status_id' in data:
            status = Status.parse(self.api, data)
            if self.on_status(status) is False:
                return False
        elif 'delete' in data:
            delete = data['delete']['status']
            if self.on_delete(delete['id'], delete['user_id']) is False:
                return False
        elif 'event' in data:
            status = Status.parse(self.api, data)
            if self.on_event(status) is False:
                return False
        elif 'direct_message' in data:
            status = Status.parse(self.api, data)
            if self.on_direct_message(status) is False:
                return False
        elif 'friends' in data:
            if self.on_friends(data['friends']) is False:
                return False
        elif 'limit' in data:
            if self.on_limit(data['limit']['track']) is False:
                return False
        elif 'disconnect' in data:
            if self.on_disconnect(data['disconnect']) is False:
                return False
        elif 'warning' in data:
            if self.on_warning(data['warning']) is False:
                return False
        else:
            logging.error("Unknown message type: " + str(raw_data))

    def keep_alive(self):
        """Called when a keep-alive arrived"""
        return

    def on_status(self, status):
        """Called when a new status arrives"""
        return

    def on_exception(self, exception):
        """Called when an unhandled exception occurs."""
        return

    def on_delete(self, status_id, user_id):
        """Called when a delete notice arrives for a status"""
        return

    def on_event(self, status):
        """Called when a new event arrives"""
        return

    def on_direct_message(self, status):
        """Called when a new direct message arrives"""
        return

    def on_friends(self, friends):
        """Called when a friends list arrives.

        friends is a list that contains user_id
        """
        return

    def on_limit(self, track):
        """Called when a limitation notice arrives"""
        return

    def on_error(self, status_code):
        """Called when a non-200 status code is returned"""
        return False

    def on_timeout(self):
        """Called when stream connection times out"""
        return

    def on_disconnect(self, notice):
        """Called when twitter sends a disconnect notice

        Disconnect codes are listed here:
        https://dev.twitter.com/docs/streaming-apis/messages#Disconnect_messages_disconnect
        """
        return

    def on_warning(self, notice):
        """Called when a disconnection warning message arrives"""
        return


class ReadBuffer(object):
    """Buffer data from the response in a smarter way than httplib/requests can.

    Tweets are roughly in the 2-12kb range, averaging around 3kb.
    Requests/urllib3/httplib/socket all use socket.read, which blocks
    until enough data is returned. On some systems (eg google appengine), socket
    reads are quite slow. To combat this latency we can read big chunks,
    but the blocking part means we won't get results until enough tweets
    have arrived. That may not be a big deal for high throughput systems.
    For low throughput systems we don't want to sacrifice latency, so we
    use small chunks so it can read the length and the tweet in 2 read calls.
    """

    def __init__(self, stream, chunk_size, encoding='utf-8'):
        self._stream = stream
        self._buffer = six.b('')
        self._chunk_size = chunk_size
        self._encoding = encoding

    def read_len(self, length):
        while not self._stream.closed:
            if len(self._buffer) >= length:
                return self._pop(length)
            read_len = max(self._chunk_size, length - len(self._buffer))
            self._buffer += self._stream.read(read_len)
        return six.b('')

    def read_line(self, sep=six.b('\n')):
        """Read the data stream until a given separator is found (default \n)

        :param sep: Separator to read until. Must be of the bytes type
            (str in python 2, bytes in python 3)
        :return: The str of the data read until sep
        """
        start = 0
        while not self._stream.closed:
            loc = self._buffer.find(sep, start)
            if loc >= 0:
                return self._pop(loc + len(sep))
            else:
                start = len(self._buffer)
            self._buffer += self._stream.read(self._chunk_size)
        return six.b('')

    def _pop(self, length):
        r = self._buffer[:length]
        self._buffer = self._buffer[length:]
        return r.decode(self._encoding)


class Stream(object):

    host = 'stream.twitter.com'

    def __init__(self, auth, listener, **options):
        self.auth = auth
        self.listener = listener
        self.running = False
        self.timeout = options.get("timeout", 300.0)
        self.retry_count = options.get("retry_count")
        # values according to
        # https://dev.twitter.com/docs/streaming-apis/connecting#Reconnecting
        self.retry_time_start = options.get("retry_time", 5.0)
        self.retry_420_start = options.get("retry_420", 60.0)
        self.retry_time_cap = options.get("retry_time_cap", 320.0)
        self.snooze_time_step = options.get("snooze_time", 0.25)
        self.snooze_time_cap = options.get("snooze_time_cap", 16)

        # The default socket.read size. Default to less than half the size of
        # a tweet so that it reads tweets with the minimal latency of 2 reads
        # per tweet. Values higher than ~1kb will increase latency by waiting
        # for more data to arrive but may also increase throughput by doing
        # fewer socket read calls.
        self.chunk_size = options.get("chunk_size", 512)

        self.verify = options.get("verify", True)

        self.api = API()
        self.headers = options.get("headers") or {}
        self.new_session()
        self.body = None
        self.retry_time = self.retry_time_start
        self.snooze_time = self.snooze_time_step

    def new_session(self):
        self.session = requests.Session()
        self.session.headers = self.headers
        self.session.params = None

    def _run(self):
        # Authenticate
        url = "https://%s%s" % (self.host, self.url)

        # Connect and process the stream
        error_counter = 0
        resp = None
        exc_info = None
        while self.running:
            if self.retry_count is not None:
                if error_counter > self.retry_count:
                    # quit if error count greater than retry count
                    break
            try:
                auth = self.auth.apply_auth()
                resp = self.session.request('POST',
                                            url,
                                            data=self.body,
                                            timeout=self.timeout,
                                            stream=True,
                                            auth=auth,
                                            verify=self.verify)
                if resp.status_code != 200:
                    if self.listener.on_error(resp.status_code) is False:
                        break
                    error_counter += 1
                    if resp.status_code == 420:
                        self.retry_time = max(self.retry_420_start,
                                              self.retry_time)
                    sleep(self.retry_time)
                    self.retry_time = min(self.retry_time * 2,
                                          self.retry_time_cap)
                else:
                    error_counter = 0
                    self.retry_time = self.retry_time_start
                    self.snooze_time = self.snooze_time_step
                    self.listener.on_connect()
                    self._read_loop(resp)
            except (Timeout, ssl.SSLError) as exc:
                # This is still necessary, as a SSLError can actually be
                # thrown when using Requests
                # If it's not time out treat it like any other exception
                if isinstance(exc, ssl.SSLError):
                    if not (exc.args and 'timed out' in str(exc.args[0])):
                        exc_info = sys.exc_info()
                        break
                if self.listener.on_timeout() is False:
                    break
                if self.running is False:
                    break
                sleep(self.snooze_time)
                self.snooze_time = min(self.snooze_time + self.snooze_time_step,
                                       self.snooze_time_cap)
            except Exception as exc:
                exc_info = sys.exc_info()
                # any other exception is fatal, so kill loop
                break

        # cleanup
        self.running = False
        if resp:
            resp.close()

        self.new_session()

        if exc_info:
            # call a handler first so that the exception can be logged.
            self.listener.on_exception(exc_info[1])
            six.reraise(*exc_info)

    def _data(self, data):
        if self.listener.on_data(data) is False:
            self.running = False

    def _read_loop(self, resp):
        charset = resp.headers.get('content-type', default='')
        enc_search = re.search(r'charset=(?P<enc>\S*)', charset)
        if enc_search is not None:
            encoding = enc_search.group('enc')
        else:
            encoding = 'utf-8'

        buf = ReadBuffer(resp.raw, self.chunk_size, encoding=encoding)

        while self.running and not resp.raw.closed:
            length = 0
            while not resp.raw.closed:
                line = buf.read_line()
                if not line:
                    self.listener.keep_alive()  # keep-alive new lines are expected
                elif line.strip().isdigit():
                    length = int(line)
                    break
                else:
                    # TODO: This triggers often with blank lines. Diagnose.
                    #print('Error with line: \'{}\''.format(line))
                    self.listener.keep_alive()
                    #raise TweepError('Expecting length, unexpected value found')

            next_status_obj = buf.read_len(length)
            if self.running and next_status_obj:
                self._data(next_status_obj)

        # # Note: keep-alive newlines might be inserted before each length value.
        # # read until we get a digit...
        # c = b'\n'
        # for c in resp.iter_content(decode_unicode=True):
        #     if c == b'\n':
        #         continue
        #     break
        #
        # delimited_string = c
        #
        # # read rest of delimiter length..
        # d = b''
        # for d in resp.iter_content(decode_unicode=True):
        #     if d != b'\n':
        #         delimited_string += d
        #         continue
        #     break
        #
        # # read the next twitter status object
        # if delimited_string.decode('utf-8').strip().isdigit():
        #     status_id = int(delimited_string)
        #     next_status_obj = resp.raw.read(status_id)
        #     if self.running:
        #         self._data(next_status_obj.decode('utf-8'))

        if resp.raw.closed:
            self.on_closed(resp)

    def _start(self, run_async):
        self.running = True
        if run_async:
            self._thread = Thread(target=self._run)
            self._thread.start()
        else:
            self._run()

    def on_closed(self, resp):
        """ Called when the response has been closed by Twitter """
        pass

    def userstream(self, stall_warnings=False, _with=None, replies=None,
                   track=None, locations=None, run_async=False, encoding='utf8'):
        self.session.params = {'delimited': 'length'}
        if self.running:
            raise TweepError('Stream object already connected!')
        self.url = '/%s/user.json' % STREAM_VERSION
        self.host = 'userstream.twitter.com'
        if stall_warnings:
            self.session.params['stall_warnings'] = stall_warnings
        if _with:
            self.session.params['with'] = _with
        if replies:
            self.session.params['replies'] = replies
        if locations and len(locations) > 0:
            if len(locations) % 4 != 0:
                raise TweepError("Wrong number of locations points, "
                                 "it has to be a multiple of 4")
            self.session.params['locations'] = ','.join(['%.2f' % l for l in locations])
        if track:
            self.session.params['track'] = u','.join(track).encode(encoding)

        self._start(run_async)

    def firehose(self, count=None, run_async=False):
        self.session.params = {'delimited': 'length'}
        if self.running:
            raise TweepError('Stream object already connected!')
        self.url = '/%s/statuses/firehose.json' % STREAM_VERSION
        if count:
            self.url += '&count=%s' % count
        self._start(run_async)

    def retweet(self, run_async=False):
        self.session.params = {'delimited': 'length'}
        if self.running:
            raise TweepError('Stream object already connected!')
        self.url = '/%s/statuses/retweet.json' % STREAM_VERSION
        self._start(run_async)

    def sample(self, run_async=False, languages=None, stall_warnings=False):
        self.session.params = {'delimited': 'length'}
        if self.running:
            raise TweepError('Stream object already connected!')
        self.url = '/%s/statuses/sample.json' % STREAM_VERSION
        if languages:
            self.session.params['language'] = ','.join(map(str, languages))
        if stall_warnings:
            self.session.params['stall_warnings'] = 'true'
        self._start(run_async)

    def filter(self, follow=None, track=None, run_async=False, locations=None,
               stall_warnings=False, languages=None, encoding='utf8', filter_level=None):
        self.body = {}
        self.session.headers['Content-type'] = "application/x-www-form-urlencoded"
        if self.running:
            raise TweepError('Stream object already connected!')
        self.url = '/%s/statuses/filter.json' % STREAM_VERSION
        if follow:
            self.body['follow'] = u','.join(follow).encode(encoding)
        if track:
            self.body['track'] = u','.join(track).encode(encoding)
        if locations and len(locations) > 0:
            if len(locations) % 4 != 0:
                raise TweepError("Wrong number of locations points, "
                                 "it has to be a multiple of 4")
            self.body['locations'] = u','.join(['%.4f' % l for l in locations])
        if stall_warnings:
            self.body['stall_warnings'] = stall_warnings
        if languages:
            self.body['language'] = u','.join(map(str, languages))
        if filter_level:
            self.body['filter_level'] = filter_level.encode(encoding)
        self.session.params = {'delimited': 'length'}
        self.host = 'stream.twitter.com'
        self._start(run_async)

    def sitestream(self, follow, stall_warnings=False,
                   with_='user', replies=False, run_async=False):
        self.body = {}
        if self.running:
            raise TweepError('Stream object already connected!')
        self.url = '/%s/site.json' % STREAM_VERSION
        self.body['follow'] = u','.join(map(six.text_type, follow))
        self.body['delimited'] = 'length'
        if stall_warnings:
            self.body['stall_warnings'] = stall_warnings
        if with_:
            self.body['with'] = with_
        if replies:
            self.body['replies'] = replies
        self._start(run_async)

    def disconnect(self):
        if self.running is False:
            return
        self.running = False
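# Illustrative sketch (not part of tweepy): exercising ReadBuffer against an
# in-memory stream, mimicking Twitter's delimited=length framing where each
# payload is preceded by its byte length on its own line. The payload below
# is a hypothetical framed message, not real Twitter data.
def _read_buffer_demo():
    import io
    stream = io.BytesIO(b'10\n{"id": 1}\n')   # length line, then 10 payload bytes
    buf = ReadBuffer(stream, chunk_size=4)
    length = int(buf.read_line())   # -> 10 (int() tolerates the trailing newline)
    return buf.read_len(length)     # -> '{"id": 1}\n'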
danmu_controller.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# @Date    : 2017/12/7
# @Author  : wangmengcn
# @Email   : eclipse_sv@163.com

from time import sleep
from threading import Thread

from .danmu_factory import DouyuDM
from . import r


def create_danmu_pool():
    # Channel names are kept verbatim (including the 'destory' spelling)
    # because they must match what the publisher sends.
    newroom_pubsub = r.pubsub()
    newroom_pubsub.subscribe('danmu:createroom')
    delroom_pubsub = r.pubsub()
    delroom_pubsub.subscribe('danmu:destoryroom')
    danmu_pool = dict()
    while True:
        sleep(3)
        create_room_msg = newroom_pubsub.get_message()
        create_room_id = create_room_msg.setdefault('data', None) if create_room_msg else None
        if create_room_id:
            if not danmu_pool.get(create_room_id, None):
                print('creating room: {}'.format(create_room_id))
                danmu = DouyuDM(create_room_id)
                danmu.connect_to_server()
                danmu_thread = Thread(target=danmu.publish_danmu)
                danmu_thread.start()
                danmu_pool.setdefault(create_room_id, danmu)
            else:
                print('room {} already exists'.format(create_room_id))
        destroy_room_msg = delroom_pubsub.get_message()
        destroy_room_id = destroy_room_msg.setdefault('data', None) if destroy_room_msg else None
        if destroy_room_id:
            print('destroying room: {}'.format(destroy_room_id))
            room_to_destroy = danmu_pool.get(destroy_room_id, None)
            if room_to_destroy:
                room_to_destroy.terminate()
                del danmu_pool[destroy_room_id]
                print('room {} destroyed'.format(destroy_room_id))


if __name__ == '__main__':
    create_danmu_pool()
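# Illustrative sketch (not part of the controller): the pool is driven by
# publishing room ids on the two channels it subscribes to, e.g. with the
# same redis client `r`:
#
#     r.publish('danmu:createroom', '123456')   # start streaming room 123456
#     r.publish('danmu:destoryroom', '123456')  # tear it down again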
lazy_list.py
class lazy_list():
    """
    Convert input into a list like object in the background or on demand.

    Useful when processing data that is going to take time to load, but we
    want to start processing immediately, and are able to process the input
    mostly linearly (accessing the last element would defeat this).

    Note: If you apply a transformation to the result of this list you will
    end up defeating the point of using it. It is better to apply the
    transformation to the INPUT to this class by passing in a generator
    function that applies the transformation as we request the entries. If you
    really do need to transform the output, consider doing so in a lazy
    fashion.

    Note: This doesn't implement everything we need to be a full duck typed
    list, and I don't want to subclass, because then a bunch of things would
    appear to work that in reality don't without overriding them anyway
    (index, len, etc.). It's better to add functionality as we need it.
    """

    def __init__(self, input):
        import threading
        self._list = []
        self._done = False
        t = threading.Thread(target=self._worker, args=[input])
        t.daemon = True  # Don't prevent termination
        t.start()

    def _worker(self, input):
        for x in input:
            self._list.append(x)
        self._done = True

    def __getitem__(self, idx):
        while not self._done:
            try:
                return self._list[idx]
            except IndexError:
                import time
                time.sleep(0.5)
        return self._list[idx]
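# Usage sketch (illustrative, not part of the module): wrap a slow producer so
# consumers can index into early results while loading continues on the
# background thread; __getitem__ polls until the requested index exists.
import time

def slow_squares(n):
    for i in range(n):
        time.sleep(0.2)  # simulate expensive per-element I/O
        yield i * i

squares = lazy_list(slow_squares(5))
print(squares[0])  # available almost immediately
print(squares[4])  # blocks, polling twice a second, until the worker gets there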
helpers.py
"""Supporting functions for polydata and grid objects.""" import collections.abc import enum import logging import os import signal import sys import threading from threading import Thread import traceback from typing import Optional import warnings import numpy as np import pyvista from pyvista import _vtk from . import transformations from .fileio import from_meshio class FieldAssociation(enum.Enum): """Represents which type of vtk field a scalar or vector array is associated with.""" POINT = _vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS CELL = _vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS NONE = _vtk.vtkDataObject.FIELD_ASSOCIATION_NONE ROW = _vtk.vtkDataObject.FIELD_ASSOCIATION_ROWS def get_vtk_type(typ): """Look up the VTK type for a given numpy data type. Corrects for string type mapping issues. Parameters ---------- typ : numpy.dtype Numpy data type. Returns ------- int Integer type id specified in ``vtkType.h`` """ typ = _vtk.get_vtk_array_type(typ) # This handles a silly string type bug if typ == 3: return 13 return typ def vtk_bit_array_to_char(vtkarr_bint): """Cast vtk bit array to a char array. Parameters ---------- vtkarr_bint : vtk.vtkBitArray VTK binary array. Returns ------- vtk.vtkCharArray VTK char array. Notes ----- This performs a copy. """ vtkarr = _vtk.vtkCharArray() vtkarr.DeepCopy(vtkarr_bint) return vtkarr def vtk_id_list_to_array(vtk_id_list): """Convert a vtkIdList to a NumPy array. Parameters ---------- vtk_id_list : vtk.vtkIdList VTK ID list. Returns ------- numpy.ndarray Array of IDs. """ return np.array([vtk_id_list.GetId(i) for i in range(vtk_id_list.GetNumberOfIds())]) def convert_string_array(arr, name=None): """Convert a numpy array of strings to a vtkStringArray or vice versa. Parameters ---------- arr : numpy.ndarray Numpy string array to convert. name : str, optional Name to set the vtkStringArray to. Returns ------- vtkStringArray VTK string array. Notes ----- Note that this is terribly inefficient. If you have ideas on how to make this faster, please consider opening a pull request. """ if isinstance(arr, np.ndarray): vtkarr = _vtk.vtkStringArray() ########### OPTIMIZE ########### for val in arr: vtkarr.InsertNextValue(val) ################################ if isinstance(name, str): vtkarr.SetName(name) return vtkarr # Otherwise it is a vtk array and needs to be converted back to numpy ############### OPTIMIZE ############### nvalues = arr.GetNumberOfValues() return np.array([arr.GetValue(i) for i in range(nvalues)], dtype='|U') ######################################## def convert_array(arr, name=None, deep=False, array_type=None): """Convert a NumPy array to a vtkDataArray or vice versa. Parameters ---------- arr : np.ndarray or vtkDataArray A numpy array or vtkDataArry to convert. name : str, optional The name of the data array for VTK. deep : bool, optional If input is numpy array then deep copy values. array_type : int, optional VTK array type ID as specified in specified in ``vtkType.h``. Returns ------- vtkDataArray, numpy.ndarray, or DataFrame The converted array. If input is a :class:`numpy.ndarray` then returns ``vtkDataArray`` or is input is ``vtkDataArray`` then returns NumPy ``ndarray``. 
""" if arr is None: return if isinstance(arr, np.ndarray): if arr.dtype == np.dtype('O'): arr = arr.astype('|S') arr = np.ascontiguousarray(arr) if arr.dtype.type in (np.str_, np.bytes_): # This handles strings vtk_data = convert_string_array(arr) else: # This will handle numerical data arr = np.ascontiguousarray(arr) vtk_data = _vtk.numpy_to_vtk(num_array=arr, deep=deep, array_type=array_type) if isinstance(name, str): vtk_data.SetName(name) return vtk_data # Otherwise input must be a vtkDataArray if not isinstance(arr, (_vtk.vtkDataArray, _vtk.vtkBitArray, _vtk.vtkStringArray)): raise TypeError(f'Invalid input array type ({type(arr)}).') # Handle booleans if isinstance(arr, _vtk.vtkBitArray): arr = vtk_bit_array_to_char(arr) # Handle string arrays if isinstance(arr, _vtk.vtkStringArray): return convert_string_array(arr) # Convert from vtkDataArry to NumPy return _vtk.vtk_to_numpy(arr) def is_pyvista_dataset(obj): """Return ``True`` if the object is a PyVista wrapped dataset. Parameters ---------- obj : anything Any object to test. Returns ------- bool ``True`` when the object is a :class:`pyvista.DataSet`. """ return isinstance(obj, (pyvista.DataSet, pyvista.MultiBlock)) def point_array(obj, name): """Return point array of a pyvista or vtk object. Parameters ---------- obj : pyvista.DataSet or vtk.vtkDataSet PyVista or VTK dataset. name : str Name of the array. Returns ------- numpy.ndarray Wrapped array. """ vtkarr = obj.GetPointData().GetAbstractArray(name) return convert_array(vtkarr) def field_array(obj, name): """Return field data of a pyvista or vtk object. Parameters ---------- obj : pyvista.DataSet or vtk.vtkDataSet PyVista or VTK dataset. name : str Name of the array. Returns ------- numpy.ndarray Wrapped array. """ vtkarr = obj.GetFieldData().GetAbstractArray(name) return convert_array(vtkarr) def cell_array(obj, name): """Return cell array of a pyvista or vtk object. Parameters ---------- obj : pyvista.DataSet or vtk.vtkDataSet PyVista or VTK dataset. name : str Name of the array. Returns ------- numpy.ndarray Wrapped array. """ vtkarr = obj.GetCellData().GetAbstractArray(name) return convert_array(vtkarr) def row_array(obj, name): """Return row array of a vtk object. Parameters ---------- obj : vtk.vtkDataSet PyVista or VTK dataset. name : str Name of the array. Returns ------- numpy.ndarray Wrapped array. """ vtkarr = obj.GetRowData().GetAbstractArray(name) return convert_array(vtkarr) def parse_field_choice(field): """Return a field association object for a given field type string. Parameters ---------- field : str, FieldAssociation Name of the field (e.g, ``'cell'``, ``'field'``, ``'point'``, ``'row'``). Returns ------- pyvista.FieldAssociation Field association. """ if isinstance(field, str): field = field.strip().lower() if field in ['cell', 'c', 'cells']: field = FieldAssociation.CELL elif field in ['point', 'p', 'points']: field = FieldAssociation.POINT elif field in ['field', 'f', 'fields']: field = FieldAssociation.NONE elif field in ['row', 'r']: field = FieldAssociation.ROW else: raise ValueError(f'Data field ({field}) not supported.') elif isinstance(field, FieldAssociation): pass else: raise ValueError(f'Data field ({field}) not supported.') return field def get_array(mesh, name, preference='cell', err=False) -> Optional[np.ndarray]: """Search point, cell and field data for an array. Parameters ---------- mesh : pyvista.DataSet Dataset to get the array from. name : str The name of the array to get the range. 
preference : str, optional When scalars is specified, this is the preferred array type to search for in the dataset. Must be either ``'point'``, ``'cell'``, or ``'field'``. err : bool, optional Whether to throw an error if array is not present. Returns ------- pyvista.pyvista_ndarray or ``None`` Requested array. Return ``None`` if there is no array matching the ``name`` and ``err=False``. """ if isinstance(mesh, _vtk.vtkTable): arr = row_array(mesh, name) if arr is None and err: raise KeyError(f'Data array ({name}) not present in this dataset.') return arr parr = point_array(mesh, name) carr = cell_array(mesh, name) farr = field_array(mesh, name) preference = parse_field_choice(preference) if sum([array is not None for array in (parr, carr, farr)]) > 1: if preference == FieldAssociation.CELL: return carr elif preference == FieldAssociation.POINT: return parr elif preference == FieldAssociation.NONE: return farr else: raise ValueError(f'Data field ({preference}) not supported.') if parr is not None: return parr elif carr is not None: return carr elif farr is not None: return farr elif err: raise KeyError(f'Data array ({name}) not present in this dataset.') return None def get_array_association(mesh, name, preference='cell', err=False) -> FieldAssociation: """Return the array association. Parameters ---------- mesh : Dataset Dataset to get the array association from. name : str The name of the array. preference : str, optional When scalars is specified, this is the preferred array type to search for in the dataset. Must be either ``'point'``, ``'cell'``, or ``'field'``. err : bool, optional Boolean to control whether to throw an error if array is not present. Returns ------- pyvista.FieldAssociation Association of the array. If array is not present and ``err`` is ``False``, ``FieldAssociation.NONE`` is returned. """ if isinstance(mesh, _vtk.vtkTable): arr = row_array(mesh, name) if arr is None and err: raise KeyError(f'Data array ({name}) not present in this dataset.') return FieldAssociation.ROW # with multiple arrays, return the array preference if possible parr = point_array(mesh, name) carr = cell_array(mesh, name) farr = field_array(mesh, name) arrays = [parr, carr, farr] preferences = [FieldAssociation.POINT, FieldAssociation.CELL, FieldAssociation.NONE] preference = parse_field_choice(preference) if preference not in preferences: raise ValueError(f'Data field ({preference}) not supported.') matches = [pref for pref, array in zip(preferences, arrays) if array is not None] # optionally raise if no match if not matches: if err: raise KeyError(f'Data array ({name}) not present in this dataset.') return FieldAssociation.NONE # use preference if it applies if preference in matches: return preference # otherwise return first in order of point -> cell -> field return matches[0] def vtk_points(points, deep=True, force_float=False): """Convert numpy array or array-like to a ``vtkPoints`` object. Parameters ---------- points : numpy.ndarray or sequence Points to convert. Should be 1 or 2 dimensional. Accepts a single point or several points. deep : bool, optional Perform a deep copy of the array. Only applicable if ``points`` is a :class:`numpy.ndarray`. force_float : bool, optional Casts the datatype to ``float32`` if points datatype is non-float. Set this to ``False`` to allow non-float types, though this may lead to truncation of intermediate floats when transforming datasets. Returns ------- vtk.vtkPoints The vtkPoints object. 
Examples -------- >>> import pyvista >>> import numpy as np >>> points = np.random.random((10, 3)) >>> vpoints = pyvista.vtk_points(points) >>> vpoints # doctest:+SKIP (vtkmodules.vtkCommonCore.vtkPoints)0x7f0c2e26af40 """ points = np.asanyarray(points) # verify is numeric if not np.issubdtype(points.dtype, np.number): raise TypeError('Points must be a numeric type') if force_float: if not np.issubdtype(points.dtype, np.floating): warnings.warn( 'Points is not a float type. This can cause issues when ' 'transforming or applying filters. Casting to ' '``np.float32``. Disable this by passing ' '``force_float=False``.' ) points = points.astype(np.float32) # check dimensionality if points.ndim == 1: points = points.reshape(-1, 3) elif points.ndim > 2: raise ValueError(f'Dimension of ``points`` should be 1 or 2, not {points.ndim}') # verify shape if points.shape[1] != 3: raise ValueError( 'Points array must contain three values per point. ' f'Shape is {points.shape} and should be (X, 3)' ) # points must be contiguous points = np.require(points, requirements=['C']) vtkpts = _vtk.vtkPoints() vtk_arr = _vtk.numpy_to_vtk(points, deep=deep) vtkpts.SetData(vtk_arr) return vtkpts def line_segments_from_points(points): """Generate non-connected line segments from points. Assumes points are ordered as line segments and an even number of points. Parameters ---------- points : numpy.ndarray Points representing line segments. An even number must be given as every two vertices represent a single line segment. For example, two line segments would be represented as ``np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])``. Returns ------- pyvista.PolyData PolyData with lines and cells. Examples -------- This example plots two line segments at right angles to each other. >>> import pyvista >>> import numpy as np >>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]]) >>> lines = pyvista.lines_from_points(points) >>> lines.plot() """ if len(points) % 2 != 0: raise ValueError("An even number of points must be given to define each segment.") # Assuming ordered points, create array defining line order n_points = len(points) n_lines = n_points // 2 lines = np.c_[ ( 2 * np.ones(n_lines, np.int_), np.arange(0, n_points - 1, step=2), np.arange(1, n_points + 1, step=2), ) ] poly = pyvista.PolyData() poly.points = points poly.lines = lines return poly def lines_from_points(points, close=False): """Make a connected line set given an array of points. Parameters ---------- points : np.ndarray Points representing the vertices of the connected segments. For example, two line segments would be represented as ``np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]])``. close : bool, optional If ``True``, close the line segments into a loop. Returns ------- pyvista.PolyData PolyData with lines and cells. Examples -------- >>> import numpy as np >>> import pyvista >>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]]) >>> poly = pyvista.lines_from_points(points) >>> poly.plot(line_width=5) """ poly = pyvista.PolyData() poly.points = points cells = np.full((len(points) - 1, 3), 2, dtype=np.int_) cells[:, 1] = np.arange(0, len(points) - 1, dtype=np.int_) cells[:, 2] = np.arange(1, len(points), dtype=np.int_) if close: cells = np.append(cells, [[2, len(points) - 1, 0]], axis=0) poly.lines = cells return poly def make_tri_mesh(points, faces): """Construct a ``pyvista.PolyData`` mesh using points and faces arrays. 
Construct a mesh from an Nx3 array of points and an Mx3 array of triangle indices, resulting in a mesh with N vertices and M triangles. This function does not require the standard VTK "padding" column and simplifies mesh creation. Parameters ---------- points : np.ndarray Array of points with shape ``(N, 3)`` storing the vertices of the triangle mesh. faces : np.ndarray Array of indices with shape ``(M, 3)`` containing the triangle indices. Returns ------- pyvista.PolyData PolyData instance containing the triangle mesh. Examples -------- This example discretizes the unit square into a triangle mesh with nine vertices and eight faces. >>> import numpy as np >>> import pyvista >>> points = np.array([[0, 0, 0], [0.5, 0, 0], [1, 0, 0], [0, 0.5, 0], ... [0.5, 0.5, 0], [1, 0.5, 0], [0, 1, 0], [0.5, 1, 0], ... [1, 1, 0]]) >>> faces = np.array([[0, 1, 4], [4, 7, 6], [2, 5, 4], [4, 5, 8], ... [0, 4, 3], [3, 4, 6], [1, 2, 4], [4, 8, 7]]) >>> tri_mesh = pyvista.make_tri_mesh(points, faces) >>> tri_mesh.plot(show_edges=True, line_width=5) """ if points.shape[1] != 3: raise ValueError("Points array should have shape (N, 3).") if faces.ndim != 2 or faces.shape[1] != 3: raise ValueError("Face array should have shape (M, 3).") cells = np.empty((faces.shape[0], 4), dtype=faces.dtype) cells[:, 0] = 3 cells[:, 1:] = faces return pyvista.PolyData(points, cells) def vector_poly_data(orig, vec): """Create a pyvista.PolyData object composed of vectors. Parameters ---------- orig : numpy.ndarray Array of vector origins. vec : numpy.ndarray Array of vectors. Returns ------- pyvista.PolyData Mesh containing the ``orig`` points along with the ``'vectors'`` and ``'mag'`` point arrays representing the vectors and magnitude of the the vectors at each point. Examples -------- Create basic vector field. This is a point cloud where each point has a vector and magnitude attached to it. >>> import pyvista >>> import numpy as np >>> x, y = np.meshgrid(np.linspace(-5,5,10),np.linspace(-5,5,10)) >>> points = np.vstack((x.ravel(), y.ravel(), np.zeros(x.size))).T >>> u = x/np.sqrt(x**2 + y**2) >>> v = y/np.sqrt(x**2 + y**2) >>> vectors = np.vstack((u.ravel()**3, v.ravel()**3, np.zeros(u.size))).T >>> pdata = pyvista.vector_poly_data(points, vectors) >>> pdata.point_data.keys() ['vectors', 'mag'] Convert these to arrows and plot it. 
>>> pdata.glyph(orient='vectors', scale='mag').plot() """ # shape, dimension checking if not isinstance(orig, np.ndarray): orig = np.asarray(orig) if not isinstance(vec, np.ndarray): vec = np.asarray(vec) if orig.ndim != 2: orig = orig.reshape((-1, 3)) elif orig.shape[1] != 3: raise ValueError('orig array must be 3D') if vec.ndim != 2: vec = vec.reshape((-1, 3)) elif vec.shape[1] != 3: raise ValueError('vec array must be 3D') # Create vtk points and cells objects vpts = _vtk.vtkPoints() vpts.SetData(_vtk.numpy_to_vtk(np.ascontiguousarray(orig), deep=True)) npts = orig.shape[0] cells = np.empty((npts, 2), dtype=pyvista.ID_TYPE) cells[:, 0] = 1 cells[:, 1] = np.arange(npts, dtype=pyvista.ID_TYPE) vcells = pyvista.utilities.cells.CellArray(cells, npts) # Create vtkPolyData object pdata = _vtk.vtkPolyData() pdata.SetPoints(vpts) pdata.SetVerts(vcells) # Add vectors to polydata name = 'vectors' vtkfloat = _vtk.numpy_to_vtk(np.ascontiguousarray(vec), deep=True) vtkfloat.SetName(name) pdata.GetPointData().AddArray(vtkfloat) pdata.GetPointData().SetActiveVectors(name) # Add magnitude of vectors to polydata name = 'mag' scalars = (vec * vec).sum(1) ** 0.5 vtkfloat = _vtk.numpy_to_vtk(np.ascontiguousarray(scalars), deep=True) vtkfloat.SetName(name) pdata.GetPointData().AddArray(vtkfloat) pdata.GetPointData().SetActiveScalars(name) return pyvista.PolyData(pdata) def trans_from_matrix(matrix): # pragma: no cover """Convert a vtk matrix to a numpy.ndarray. DEPRECATED: Please use ``array_from_vtkmatrix``. """ # import needs to happen here to prevent a circular import from pyvista.core.errors import DeprecationError raise DeprecationError('DEPRECATED: Please use ``array_from_vtkmatrix``.') def array_from_vtkmatrix(matrix): """Convert a vtk matrix to an array. Parameters ---------- matrix : vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4 The vtk matrix to be converted to a ``numpy.ndarray``. Returned ndarray has shape (3, 3) or (4, 4) as appropriate. Returns ------- numpy.ndarray Numpy array containing the data from ``matrix``. """ if isinstance(matrix, _vtk.vtkMatrix3x3): shape = (3, 3) elif isinstance(matrix, _vtk.vtkMatrix4x4): shape = (4, 4) else: raise TypeError( 'Expected vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4 input,' f' got {type(matrix).__name__} instead.' ) array = np.zeros(shape) for i in range(shape[0]): for j in range(shape[1]): array[i, j] = matrix.GetElement(i, j) return array def vtkmatrix_from_array(array): """Convert a ``numpy.ndarray`` or array-like to a vtk matrix. Parameters ---------- array : numpy.ndarray or array-like The array or array-like to be converted to a vtk matrix. Shape (3, 3) gets converted to a ``vtk.vtkMatrix3x3``, shape (4, 4) gets converted to a ``vtk.vtkMatrix4x4``. No other shapes are valid. Returns ------- vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4 VTK matrix. """ array = np.asarray(array) if array.shape == (3, 3): matrix = _vtk.vtkMatrix3x3() elif array.shape == (4, 4): matrix = _vtk.vtkMatrix4x4() else: raise ValueError(f'Invalid shape {array.shape}, must be (3, 3) or (4, 4).') m, n = array.shape for i in range(m): for j in range(n): matrix.SetElement(i, j, array[i, j]) return matrix def is_meshio_mesh(obj): """Test if passed object is instance of ``meshio.Mesh``. Parameters ---------- obj Any object. Returns ------- bool ``True`` if ``obj`` is an ``meshio.Mesh``. """ try: import meshio return isinstance(obj, meshio.Mesh) except ImportError: return False def wrap(dataset): """Wrap any given VTK data object to its appropriate PyVista data object. 
Other formats that are supported include: * 2D :class:`numpy.ndarray` of XYZ vertices * 3D :class:`numpy.ndarray` representing a volume. Values will be scalars. * 3D :class:`trimesh.Trimesh` mesh. * 3D :class:`meshio.Mesh` mesh. Parameters ---------- dataset : :class:`numpy.ndarray`, :class:`trimesh.Trimesh`, or VTK object Dataset to wrap. Returns ------- pyvista.DataSet The PyVista wrapped dataset. Examples -------- Wrap a numpy array representing a random point cloud. >>> import numpy as np >>> import pyvista >>> points = np.random.random((10, 3)) >>> cloud = pyvista.wrap(points) >>> cloud # doctest:+SKIP PolyData (0x7fc52db83d70) N Cells: 10 N Points: 10 X Bounds: 1.123e-01, 7.457e-01 Y Bounds: 1.009e-01, 9.877e-01 Z Bounds: 2.346e-03, 9.640e-01 N Arrays: 0 Wrap a Trimesh object. >>> import trimesh >>> import pyvista >>> points = [[0, 0, 0], [0, 0, 1], [0, 1, 0]] >>> faces = [[0, 1, 2]] >>> tmesh = trimesh.Trimesh(points, faces=faces, process=False) >>> mesh = pyvista.wrap(tmesh) >>> mesh # doctest:+SKIP PolyData (0x7fc55ff27ad0) N Cells: 1 N Points: 3 X Bounds: 0.000e+00, 0.000e+00 Y Bounds: 0.000e+00, 1.000e+00 Z Bounds: 0.000e+00, 1.000e+00 N Arrays: 0 Wrap a VTK object. >>> import pyvista >>> import vtk >>> points = vtk.vtkPoints() >>> p = [1.0, 2.0, 3.0] >>> vertices = vtk.vtkCellArray() >>> pid = points.InsertNextPoint(p) >>> _ = vertices.InsertNextCell(1) >>> _ = vertices.InsertCellPoint(pid) >>> point = vtk.vtkPolyData() >>> _ = point.SetPoints(points) >>> _ = point.SetVerts(vertices) >>> mesh = pyvista.wrap(point) >>> mesh # doctest:+SKIP PolyData (0x7fc55ff27ad0) N Cells: 1 N Points: 3 X Bounds: 0.000e+00, 0.000e+00 Y Bounds: 0.000e+00, 1.000e+00 Z Bounds: 0.000e+00, 1.000e+00 N Arrays: 0 """ # Return if None if dataset is None: return # Check if dataset is a numpy array. We do this first since # pyvista_ndarray contains a VTK type that we don't want to # directly wrap. if isinstance(dataset, (np.ndarray, pyvista.pyvista_ndarray)): if dataset.ndim == 1 and dataset.shape[0] == 3: return pyvista.PolyData(dataset) if dataset.ndim > 1 and dataset.ndim < 3 and dataset.shape[1] == 3: return pyvista.PolyData(dataset) elif dataset.ndim == 3: mesh = pyvista.UniformGrid(dims=dataset.shape) mesh['values'] = dataset.ravel(order='F') mesh.active_scalars_name = 'values' return mesh else: raise NotImplementedError('NumPy array could not be wrapped pyvista.') # wrap VTK arrays as pyvista_ndarray if isinstance(dataset, _vtk.vtkDataArray): return pyvista.pyvista_ndarray(dataset) # Check if a dataset is a VTK type if hasattr(dataset, 'GetClassName'): key = dataset.GetClassName() try: return pyvista._wrappers[key](dataset) except KeyError: logging.warning(f'VTK data type ({key}) is not currently supported by pyvista.') return # wrap meshio if is_meshio_mesh(dataset): return from_meshio(dataset) # wrap trimesh if dataset.__class__.__name__ == 'Trimesh': # trimesh doesn't pad faces n_face = dataset.faces.shape[0] faces = np.empty((n_face, 4), dataset.faces.dtype) faces[:, 1:] = dataset.faces faces[:, 0] = 3 return pyvista.PolyData(np.asarray(dataset.vertices), faces) # otherwise, flag tell the user we can't wrap this object raise NotImplementedError(f'Unable to wrap ({type(dataset)}) into a pyvista type.') def image_to_texture(image): """Convert ``vtkImageData`` (:class:`pyvista.UniformGrid`) to a ``vtkTexture``. Parameters ---------- image : pyvista.UniformGrid or vtkImageData Image to convert. Returns ------- vtkTexture VTK texture. 
""" return pyvista.Texture(image) def numpy_to_texture(image): """Convert a NumPy image array to a vtk.vtkTexture. Parameters ---------- image : numpy.ndarray Numpy image array. Returns ------- vtkTexture VTK texture. """ return pyvista.Texture(image) def is_inside_bounds(point, bounds): """Check if a point is inside a set of bounds. This is implemented through recursion so that this is N-dimensional. Parameters ---------- point : sequence Three item cartesian point (i.e. ``[x, y, z]``). bounds : sequence Six item bounds in the form of ``(xMin, xMax, yMin, yMax, zMin, zMax)``. Returns ------- bool ``True`` when ``point`` is inside ``bounds``. """ if isinstance(point, (int, float)): point = [point] if isinstance(point, (np.ndarray, collections.abc.Sequence)) and not isinstance( point, collections.deque ): if len(bounds) < 2 * len(point) or len(bounds) % 2 != 0: raise ValueError('Bounds mismatch point dimensionality') point = collections.deque(point) bounds = collections.deque(bounds) return is_inside_bounds(point, bounds) if not isinstance(point, collections.deque): raise TypeError(f'Unknown input data type ({type(point)}).') if len(point) < 1: return True p = point.popleft() lower, upper = bounds.popleft(), bounds.popleft() if lower <= p <= upper: return is_inside_bounds(point, bounds) return False def fit_plane_to_points(points, return_meta=False): """Fit a plane to a set of points using the SVD algorithm. Parameters ---------- points : sequence Size ``[N x 3]`` sequence of points to fit a plane through. return_meta : bool, optional If ``True``, also returns the center and normal used to generate the plane. Returns ------- pyvista.PolyData Plane mesh. numpy.ndarray Plane center if ``return_meta=True``. numpy.ndarray Plane normal if ``return_meta=True``. Examples -------- Fit a plane to a random point cloud. >>> import pyvista >>> import numpy as np >>> cloud = np.random.random((10, 3)) >>> cloud[:, 2] *= 0.1 >>> plane, center, normal = pyvista.fit_plane_to_points(cloud, return_meta=True) Plot the fitted plane. >>> pl = pyvista.Plotter() >>> _ = pl.add_mesh(plane, color='tan', style='wireframe', line_width=4) >>> _ = pl.add_points(cloud, render_points_as_spheres=True, ... color='r', point_size=30) >>> pl.show() """ data = np.array(points) center = data.mean(axis=0) result = np.linalg.svd(data - center) normal = np.cross(result[2][0], result[2][1]) plane = pyvista.Plane(center=center, direction=normal) if return_meta: return plane, center, normal return plane def raise_not_matching(scalars, dataset): """Raise exception about inconsistencies. Parameters ---------- scalars : numpy.ndarray Array of scalars. dataset : pyvista.DataSet Dataset to check against. Raises ------ ValueError Raises a ValueError if the size of scalars does not the dataset. """ if isinstance(dataset, _vtk.vtkTable): raise ValueError( f'Number of scalars ({scalars.size}) must match number of rows ({dataset.n_rows}).' ) raise ValueError( f'Number of scalars ({scalars.size}) ' f'must match either the number of points ({dataset.n_points}) ' f'or the number of cells ({dataset.n_cells}).' ) def generate_plane(normal, origin): """Return a _vtk.vtkPlane. Parameters ---------- normal : sequence Three item sequence representing the normal of the plane. origin : sequence Three item sequence representing the origin of the plane. Returns ------- vtk.vtkPlane VTK plane. 
""" plane = _vtk.vtkPlane() # NORMAL MUST HAVE MAGNITUDE OF 1 normal = normal / np.linalg.norm(normal) plane.SetNormal(normal) plane.SetOrigin(origin) return plane def try_callback(func, *args): """Wrap a given callback in a try statement. Parameters ---------- func : callable Callable object. *args Any arguments. """ try: func(*args) except Exception: etype, exc, tb = sys.exc_info() stack = traceback.extract_tb(tb)[1:] formatted_exception = 'Encountered issue in callback (most recent call last):\n' + ''.join( traceback.format_list(stack) + traceback.format_exception_only(etype, exc) ).rstrip('\n') logging.warning(formatted_exception) def check_depth_peeling(number_of_peels=100, occlusion_ratio=0.0): """Check if depth peeling is available. Attempts to use depth peeling to see if it is available for the current environment. Returns ``True`` if depth peeling is available and has been successfully leveraged, otherwise ``False``. Parameters ---------- number_of_peels : int, optional Maximum number of depth peels. occlusion_ratio : float, optional Occlusion ratio. Returns ------- bool ``True`` when system supports depth peeling with the specified settings. """ # Try Depth Peeling with a basic scene source = _vtk.vtkSphereSource() mapper = _vtk.vtkPolyDataMapper() mapper.SetInputConnection(source.GetOutputPort()) actor = _vtk.vtkActor() actor.SetMapper(mapper) # requires opacity < 1 actor.GetProperty().SetOpacity(0.5) renderer = _vtk.vtkRenderer() renderWindow = _vtk.vtkRenderWindow() renderWindow.AddRenderer(renderer) renderWindow.SetOffScreenRendering(True) renderWindow.SetAlphaBitPlanes(True) renderWindow.SetMultiSamples(0) renderer.AddActor(actor) renderer.SetUseDepthPeeling(True) renderer.SetMaximumNumberOfPeels(number_of_peels) renderer.SetOcclusionRatio(occlusion_ratio) renderWindow.Render() return renderer.GetLastRenderingUsedDepthPeeling() == 1 def threaded(fn): """Call a function using a thread. Parameters ---------- fn : callable Callable object. Returns ------- function Wrapped function. """ def wrapper(*args, **kwargs): thread = Thread(target=fn, args=args, kwargs=kwargs) thread.start() return thread return wrapper class conditional_decorator: """Conditional decorator for methods. Parameters ---------- dec Decorator condition Condition to match. """ def __init__(self, dec, condition): """Initialize.""" self.decorator = dec self.condition = condition def __call__(self, func): """Call the decorated function if condition is matched.""" if not self.condition: # Return the function unchanged, not decorated. return func return self.decorator(func) class ProgressMonitor: """A standard class for monitoring the progress of a VTK algorithm. This must be use in a ``with`` context and it will block keyboard interrupts from happening until the exit event as interrupts will crash the kernel if the VTK algorithm is still executing. Parameters ---------- algorithm VTK algorithm or filter. message : str, optional Message to display in the progress bar. scaling : float, optional Unused keyword argument. 
""" def __init__(self, algorithm, message="", scaling=100): """Initialize observer.""" try: from tqdm import tqdm # noqa except ImportError: raise ImportError("Please install `tqdm` to monitor algorithms.") self.event_type = _vtk.vtkCommand.ProgressEvent self.progress = 0.0 self._last_progress = self.progress self.algorithm = algorithm self.message = message self._interrupt_signal_received = False self._old_progress = 0 self._old_handler = None self._progress_bar = None def handler(self, sig, frame): """Pass signal to custom interrupt handler.""" self._interrupt_signal_received = (sig, frame) logging.debug('SIGINT received. Delaying KeyboardInterrupt until VTK algorithm finishes.') def __call__(self, obj, event, *args): """Call progress update callback. On an event occurrence, this function executes. """ if self._interrupt_signal_received: obj.AbortExecuteOn() else: progress = obj.GetProgress() step = progress - self._old_progress self._progress_bar.update(step) self._old_progress = progress def __enter__(self): """Enter event for ``with`` context.""" from tqdm import tqdm # check if in main thread if threading.current_thread().__class__.__name__ == '_MainThread': self._old_handler = signal.signal(signal.SIGINT, self.handler) self._progress_bar = tqdm( total=1, leave=True, bar_format='{l_bar}{bar}[{elapsed}<{remaining}]' ) self._progress_bar.set_description(self.message) self.algorithm.AddObserver(self.event_type, self) return self._progress_bar def __exit__(self, type, value, traceback): """Exit event for ``with`` context.""" self._progress_bar.total = 1 self._progress_bar.refresh() self._progress_bar.close() self.algorithm.RemoveObservers(self.event_type) if threading.current_thread().__class__.__name__ == '_MainThread': signal.signal(signal.SIGINT, self._old_handler) def abstract_class(cls_): """Decorate a class, overriding __new__. Preventing a class from being instantiated similar to abc.ABCMeta but does not require an abstract method. """ def __new__(cls, *args, **kwargs): if cls is cls_: raise TypeError(f'{cls.__name__} is an abstract class and may not be instantiated.') return object.__new__(cls) cls_.__new__ = __new__ return cls_ def axis_rotation(points, angle, inplace=False, deg=True, axis='z'): """Rotate points by angle about an axis. Parameters ---------- points : numpy.ndarray Array of points with shape ``(N, 3)``. angle : float Rotation angle. inplace : bool, optional Updates points in-place while returning nothing. deg : bool, optional If ``True``, the angle is interpreted as degrees instead of radians. Default is ``True``. axis : str, optional Name of axis to rotate about. Valid options are ``'x'``, ``'y'``, and ``'z'``. Default value is ``'z'``. Returns ------- numpy.ndarray Rotated points. Examples -------- Rotate a set of points by 90 degrees about the x-axis in-place. >>> import numpy as np >>> import pyvista >>> from pyvista import examples >>> points = examples.load_airplane().points >>> points_orig = points.copy() >>> pyvista.axis_rotation(points, 90, axis='x', deg=True, inplace=True) >>> assert np.all(np.isclose(points[:, 0], points_orig[:, 0])) >>> assert np.all(np.isclose(points[:, 1], -points_orig[:, 2])) >>> assert np.all(np.isclose(points[:, 2], points_orig[:, 1])) """ axis = axis.lower() axis_to_vec = {'x': (1, 0, 0), 'y': (0, 1, 0), 'z': (0, 0, 1)} if axis not in axis_to_vec: raise ValueError('Invalid axis. 
Must be either "x", "y", or "z"') rot_mat = transformations.axis_angle_rotation(axis_to_vec[axis], angle, deg=deg) return transformations.apply_transformation_to_points(rot_mat, points, inplace=inplace) def cubemap(path='', prefix='', ext='.jpg'): """Construct a cubemap from 6 images. Each of the 6 images must be in the following format: - <prefix>negx<ext> - <prefix>negy<ext> - <prefix>negz<ext> - <prefix>posx<ext> - <prefix>posy<ext> - <prefix>posz<ext> Prefix may be empty, and extension will default to ``'.jpg'`` For example, if you have 6 images with the skybox2 prefix: - ``'skybox2-negx.jpg'`` - ``'skybox2-negy.jpg'`` - ``'skybox2-negz.jpg'`` - ``'skybox2-posx.jpg'`` - ``'skybox2-posy.jpg'`` - ``'skybox2-posz.jpg'`` Parameters ---------- path : str, optional Directory containing the cubemap images. prefix : str, optional Prefix to the filename. ext : str, optional The filename extension. For example ``'.jpg'``. Returns ------- pyvista.Texture Texture with cubemap. Examples -------- >>> import pyvista >>> skybox = pyvista.cubemap('my_directory', 'skybox', '.jpeg') # doctest:+SKIP """ sets = ['posx', 'negx', 'posy', 'negy', 'posz', 'negz'] image_paths = [os.path.join(path, f'{prefix}{suffix}{ext}') for suffix in sets] for image_path in image_paths: if not os.path.isfile(image_path): file_str = '\n'.join(image_paths) raise FileNotFoundError( f'Unable to locate {image_path}\n' 'Expected to find the following files:\n' f'{file_str}' ) texture = pyvista.Texture() texture.SetMipmap(True) texture.SetInterpolate(True) texture.cube_map = True # Must be set prior to setting images # add each image to the cubemap for i, fn in enumerate(image_paths): image = pyvista.read(fn) flip = _vtk.vtkImageFlip() flip.SetInputDataObject(image) flip.SetFilteredAxis(1) # flip y axis flip.Update() texture.SetInputDataObject(i, flip.GetOutput()) return texture
duplex.py
import gc
import multiprocessing
import os
import random
import sys
import threading
import unittest

from buffered_pipe import Generic_Pipe, Static_Pipe

CTX = ["fork", "spawn", "forkserver"]
CTX = [multiprocessing.get_context(I) for I in CTX]
TYPE = ["Generic_Pipe", "Static_Pipe"]
PIPE = [lambda: Generic_Pipe(1024, 64, duplex=True), lambda: Static_Pipe(64, 4, duplex=True)]
DATA = [lambda: os.urandom(random.randrange(512)), lambda: os.urandom(64)]


class test_pipe_alive:
    @staticmethod
    def send_fn(barrier, pipe, data, result):
        barrier[0].wait()
        try:
            pipe.register()
        except Exception as e:
            result.put(e)
            barrier[1].wait()
            barrier[2].wait()
            return
        try:
            barrier[1].wait()
            barrier[2].wait()
            list(map(pipe.send, data))
        except Exception as e:
            result.put(e)
            return
        result.put(None)

    @staticmethod
    def recv_fn(barrier, pipe, data, result):
        barrier[0].wait()
        try:
            pipe.register()
        except Exception as e:
            result.put(e)
            barrier[1].wait()
            barrier[2].wait()
            return
        try:
            barrier[1].wait()
            barrier[2].wait()
            for I in data:
                assert pipe.recv() == I
        except Exception as e:
            result.put(e)
            return
        result.put(None)

    def __init__(self, pipe_1, pipe_2, data1, data2, ctx, lazy_start):
        self.barrier = [ctx.Barrier(5) for _ in range(3)]
        self.Q_send = ctx.Queue()
        self.Q_recv = ctx.Queue()
        self.Ps = [
            ctx.Process(target=test_pipe_alive.send_fn, args=(self.barrier, pipe_1, data1, self.Q_send)),
            ctx.Process(target=test_pipe_alive.recv_fn, args=(self.barrier, pipe_2, data1, self.Q_recv)),
            ctx.Process(target=test_pipe_alive.send_fn, args=(self.barrier, pipe_2, data2, self.Q_send)),
            ctx.Process(target=test_pipe_alive.recv_fn, args=(self.barrier, pipe_1, data2, self.Q_recv)),
        ]
        self.started = False
        self.barrier[0].resolved = False
        if not lazy_start:
            self.started = True
            for P in self.Ps:
                P.start()

    def resolve_register(self):
        if not self.started:
            self.started = True
            for P in self.Ps:
                P.start()
        self.barrier[0].wait()
        self.barrier[0].resolved = True
        self.barrier[1].wait()

    def execute(self):
        if not self.started:
            self.started = True
            for P in self.Ps:
                P.start()
        if not self.barrier[0].resolved:
            self.barrier[0].wait()
            self.barrier[1].wait()
        self.barrier[2].wait()
        result = {"send": self.Q_send.get(), "recv": self.Q_recv.get()}
        for P in self.Ps:
            P.join()
        return result


class Type_0(unittest.TestCase):
    # risk case
    # Main -> recv
    # main send / main delete / recv
    def test_0(self):
        # P1 P2 register
        # P1 P2 communicate
        gc.collect()
        for Pipe_gen, Data_gen, test_type in zip(PIPE, DATA, TYPE):
            for ctx in CTX:
                for _ in range(10):
                    pipe_r, pipe_w = Pipe_gen()
                    datas = [Data_gen() for _ in range(1000)], [Data_gen() for _ in range(1000)]
                    tester = test_pipe_alive(pipe_r, pipe_w, *datas, ctx, lazy_start=False)
                    tester.resolve_register()
                    self.assertDictEqual(tester.execute(), {"send": None, "recv": None})
        gc.collect()

    def test_1(self):
        # P1 P2 register
        # main delete & gc
        # P1 P2 communicate
        gc.collect()
        for delete_r in [True, False]:
            for delete_w in [True, False]:
                for Pipe_gen, Data_gen, test_type in zip(PIPE, DATA, TYPE):
                    for ctx in CTX:
                        for _ in range(10):
                            pipe_r, pipe_w = Pipe_gen()
                            datas = [Data_gen() for _ in range(1000)], [Data_gen() for _ in range(1000)]
                            tester = test_pipe_alive(pipe_r, pipe_w, *datas, ctx, lazy_start=False)
                            tester.resolve_register()
                            if delete_r:
                                del pipe_r
                            if delete_w:
                                del pipe_w
                            gc.collect()
                            result = tester.execute()
                            str_result = {K: str(V) for K, V in result.items()}
                            self.assertDictEqual(
                                result,
                                {"send": None, "recv": None},
                                msg=f"fail on ctx = {type(ctx).__name__} / {test_type}{['', ' / delete_r'][delete_r]}{['', ' / delete_w'][delete_w]}\n detail = {str_result}",
                            )
        gc.collect()

    def test_2(self):
        # main delete & gc
        # P1 P2 register
        # P1 P2 communicate
        gc.collect()
        for delete_r in [True, False]:
            for delete_w in [True, False]:
                for Pipe_gen, Data_gen, test_type in zip(PIPE, DATA, TYPE):
                    for ctx in CTX:
                        for _ in range(10):
                            pipe_r, pipe_w = Pipe_gen()
                            datas = [Data_gen() for _ in range(1000)], [Data_gen() for _ in range(1000)]
                            tester = test_pipe_alive(pipe_r, pipe_w, *datas, ctx, lazy_start=False)
                            if delete_r:
                                del pipe_r
                            if delete_w:
                                del pipe_w
                            gc.collect()
                            tester.resolve_register()
                            result = tester.execute()
                            str_result = {K: str(V) for K, V in result.items()}
                            if delete_r and delete_w and ctx != multiprocessing.get_context("fork"):
                                self.assertNotEqual(
                                    result,
                                    {"send": None, "recv": None},
                                    msg=f"fail on ctx = {type(ctx).__name__} / {test_type}{['', ' / delete_r'][delete_r]}{['', ' / delete_w'][delete_w]}\n detail = {str_result}",
                                )
                            else:
                                self.assertDictEqual(
                                    result,
                                    {"send": None, "recv": None},
                                    msg=f"fail on ctx = {type(ctx).__name__} / {test_type}{['', ' / delete_r'][delete_r]}{['', ' / delete_w'][delete_w]}\n detail = {str_result}",
                                )
        gc.collect()


if __name__ == "__main__":
    path_info = __file__.split("/")
    path_info = "/".join(path_info[path_info.index("tests"):])
    print(path_info)
    unittest.main(argv=[""])
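# Hedged sketch of the endpoint API these tests exercise (inferred from the
# usage above, not from buffered_pipe documentation): the duplex constructors
# return a pair of endpoints, and a process calls register() on an endpoint
# before its first send()/recv() on that endpoint.
def echo_once(endpoint):
    endpoint.register()             # bind this end inside the worker process
    endpoint.send(endpoint.recv())  # echo a single message back

# Parent side (fork context, mirroring the tests; untested pseudo-usage):
#   end_a, end_b = Generic_Pipe(1024, 64, duplex=True)
#   p = multiprocessing.get_context("fork").Process(target=echo_once, args=(end_b,))
#   p.start()
#   end_a.register(); end_a.send(b"ping"); assert end_a.recv() == b"ping"; p.join()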
mqtt_server.py
import socket
import threading
import json

from .compat import b
from .mqtt import MQTT
from .const import (
    LOGGER,
    SHELLY_TYPES
)
from .utils import exception_log


class MQTT_connection:
    def __init__(self, mqtt, connection, client_address):
        self._mqtt_server = mqtt
        self._connection = connection
        #connection.settimeout(5)
        self._id = None
        self._client_address = client_address
        self._thread = threading.Thread(target=self._loop)
        self._thread.name = "MQTT connection"
        self._thread.daemon = True
        self._thread.start()

    def _loop(self):
        try:
            while not self._mqtt_server._root.stopped.isSet():
                try:
                    head = b(self._connection.recv(1))
                    if not head:
                        break
                    pkg_type = head[0] >> 4
                    flags = head[0] & 0xF
                    qos = (flags >> 1) & 0x3
                    # Decode the MQTT "remaining length" varint, least
                    # significant 7 bits first, bit 7 = continuation.
                    length = 0
                    for s in range(0, 4):
                        ldata = b(self._connection.recv(1))[0]
                        length += (ldata & 0x7F) << (s * 7)
                        if not ldata & 128:
                            break
                    LOGGER.debug("type=%d, flags=%d, length=%d", pkg_type, flags, length)
                    data = b(self._connection.recv(length, socket.MSG_WAITALL)) if length else []
                    if pkg_type == 1:  # connect
                        if data[0] != 0 or data[1] != 4 or data[2:6] != b'MQTT':
                            break
                        client_len = (data[10] << 8) + data[11]
                        id = data[12:12 + client_len].decode()
                        self._id = id
                        msg = b'\x20\x02\x20\x00'
                        self._connection.send(msg)
                        msg = self._mqtt_server.create_msg(self._id, 'command', 'announce')
                        self._connection.send(msg)
                    if pkg_type == 3:  # publish
                        pos = 2
                        topic_len = (data[0] << 8) + data[1]
                        topic = data[pos:pos + topic_len].decode('ASCII')
                        pos += topic_len
                        if qos > 0:
                            pos += 2
                        payload = data[pos:].decode('ASCII')
                        self._mqtt_server.receive_msg(topic, payload)
                        # if topic=='shellies/announce':
                        #     payload = json.loads(payload)
                        #     ip_addr = payload['ip']
                        #     shelly_id = payload['id']
                        #     shelly_type, device_id = shelly_id.rsplit('-',1)
                        #     device_type = self._mqtt_server._mqtt_types.get(shelly_type)
                        #     if device_type:
                        #         self._mqtt_server._root.update_block(device_id, \
                        #             device_type, ip_addr, 'MQTT-discovery', None)
                        # else:
                        #     topics = topic.split('/')
                        #     shelly_id = topics[1]
                        #     shelly_type, device_id = shelly_id.rsplit('-',1)
                        #     device_type = self._mqtt_server._mqtt_types.get(shelly_type)
                        #     self._mqtt_server._root.update_block(device_id, \
                        #         device_type, None, 'MQTT-data', None, True)
                    if pkg_type == 12:  # ping
                        msg = b'\xD0\x00'
                        self._connection.send(msg)
                except socket.timeout:
                    pass
                except Exception as ex:
                    exception_log(ex, "Error receiving MQTT message")
                    break
        finally:
            # Clean up
            try:
                self._connection.close()
            except:
                pass
            try:
                self._mqtt_server._connections.remove(self)
            except:
                pass

    def send(self, data):
        try:
            self._connection.send(data)
        except:
            LOGGER.exception("Error sending MQTT")


class MQTT_server(MQTT):
    def __init__(self, root):
        super(MQTT_server, self).__init__(root, "Server")
        self._thread = threading.Thread(target=self._loop)
        self._thread.name = "MQTT"
        self._thread.daemon = True
        self._socket = None
        self._connections = []

    def start(self):
        if self._root.mqtt_port > 0:
            self._init_socket()
            self._thread.start()

    def _init_socket(self):
        # Create a TCP/IP socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind((self._root.bind_ip, self._root.mqtt_port))
        sock.listen(1)
        self._socket = sock

    def _loop(self):
        while not self._root.stopped.isSet():
            try:
                # Wait for a connection
                connection, client_address = self._socket.accept()
                conn = MQTT_connection(self, connection, client_address)
                self._connections.append(conn)
            except:
                LOGGER.exception("Error connect MQTT")

    def close(self):
        if self._socket:
            self._socket.close()

    def _add_len(self, data):
        # Prepend the MQTT "remaining length" varint: least significant
        # 7 bits first, bit 7 set on every byte that has a successor.
        # (The previous version prepended the bytes most-significant-first,
        # which the reader in _loop above could not decode for lengths > 127.)
        length = len(data)
        encoded = bytearray()
        while True:
            byte = length & 0x7F
            length >>= 7
            if length > 0:
                byte |= 0x80
            encoded.append(byte)
            if length == 0:
                break
        for byte in reversed(encoded):
            data.insert(0, byte)

    def create_msg(self, name, topic, payload):
        t = "shellies/" + name + "/" + topic
        data = bytearray(t, 'cp1252')
        data.insert(0, len(t) >> 8)
        data.insert(1, len(t) & 0xFF)
        data.extend(bytearray(payload, 'cp1252'))
        self._add_len(data)
        data.insert(0, 0x30)
        return data

    def send(self, name, topic, payload):
        data = self.create_msg(name, topic, payload)
        for conn in self._connections:
            if name == conn._id:
                conn.send(data)
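# Illustrative round-trip note (not part of the module): the MQTT "remaining
# length" field written by _add_len and parsed in _loop is a base-128 varint,
# least significant 7 bits first, bit 7 set while more bytes follow. The
# decoder below mirrors the loop in MQTT_connection._loop.
def decode_remaining_length(raw):
    length = 0
    for shift, byte in enumerate(raw):
        length += (byte & 0x7F) << (shift * 7)
        if not byte & 0x80:
            break
    return length

assert decode_remaining_length(bytes([0x41])) == 65          # single byte
assert decode_remaining_length(bytes([0xC1, 0x02])) == 321   # 65 + 2*128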
identi.py
#!/usr/bin/env python3
# zeroex00.com
# rfc1413
import argparse
import socket
import sys
import threading

master_results = []
master_banners = {}
master_errors = []


def main(args):
    if not args.query_port and not args.all_ports:
        print("[!] you must specify at least one port or -a")
        exit(2)
    hostname = clean_host(args.host)
    ip_addr = resolve_host(hostname)
    # if not check_ident_port(args.host, args.port, ip_addr):
    #     print("[!] Exiting...")
    #     exit(1)
    if args.all_ports:
        query_ports = list(map(str, range(1, 65536)))
        q_string = "1-65535"
    else:
        query_ports = args.query_port
        q_string = " ".join(query_ports)
    print(
        "[+] starting scan on {0} ({1}) {2} for connections to {3}".format(
            hostname, ip_addr, args.port, q_string
        )
    )
    try:
        do_threaded_work(args.host, args.port, query_ports, verbose=args.verbose)
    except KeyboardInterrupt:
        print("Interrupted! Printing results:")
        print_results(suppress=True, verbose=args.verbose)
        print("[!] Errors suppressed on interrupt!")
        exit(1)
    if args.all_ports:
        print_results(suppress=True, verbose=args.verbose)
        print("[!] Errors suppressed on full scan!")
    else:
        print_results(verbose=args.verbose)
    exit(0)


def parse_args(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument("host", help="host to scan")
    parser.add_argument(
        "-q",
        "--query-port",
        nargs="+",
        help="port(s) which the scan will query (ex: 22 or 21 22 23)",
    )
    parser.add_argument(
        "-p",
        "--port",
        default="113",
        type=int,
        help="port IDENT service is listening on (default: 113)",
    )
    parser.add_argument(
        "-a", "--all-ports", action="store_true", help="queries ALL ports!"
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="increase verbosity - v: shows full success responses; vv: shows all open port responses",
    )
    return parser.parse_args(argv)


def clean_host(host):
    if host.startswith("http://"):
        tmp_host = host[7:]
    elif host.startswith("https://"):
        tmp_host = host[8:]
    else:
        tmp_host = host
    return tmp_host


def resolve_host(host):
    try:
        ip = socket.gethostbyname(host)
    except socket.error:
        return "?.?.?.?"
    return ip


def check_ident_port(host, port, ip):
    print("[+] Checking if {0} ({1}) is listening on port: {2}".format(host, ip, port))
    try:
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.settimeout(5)
        client.connect((host, port))
    except socket.error:
        print("[!] {0} ({1}) is not listening on port: {2}!".format(host, ip, port))
        return False
    except OverflowError:
        print("[!] Invalid port!: {0}".format(port))
        return False
    client.close()
    return True


def enum_port(host, port, query_port, verbose=0):
    try:
        client1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client1.connect((host, query_port))
        local_port = client1.getsockname()[1]
    except socket.error:
        master_errors.append("{0:>5}: connection refused".format(query_port))
        return
    except OverflowError:
        master_errors.append("{0:>5}: invalid port".format(query_port))
        return
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    try:
        # The IDENT query and reply are text; encode/decode explicitly,
        # since Python 3 sockets send and receive bytes.
        client.send("{0},{1}\x0d\x0a".format(query_port, local_port).encode())
        results = client.recv(4096).decode(errors="replace")
        client1.settimeout(1)
        client1.send(b"\x0d\x0a")
        try:
            banner = client1.recv(4096).decode(errors="replace").strip()
        except socket.error:
            banner = ""
    except Exception:
        master_errors.append("{0:>5}: e".format(query_port))
        client1.close()
        client.close()
        return
    if verbose > 1:
        master_results.append(results.strip())
        master_banners[str(query_port)] = str(banner)
    elif ": USERID :" in results:
        master_results.append(results.strip())
        master_banners[str(query_port)] = str(banner)
    client1.close()
    client.close()


def tqdm(iterable):
    def report(i):
        print(f"{i+1:>{formatter}}/{total}", file=sys.stderr, end="\r")

    total = len(iterable)
    formatter = len(str(total))
    for i, el in enumerate(iterable):
        yield el
        report(i)


def do_threaded_work(host, port, q_ports, verbose=0):
    threads = []
    for i in tqdm(q_ports):
        thread = threading.Thread(target=enum_port, args=(host, port, int(i), verbose))
        threads.append(thread)
        thread.start()
    for thread in threads:
        thread.join()


def print_results(suppress=False, verbose=0):
    print("[*] Results:")
    if verbose > 0:
        print("\t(VERBOSE: Raw responses || Banners)")
    elif verbose == 0:
        print("\t{0:>5} {1:<20} {2}".format("Port", "Username", "Banner"))
        print("\t{0:>5} {1:<20} {2}".format("----", "--------", "------"))
    for each_result in master_results:
        tmp_result = each_result.split(":")
        # ports, USERID, UNIX, username
        result_port = str(tmp_result[0].split(",")[0]).strip()
        result_username = tmp_result[3]
        result_banner = master_banners.get(result_port, "")
        if verbose > 0:
            print("\t{0} || {1}".format(each_result, result_banner))
        else:
            print(
                "\t{0:>5}: {1:<20} {2}".format(
                    result_port, result_username, result_banner
                )
            )
    if suppress:
        return
    print("[!] Errors:")
    for each_result in master_errors:
        print("\t{0}".format(each_result))
    if len(master_results) == 0 and len(master_errors) == 0:
        print(
            (
                "[+] A lack of results AND errors could mean that the specified IDENT port is not actually running the "
                "IDENT service"
            )
        )


if __name__ == "__main__":
    main(parse_args(sys.argv[1:]))
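# Illustrative sketch (not part of the script) of the raw RFC 1413 exchange
# enum_port performs: the client sends "<server-port>,<client-port>\r\n" and a
# successful reply carries four colon-separated fields ending in the username.
query = "{0},{1}\x0d\x0a".format(22, 51234).encode()
assert query == b"22,51234\r\n"

reply = "22, 51234 : USERID : UNIX : alice"
fields = reply.split(":")
assert fields[1].strip() == "USERID"
assert fields[3].strip() == "alice"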
testTreeViewerInterface.py
import director
import os
import subprocess
import json
import threading
import time
import math
import sys
import numpy as np
import lcm

import robotlocomotion as lcmrl


def comms_msg(timestamp, data):
    msg = lcmrl.viewer2_comms_t()
    msg.format = "treeviewer_json"
    msg.format_version_major = 1
    msg.format_version_minor = 0
    msg.data = bytearray(json.dumps(data), encoding='utf-8')
    msg.num_bytes = len(msg.data)
    return msg


class Visualizer:
    def __init__(self, geometries={}):
        self.geometries = {}
        self.poses = {}
        self.queue = {"setgeometry": [], "settransform": [], "delete": []}
        for (path, geom) in list(geometries.items()):
            self.setgeometry(path, geom)
        self.lcm = lcm.LCM()
        self.lcm.subscribe("DIRECTOR_TREE_VIEWER_RESPONSE", self.onResponse)
        self.listener = threading.Thread(target=self.listen)
        self.listener.daemon = True
        self.listener.start()

    def listen(self):
        while True:
            self.lcm.handle_timeout(10)

    def publish(self):
        timestamp = 0
        data = {
            "timestamp": timestamp,
            "delete": self.queue["delete"],
            "setgeometry": self.queue["setgeometry"],
            "settransform": self.queue["settransform"]
        }
        msg = comms_msg(timestamp, data)
        self.lcm.publish("DIRECTOR_TREE_VIEWER_REQUEST", msg.encode())
        self.queue["setgeometry"] = []
        self.queue["delete"] = []
        self.queue["settransform"] = []

    def setgeometry(self, path, geometry):
        self.geometries[path] = geometry
        self.queue["setgeometry"].append({
            "path": path.split("/"),
            "geometry": geometry
        })

    def settransform(self, path, pose):
        self.poses[path] = pose
        self.queue["settransform"].append({
            "path": path.split("/"),
            "transform": pose
        })

    def delete(self, path):
        if path in self.poses:
            del self.poses[path]
        if path in self.geometries:
            del self.geometries[path]
        self.queue["delete"].append({
            "path": path.split("/")
        })

    def onResponse(self, channel, raw_data):
        msg = lcmrl.viewer2_comms_t.decode(raw_data)
        response = json.loads(msg.data.decode())
        if response["status"] == 0:
            print("ok")
        elif response["status"] == 1:
            for path, geom in list(self.geometries.items()):
                self.setgeometry(path, geom)
            for path, pose in list(self.poses.items()):
                self.settransform(path, pose)
        else:
            print("unhandled:", response)


if __name__ == '__main__':
    # We'll open the visualizer by spawning it as a subprocess. See
    # testDrakeVisualizer.py for an example of how to spawn it within Python
    # instead.
    vis_binary = os.path.join(os.path.dirname(sys.executable),
                              "drake-visualizer")

    # The viewer will take some time to load before it is ready to receive
    # messages, so we'll wait until it sends its first status message.
    print("waiting for viewer to initialize")
    lc = lcm.LCM()
    lc.subscribe("DIRECTOR_TREE_VIEWER_RESPONSE", lambda c, d: None)
    vis_process = subprocess.Popen([vis_binary, '--testing', '--interactive'])

    # Wait for one LCM message to be received.
    lc.handle()

    geometries = {
        "robot1/link1/box1": {
            "type": "box",
            "color": [0, 1, 0, 0.5],
            "lengths": [1, 1, 1]
        },
        "robot1/link1/box2": {
            "type": "box",
            "color": [0, 0, 1, 0.5],
            "lengths": [1, 1, 1]
        },
        "robot1/link1/points": {
            "type": "pointcloud",
            "points": [[0, 0, 2 + x / 100.] for x in range(100)],
            "channels": {
                "rgb": [[x / 100., 1 - x / 100., x / 100.] for x in range(100)]
            }
        },
        "robot1/link1/planar lidar": {
            "type": "planar_lidar",
            "angle_start": -np.pi / 2,
            "angle_step": np.pi / 100,
            "ranges": [1 for i in range(100)],
            "channels": {
                "intensity": [i / 100. for i in range(100)]
            }
        },
        "triads/triad1": {
            "type": "triad"
        }
    }

    vis = Visualizer(geometries)
    vis.settransform("robot1/link1/box2",
                     {"translation": [1, 0, 0], "quaternion": [1, 0, 0, 0]})
    vis.settransform("robot1/link1/points",
                     {"translation": [0, 1, 0], "quaternion": [1, 0, 0, 0]})
    vis.settransform("robot1/link1/planar lidar",
                     {"translation": [0, 2, 0], "quaternion": [1, 0, 0, 0]})

    for j in range(2):
        for i in range(1000):
            x1 = math.sin(math.pi * 2 * i / 1000.0)
            pose = {
                "translation": [x1, 0, 0],
                "quaternion": [1, 0, 0, 0]
            }
            vis.settransform("robot1/link1", pose)
            x2 = math.sin(math.pi * 2 * i / 500.0)
            pose = {
                "translation": [x2, 0, 0],
                "quaternion": [1, 0, 0, 0]
            }
            vis.settransform("robot1/link1/box1", pose)
            vis.publish()
            time.sleep(0.001)
    vis_process.terminate()
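# Illustrative sketch (values hypothetical) of the request JSON that publish()
# serializes into a viewer2_comms_t message: three command lists keyed by
# operation, each command addressing a tree path split on '/'.
example_request = {
    "timestamp": 0,
    "delete": [],
    "setgeometry": [{
        "path": ["robot1", "link1", "box1"],
        "geometry": {"type": "box", "color": [0, 1, 0, 0.5], "lengths": [1, 1, 1]},
    }],
    "settransform": [{
        "path": ["robot1", "link1"],
        "transform": {"translation": [0, 0, 1], "quaternion": [1, 0, 0, 0]},
    }],
}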
test_socket.py
import unittest from test import support from test.support import os_helper from test.support import socket_helper from test.support import threading_helper import errno import io import itertools import socket import select import tempfile import time import traceback import queue import sys import os import platform import array import contextlib from weakref import proxy import signal import math import pickle import struct import random import shutil import string import _thread as thread import threading try: import multiprocessing except ImportError: multiprocessing = False try: import fcntl except ImportError: fcntl = None HOST = socket_helper.HOST # test unicode string and carriage return MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') VSOCKPORT = 1234 AIX = platform.system() == "AIX" try: import _socket except ImportError: _socket = None def get_cid(): if fcntl is None: return None if not hasattr(socket, 'IOCTL_VM_SOCKETS_GET_LOCAL_CID'): return None try: with open("/dev/vsock", "rb") as f: r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, " ") except OSError: return None else: return struct.unpack("I", r)[0] def _have_socket_can(): """Check whether CAN sockets are supported on this host.""" try: s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) except (AttributeError, OSError): return False else: s.close() return True def _have_socket_can_isotp(): """Check whether CAN ISOTP sockets are supported on this host.""" try: s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) except (AttributeError, OSError): return False else: s.close() return True def _have_socket_can_j1939(): """Check whether CAN J1939 sockets are supported on this host.""" try: s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939) except (AttributeError, OSError): return False else: s.close() return True def _have_socket_rds(): """Check whether RDS sockets are supported on this host.""" try: s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) except (AttributeError, OSError): return False else: s.close() return True def _have_socket_alg(): """Check whether AF_ALG sockets are supported on this host.""" try: s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0) except (AttributeError, OSError): return False else: s.close() return True def _have_socket_qipcrtr(): """Check whether AF_QIPCRTR sockets are supported on this host.""" try: s = socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM, 0) except (AttributeError, OSError): return False else: s.close() return True def _have_socket_vsock(): """Check whether AF_VSOCK sockets are supported on this host.""" ret = get_cid() is not None return ret def _have_socket_bluetooth(): """Check whether AF_BLUETOOTH sockets are supported on this host.""" try: # RFCOMM is supported by all platforms with bluetooth support. Windows # does not support omitting the protocol. 
s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM) except (AttributeError, OSError): return False else: s.close() return True @contextlib.contextmanager def socket_setdefaulttimeout(timeout): old_timeout = socket.getdefaulttimeout() try: socket.setdefaulttimeout(timeout) yield finally: socket.setdefaulttimeout(old_timeout) HAVE_SOCKET_CAN = _have_socket_can() HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp() HAVE_SOCKET_CAN_J1939 = _have_socket_can_j1939() HAVE_SOCKET_RDS = _have_socket_rds() HAVE_SOCKET_ALG = _have_socket_alg() HAVE_SOCKET_QIPCRTR = _have_socket_qipcrtr() HAVE_SOCKET_VSOCK = _have_socket_vsock() HAVE_SOCKET_UDPLITE = hasattr(socket, "IPPROTO_UDPLITE") HAVE_SOCKET_BLUETOOTH = _have_socket_bluetooth() # Size in bytes of the int type SIZEOF_INT = array.array("i").itemsize class SocketTCPTest(unittest.TestCase): def setUp(self): self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.port = socket_helper.bind_port(self.serv) self.serv.listen() def tearDown(self): self.serv.close() self.serv = None class SocketUDPTest(unittest.TestCase): def setUp(self): self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.port = socket_helper.bind_port(self.serv) def tearDown(self): self.serv.close() self.serv = None class SocketUDPLITETest(SocketUDPTest): def setUp(self): self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE) self.port = socket_helper.bind_port(self.serv) class ThreadSafeCleanupTestCase(unittest.TestCase): """Subclass of unittest.TestCase with thread-safe cleanup methods. This subclass protects the addCleanup() and doCleanups() methods with a recursive lock. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._cleanup_lock = threading.RLock() def addCleanup(self, *args, **kwargs): with self._cleanup_lock: return super().addCleanup(*args, **kwargs) def doCleanups(self, *args, **kwargs): with self._cleanup_lock: return super().doCleanups(*args, **kwargs) class SocketCANTest(unittest.TestCase): """To be able to run this test, a `vcan0` CAN interface can be created with the following commands: # modprobe vcan # ip link add dev vcan0 type vcan # ip link set up vcan0 """ interface = 'vcan0' bufsize = 128 """The CAN frame structure is defined in <linux/can.h>: struct can_frame { canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */ __u8 can_dlc; /* data length code: 0 .. 8 */ __u8 data[8] __attribute__((aligned(8))); }; """ can_frame_fmt = "=IB3x8s" can_frame_size = struct.calcsize(can_frame_fmt) """The Broadcast Management Command frame structure is defined in <linux/can/bcm.h>: struct bcm_msg_head { __u32 opcode; __u32 flags; __u32 count; struct timeval ival1, ival2; canid_t can_id; __u32 nframes; struct can_frame frames[0]; } `bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see `struct can_frame` definition). Must use native not standard types for packing. 
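    As a concrete illustration of the padding applied just below (relying
    only on struct's documented behaviour): "@3I4l2I" uses native sizes,
    so on a 32-bit platform it packs to 36 bytes and four "x" pad bytes
    are appended to round the header up to a multiple of eight:

        import struct
        fmt = "@3I4l2I"
        fmt += "x" * (struct.calcsize(fmt) % 8)   # adds "xxxx" when size % 8 == 4
        assert struct.calcsize(fmt) % 8 == 0

    On 64-bit platforms the native size is already 56, so nothing is added.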
""" bcm_cmd_msg_fmt = "@3I4l2I" bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8) def setUp(self): self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) self.addCleanup(self.s.close) try: self.s.bind((self.interface,)) except OSError: self.skipTest('network interface `%s` does not exist' % self.interface) class SocketRDSTest(unittest.TestCase): """To be able to run this test, the `rds` kernel module must be loaded: # modprobe rds """ bufsize = 8192 def setUp(self): self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) self.addCleanup(self.serv.close) try: self.port = socket_helper.bind_port(self.serv) except OSError: self.skipTest('unable to bind RDS socket') class ThreadableTest: """Threadable Test class The ThreadableTest class makes it easy to create a threaded client/server pair from an existing unit test. To create a new threaded class from an existing unit test, use multiple inheritance: class NewClass (OldClass, ThreadableTest): pass This class defines two new fixture functions with obvious purposes for overriding: clientSetUp () clientTearDown () Any new test functions within the class must then define tests in pairs, where the test name is preceded with a '_' to indicate the client portion of the test. Ex: def testFoo(self): # Server portion def _testFoo(self): # Client portion Any exceptions raised by the clients during their tests are caught and transferred to the main thread to alert the testing framework. Note, the server setup function cannot call any blocking functions that rely on the client thread during setup, unless serverExplicitReady() is called just before the blocking call (such as in setting up a client/server connection and performing the accept() in setUp(). """ def __init__(self): # Swap the true setup function self.__setUp = self.setUp self.__tearDown = self.tearDown self.setUp = self._setUp self.tearDown = self._tearDown def serverExplicitReady(self): """This method allows the server to explicitly indicate that it wants the client thread to proceed. This is useful if the server is about to execute a blocking routine that is dependent upon the client thread during its setup routine.""" self.server_ready.set() def _setUp(self): self.wait_threads = threading_helper.wait_threads_exit() self.wait_threads.__enter__() self.server_ready = threading.Event() self.client_ready = threading.Event() self.done = threading.Event() self.queue = queue.Queue(1) self.server_crashed = False # Do some munging to start the client test. 
methodname = self.id() i = methodname.rfind('.') methodname = methodname[i+1:] test_method = getattr(self, '_' + methodname) self.client_thread = thread.start_new_thread( self.clientRun, (test_method,)) try: self.__setUp() except: self.server_crashed = True raise finally: self.server_ready.set() self.client_ready.wait() def _tearDown(self): self.__tearDown() self.done.wait() self.wait_threads.__exit__(None, None, None) if self.queue.qsize(): exc = self.queue.get() raise exc def clientRun(self, test_func): self.server_ready.wait() try: self.clientSetUp() except BaseException as e: self.queue.put(e) self.clientTearDown() return finally: self.client_ready.set() if self.server_crashed: self.clientTearDown() return if not hasattr(test_func, '__call__'): raise TypeError("test_func must be a callable function") try: test_func() except BaseException as e: self.queue.put(e) finally: self.clientTearDown() def clientSetUp(self): raise NotImplementedError("clientSetUp must be implemented.") def clientTearDown(self): self.done.set() thread.exit() class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest): def __init__(self, methodName='runTest'): SocketTCPTest.__init__(self, methodName=methodName) ThreadableTest.__init__(self) def clientSetUp(self): self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def clientTearDown(self): self.cli.close() self.cli = None ThreadableTest.clientTearDown(self) class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest): def __init__(self, methodName='runTest'): SocketUDPTest.__init__(self, methodName=methodName) ThreadableTest.__init__(self) def clientSetUp(self): self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) def clientTearDown(self): self.cli.close() self.cli = None ThreadableTest.clientTearDown(self) @unittest.skipUnless(HAVE_SOCKET_UDPLITE, 'UDPLITE sockets required for this test.') class ThreadedUDPLITESocketTest(SocketUDPLITETest, ThreadableTest): def __init__(self, methodName='runTest'): SocketUDPLITETest.__init__(self, methodName=methodName) ThreadableTest.__init__(self) def clientSetUp(self): self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE) def clientTearDown(self): self.cli.close() self.cli = None ThreadableTest.clientTearDown(self) class ThreadedCANSocketTest(SocketCANTest, ThreadableTest): def __init__(self, methodName='runTest'): SocketCANTest.__init__(self, methodName=methodName) ThreadableTest.__init__(self) def clientSetUp(self): self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) try: self.cli.bind((self.interface,)) except OSError: # skipTest should not be called here, and will be called in the # server instead pass def clientTearDown(self): self.cli.close() self.cli = None ThreadableTest.clientTearDown(self) class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest): def __init__(self, methodName='runTest'): SocketRDSTest.__init__(self, methodName=methodName) ThreadableTest.__init__(self) def clientSetUp(self): self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) try: # RDS sockets must be bound explicitly to send or receive data self.cli.bind((HOST, 0)) self.cli_addr = self.cli.getsockname() except OSError: # skipTest should not be called here, and will be called in the # server instead pass def clientTearDown(self): self.cli.close() self.cli = None ThreadableTest.clientTearDown(self) @unittest.skipIf(fcntl is None, "need fcntl") @unittest.skipUnless(HAVE_SOCKET_VSOCK, 'VSOCK sockets required for this test.') @unittest.skipUnless(get_cid() != 2, "This test can 
only be run on a virtual guest.") class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest): def __init__(self, methodName='runTest'): unittest.TestCase.__init__(self, methodName=methodName) ThreadableTest.__init__(self) def setUp(self): self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) self.addCleanup(self.serv.close) self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT)) self.serv.listen() self.serverExplicitReady() self.conn, self.connaddr = self.serv.accept() self.addCleanup(self.conn.close) def clientSetUp(self): time.sleep(0.1) self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) self.addCleanup(self.cli.close) cid = get_cid() self.cli.connect((cid, VSOCKPORT)) def testStream(self): msg = self.conn.recv(1024) self.assertEqual(msg, MSG) def _testStream(self): self.cli.send(MSG) self.cli.close() class SocketConnectedTest(ThreadedTCPSocketTest): """Socket tests for client-server connection. self.cli_conn is a client socket connected to the server. The setUp() method guarantees that it is connected to the server. """ def __init__(self, methodName='runTest'): ThreadedTCPSocketTest.__init__(self, methodName=methodName) def setUp(self): ThreadedTCPSocketTest.setUp(self) # Indicate explicitly we're ready for the client thread to # proceed and then perform the blocking call to accept self.serverExplicitReady() conn, addr = self.serv.accept() self.cli_conn = conn def tearDown(self): self.cli_conn.close() self.cli_conn = None ThreadedTCPSocketTest.tearDown(self) def clientSetUp(self): ThreadedTCPSocketTest.clientSetUp(self) self.cli.connect((HOST, self.port)) self.serv_conn = self.cli def clientTearDown(self): self.serv_conn.close() self.serv_conn = None ThreadedTCPSocketTest.clientTearDown(self) class SocketPairTest(unittest.TestCase, ThreadableTest): def __init__(self, methodName='runTest'): unittest.TestCase.__init__(self, methodName=methodName) ThreadableTest.__init__(self) def setUp(self): self.serv, self.cli = socket.socketpair() def tearDown(self): self.serv.close() self.serv = None def clientSetUp(self): pass def clientTearDown(self): self.cli.close() self.cli = None ThreadableTest.clientTearDown(self) # The following classes are used by the sendmsg()/recvmsg() tests. # Combining, for instance, ConnectedStreamTestMixin and TCPTestBase # gives a drop-in replacement for SocketConnectedTest, but different # address families can be used, and the attributes serv_addr and # cli_addr will be set to the addresses of the endpoints. class SocketTestBase(unittest.TestCase): """A base class for socket tests. Subclasses must provide methods newSocket() to return a new socket and bindSock(sock) to bind it to an unused address. Creates a socket self.serv and sets self.serv_addr to its address. """ def setUp(self): self.serv = self.newSocket() self.bindServer() def bindServer(self): """Bind server socket and set self.serv_addr to its address.""" self.bindSock(self.serv) self.serv_addr = self.serv.getsockname() def tearDown(self): self.serv.close() self.serv = None class SocketListeningTestMixin(SocketTestBase): """Mixin to listen on the server socket.""" def setUp(self): super().setUp() self.serv.listen() class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase, ThreadableTest): """Mixin to add client socket and allow client/server tests. Client socket is self.cli and its address is self.cli_addr. See ThreadableTest for usage information. 
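    A minimal sketch of the intended combination (the class name is
    hypothetical; the mixin and base are defined later in this module):

        class EchoTCPTest(ConnectedStreamTestMixin, TCPTestBase):
            def testEcho(self):            # server half
                self.assertEqual(self.cli_conn.recv(1024), MSG)

            def _testEcho(self):           # client half
                self.serv_conn.send(MSG)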
""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) ThreadableTest.__init__(self) def clientSetUp(self): self.cli = self.newClientSocket() self.bindClient() def newClientSocket(self): """Return a new socket for use as client.""" return self.newSocket() def bindClient(self): """Bind client socket and set self.cli_addr to its address.""" self.bindSock(self.cli) self.cli_addr = self.cli.getsockname() def clientTearDown(self): self.cli.close() self.cli = None ThreadableTest.clientTearDown(self) class ConnectedStreamTestMixin(SocketListeningTestMixin, ThreadedSocketTestMixin): """Mixin to allow client/server stream tests with connected client. Server's socket representing connection to client is self.cli_conn and client's connection to server is self.serv_conn. (Based on SocketConnectedTest.) """ def setUp(self): super().setUp() # Indicate explicitly we're ready for the client thread to # proceed and then perform the blocking call to accept self.serverExplicitReady() conn, addr = self.serv.accept() self.cli_conn = conn def tearDown(self): self.cli_conn.close() self.cli_conn = None super().tearDown() def clientSetUp(self): super().clientSetUp() self.cli.connect(self.serv_addr) self.serv_conn = self.cli def clientTearDown(self): try: self.serv_conn.close() self.serv_conn = None except AttributeError: pass super().clientTearDown() class UnixSocketTestBase(SocketTestBase): """Base class for Unix-domain socket tests.""" # This class is used for file descriptor passing tests, so we # create the sockets in a private directory so that other users # can't send anything that might be problematic for a privileged # user running the tests. def setUp(self): self.dir_path = tempfile.mkdtemp() self.addCleanup(os.rmdir, self.dir_path) super().setUp() def bindSock(self, sock): path = tempfile.mktemp(dir=self.dir_path) socket_helper.bind_unix_socket(sock, path) self.addCleanup(os_helper.unlink, path) class UnixStreamBase(UnixSocketTestBase): """Base class for Unix-domain SOCK_STREAM tests.""" def newSocket(self): return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) class InetTestBase(SocketTestBase): """Base class for IPv4 socket tests.""" host = HOST def setUp(self): super().setUp() self.port = self.serv_addr[1] def bindSock(self, sock): socket_helper.bind_port(sock, host=self.host) class TCPTestBase(InetTestBase): """Base class for TCP-over-IPv4 tests.""" def newSocket(self): return socket.socket(socket.AF_INET, socket.SOCK_STREAM) class UDPTestBase(InetTestBase): """Base class for UDP-over-IPv4 tests.""" def newSocket(self): return socket.socket(socket.AF_INET, socket.SOCK_DGRAM) class UDPLITETestBase(InetTestBase): """Base class for UDPLITE-over-IPv4 tests.""" def newSocket(self): return socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE) class SCTPStreamBase(InetTestBase): """Base class for SCTP tests in one-to-one (SOCK_STREAM) mode.""" def newSocket(self): return socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_SCTP) class Inet6TestBase(InetTestBase): """Base class for IPv6 socket tests.""" host = socket_helper.HOSTv6 class UDP6TestBase(Inet6TestBase): """Base class for UDP-over-IPv6 tests.""" def newSocket(self): return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) class UDPLITE6TestBase(Inet6TestBase): """Base class for UDPLITE-over-IPv6 tests.""" def newSocket(self): return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE) # Test-skipping decorators for use with ThreadableTest. 
def skipWithClientIf(condition, reason): """Skip decorated test if condition is true, add client_skip decorator. If the decorated object is not a class, sets its attribute "client_skip" to a decorator which will return an empty function if the test is to be skipped, or the original function if it is not. This can be used to avoid running the client part of a skipped test when using ThreadableTest. """ def client_pass(*args, **kwargs): pass def skipdec(obj): retval = unittest.skip(reason)(obj) if not isinstance(obj, type): retval.client_skip = lambda f: client_pass return retval def noskipdec(obj): if not (isinstance(obj, type) or hasattr(obj, "client_skip")): obj.client_skip = lambda f: f return obj return skipdec if condition else noskipdec def requireAttrs(obj, *attributes): """Skip decorated test if obj is missing any of the given attributes. Sets client_skip attribute as skipWithClientIf() does. """ missing = [name for name in attributes if not hasattr(obj, name)] return skipWithClientIf( missing, "don't have " + ", ".join(name for name in missing)) def requireSocket(*args): """Skip decorated test if a socket cannot be created with given arguments. When an argument is given as a string, will use the value of that attribute of the socket module, or skip the test if it doesn't exist. Sets client_skip attribute as skipWithClientIf() does. """ err = None missing = [obj for obj in args if isinstance(obj, str) and not hasattr(socket, obj)] if missing: err = "don't have " + ", ".join(name for name in missing) else: callargs = [getattr(socket, obj) if isinstance(obj, str) else obj for obj in args] try: s = socket.socket(*callargs) except OSError as e: # XXX: check errno? err = str(e) else: s.close() return skipWithClientIf( err is not None, "can't create socket({0}): {1}".format( ", ".join(str(o) for o in args), err)) ####################################################################### ## Begin Tests class GeneralModuleTests(unittest.TestCase): def test_SocketType_is_socketobject(self): import _socket self.assertTrue(socket.SocketType is _socket.socket) s = socket.socket() self.assertIsInstance(s, socket.SocketType) s.close() def test_repr(self): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) with s: self.assertIn('fd=%i' % s.fileno(), repr(s)) self.assertIn('family=%s' % socket.AF_INET, repr(s)) self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s)) self.assertIn('proto=0', repr(s)) self.assertNotIn('raddr', repr(s)) s.bind(('127.0.0.1', 0)) self.assertIn('laddr', repr(s)) self.assertIn(str(s.getsockname()), repr(s)) self.assertIn('[closed]', repr(s)) self.assertNotIn('laddr', repr(s)) @unittest.skipUnless(_socket is not None, 'need _socket module') def test_csocket_repr(self): s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM) try: expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>' % (s.fileno(), s.family, s.type, s.proto)) self.assertEqual(repr(s), expected) finally: s.close() expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>' % (s.family, s.type, s.proto)) self.assertEqual(repr(s), expected) def test_weakref(self): with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: p = proxy(s) self.assertEqual(p.fileno(), s.fileno()) s = None try: p.fileno() except ReferenceError: pass else: self.fail('Socket proxy still exists') def testSocketError(self): # Testing socket module exceptions msg = "Error raising socket exception (%s)." 
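        # (socket.herror and socket.gaierror are subclasses of OSError, so
        # the assertRaises(OSError) checks below catch all three.)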
with self.assertRaises(OSError, msg=msg % 'OSError'): raise OSError with self.assertRaises(OSError, msg=msg % 'socket.herror'): raise socket.herror with self.assertRaises(OSError, msg=msg % 'socket.gaierror'): raise socket.gaierror def testSendtoErrors(self): # Testing that sendto doesn't mask failures. See #10169. s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.addCleanup(s.close) s.bind(('', 0)) sockname = s.getsockname() # 2 args with self.assertRaises(TypeError) as cm: s.sendto('\u2620', sockname) self.assertEqual(str(cm.exception), "a bytes-like object is required, not 'str'") with self.assertRaises(TypeError) as cm: s.sendto(5j, sockname) self.assertEqual(str(cm.exception), "a bytes-like object is required, not 'complex'") with self.assertRaises(TypeError) as cm: s.sendto(b'foo', None) self.assertIn('not NoneType',str(cm.exception)) # 3 args with self.assertRaises(TypeError) as cm: s.sendto('\u2620', 0, sockname) self.assertEqual(str(cm.exception), "a bytes-like object is required, not 'str'") with self.assertRaises(TypeError) as cm: s.sendto(5j, 0, sockname) self.assertEqual(str(cm.exception), "a bytes-like object is required, not 'complex'") with self.assertRaises(TypeError) as cm: s.sendto(b'foo', 0, None) self.assertIn('not NoneType', str(cm.exception)) with self.assertRaises(TypeError) as cm: s.sendto(b'foo', 'bar', sockname) with self.assertRaises(TypeError) as cm: s.sendto(b'foo', None, None) # wrong number of args with self.assertRaises(TypeError) as cm: s.sendto(b'foo') self.assertIn('(1 given)', str(cm.exception)) with self.assertRaises(TypeError) as cm: s.sendto(b'foo', 0, sockname, 4) self.assertIn('(4 given)', str(cm.exception)) def testCrucialConstants(self): # Testing for mission critical constants socket.AF_INET if socket.has_ipv6: socket.AF_INET6 socket.SOCK_STREAM socket.SOCK_DGRAM socket.SOCK_RAW socket.SOCK_RDM socket.SOCK_SEQPACKET socket.SOL_SOCKET socket.SO_REUSEADDR def testCrucialIpProtoConstants(self): socket.IPPROTO_TCP socket.IPPROTO_UDP if socket.has_ipv6: socket.IPPROTO_IPV6 @unittest.skipUnless(os.name == "nt", "Windows specific") def testWindowsSpecificConstants(self): socket.IPPROTO_ICLFXBM socket.IPPROTO_ST socket.IPPROTO_CBT socket.IPPROTO_IGP socket.IPPROTO_RDP socket.IPPROTO_PGM socket.IPPROTO_L2TP socket.IPPROTO_SCTP @unittest.skipUnless(sys.platform == 'darwin', 'macOS specific test') @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test') def test3542SocketOptions(self): # Ref. 
issue #35569 and https://tools.ietf.org/html/rfc3542 opts = { 'IPV6_CHECKSUM', 'IPV6_DONTFRAG', 'IPV6_DSTOPTS', 'IPV6_HOPLIMIT', 'IPV6_HOPOPTS', 'IPV6_NEXTHOP', 'IPV6_PATHMTU', 'IPV6_PKTINFO', 'IPV6_RECVDSTOPTS', 'IPV6_RECVHOPLIMIT', 'IPV6_RECVHOPOPTS', 'IPV6_RECVPATHMTU', 'IPV6_RECVPKTINFO', 'IPV6_RECVRTHDR', 'IPV6_RECVTCLASS', 'IPV6_RTHDR', 'IPV6_RTHDRDSTOPTS', 'IPV6_RTHDR_TYPE_0', 'IPV6_TCLASS', 'IPV6_USE_MIN_MTU', } for opt in opts: self.assertTrue( hasattr(socket, opt), f"Missing RFC3542 socket option '{opt}'" ) def testHostnameRes(self): # Testing hostname resolution mechanisms hostname = socket.gethostname() try: ip = socket.gethostbyname(hostname) except OSError: # Probably name lookup wasn't set up right; skip this test self.skipTest('name lookup failure') self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.") try: hname, aliases, ipaddrs = socket.gethostbyaddr(ip) except OSError: # Probably a similar problem as above; skip this test self.skipTest('name lookup failure') all_host_names = [hostname, hname] + aliases fqhn = socket.getfqdn(ip) if not fqhn in all_host_names: self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names))) def test_host_resolution(self): for addr in [socket_helper.HOSTv4, '10.0.0.1', '255.255.255.255']: self.assertEqual(socket.gethostbyname(addr), addr) # we don't test socket_helper.HOSTv6 because there's a chance it doesn't have # a matching name entry (e.g. 'ip6-localhost') for host in [socket_helper.HOSTv4]: self.assertIn(host, socket.gethostbyaddr(host)[2]) def test_host_resolution_bad_address(self): # These are all malformed IP addresses and expected not to resolve to # any result. But some ISPs, e.g. AWS, may successfully resolve these # IPs. explanation = ( "resolving an invalid IP address did not raise OSError; " "can be caused by a broken DNS server" ) for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2', '1:1:1:1:1:1:1:1:1']: with self.assertRaises(OSError, msg=addr): socket.gethostbyname(addr) with self.assertRaises(OSError, msg=explanation): socket.gethostbyaddr(addr) @unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()") @unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()") def test_sethostname(self): oldhn = socket.gethostname() try: socket.sethostname('new') except OSError as e: if e.errno == errno.EPERM: self.skipTest("test should be run as root") else: raise try: # running test as root! 
self.assertEqual(socket.gethostname(), 'new') # Should work with bytes objects too socket.sethostname(b'bar') self.assertEqual(socket.gethostname(), 'bar') finally: socket.sethostname(oldhn) @unittest.skipUnless(hasattr(socket, 'if_nameindex'), 'socket.if_nameindex() not available.') def testInterfaceNameIndex(self): interfaces = socket.if_nameindex() for index, name in interfaces: self.assertIsInstance(index, int) self.assertIsInstance(name, str) # interface indices are non-zero integers self.assertGreater(index, 0) _index = socket.if_nametoindex(name) self.assertIsInstance(_index, int) self.assertEqual(index, _index) _name = socket.if_indextoname(index) self.assertIsInstance(_name, str) self.assertEqual(name, _name) @unittest.skipUnless(hasattr(socket, 'if_indextoname'), 'socket.if_indextoname() not available.') def testInvalidInterfaceIndexToName(self): self.assertRaises(OSError, socket.if_indextoname, 0) self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF') @unittest.skipUnless(hasattr(socket, 'if_nametoindex'), 'socket.if_nametoindex() not available.') def testInvalidInterfaceNameToIndex(self): self.assertRaises(TypeError, socket.if_nametoindex, 0) self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF') @unittest.skipUnless(hasattr(sys, 'getrefcount'), 'test needs sys.getrefcount()') def testRefCountGetNameInfo(self): # Testing reference count for getnameinfo try: # On some versions, this loses a reference orig = sys.getrefcount(__name__) socket.getnameinfo(__name__,0) except TypeError: if sys.getrefcount(__name__) != orig: self.fail("socket.getnameinfo loses a reference") def testInterpreterCrash(self): # Making sure getnameinfo doesn't crash the interpreter try: # On some versions, this crashes the interpreter. socket.getnameinfo(('x', 0, 0, 0), 0) except OSError: pass def testNtoH(self): # This just checks that htons etc. are their own inverse, # when looking at the lower 16 or 32 bits. sizes = {socket.htonl: 32, socket.ntohl: 32, socket.htons: 16, socket.ntohs: 16} for func, size in sizes.items(): mask = (1<<size) - 1 for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210): self.assertEqual(i & mask, func(func(i&mask)) & mask) swapped = func(mask) self.assertEqual(swapped & mask, mask) self.assertRaises(OverflowError, func, 1<<34) @support.cpython_only def testNtoHErrors(self): import _testcapi s_good_values = [0, 1, 2, 0xffff] l_good_values = s_good_values + [0xffffffff] l_bad_values = [-1, -2, 1<<32, 1<<1000] s_bad_values = ( l_bad_values + [_testcapi.INT_MIN-1, _testcapi.INT_MAX+1] + [1 << 16, _testcapi.INT_MAX] ) for k in s_good_values: socket.ntohs(k) socket.htons(k) for k in l_good_values: socket.ntohl(k) socket.htonl(k) for k in s_bad_values: self.assertRaises(OverflowError, socket.ntohs, k) self.assertRaises(OverflowError, socket.htons, k) for k in l_bad_values: self.assertRaises(OverflowError, socket.ntohl, k) self.assertRaises(OverflowError, socket.htonl, k) def testGetServBy(self): eq = self.assertEqual # Find one service that exists, then check all the related interfaces. # I've ordered this by protocols that have both a tcp and udp # protocol, at least for modern Linuxes. 
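        # (The loop below tries each candidate in turn and keeps the first
        # service that getservbyname() resolves for TCP; if none resolves,
        # the for-else raises OSError.)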
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd')) or sys.platform in ('linux', 'darwin')): # avoid the 'echo' service on this platform, as there is an # assumption breaking non-standard port/protocol entry services = ('daytime', 'qotd', 'domain') else: services = ('echo', 'daytime', 'domain') for service in services: try: port = socket.getservbyname(service, 'tcp') break except OSError: pass else: raise OSError # Try same call with optional protocol omitted # Issue #26936: Android getservbyname() was broken before API 23. if (not hasattr(sys, 'getandroidapilevel') or sys.getandroidapilevel() >= 23): port2 = socket.getservbyname(service) eq(port, port2) # Try udp, but don't barf if it doesn't exist try: udpport = socket.getservbyname(service, 'udp') except OSError: udpport = None else: eq(udpport, port) # Now make sure the lookup by port returns the same service name # Issue #26936: Android getservbyport() is broken. if not support.is_android: eq(socket.getservbyport(port2), service) eq(socket.getservbyport(port, 'tcp'), service) if udpport is not None: eq(socket.getservbyport(udpport, 'udp'), service) # Make sure getservbyport does not accept out of range ports. self.assertRaises(OverflowError, socket.getservbyport, -1) self.assertRaises(OverflowError, socket.getservbyport, 65536) def testDefaultTimeout(self): # Testing default timeout # The default timeout should initially be None self.assertEqual(socket.getdefaulttimeout(), None) with socket.socket() as s: self.assertEqual(s.gettimeout(), None) # Set the default timeout to 10, and see if it propagates with socket_setdefaulttimeout(10): self.assertEqual(socket.getdefaulttimeout(), 10) with socket.socket() as sock: self.assertEqual(sock.gettimeout(), 10) # Reset the default timeout to None, and see if it propagates socket.setdefaulttimeout(None) self.assertEqual(socket.getdefaulttimeout(), None) with socket.socket() as sock: self.assertEqual(sock.gettimeout(), None) # Check that setting it to an invalid value raises ValueError self.assertRaises(ValueError, socket.setdefaulttimeout, -1) # Check that setting it to an invalid type raises TypeError self.assertRaises(TypeError, socket.setdefaulttimeout, "spam") @unittest.skipUnless(hasattr(socket, 'inet_aton'), 'test needs socket.inet_aton()') def testIPv4_inet_aton_fourbytes(self): # Test that issue1008086 and issue767150 are fixed. # It must return 4 bytes. 
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0')) self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255')) @unittest.skipUnless(hasattr(socket, 'inet_pton'), 'test needs socket.inet_pton()') def testIPv4toString(self): from socket import inet_aton as f, inet_pton, AF_INET g = lambda a: inet_pton(AF_INET, a) assertInvalid = lambda func,a: self.assertRaises( (OSError, ValueError), func, a ) self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0')) self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0')) self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170')) self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4')) self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255')) # bpo-29972: inet_pton() doesn't fail on AIX if not AIX: assertInvalid(f, '0.0.0.') assertInvalid(f, '300.0.0.0') assertInvalid(f, 'a.0.0.0') assertInvalid(f, '1.2.3.4.5') assertInvalid(f, '::1') self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0')) self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0')) self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170')) self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255')) assertInvalid(g, '0.0.0.') assertInvalid(g, '300.0.0.0') assertInvalid(g, 'a.0.0.0') assertInvalid(g, '1.2.3.4.5') assertInvalid(g, '::1') @unittest.skipUnless(hasattr(socket, 'inet_pton'), 'test needs socket.inet_pton()') def testIPv6toString(self): try: from socket import inet_pton, AF_INET6, has_ipv6 if not has_ipv6: self.skipTest('IPv6 not available') except ImportError: self.skipTest('could not import needed symbols from socket') if sys.platform == "win32": try: inet_pton(AF_INET6, '::') except OSError as e: if e.winerror == 10022: self.skipTest('IPv6 might not be supported') f = lambda a: inet_pton(AF_INET6, a) assertInvalid = lambda a: self.assertRaises( (OSError, ValueError), f, a ) self.assertEqual(b'\x00' * 16, f('::')) self.assertEqual(b'\x00' * 16, f('0::0')) self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::')) self.assertEqual( b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae', f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae') ) self.assertEqual( b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02', f('ad42:abc::127:0:254:2') ) self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::')) assertInvalid('0x20::') assertInvalid(':::') assertInvalid('::0::') assertInvalid('1::abc::') assertInvalid('1::abc::def') assertInvalid('1:2:3:4:5:6') assertInvalid('1:2:3:4:5:6:') assertInvalid('1:2:3:4:5:6:7:8:0') # bpo-29972: inet_pton() doesn't fail on AIX if not AIX: assertInvalid('1:2:3:4:5:6:7:8:') self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40', f('::254.42.23.64') ) self.assertEqual( b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40', f('42::a29b:254.42.23.64') ) self.assertEqual( b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40', f('42:a8b9:0:2:ffff:a29b:254.42.23.64') ) assertInvalid('255.254.253.252') assertInvalid('1::260.2.3.0') assertInvalid('1::0.be.e.0') assertInvalid('1:2:3:4:5:6:7:1.2.3.4') assertInvalid('::1.2.3.4:0') assertInvalid('0.100.200.0:3:4:5:6:7:8') @unittest.skipUnless(hasattr(socket, 'inet_ntop'), 'test needs socket.inet_ntop()') def testStringToIPv4(self): from socket import inet_ntoa as f, inet_ntop, AF_INET g = lambda a: inet_ntop(AF_INET, a) assertInvalid = lambda func,a: self.assertRaises( (OSError, ValueError), func, a ) self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00')) self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55')) self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff')) 
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04')) assertInvalid(f, b'\x00' * 3) assertInvalid(f, b'\x00' * 5) assertInvalid(f, b'\x00' * 16) self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55'))) self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00')) self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55')) self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff')) assertInvalid(g, b'\x00' * 3) assertInvalid(g, b'\x00' * 5) assertInvalid(g, b'\x00' * 16) self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55'))) @unittest.skipUnless(hasattr(socket, 'inet_ntop'), 'test needs socket.inet_ntop()') def testStringToIPv6(self): try: from socket import inet_ntop, AF_INET6, has_ipv6 if not has_ipv6: self.skipTest('IPv6 not available') except ImportError: self.skipTest('could not import needed symbols from socket') if sys.platform == "win32": try: inet_ntop(AF_INET6, b'\x00' * 16) except OSError as e: if e.winerror == 10022: self.skipTest('IPv6 might not be supported') f = lambda a: inet_ntop(AF_INET6, a) assertInvalid = lambda a: self.assertRaises( (OSError, ValueError), f, a ) self.assertEqual('::', f(b'\x00' * 16)) self.assertEqual('::1', f(b'\x00' * 15 + b'\x01')) self.assertEqual( 'aef:b01:506:1001:ffff:9997:55:170', f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70') ) self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01'))) assertInvalid(b'\x12' * 15) assertInvalid(b'\x12' * 17) assertInvalid(b'\x12' * 4) # XXX The following don't test module-level functionality... def testSockName(self): # Testing getsockname() port = socket_helper.find_unused_port() sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.addCleanup(sock.close) sock.bind(("0.0.0.0", port)) name = sock.getsockname() # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate # it reasonable to get the host's addr in addition to 0.0.0.0. # At least for eCos. This is required for the S/390 to pass. 
try: my_ip_addr = socket.gethostbyname(socket.gethostname()) except OSError: # Probably name lookup wasn't set up right; skip this test self.skipTest('name lookup failure') self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0]) self.assertEqual(name[1], port) def testGetSockOpt(self): # Testing getsockopt() # We know a socket should start without reuse==0 sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.addCleanup(sock.close) reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) self.assertFalse(reuse != 0, "initial mode is reuse") def testSetSockOpt(self): # Testing setsockopt() sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.addCleanup(sock.close) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) self.assertFalse(reuse == 0, "failed to set reuse mode") def testSendAfterClose(self): # testing send() after close() with timeout with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: sock.settimeout(1) self.assertRaises(OSError, sock.send, b"spam") def testCloseException(self): sock = socket.socket() sock.bind((socket._LOCALHOST, 0)) socket.socket(fileno=sock.fileno()).close() try: sock.close() except OSError as err: # Winsock apparently raises ENOTSOCK self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK)) else: self.fail("close() should raise EBADF/ENOTSOCK") def testNewAttributes(self): # testing .family, .type and .protocol with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: self.assertEqual(sock.family, socket.AF_INET) if hasattr(socket, 'SOCK_CLOEXEC'): self.assertIn(sock.type, (socket.SOCK_STREAM | socket.SOCK_CLOEXEC, socket.SOCK_STREAM)) else: self.assertEqual(sock.type, socket.SOCK_STREAM) self.assertEqual(sock.proto, 0) def test_getsockaddrarg(self): sock = socket.socket() self.addCleanup(sock.close) port = socket_helper.find_unused_port() big_port = port + 65536 neg_port = port - 65536 self.assertRaises(OverflowError, sock.bind, (HOST, big_port)) self.assertRaises(OverflowError, sock.bind, (HOST, neg_port)) # Since find_unused_port() is inherently subject to race conditions, we # call it a couple times if necessary. 
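        # (The loop below retries bind() on EADDRINUSE and re-raises once
        # the retry counter reaches 5; any other error is raised at once.)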
        for i in itertools.count():
            port = socket_helper.find_unused_port()
            try:
                sock.bind((HOST, port))
            except OSError as e:
                if e.errno != errno.EADDRINUSE or i == 5:
                    raise
            else:
                break

    @unittest.skipUnless(os.name == "nt", "Windows specific")
    def test_sock_ioctl(self):
        self.assertTrue(hasattr(socket.socket, 'ioctl'))
        self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
        self.assertTrue(hasattr(socket, 'RCVALL_ON'))
        self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
        self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
        s = socket.socket()
        self.addCleanup(s.close)
        self.assertRaises(ValueError, s.ioctl, -1, None)
        s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))

    @unittest.skipUnless(os.name == "nt", "Windows specific")
    @unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
                         'Loopback fast path support required for this test')
    def test_sio_loopback_fast_path(self):
        s = socket.socket()
        self.addCleanup(s.close)
        try:
            s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
        except OSError as exc:
            WSAEOPNOTSUPP = 10045
            if exc.winerror == WSAEOPNOTSUPP:
                self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
                              "not implemented in this Windows version")
            raise
        self.assertRaises(TypeError, s.ioctl,
                          socket.SIO_LOOPBACK_FAST_PATH, None)

    def testGetaddrinfo(self):
        try:
            socket.getaddrinfo('localhost', 80)
        except socket.gaierror as err:
            if err.errno == socket.EAI_SERVICE:
                # see http://bugs.python.org/issue1282647
                self.skipTest("buggy libc version")
            raise
        # len of every sequence is supposed to be == 5
        for info in socket.getaddrinfo(HOST, None):
            self.assertEqual(len(info), 5)
        # host can be a domain name, a string representation of an
        # IPv4/v6 address or None
        socket.getaddrinfo('localhost', 80)
        socket.getaddrinfo('127.0.0.1', 80)
        socket.getaddrinfo(None, 80)
        if socket_helper.IPV6_ENABLED:
            socket.getaddrinfo('::1', 80)
        # port can be a string service name such as "http", a numeric
        # port number or None
        # Issue #26936: Android getaddrinfo() was broken before API level 23.
if (not hasattr(sys, 'getandroidapilevel') or sys.getandroidapilevel() >= 23): socket.getaddrinfo(HOST, "http") socket.getaddrinfo(HOST, 80) socket.getaddrinfo(HOST, None) # test family and socktype filters infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM) for family, type, _, _, _ in infos: self.assertEqual(family, socket.AF_INET) self.assertEqual(str(family), 'AF_INET') self.assertEqual(type, socket.SOCK_STREAM) self.assertEqual(str(type), 'SOCK_STREAM') infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM) for _, socktype, _, _, _ in infos: self.assertEqual(socktype, socket.SOCK_STREAM) # test proto and flags arguments socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP) socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE) # a server willing to support both IPv4 and IPv6 will # usually do this socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE) # test keyword arguments a = socket.getaddrinfo(HOST, None) b = socket.getaddrinfo(host=HOST, port=None) self.assertEqual(a, b) a = socket.getaddrinfo(HOST, None, socket.AF_INET) b = socket.getaddrinfo(HOST, None, family=socket.AF_INET) self.assertEqual(a, b) a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM) b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM) self.assertEqual(a, b) a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP) b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP) self.assertEqual(a, b) a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE) b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE) self.assertEqual(a, b) a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE) b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC, type=socket.SOCK_STREAM, proto=0, flags=socket.AI_PASSIVE) self.assertEqual(a, b) # Issue #6697. self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800') # Issue 17269: test workaround for OS X platform bug segfault if hasattr(socket, 'AI_NUMERICSERV'): try: # The arguments here are undefined and the call may succeed # or fail. All we care here is that it doesn't segfault. socket.getaddrinfo("localhost", None, 0, 0, 0, socket.AI_NUMERICSERV) except socket.gaierror: pass def test_getnameinfo(self): # only IP addresses are allowed self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0) @unittest.skipUnless(support.is_resource_enabled('network'), 'network is not enabled') def test_idna(self): # Check for internet access before running test # (issue #12804, issue #25138). with socket_helper.transient_internet('python.org'): socket.gethostbyname('python.org') # these should all be successful domain = 'испытание.pythontest.net' socket.gethostbyname(domain) socket.gethostbyname_ex(domain) socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM) # this may not work if the forward lookup chooses the IPv6 address, as that doesn't # have a reverse entry yet # socket.gethostbyaddr('испытание.python.org') def check_sendall_interrupted(self, with_timeout): # socketpair() is not strictly required, but it makes things easier. if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'): self.skipTest("signal.alarm and socket.socketpair required for this test") # Our signal handlers clobber the C errno by calling a math function # with an invalid domain value. 
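        # (math.acosh is only defined for arguments >= 1, so acosh(0) raises
        # ValueError after the underlying C library call has set errno.)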
def ok_handler(*args): self.assertRaises(ValueError, math.acosh, 0) def raising_handler(*args): self.assertRaises(ValueError, math.acosh, 0) 1 // 0 c, s = socket.socketpair() old_alarm = signal.signal(signal.SIGALRM, raising_handler) try: if with_timeout: # Just above the one second minimum for signal.alarm c.settimeout(1.5) with self.assertRaises(ZeroDivisionError): signal.alarm(1) c.sendall(b"x" * support.SOCK_MAX_SIZE) if with_timeout: signal.signal(signal.SIGALRM, ok_handler) signal.alarm(1) self.assertRaises(TimeoutError, c.sendall, b"x" * support.SOCK_MAX_SIZE) finally: signal.alarm(0) signal.signal(signal.SIGALRM, old_alarm) c.close() s.close() def test_sendall_interrupted(self): self.check_sendall_interrupted(False) def test_sendall_interrupted_with_timeout(self): self.check_sendall_interrupted(True) def test_dealloc_warn(self): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) r = repr(sock) with self.assertWarns(ResourceWarning) as cm: sock = None support.gc_collect() self.assertIn(r, str(cm.warning.args[0])) # An open socket file object gets dereferenced after the socket sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) f = sock.makefile('rb') r = repr(sock) sock = None support.gc_collect() with self.assertWarns(ResourceWarning): f = None support.gc_collect() def test_name_closed_socketio(self): with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: fp = sock.makefile("rb") fp.close() self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>") def test_unusable_closed_socketio(self): with socket.socket() as sock: fp = sock.makefile("rb", buffering=0) self.assertTrue(fp.readable()) self.assertFalse(fp.writable()) self.assertFalse(fp.seekable()) fp.close() self.assertRaises(ValueError, fp.readable) self.assertRaises(ValueError, fp.writable) self.assertRaises(ValueError, fp.seekable) def test_socket_close(self): sock = socket.socket() try: sock.bind((HOST, 0)) socket.close(sock.fileno()) with self.assertRaises(OSError): sock.listen(1) finally: with self.assertRaises(OSError): # sock.close() fails with EBADF sock.close() with self.assertRaises(TypeError): socket.close(None) with self.assertRaises(OSError): socket.close(-1) def test_makefile_mode(self): for mode in 'r', 'rb', 'rw', 'w', 'wb': with self.subTest(mode=mode): with socket.socket() as sock: encoding = None if "b" in mode else "utf-8" with sock.makefile(mode, encoding=encoding) as fp: self.assertEqual(fp.mode, mode) def test_makefile_invalid_mode(self): for mode in 'rt', 'x', '+', 'a': with self.subTest(mode=mode): with socket.socket() as sock: with self.assertRaisesRegex(ValueError, 'invalid mode'): sock.makefile(mode) def test_pickle(self): sock = socket.socket() with sock: for protocol in range(pickle.HIGHEST_PROTOCOL + 1): self.assertRaises(TypeError, pickle.dumps, sock, protocol) for protocol in range(pickle.HIGHEST_PROTOCOL + 1): family = pickle.loads(pickle.dumps(socket.AF_INET, protocol)) self.assertEqual(family, socket.AF_INET) type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol)) self.assertEqual(type, socket.SOCK_STREAM) def test_listen_backlog(self): for backlog in 0, -1: with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv: srv.bind((HOST, 0)) srv.listen(backlog) with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv: srv.bind((HOST, 0)) srv.listen() @support.cpython_only def test_listen_backlog_overflow(self): # Issue 15989 import _testcapi with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv: srv.bind((HOST, 0)) self.assertRaises(OverflowError, 
                          srv.listen, _testcapi.INT_MAX + 1)

    @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
    def test_flowinfo(self):
        self.assertRaises(OverflowError, socket.getnameinfo,
                          (socket_helper.HOSTv6, 0, 0xffffffff), 0)
        with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
            self.assertRaises(OverflowError, s.bind,
                              (socket_helper.HOSTv6, 0, -10))

    @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
    def test_getaddrinfo_ipv6_basic(self):
        ((*_, sockaddr),) = socket.getaddrinfo(
            'ff02::1de:c0:face:8D',  # Note capital letter `D`.
            1234, socket.AF_INET6,
            socket.SOCK_DGRAM,
            socket.IPPROTO_UDP
        )
        self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))

    @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
    @unittest.skipIf(AIX, 'Symbolic scope id does not work')
    @unittest.skipUnless(hasattr(socket, 'if_nameindex'),
                         "test needs socket.if_nameindex()")
    def test_getaddrinfo_ipv6_scopeid_symbolic(self):
        # Just pick up any network interface (Linux, Mac OS X)
        (ifindex, test_interface) = socket.if_nameindex()[0]
        ((*_, sockaddr),) = socket.getaddrinfo(
            'ff02::1de:c0:face:8D%' + test_interface,
            1234, socket.AF_INET6,
            socket.SOCK_DGRAM,
            socket.IPPROTO_UDP
        )
        # Note missing interface name part in IPv6 address
        self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))

    @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipUnless(
        sys.platform == 'win32',
        'Numeric scope id does not work or is undocumented')
    def test_getaddrinfo_ipv6_scopeid_numeric(self):
        # Also works on Linux and Mac OS X, but is not documented (?)
        # Windows, Linux and Mac OS X allow nonexistent interface numbers here.
        ifindex = 42
        ((*_, sockaddr),) = socket.getaddrinfo(
            'ff02::1de:c0:face:8D%' + str(ifindex),
            1234, socket.AF_INET6,
            socket.SOCK_DGRAM,
            socket.IPPROTO_UDP
        )
        # Note missing interface name part in IPv6 address
        self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))

    @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
    @unittest.skipIf(AIX, 'Symbolic scope id does not work')
    @unittest.skipUnless(hasattr(socket, 'if_nameindex'),
                         "test needs socket.if_nameindex()")
    def test_getnameinfo_ipv6_scopeid_symbolic(self):
        # Just pick up any network interface.
        (ifindex, test_interface) = socket.if_nameindex()[0]
        sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
        nameinfo = socket.getnameinfo(sockaddr,
                                      socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
        self.assertEqual(nameinfo,
                         ('ff02::1de:c0:face:8d%' + test_interface, '1234'))

    @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipUnless(
        sys.platform == 'win32',
        'Numeric scope id does not work or is undocumented')
    def test_getnameinfo_ipv6_scopeid_numeric(self):
        # Also works on Linux (undocumented), but does not work on Mac OS X
        # Windows and Linux allow nonexistent interface numbers here.
        ifindex = 42
        sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
        nameinfo = socket.getnameinfo(sockaddr,
                                      socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
        self.assertEqual(nameinfo,
                         ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))

    def test_str_for_enums(self):
        # Make sure that the AF_* and SOCK_* constants have enum-like string
        # reprs.
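        # (AF_* and SOCK_* are members of socket.AddressFamily and
        # socket.SocketKind, IntEnum subclasses, which is what gives the
        # name-only str() output asserted below.)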
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: self.assertEqual(str(s.family), 'AF_INET') self.assertEqual(str(s.type), 'SOCK_STREAM') def test_socket_consistent_sock_type(self): SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0) SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0) sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC with socket.socket(socket.AF_INET, sock_type) as s: self.assertEqual(s.type, socket.SOCK_STREAM) s.settimeout(1) self.assertEqual(s.type, socket.SOCK_STREAM) s.settimeout(0) self.assertEqual(s.type, socket.SOCK_STREAM) s.setblocking(True) self.assertEqual(s.type, socket.SOCK_STREAM) s.setblocking(False) self.assertEqual(s.type, socket.SOCK_STREAM) def test_unknown_socket_family_repr(self): # Test that when created with a family that's not one of the known # AF_*/SOCK_* constants, socket.family just returns the number. # # To do this we fool socket.socket into believing it already has an # open fd because on this path it doesn't actually verify the family and # type and populates the socket object. sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) fd = sock.detach() unknown_family = max(socket.AddressFamily.__members__.values()) + 1 unknown_type = max( kind for name, kind in socket.SocketKind.__members__.items() if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'} ) + 1 with socket.socket( family=unknown_family, type=unknown_type, proto=23, fileno=fd) as s: self.assertEqual(s.family, unknown_family) self.assertEqual(s.type, unknown_type) # some OS like macOS ignore proto self.assertIn(s.proto, {0, 23}) @unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()') def test__sendfile_use_sendfile(self): class File: def __init__(self, fd): self.fd = fd def fileno(self): return self.fd with socket.socket() as sock: fd = os.open(os.curdir, os.O_RDONLY) os.close(fd) with self.assertRaises(socket._GiveupOnSendfile): sock._sendfile_use_sendfile(File(fd)) with self.assertRaises(OverflowError): sock._sendfile_use_sendfile(File(2**1000)) with self.assertRaises(TypeError): sock._sendfile_use_sendfile(File(None)) def _test_socket_fileno(self, s, family, stype): self.assertEqual(s.family, family) self.assertEqual(s.type, stype) fd = s.fileno() s2 = socket.socket(fileno=fd) self.addCleanup(s2.close) # detach old fd to avoid double close s.detach() self.assertEqual(s2.family, family) self.assertEqual(s2.type, stype) self.assertEqual(s2.fileno(), fd) def test_socket_fileno(self): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.addCleanup(s.close) s.bind((socket_helper.HOST, 0)) self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM) if hasattr(socket, "SOCK_DGRAM"): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.addCleanup(s.close) s.bind((socket_helper.HOST, 0)) self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM) if socket_helper.IPV6_ENABLED: s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) self.addCleanup(s.close) s.bind((socket_helper.HOSTv6, 0, 0, 0)) self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM) if hasattr(socket, "AF_UNIX"): tmpdir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, tmpdir) s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.addCleanup(s.close) try: s.bind(os.path.join(tmpdir, 'socket')) except PermissionError: pass else: self._test_socket_fileno(s, socket.AF_UNIX, socket.SOCK_STREAM) def test_socket_fileno_rejects_float(self): with self.assertRaises(TypeError): socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=42.5) def 
test_socket_fileno_rejects_other_types(self): with self.assertRaises(TypeError): socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno="foo") def test_socket_fileno_rejects_invalid_socket(self): with self.assertRaisesRegex(ValueError, "negative file descriptor"): socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-1) @unittest.skipIf(os.name == "nt", "Windows disallows -1 only") def test_socket_fileno_rejects_negative(self): with self.assertRaisesRegex(ValueError, "negative file descriptor"): socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42) def test_socket_fileno_requires_valid_fd(self): WSAENOTSOCK = 10038 with self.assertRaises(OSError) as cm: socket.socket(fileno=os_helper.make_bad_fd()) self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK)) with self.assertRaises(OSError) as cm: socket.socket( socket.AF_INET, socket.SOCK_STREAM, fileno=os_helper.make_bad_fd()) self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK)) def test_socket_fileno_requires_socket_fd(self): with tempfile.NamedTemporaryFile() as afile: with self.assertRaises(OSError): socket.socket(fileno=afile.fileno()) with self.assertRaises(OSError) as cm: socket.socket( socket.AF_INET, socket.SOCK_STREAM, fileno=afile.fileno()) self.assertEqual(cm.exception.errno, errno.ENOTSOCK) def test_addressfamily_enum(self): import _socket, enum CheckedAddressFamily = enum._old_convert_( enum.IntEnum, 'AddressFamily', 'socket', lambda C: C.isupper() and C.startswith('AF_'), source=_socket, ) enum._test_simple_enum(CheckedAddressFamily, socket.AddressFamily) def test_socketkind_enum(self): import _socket, enum CheckedSocketKind = enum._old_convert_( enum.IntEnum, 'SocketKind', 'socket', lambda C: C.isupper() and C.startswith('SOCK_'), source=_socket, ) enum._test_simple_enum(CheckedSocketKind, socket.SocketKind) def test_msgflag_enum(self): import _socket, enum CheckedMsgFlag = enum._old_convert_( enum.IntFlag, 'MsgFlag', 'socket', lambda C: C.isupper() and C.startswith('MSG_'), source=_socket, ) enum._test_simple_enum(CheckedMsgFlag, socket.MsgFlag) def test_addressinfo_enum(self): import _socket, enum CheckedAddressInfo = enum._old_convert_( enum.IntFlag, 'AddressInfo', 'socket', lambda C: C.isupper() and C.startswith('AI_'), source=_socket) enum._test_simple_enum(CheckedAddressInfo, socket.AddressInfo) @unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.') class BasicCANTest(unittest.TestCase): def testCrucialConstants(self): socket.AF_CAN socket.PF_CAN socket.CAN_RAW @unittest.skipUnless(hasattr(socket, "CAN_BCM"), 'socket.CAN_BCM required for this test.') def testBCMConstants(self): socket.CAN_BCM # opcodes socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task socket.CAN_BCM_TX_SEND # send one CAN frame socket.CAN_BCM_RX_SETUP # create RX content filter subscription socket.CAN_BCM_RX_DELETE # remove RX content filter subscription socket.CAN_BCM_RX_READ # read properties of RX content filter subscription socket.CAN_BCM_TX_STATUS # reply to TX_READ request socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0) socket.CAN_BCM_RX_STATUS # reply to RX_READ request socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change) # flags socket.CAN_BCM_SETTIMER socket.CAN_BCM_STARTTIMER socket.CAN_BCM_TX_COUNTEVT socket.CAN_BCM_TX_ANNOUNCE socket.CAN_BCM_TX_CP_CAN_ID 
        socket.CAN_BCM_RX_FILTER_ID
        socket.CAN_BCM_RX_CHECK_DLC
        socket.CAN_BCM_RX_NO_AUTOTIMER
        socket.CAN_BCM_RX_ANNOUNCE_RESUME
        socket.CAN_BCM_TX_RESET_MULTI_IDX
        socket.CAN_BCM_RX_RTR_FRAME

    def testCreateSocket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            pass

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testCreateBCMSocket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
            pass

    def testBindAny(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            address = ('', )
            s.bind(address)
            self.assertEqual(s.getsockname(), address)

    def testTooLongInterfaceName(self):
        # most systems limit IFNAMSIZ to 16, take 1024 to be sure
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            self.assertRaisesRegex(OSError, 'interface name too long',
                                   s.bind, ('x' * 1024,))

    @unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
                         'socket.CAN_RAW_LOOPBACK required for this test.')
    def testLoopback(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            for loopback in (0, 1):
                s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
                             loopback)
                self.assertEqual(loopback,
                    s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))

    @unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
                         'socket.CAN_RAW_FILTER required for this test.')
    def testFilter(self):
        can_id, can_mask = 0x200, 0x700
        can_filter = struct.pack("=II", can_id, can_mask)
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
            self.assertEqual(can_filter,
                    s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER,
                         bytearray(can_filter))


@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):

    def __init__(self, methodName='runTest'):
        ThreadedCANSocketTest.__init__(self, methodName=methodName)

    @classmethod
    def build_can_frame(cls, can_id, data):
        """Build a CAN frame."""
        can_dlc = len(data)
        data = data.ljust(8, b'\x00')
        return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)

    @classmethod
    def dissect_can_frame(cls, frame):
        """Dissect a CAN frame."""
        can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
        return (can_id, can_dlc, data[:can_dlc])

    def testSendFrame(self):
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)
        self.assertEqual(addr[0], self.interface)

    def _testSendFrame(self):
        self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
        self.cli.send(self.cf)

    def testSendMaxFrame(self):
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)

    def _testSendMaxFrame(self):
        self.cf = self.build_can_frame(0x00, b'\x07' * 8)
        self.cli.send(self.cf)

    def testSendMultiFrames(self):
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf1, cf)

        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf2, cf)

    def _testSendMultiFrames(self):
        self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
        self.cli.send(self.cf1)

        self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
        self.cli.send(self.cf2)

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def _testBCM(self):
        cf, addr = self.cli.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)
        can_id, can_dlc, data = self.dissect_can_frame(cf)
        self.assertEqual(self.can_id, can_id)
        self.assertEqual(self.data, data)
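    # A hedged usage sketch of the two helpers above (can_frame_fmt is
    # assumed here to be the classic "=IB3x8s" layout set up by the CAN
    # test base class: 32-bit CAN id, 8-bit DLC, 3 pad bytes, 8 data bytes):
    #
    #   frame = CANTest.build_can_frame(0x123, b'\x01\x02')
    #   can_id, can_dlc, data = CANTest.dissect_can_frame(frame)
    #   # can_id == 0x123, can_dlc == 2, data == b'\x01\x02'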
    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testBCM(self):
        bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
        self.addCleanup(bcm.close)
        bcm.connect((self.interface,))
        self.can_id = 0x123
        self.data = bytes([0xc0, 0xff, 0xee])
        self.cf = self.build_can_frame(self.can_id, self.data)
        opcode = socket.CAN_BCM_TX_SEND
        flags = 0
        count = 0
        ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
        bcm_can_id = 0x0222
        nframes = 1
        assert len(self.cf) == 16
        header = struct.pack(self.bcm_cmd_msg_fmt,
                             opcode,
                             flags,
                             count,
                             ival1_seconds,
                             ival1_usec,
                             ival2_seconds,
                             ival2_usec,
                             bcm_can_id,
                             nframes,
                             )
        header_plus_frame = header + self.cf
        bytes_sent = bcm.send(header_plus_frame)
        self.assertEqual(bytes_sent, len(header_plus_frame))


@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.interface = "vcan0"

    def testCrucialConstants(self):
        socket.AF_CAN
        socket.PF_CAN
        socket.CAN_ISOTP
        socket.SOCK_DGRAM

    @unittest.skipUnless(hasattr(socket, "SOL_CAN_ISOTP"),
                         'Constants from isotp.h required for this test.')
    def testIsoTpConstants(self):
        socket.SOL_CAN_ISOTP

        # for socket options affecting the socket (not the global system)
        socket.CAN_ISOTP_OPTS
        socket.CAN_ISOTP_RECV_FC

        # sockopts to force stmin timer values for protocol regression tests
        socket.CAN_ISOTP_TX_STMIN
        socket.CAN_ISOTP_RX_STMIN
        socket.CAN_ISOTP_LL_OPTS

        # flags for isotp behaviour
        socket.CAN_ISOTP_LISTEN_MODE
        socket.CAN_ISOTP_EXTEND_ADDR
        socket.CAN_ISOTP_TX_PADDING
        socket.CAN_ISOTP_RX_PADDING
        socket.CAN_ISOTP_CHK_PAD_LEN
        socket.CAN_ISOTP_CHK_PAD_DATA
        socket.CAN_ISOTP_HALF_DUPLEX
        socket.CAN_ISOTP_FORCE_TXSTMIN
        socket.CAN_ISOTP_FORCE_RXSTMIN
        socket.CAN_ISOTP_RX_EXT_ADDR
        socket.CAN_ISOTP_WAIT_TX_DONE

        # This constant is new and not always available
        # socket.CAN_ISOTP_SF_BROADCAST

        # default values
        socket.CAN_ISOTP_DEFAULT_FLAGS
        socket.CAN_ISOTP_DEFAULT_EXT_ADDRESS
        socket.CAN_ISOTP_DEFAULT_PAD_CONTENT
        socket.CAN_ISOTP_DEFAULT_FRAME_TXTIME
        socket.CAN_ISOTP_DEFAULT_RECV_BS
        socket.CAN_ISOTP_DEFAULT_EXT_ADDRESS
        socket.CAN_ISOTP_DEFAULT_RECV_STMIN
        socket.CAN_ISOTP_DEFAULT_RECV_WFTMAX
        socket.CAN_ISOTP_DEFAULT_LL_MTU
        socket.CAN_ISOTP_DEFAULT_LL_TX_DL
        socket.CAN_ISOTP_DEFAULT_LL_TX_FLAGS

    def testCreateSocket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            pass

    @unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
                         'socket.CAN_ISOTP required for this test.')
    def testCreateISOTPSocket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
            pass

    def testTooLongInterfaceName(self):
        # most systems limit IFNAMSIZ to 16, take 1024 to be sure
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
            with self.assertRaisesRegex(OSError, 'interface name too long'):
                s.bind(('x' * 1024, 1, 2))

    def testBind(self):
        try:
            with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
                addr = self.interface, 0x123, 0x456
                s.bind(addr)
                self.assertEqual(s.getsockname(), addr)
        except OSError as e:
            if e.errno == errno.ENODEV:
                self.skipTest('network interface `%s` does not exist' %
                              self.interface)
            else:
                raise


@unittest.skipUnless(HAVE_SOCKET_CAN_J1939, 'CAN J1939 required for this test.')
class J1939Test(unittest.TestCase):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.interface = "vcan0"

    @unittest.skipUnless(hasattr(socket, "CAN_J1939"),
                         'socket.CAN_J1939 required for this test.')
    def testJ1939Constants(self):
        socket.CAN_J1939

        socket.J1939_MAX_UNICAST_ADDR
        socket.J1939_IDLE_ADDR
        socket.J1939_NO_ADDR
        socket.J1939_NO_NAME
        socket.J1939_PGN_REQUEST
        socket.J1939_PGN_ADDRESS_CLAIMED
        socket.J1939_PGN_ADDRESS_COMMANDED
        socket.J1939_PGN_PDU1_MAX
        socket.J1939_PGN_MAX
        socket.J1939_NO_PGN

        # J1939 socket options
        socket.SO_J1939_FILTER
        socket.SO_J1939_PROMISC
        socket.SO_J1939_SEND_PRIO
        socket.SO_J1939_ERRQUEUE

        socket.SCM_J1939_DEST_ADDR
        socket.SCM_J1939_DEST_NAME
        socket.SCM_J1939_PRIO
        socket.SCM_J1939_ERRQUEUE

        socket.J1939_NLA_PAD
        socket.J1939_NLA_BYTES_ACKED

        socket.J1939_EE_INFO_NONE
        socket.J1939_EE_INFO_TX_ABORT

        socket.J1939_FILTER_MAX

    @unittest.skipUnless(hasattr(socket, "CAN_J1939"),
                         'socket.CAN_J1939 required for this test.')
    def testCreateJ1939Socket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939) as s:
            pass

    def testBind(self):
        try:
            with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939) as s:
                addr = (self.interface, socket.J1939_NO_NAME,
                        socket.J1939_NO_PGN, socket.J1939_NO_ADDR)
                s.bind(addr)
                self.assertEqual(s.getsockname(), addr)
        except OSError as e:
            if e.errno == errno.ENODEV:
                self.skipTest('network interface `%s` does not exist' %
                              self.interface)
            else:
                raise


@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):

    def testCrucialConstants(self):
        socket.AF_RDS
        socket.PF_RDS

    def testCreateSocket(self):
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
            pass

    def testSocketBufferSize(self):
        bufsize = 16384
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
            s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)


@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):

    def __init__(self, methodName='runTest'):
        ThreadedRDSSocketTest.__init__(self, methodName=methodName)

    def setUp(self):
        super().setUp()
        self.evt = threading.Event()

    def testSendAndRecv(self):
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)
        self.assertEqual(self.cli_addr, addr)

    def _testSendAndRecv(self):
        self.data = b'spam'
        self.cli.sendto(self.data, 0, (HOST, self.port))

    def testPeek(self):
        data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
        self.assertEqual(self.data, data)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)

    def _testPeek(self):
        self.data = b'spam'
        self.cli.sendto(self.data, 0, (HOST, self.port))

    @requireAttrs(socket.socket, 'recvmsg')
    def testSendAndRecvMsg(self):
        data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
        self.assertEqual(self.data, data)

    @requireAttrs(socket.socket, 'sendmsg')
    def _testSendAndRecvMsg(self):
        self.data = b'hello ' * 10
        self.cli.sendmsg([self.data], (), 0, (HOST, self.port))

    def testSendAndRecvMulti(self):
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data1, data)

        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data2, data)

    def _testSendAndRecvMulti(self):
        self.data1 = b'bacon'
        self.cli.sendto(self.data1, 0, (HOST, self.port))

        self.data2 = b'egg'
        self.cli.sendto(self.data2, 0, (HOST, self.port))

    def testSelect(self):
        r, w, x = select.select([self.serv], [], [], 3.0)
        self.assertIn(self.serv, r)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)

    def _testSelect(self):
        self.data = b'select'
        self.cli.sendto(self.data, 0, (HOST, self.port))
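
# Note on the threaded test pairs used above and below: for each test, the
# testFoo() method runs in the main (server) thread while the matching
# _testFoo() method runs concurrently in a client thread, e.g.
# _testSendAndRecv() performs the sendto() whose datagram testSendAndRecv()
# receives.  (A hedged reading of the ThreadableTest convention this module
# uses throughout; see the base classes defined earlier in the file.)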
@unittest.skipUnless(HAVE_SOCKET_QIPCRTR,
                     'QIPCRTR sockets required for this test.')
class BasicQIPCRTRTest(unittest.TestCase):

    def testCrucialConstants(self):
        socket.AF_QIPCRTR

    def testCreateSocket(self):
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            pass

    def testUnbound(self):
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            self.assertEqual(s.getsockname()[1], 0)

    def testBindSock(self):
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            socket_helper.bind_port(s, host=s.getsockname()[0])
            self.assertNotEqual(s.getsockname()[1], 0)

    def testInvalidBindSock(self):
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            self.assertRaises(OSError, socket_helper.bind_port, s, host=-2)

    def testAutoBindSock(self):
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            s.connect((123, 123))
            self.assertNotEqual(s.getsockname()[1], 0)


@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
                     'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):

    def testCrucialConstants(self):
        socket.AF_VSOCK

    def testVSOCKConstants(self):
        socket.SO_VM_SOCKETS_BUFFER_SIZE
        socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
        socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
        socket.VMADDR_CID_ANY
        socket.VMADDR_PORT_ANY
        socket.VMADDR_CID_HOST
        socket.VM_SOCKETS_INVALID_VERSION
        socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID

    def testCreateSocket(self):
        with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
            pass

    def testSocketBufferSize(self):
        with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
            orig_max = s.getsockopt(socket.AF_VSOCK,
                                    socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
            orig = s.getsockopt(socket.AF_VSOCK,
                                socket.SO_VM_SOCKETS_BUFFER_SIZE)
            orig_min = s.getsockopt(socket.AF_VSOCK,
                                    socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)

            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)

            self.assertEqual(orig_max * 2,
                             s.getsockopt(socket.AF_VSOCK,
                                          socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
            self.assertEqual(orig * 2,
                             s.getsockopt(socket.AF_VSOCK,
                                          socket.SO_VM_SOCKETS_BUFFER_SIZE))
            self.assertEqual(orig_min * 2,
                             s.getsockopt(socket.AF_VSOCK,
                                          socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))


@unittest.skipUnless(HAVE_SOCKET_BLUETOOTH,
                     'Bluetooth sockets required for this test.')
class BasicBluetoothTest(unittest.TestCase):

    def testBluetoothConstants(self):
        socket.BDADDR_ANY
        socket.BDADDR_LOCAL
        socket.AF_BLUETOOTH
        socket.BTPROTO_RFCOMM

        if sys.platform != "win32":
            socket.BTPROTO_HCI
            socket.SOL_HCI
            socket.BTPROTO_L2CAP

            if not sys.platform.startswith("freebsd"):
                socket.BTPROTO_SCO

    def testCreateRfcommSocket(self):
        with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM,
                           socket.BTPROTO_RFCOMM) as s:
            pass

    @unittest.skipIf(sys.platform == "win32",
                     "windows does not support L2CAP sockets")
    def testCreateL2capSocket(self):
        with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET,
                           socket.BTPROTO_L2CAP) as s:
            pass

    @unittest.skipIf(sys.platform == "win32",
                     "windows does not support HCI sockets")
    def testCreateHciSocket(self):
        with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW,
                           socket.BTPROTO_HCI) as s:
            pass

    @unittest.skipIf(sys.platform == "win32" or sys.platform.startswith("freebsd"),
                     "windows and freebsd do not support SCO sockets")
    def testCreateScoSocket(self):
        with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET,
                           socket.BTPROTO_SCO) as s:
            pass


class BasicTCPTest(SocketConnectedTest):

    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def testRecv(self):
        # Testing large receive over TCP
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)

    def _testRecv(self):
        self.serv_conn.send(MSG)

    def testOverFlowRecv(self):
        # Testing receive in chunks over TCP
        seg1 = self.cli_conn.recv(len(MSG) - 3)
        seg2 = self.cli_conn.recv(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testOverFlowRecv(self):
        self.serv_conn.send(MSG)

    def testRecvFrom(self):
        # Testing large recvfrom() over TCP
        msg, addr = self.cli_conn.recvfrom(1024)
        self.assertEqual(msg, MSG)

    def _testRecvFrom(self):
        self.serv_conn.send(MSG)

    def testOverFlowRecvFrom(self):
        # Testing recvfrom() in chunks over TCP
        seg1, addr = self.cli_conn.recvfrom(len(MSG) - 3)
        seg2, addr = self.cli_conn.recvfrom(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testOverFlowRecvFrom(self):
        self.serv_conn.send(MSG)

    def testSendAll(self):
        # Testing sendall() with a 2048 byte string over TCP
        msg = b''
        while True:
            read = self.cli_conn.recv(1024)
            if not read:
                break
            msg += read
        self.assertEqual(msg, b'f' * 2048)

    def _testSendAll(self):
        big_chunk = b'f' * 2048
        self.serv_conn.sendall(big_chunk)

    def testFromFd(self):
        # Testing fromfd()
        fd = self.cli_conn.fileno()
        sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        self.assertIsInstance(sock, socket.socket)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testFromFd(self):
        self.serv_conn.send(MSG)

    def testDup(self):
        # Testing dup()
        sock = self.cli_conn.dup()
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testDup(self):
        self.serv_conn.send(MSG)

    def testShutdown(self):
        # Testing shutdown()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)
        # wait for _testShutdown to finish: on OS X, when the server
        # closes the connection the client also becomes disconnected,
        # and the client's shutdown call will fail. (Issue #4397.)
        self.done.wait()

    def _testShutdown(self):
        self.serv_conn.send(MSG)
        self.serv_conn.shutdown(2)

    testShutdown_overflow = support.cpython_only(testShutdown)

    @support.cpython_only
    def _testShutdown_overflow(self):
        import _testcapi
        self.serv_conn.send(MSG)
        # Issue 15989
        self.assertRaises(OverflowError, self.serv_conn.shutdown,
                          _testcapi.INT_MAX + 1)
        self.assertRaises(OverflowError, self.serv_conn.shutdown,
                          2 + (_testcapi.UINT_MAX + 1))
        self.serv_conn.shutdown(2)

    def testDetach(self):
        # Testing detach()
        fileno = self.cli_conn.fileno()
        f = self.cli_conn.detach()
        self.assertEqual(f, fileno)
        # cli_conn cannot be used anymore...
        self.assertTrue(self.cli_conn._closed)
        self.assertRaises(OSError, self.cli_conn.recv, 1024)
        self.cli_conn.close()
        # ...but we can create another socket using the (still open)
        # file descriptor
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testDetach(self):
        self.serv_conn.send(MSG)


class BasicUDPTest(ThreadedUDPSocketTest):

    def __init__(self, methodName='runTest'):
        ThreadedUDPSocketTest.__init__(self, methodName=methodName)

    def testSendtoAndRecv(self):
        # Testing sendto() and recv() over UDP
        msg = self.serv.recv(len(MSG))
        self.assertEqual(msg, MSG)

    def _testSendtoAndRecv(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFrom(self):
        # Testing recvfrom() over UDP
        msg, addr = self.serv.recvfrom(len(MSG))
        self.assertEqual(msg, MSG)

    def _testRecvFrom(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFromNegative(self):
        # Negative lengths passed to recvfrom should give ValueError.
        self.assertRaises(ValueError, self.serv.recvfrom, -1)

    def _testRecvFromNegative(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))


@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
                     'UDPLITE sockets required for this test.')
class BasicUDPLITETest(ThreadedUDPLITESocketTest):

    def __init__(self, methodName='runTest'):
        ThreadedUDPLITESocketTest.__init__(self, methodName=methodName)

    def testSendtoAndRecv(self):
        # Testing sendto() and recv() over UDPLITE
        msg = self.serv.recv(len(MSG))
        self.assertEqual(msg, MSG)

    def _testSendtoAndRecv(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFrom(self):
        # Testing recvfrom() over UDPLITE
        msg, addr = self.serv.recvfrom(len(MSG))
        self.assertEqual(msg, MSG)

    def _testRecvFrom(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFromNegative(self):
        # Negative lengths passed to recvfrom should give ValueError.
        self.assertRaises(ValueError, self.serv.recvfrom, -1)

    def _testRecvFromNegative(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))


# Tests for the sendmsg()/recvmsg() interface.  Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().

# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.

class SendrecvmsgBase(ThreadSafeCleanupTestCase):
    # Base class for sendmsg()/recvmsg() tests.

    # Time in seconds to wait before considering a test failed, or
    # None for no timeout.  Not all tests actually set a timeout.
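    # A hedged sketch of the call pair these classes exercise (names and
    # sizes are illustrative, not part of the original suite):
    #
    #   nbytes = cli_sock.sendmsg([b"spam"], ancdata, flags, serv_addr)
    #   msg, ancdata, flags, addr = serv_sock.recvmsg(bufsize, ancbufsize)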
    fail_timeout = support.LOOPBACK_TIMEOUT

    def setUp(self):
        self.misc_event = threading.Event()
        super().setUp()

    def sendToServer(self, msg):
        # Send msg to the server.
        return self.cli_sock.send(msg)

    # Tuple of alternative default arguments for sendmsg() when called
    # via sendmsgToServer() (e.g. to include a destination address).
    sendmsg_to_server_defaults = ()

    def sendmsgToServer(self, *args):
        # Call sendmsg() on self.cli_sock with the given arguments,
        # filling in any arguments which are not supplied with the
        # corresponding items of self.sendmsg_to_server_defaults, if
        # any.
        return self.cli_sock.sendmsg(
            *(args + self.sendmsg_to_server_defaults[len(args):]))

    def doRecvmsg(self, sock, bufsize, *args):
        # Call recvmsg() on sock with given arguments and return its
        # result.  Should be used for tests which can use either
        # recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
        # this method with one which emulates it using recvmsg_into(),
        # thus allowing the same test to be used for both methods.
        result = sock.recvmsg(bufsize, *args)
        self.registerRecvmsgResult(result)
        return result

    def registerRecvmsgResult(self, result):
        # Called by doRecvmsg() with the return value of recvmsg() or
        # recvmsg_into().  Can be overridden to arrange cleanup based
        # on the returned ancillary data, for instance.
        pass

    def checkRecvmsgAddress(self, addr1, addr2):
        # Called to compare the received address with the address of
        # the peer.
        self.assertEqual(addr1, addr2)

    # Flags that are normally unset in msg_flags
    msg_flags_common_unset = 0
    for name in ("MSG_CTRUNC", "MSG_OOB"):
        msg_flags_common_unset |= getattr(socket, name, 0)

    # Flags that are normally set
    msg_flags_common_set = 0

    # Flags set when a complete record has been received (e.g. MSG_EOR
    # for SCTP)
    msg_flags_eor_indicator = 0

    # Flags set when a complete record has not been received
    # (e.g. MSG_TRUNC for datagram sockets)
    msg_flags_non_eor_indicator = 0

    def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
        # Method to check the value of msg_flags returned by recvmsg[_into]().
        #
        # Checks that all bits in msg_flags_common_set attribute are
        # set in "flags" and all bits in msg_flags_common_unset are
        # unset.
        #
        # The "eor" argument specifies whether the flags should
        # indicate that a full record (or datagram) has been received.
        # If "eor" is None, no checks are done; otherwise, checks
        # that:
        #
        #  * if "eor" is true, all bits in msg_flags_eor_indicator are
        #    set and all bits in msg_flags_non_eor_indicator are unset
        #
        #  * if "eor" is false, all bits in msg_flags_non_eor_indicator
        #    are set and all bits in msg_flags_eor_indicator are unset
        #
        # If "checkset" and/or "checkunset" are supplied, they require
        # the given bits to be set or unset respectively, overriding
        # what the attributes require for those bits.
        #
        # If any bits are set in "ignore", they will not be checked,
        # regardless of the other inputs.
        #
        # Will raise Exception if the inputs require a bit to be both
        # set and unset, and it is not ignored.
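        # For example (a hedged illustration, not part of the original
        # suite): on a datagram socket, where msg_flags_non_eor_indicator
        # includes MSG_TRUNC, checkFlags(flags, eor=False) asserts that
        # MSG_TRUNC is set in "flags" while MSG_CTRUNC and MSG_OOB are unset.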
        defaultset = self.msg_flags_common_set
        defaultunset = self.msg_flags_common_unset

        if eor:
            defaultset |= self.msg_flags_eor_indicator
            defaultunset |= self.msg_flags_non_eor_indicator
        elif eor is not None:
            defaultset |= self.msg_flags_non_eor_indicator
            defaultunset |= self.msg_flags_eor_indicator

        # Function arguments override defaults
        defaultset &= ~checkunset
        defaultunset &= ~checkset

        # Merge arguments with remaining defaults, and check for conflicts
        checkset |= defaultset
        checkunset |= defaultunset
        inboth = checkset & checkunset & ~ignore
        if inboth:
            raise Exception("contradictory set, unset requirements for flags "
                            "{0:#x}".format(inboth))

        # Compare with given msg_flags value
        mask = (checkset | checkunset) & ~ignore
        self.assertEqual(flags & mask, checkset & mask)


class RecvmsgIntoMixin(SendrecvmsgBase):
    # Mixin to implement doRecvmsg() using recvmsg_into().

    def doRecvmsg(self, sock, bufsize, *args):
        buf = bytearray(bufsize)
        result = sock.recvmsg_into([buf], *args)
        self.registerRecvmsgResult(result)
        self.assertGreaterEqual(result[0], 0)
        self.assertLessEqual(result[0], bufsize)
        return (bytes(buf[:result[0]]),) + result[1:]


class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
    # Defines flags to be checked in msg_flags for datagram sockets.

    @property
    def msg_flags_non_eor_indicator(self):
        return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC


class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
    # Defines flags to be checked in msg_flags for SCTP sockets.

    @property
    def msg_flags_eor_indicator(self):
        return super().msg_flags_eor_indicator | socket.MSG_EOR


class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
    # Base class for tests on connectionless-mode sockets.  Users must
    # supply sockets on attributes cli and serv to be mapped to
    # cli_sock and serv_sock respectively.

    @property
    def serv_sock(self):
        return self.serv

    @property
    def cli_sock(self):
        return self.cli

    @property
    def sendmsg_to_server_defaults(self):
        return ([], [], 0, self.serv_addr)

    def sendToServer(self, msg):
        return self.cli_sock.sendto(msg, self.serv_addr)


class SendrecvmsgConnectedBase(SendrecvmsgBase):
    # Base class for tests on connected sockets.  Users must supply
    # sockets on attributes serv_conn and cli_conn (representing the
    # connections *to* the server and the client), to be mapped to
    # cli_sock and serv_sock respectively.

    @property
    def serv_sock(self):
        return self.cli_conn

    @property
    def cli_sock(self):
        return self.serv_conn

    def checkRecvmsgAddress(self, addr1, addr2):
        # Address is currently "unspecified" for a connected socket,
        # so we don't examine it
        pass


class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
    # Base class to set a timeout on server's socket.

    def setUp(self):
        super().setUp()
        self.serv_sock.settimeout(self.fail_timeout)


class SendmsgTests(SendrecvmsgServerTimeoutBase):
    # Tests for sendmsg() which can use any socket type and do not
    # involve recvmsg() or recvmsg_into().

    def testSendmsg(self):
        # Send a simple message with sendmsg().
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsg(self):
        self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))

    def testSendmsgDataGenerator(self):
        # Send from buffer obtained from a generator (not a sequence).
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgDataGenerator(self):
        self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
                         len(MSG))

    def testSendmsgAncillaryGenerator(self):
        # Gather (empty) ancillary data from a generator.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgAncillaryGenerator(self):
        self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
                         len(MSG))

    def testSendmsgArray(self):
        # Send data from an array instead of the usual bytes object.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgArray(self):
        self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
                         len(MSG))

    def testSendmsgGather(self):
        # Send message data from more than one buffer (gather write).
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgGather(self):
        self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))

    def testSendmsgBadArgs(self):
        # Check that sendmsg() rejects invalid arguments.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgBadArgs(self):
        self.assertRaises(TypeError, self.cli_sock.sendmsg)
        self.assertRaises(TypeError, self.sendmsgToServer,
                          b"not in an iterable")
        self.assertRaises(TypeError, self.sendmsgToServer,
                          object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG, object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [], object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [], 0, object())
        self.sendToServer(b"done")

    def testSendmsgBadCmsg(self):
        # Check that invalid ancillary data items are rejected.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgBadCmsg(self):
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(object(), 0, b"data")])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, object(), b"data")])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, object())])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0)])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, b"data", 42)])
        self.sendToServer(b"done")

    @requireAttrs(socket, "CMSG_SPACE")
    def testSendmsgBadMultiCmsg(self):
        # Check that invalid ancillary data items are rejected when
        # more than one item is present.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    @testSendmsgBadMultiCmsg.client_skip
    def _testSendmsgBadMultiCmsg(self):
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [0, 0, b""])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, b""), object()])
        self.sendToServer(b"done")

    def testSendmsgExcessCmsgReject(self):
        # Check that sendmsg() rejects excess ancillary data items
        # when the number that can be sent is limited.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgExcessCmsgReject(self):
        if not hasattr(socket, "CMSG_SPACE"):
            # Can only send one item
            with self.assertRaises(OSError) as cm:
                self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
            self.assertIsNone(cm.exception.errno)
        self.sendToServer(b"done")

    def testSendmsgAfterClose(self):
        # Check that sendmsg() fails on a closed socket.
        pass

    def _testSendmsgAfterClose(self):
        self.cli_sock.close()
        self.assertRaises(OSError, self.sendmsgToServer, [MSG])


class SendmsgStreamTests(SendmsgTests):
    # Tests for sendmsg() which require a stream socket and do not
    # involve recvmsg() or recvmsg_into().

    def testSendmsgExplicitNoneAddr(self):
        # Check that peer address can be specified as None.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgExplicitNoneAddr(self):
        self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))

    def testSendmsgTimeout(self):
        # Check that timeout works with sendmsg().
        self.assertEqual(self.serv_sock.recv(512), b"a" * 512)
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    def _testSendmsgTimeout(self):
        try:
            self.cli_sock.settimeout(0.03)
            try:
                while True:
                    self.sendmsgToServer([b"a" * 512])
            except TimeoutError:
                pass
            except OSError as exc:
                if exc.errno != errno.ENOMEM:
                    raise
                # bpo-33937 the test randomly fails on Travis CI with
                # "OSError: [Errno 12] Cannot allocate memory"
            else:
                self.fail("TimeoutError not raised")
        finally:
            self.misc_event.set()

    # XXX: would be nice to have more tests for sendmsg flags argument.

    # Linux supports MSG_DONTWAIT when sending, but in general, it
    # only works when receiving.  Could add other platforms if they
    # support it too.
    @skipWithClientIf(sys.platform not in {"linux"},
                      "MSG_DONTWAIT not known to work on this platform when "
                      "sending")
    def testSendmsgDontWait(self):
        # Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
        self.assertEqual(self.serv_sock.recv(512), b"a" * 512)
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    @testSendmsgDontWait.client_skip
    def _testSendmsgDontWait(self):
        try:
            with self.assertRaises(OSError) as cm:
                while True:
                    self.sendmsgToServer([b"a" * 512], [], socket.MSG_DONTWAIT)
            # bpo-33937: catch also ENOMEM, the test randomly fails on
            # Travis CI with "OSError: [Errno 12] Cannot allocate memory"
            self.assertIn(cm.exception.errno,
                          (errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
        finally:
            self.misc_event.set()


class SendmsgConnectionlessTests(SendmsgTests):
    # Tests for sendmsg() which require a connectionless-mode
    # (e.g. datagram) socket, and do not involve recvmsg() or
    # recvmsg_into().

    def testSendmsgNoDestAddr(self):
        # Check that sendmsg() fails when no destination address is
        # given for unconnected socket.
        pass

    def _testSendmsgNoDestAddr(self):
        self.assertRaises(OSError, self.cli_sock.sendmsg,
                          [MSG])
        self.assertRaises(OSError, self.cli_sock.sendmsg,
                          [MSG], [], 0, None)


class RecvmsgGenericTests(SendrecvmsgBase):
    # Tests for recvmsg() which can also be emulated using
    # recvmsg_into(), and can use any socket type.

    def testRecvmsg(self):
        # Receive a simple message with recvmsg[_into]().
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsg(self):
        self.sendToServer(MSG)

    def testRecvmsgExplicitDefaults(self):
        # Test recvmsg[_into]() with default arguments provided explicitly.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgExplicitDefaults(self):
        self.sendToServer(MSG)

    def testRecvmsgShorter(self):
        # Receive a message smaller than buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) + 42)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgShorter(self):
        self.sendToServer(MSG)

    def testRecvmsgTrunc(self):
        # Receive part of message, check for truncation indicators.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) - 3)
        self.assertEqual(msg, MSG[:-3])
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=False)

    def _testRecvmsgTrunc(self):
        self.sendToServer(MSG)

    def testRecvmsgShortAncillaryBuf(self):
        # Test ancillary data buffer too small to hold any ancillary data.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgShortAncillaryBuf(self):
        self.sendToServer(MSG)

    def testRecvmsgLongAncillaryBuf(self):
        # Test large ancillary data buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgLongAncillaryBuf(self):
        self.sendToServer(MSG)

    def testRecvmsgAfterClose(self):
        # Check that recvmsg[_into]() fails on a closed socket.
        self.serv_sock.close()
        self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)

    def _testRecvmsgAfterClose(self):
        pass

    def testRecvmsgTimeout(self):
        # Check that timeout works.
        try:
            self.serv_sock.settimeout(0.03)
            self.assertRaises(TimeoutError,
                              self.doRecvmsg, self.serv_sock, len(MSG))
        finally:
            self.misc_event.set()

    def _testRecvmsgTimeout(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    @requireAttrs(socket, "MSG_PEEK")
    def testRecvmsgPeek(self):
        # Check that MSG_PEEK in flags enables examination of pending
        # data without consuming it.

        # Receive part of data with MSG_PEEK.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) - 3, 0,
                                                   socket.MSG_PEEK)
        self.assertEqual(msg, MSG[:-3])
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        # Ignoring MSG_TRUNC here (so this test is the same for stream
        # and datagram sockets).  Some wording in POSIX seems to
        # suggest that it needn't be set when peeking, but that may
        # just be a slip.
        self.checkFlags(flags, eor=False,
                        ignore=getattr(socket, "MSG_TRUNC", 0))

        # Receive all data with MSG_PEEK.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 0,
                                                   socket.MSG_PEEK)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

        # Check that the same data can still be received normally.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    @testRecvmsgPeek.client_skip
    def _testRecvmsgPeek(self):
        self.sendToServer(MSG)

    @requireAttrs(socket.socket, "sendmsg")
    def testRecvmsgFromSendmsg(self):
        # Test receiving with recvmsg[_into]() when message is sent
        # using sendmsg().
        self.serv_sock.settimeout(self.fail_timeout)
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    @testRecvmsgFromSendmsg.client_skip
    def _testRecvmsgFromSendmsg(self):
        self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))


class RecvmsgGenericStreamTests(RecvmsgGenericTests):
    # Tests which require a stream socket and can use either recvmsg()
    # or recvmsg_into().

    def testRecvmsgEOF(self):
        # Receive end-of-stream indicator (b"", peer socket closed).
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
        self.assertEqual(msg, b"")
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=None)  # Might not have end-of-record marker

    def _testRecvmsgEOF(self):
        self.cli_sock.close()

    def testRecvmsgOverflow(self):
        # Receive a message in more than one chunk.
        seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                    len(MSG) - 3)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=False)

        seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testRecvmsgOverflow(self):
        self.sendToServer(MSG)


class RecvmsgTests(RecvmsgGenericTests):
    # Tests for recvmsg() which can use any socket type.

    def testRecvmsgBadArgs(self):
        # Check that recvmsg() rejects invalid arguments.
        self.assertRaises(TypeError, self.serv_sock.recvmsg)
        self.assertRaises(ValueError, self.serv_sock.recvmsg,
                          -1, 0, 0)
        self.assertRaises(ValueError, self.serv_sock.recvmsg,
                          len(MSG), -1, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          [bytearray(10)], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          object(), 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          len(MSG), object(), 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          len(MSG), 0, object())

        msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgBadArgs(self):
        self.sendToServer(MSG)


class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
    # Tests for recvmsg_into() which can use any socket type.

    def testRecvmsgIntoBadArgs(self):
        # Check that recvmsg_into() rejects invalid arguments.
        buf = bytearray(len(MSG))
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          len(MSG), 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          buf, 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [object()], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [b"I'm not writable"], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf, object()], 0, 0)
        self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
                          [buf], -1, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf], object(), 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf], 0, object())

        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf, bytearray(MSG))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoBadArgs(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoGenerator(self):
        # Receive into buffer obtained from a generator (not a sequence).
        buf = bytearray(len(MSG))
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
            (o for o in [buf]))
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf, bytearray(MSG))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoGenerator(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoArray(self):
        # Receive into an array rather than the usual bytearray.
        buf = array.array("B", [0] * len(MSG))
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf.tobytes(), MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoArray(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoScatter(self):
        # Receive into multiple buffers (scatter write).
        b1 = bytearray(b"----")
        b2 = bytearray(b"0123456789")
        b3 = bytearray(b"--------------")
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
            [b1, memoryview(b2)[2:9], b3])
        self.assertEqual(nbytes, len(b"Mary had a little lamb"))
        self.assertEqual(b1, bytearray(b"Mary"))
        self.assertEqual(b2, bytearray(b"01 had a 9"))
        self.assertEqual(b3, bytearray(b"little lamb---"))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoScatter(self):
        self.sendToServer(b"Mary had a little lamb")


class CmsgMacroTests(unittest.TestCase):
    # Test the functions CMSG_LEN() and CMSG_SPACE().  Tests
    # assumptions used by sendmsg() and recvmsg[_into](), which share
    # code with these functions.

    # Match the definition in socketmodule.c
    try:
        import _testcapi
    except ImportError:
        socklen_t_limit = 0x7fffffff
    else:
        socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)

    @requireAttrs(socket, "CMSG_LEN")
    def testCMSG_LEN(self):
        # Test CMSG_LEN() with various valid and invalid values,
        # checking the assumptions used by recvmsg() and sendmsg().
        toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
        values = list(range(257)) + list(range(toobig - 257, toobig))

        # struct cmsghdr has at least three members, two of which are ints
        self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
        for n in values:
            ret = socket.CMSG_LEN(n)
            # This is how recvmsg() calculates the data size
            self.assertEqual(ret - socket.CMSG_LEN(0), n)
            self.assertLessEqual(ret, self.socklen_t_limit)

        self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
        # sendmsg() shares code with these functions, and requires
        # that it reject values over the limit.
        self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
        self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)

    @requireAttrs(socket, "CMSG_SPACE")
    def testCMSG_SPACE(self):
        # Test CMSG_SPACE() with various valid and invalid values,
        # checking the assumptions used by sendmsg().
        toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
        values = list(range(257)) + list(range(toobig - 257, toobig))

        last = socket.CMSG_SPACE(0)
        # struct cmsghdr has at least three members, two of which are ints
        self.assertGreater(last, array.array("i").itemsize * 2)
        for n in values:
            ret = socket.CMSG_SPACE(n)
            self.assertGreaterEqual(ret, last)
            self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
            self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
            self.assertLessEqual(ret, self.socklen_t_limit)
            last = ret

        self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
        # sendmsg() shares code with these functions, and requires
        # that it reject values over the limit.
        self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
        self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)


class SCMRightsTest(SendrecvmsgServerTimeoutBase):
    # Tests for file descriptor passing on Unix-domain sockets.

    # Invalid file descriptor value that's unlikely to evaluate to a
    # real FD even if one of its bytes is replaced with a different
    # value (which shouldn't actually happen).
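    # A hedged sketch of the SCM_RIGHTS encoding exercised below: the FDs
    # travel as one ancillary data item whose payload is a packed int
    # array, e.g.
    #
    #   fds = array.array("i", [fd0, fd1])
    #   sock.sendmsg([b"x"], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])
    #
    # (This restates what createAndSendFDs() below does; it is not extra
    # test coverage.)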
    badfd = -0x5555

    def newFDs(self, n):
        # Return a list of n file descriptors for newly-created files
        # containing their list indices as ASCII numbers.
        fds = []
        for i in range(n):
            fd, path = tempfile.mkstemp()
            self.addCleanup(os.unlink, path)
            self.addCleanup(os.close, fd)
            os.write(fd, str(i).encode())
            fds.append(fd)
        return fds

    def checkFDs(self, fds):
        # Check that the file descriptors in the given list contain
        # their correct list indices as ASCII numbers.
        for n, fd in enumerate(fds):
            os.lseek(fd, 0, os.SEEK_SET)
            self.assertEqual(os.read(fd, 1024), str(n).encode())

    def registerRecvmsgResult(self, result):
        self.addCleanup(self.closeRecvmsgFDs, result)

    def closeRecvmsgFDs(self, recvmsg_result):
        # Close all file descriptors specified in the ancillary data
        # of the given return value from recvmsg() or recvmsg_into().
        for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
            if (cmsg_level == socket.SOL_SOCKET and
                    cmsg_type == socket.SCM_RIGHTS):
                fds = array.array("i")
                fds.frombytes(cmsg_data[:
                        len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
                for fd in fds:
                    os.close(fd)

    def createAndSendFDs(self, n):
        # Send n new file descriptors created by newFDs() to the
        # server, with the constant MSG as the non-ancillary data.
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.SOL_SOCKET,
                                   socket.SCM_RIGHTS,
                                   array.array("i", self.newFDs(n)))]),
            len(MSG))

    def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
        # Check that constant MSG was received with numfds file
        # descriptors in a maximum of maxcmsgs control messages (which
        # must contain only complete integers).  By default, check
        # that MSG_CTRUNC is unset, but ignore any flags in
        # ignoreflags.
        msg, ancdata, flags, addr = result
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

        self.assertIsInstance(ancdata, list)
        self.assertLessEqual(len(ancdata), maxcmsgs)
        fds = array.array("i")
        for item in ancdata:
            self.assertIsInstance(item, tuple)
            cmsg_level, cmsg_type, cmsg_data = item
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            self.assertIsInstance(cmsg_data, bytes)
            self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
            fds.frombytes(cmsg_data)

        self.assertEqual(len(fds), numfds)
        self.checkFDs(fds)

    def testFDPassSimple(self):
        # Pass a single FD (array read from bytes object).
        self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240))

    def _testFDPassSimple(self):
        self.assertEqual(
            self.sendmsgToServer(
                [MSG],
                [(socket.SOL_SOCKET,
                  socket.SCM_RIGHTS,
                  array.array("i", self.newFDs(1)).tobytes())]),
            len(MSG))

    def testMultipleFDPass(self):
        # Pass multiple FDs in a single array.
        self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240))

    def _testMultipleFDPass(self):
        self.createAndSendFDs(4)

    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassCMSG_SPACE(self):
        # Test using CMSG_SPACE() to calculate ancillary buffer size.
        self.checkRecvmsgFDs(
            4, self.doRecvmsg(self.serv_sock, len(MSG),
                              socket.CMSG_SPACE(4 * SIZEOF_INT)))

    @testFDPassCMSG_SPACE.client_skip
    def _testFDPassCMSG_SPACE(self):
        self.createAndSendFDs(4)

    def testFDPassCMSG_LEN(self):
        # Test using CMSG_LEN() to calculate ancillary buffer size.
        self.checkRecvmsgFDs(1,
                             self.doRecvmsg(self.serv_sock, len(MSG),
                                            socket.CMSG_LEN(4 * SIZEOF_INT)),
                             # RFC 3542 says implementations may set
                             # MSG_CTRUNC if there isn't enough space
                             # for trailing padding.
                             ignoreflags=socket.MSG_CTRUNC)

    def _testFDPassCMSG_LEN(self):
        self.createAndSendFDs(1)

    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassSeparate(self):
        # Pass two FDs in two separate arrays.  Arrays may be combined
        # into a single control message by the OS.
        self.checkRecvmsgFDs(2,
                             self.doRecvmsg(self.serv_sock, len(MSG), 10240),
                             maxcmsgs=2)

    @testFDPassSeparate.client_skip
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    def _testFDPassSeparate(self):
        fd0, fd1 = self.newFDs(2)
        self.assertEqual(
            self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd0])),
                                         (socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd1]))]),
            len(MSG))

    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassSeparateMinSpace(self):
        # Pass two FDs in two separate arrays, receiving them into the
        # minimum space for two arrays.
        num_fds = 2
        self.checkRecvmsgFDs(num_fds,
                             self.doRecvmsg(self.serv_sock, len(MSG),
                                            socket.CMSG_SPACE(SIZEOF_INT) +
                                            socket.CMSG_LEN(SIZEOF_INT * num_fds)),
                             maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)

    @testFDPassSeparateMinSpace.client_skip
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    def _testFDPassSeparateMinSpace(self):
        fd0, fd1 = self.newFDs(2)
        self.assertEqual(
            self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd0])),
                                         (socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd1]))]),
            len(MSG))

    def sendAncillaryIfPossible(self, msg, ancdata):
        # Try to send msg and ancdata to server, but if the system
        # call fails, just send msg with no ancillary data.
        try:
            nbytes = self.sendmsgToServer([msg], ancdata)
        except OSError as e:
            # Check that it was the system call that failed
            self.assertIsInstance(e.errno, int)
            nbytes = self.sendmsgToServer([msg])
        self.assertEqual(nbytes, len(msg))

    @unittest.skipIf(sys.platform == "darwin", "see issue #24725")
    def testFDPassEmpty(self):
        # Try to pass an empty FD array.  Can receive either no array
        # or an empty array.
        self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240),
                             ignoreflags=socket.MSG_CTRUNC)

    def _testFDPassEmpty(self):
        self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
                                            socket.SCM_RIGHTS,
                                            b"")])

    def testFDPassPartialInt(self):
        # Try to pass a truncated FD array.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
        self.assertLessEqual(len(ancdata), 1)
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            self.assertLess(len(cmsg_data), SIZEOF_INT)

    def _testFDPassPartialInt(self):
        self.sendAncillaryIfPossible(
            MSG,
            [(socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [self.badfd]).tobytes()[:-1])])

    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassPartialIntInMiddle(self):
        # Try to pass two FD arrays, the first of which is truncated.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
        self.assertLessEqual(len(ancdata), 2)
        fds = array.array("i")
        # Arrays may have been combined in a single control message
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            fds.frombytes(cmsg_data[:
                    len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
        self.assertLessEqual(len(fds), 2)
        self.checkFDs(fds)

    @testFDPassPartialIntInMiddle.client_skip
    def _testFDPassPartialIntInMiddle(self):
        fd0, fd1 = self.newFDs(2)
        self.sendAncillaryIfPossible(
            MSG,
            [(socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
             (socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [fd1]))])

    def checkTruncatedHeader(self, result, ignoreflags=0):
        # Check that no ancillary data items are returned when data is
        # truncated inside the cmsghdr structure.
        msg, ancdata, flags, addr = result
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

    def testCmsgTruncNoBufSize(self):
        # Check that no ancillary data is received when no buffer size
        # is specified.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
                                  # BSD seems to set MSG_CTRUNC only
                                  # if an item has been partially
                                  # received.
                                  ignoreflags=socket.MSG_CTRUNC)

    def _testCmsgTruncNoBufSize(self):
        self.createAndSendFDs(1)

    def testCmsgTrunc0(self):
        # Check that no ancillary data is received when buffer size is 0.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
                                  ignoreflags=socket.MSG_CTRUNC)

    def _testCmsgTrunc0(self):
        self.createAndSendFDs(1)

    # Check that no ancillary data is returned for various non-zero
    # (but still too small) buffer sizes.

    def testCmsgTrunc1(self):
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))

    def _testCmsgTrunc1(self):
        self.createAndSendFDs(1)

    def testCmsgTrunc2Int(self):
        # The cmsghdr structure has at least three members, two of
        # which are ints, so we still shouldn't see any ancillary
        # data.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                                 SIZEOF_INT * 2))

    def _testCmsgTrunc2Int(self):
        self.createAndSendFDs(1)

    def testCmsgTruncLen0Minus1(self):
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                                 socket.CMSG_LEN(0) - 1))

    def _testCmsgTruncLen0Minus1(self):
        self.createAndSendFDs(1)

    # The following tests try to truncate the control message in the
    # middle of the FD array.

    def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
        # Check that file descriptor data is truncated to between
        # mindata and maxdata bytes when received with buffer size
        # ancbuf, and that any complete file descriptor numbers are
        # valid.
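        # (For example, with ancbuf=socket.CMSG_LEN(0) + 1 only one byte of
        # FD data fits, so maxdata=1 -- a hedged restatement of how the
        # testCmsgTruncLen* callers below choose their arguments.)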
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbuf)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)

        if mindata == 0 and ancdata == []:
            return
        self.assertEqual(len(ancdata), 1)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.SOL_SOCKET)
        self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
        self.assertGreaterEqual(len(cmsg_data), mindata)
        self.assertLessEqual(len(cmsg_data), maxdata)
        fds = array.array("i")
        fds.frombytes(cmsg_data[:
                len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
        self.checkFDs(fds)

    def testCmsgTruncLen0(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)

    def _testCmsgTruncLen0(self):
        self.createAndSendFDs(1)

    def testCmsgTruncLen0Plus1(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)

    def _testCmsgTruncLen0Plus1(self):
        self.createAndSendFDs(2)

    def testCmsgTruncLen1(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
                                 maxdata=SIZEOF_INT)

    def _testCmsgTruncLen1(self):
        self.createAndSendFDs(2)

    def testCmsgTruncLen2Minus1(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
                                 maxdata=(2 * SIZEOF_INT) - 1)

    def _testCmsgTruncLen2Minus1(self):
        self.createAndSendFDs(2)


class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
    # Test sendmsg() and recvmsg[_into]() using the ancillary data
    # features of the RFC 3542 Advanced Sockets API for IPv6.
    # Currently we can only handle certain data items (e.g. traffic
    # class, hop limit, MTU discovery and fragmentation settings)
    # without resorting to unportable means such as the struct module,
    # but the tests here are aimed at testing the ancillary data
    # handling in sendmsg() and recvmsg() rather than the IPv6 API
    # itself.

    # Test value to use when setting hop limit of packet
    hop_limit = 2

    # Test value to use when setting traffic class of packet.
    # -1 means "use kernel default".
    traffic_class = -1

    def ancillaryMapping(self, ancdata):
        # Given ancillary data list ancdata, return a mapping from
        # pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
        # Check that no (level, type) pair appears more than once.
        d = {}
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertNotIn((cmsg_level, cmsg_type), d)
            d[(cmsg_level, cmsg_type)] = cmsg_data
        return d

    def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
        # Receive hop limit into ancbufsize bytes of ancillary data
        # space.  Check that data is MSG, ancillary data is not
        # truncated (but ignore any flags in ignoreflags), and hop
        # limit is between 0 and maxhop inclusive.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

        self.assertEqual(len(ancdata), 1)
        self.assertIsInstance(ancdata[0], tuple)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
        self.assertIsInstance(cmsg_data, bytes)
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], maxhop)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testRecvHopLimit(self):
        # Test receiving the packet hop limit as ancillary data.
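        # (The ancillary item arrives as (IPPROTO_IPV6, IPV6_HOPLIMIT,
        # cmsg_data) with cmsg_data packing a single int -- a hedged
        # restatement of what checkHopLimit() above unpacks.)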
self.checkHopLimit(ancbufsize=10240) @testRecvHopLimit.client_skip def _testRecvHopLimit(self): # Need to wait until server has asked to receive ancillary # data, as implementations are not required to buffer it # otherwise. self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT") def testRecvHopLimitCMSG_SPACE(self): # Test receiving hop limit, using CMSG_SPACE to calculate buffer size. self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT)) @testRecvHopLimitCMSG_SPACE.client_skip def _testRecvHopLimitCMSG_SPACE(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) # Could test receiving into buffer sized using CMSG_LEN, but RFC # 3542 says portable applications must provide space for trailing # padding. Implementations may set MSG_CTRUNC if there isn't # enough space for the padding. @requireAttrs(socket.socket, "sendmsg") @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT") def testSetHopLimit(self): # Test setting hop limit on outgoing packet and receiving it # at the other end. self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit) @testSetHopLimit.client_skip def _testSetHopLimit(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.assertEqual( self.sendmsgToServer([MSG], [(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT, array.array("i", [self.hop_limit]))]), len(MSG)) def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0): # Receive traffic class and hop limit into ancbufsize bytes of # ancillary data space. Check that data is MSG, ancillary # data is not truncated (but ignore any flags in ignoreflags), # and traffic class and hop limit are in range (hop limit no # more than maxhop). self.serv_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1) self.serv_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVTCLASS, 1) self.misc_event.set() msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG), ancbufsize) self.assertEqual(msg, MSG) self.checkRecvmsgAddress(addr, self.cli_addr) self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC, ignore=ignoreflags) self.assertEqual(len(ancdata), 2) ancmap = self.ancillaryMapping(ancdata) tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)] self.assertEqual(len(tcdata), SIZEOF_INT) a = array.array("i") a.frombytes(tcdata) self.assertGreaterEqual(a[0], 0) self.assertLessEqual(a[0], 255) hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)] self.assertEqual(len(hldata), SIZEOF_INT) a = array.array("i") a.frombytes(hldata) self.assertGreaterEqual(a[0], 0) self.assertLessEqual(a[0], maxhop) @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT", "IPV6_RECVTCLASS", "IPV6_TCLASS") def testRecvTrafficClassAndHopLimit(self): # Test receiving traffic class and hop limit as ancillary data. self.checkTrafficClassAndHopLimit(ancbufsize=10240) @testRecvTrafficClassAndHopLimit.client_skip def _testRecvTrafficClassAndHopLimit(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT", "IPV6_RECVTCLASS", "IPV6_TCLASS") def testRecvTrafficClassAndHopLimitCMSG_SPACE(self): # Test receiving traffic class and hop limit, using # CMSG_SPACE() to calculate buffer size. 
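# Note that CMSG_SPACE(), unlike CMSG_LEN(), includes the trailing # alignment padding, so doubling it below reserves exact room for two # complete control messages (traffic class plus hop limit).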
self.checkTrafficClassAndHopLimit( ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2) @testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket.socket, "sendmsg") @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT", "IPV6_RECVTCLASS", "IPV6_TCLASS") def testSetTrafficClassAndHopLimit(self): # Test setting traffic class and hop limit on outgoing packet, # and receiving them at the other end. self.checkTrafficClassAndHopLimit(ancbufsize=10240, maxhop=self.hop_limit) @testSetTrafficClassAndHopLimit.client_skip def _testSetTrafficClassAndHopLimit(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.assertEqual( self.sendmsgToServer([MSG], [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS, array.array("i", [self.traffic_class])), (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT, array.array("i", [self.hop_limit]))]), len(MSG)) @requireAttrs(socket.socket, "sendmsg") @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT", "IPV6_RECVTCLASS", "IPV6_TCLASS") def testOddCmsgSize(self): # Try to send ancillary data with first item one byte too # long. Fall back to sending with correct size if this fails, # and check that second item was handled correctly. self.checkTrafficClassAndHopLimit(ancbufsize=10240, maxhop=self.hop_limit) @testOddCmsgSize.client_skip def _testOddCmsgSize(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) try: nbytes = self.sendmsgToServer( [MSG], [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS, array.array("i", [self.traffic_class]).tobytes() + b"\x00"), (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT, array.array("i", [self.hop_limit]))]) except OSError as e: self.assertIsInstance(e.errno, int) nbytes = self.sendmsgToServer( [MSG], [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS, array.array("i", [self.traffic_class])), (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT, array.array("i", [self.hop_limit]))]) self.assertEqual(nbytes, len(MSG)) # Tests for proper handling of truncated ancillary data def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0): # Receive hop limit into ancbufsize bytes of ancillary data # space, which should be too small to contain the ancillary # data header (if ancbufsize is None, pass no second argument # to recvmsg()). Check that data is MSG, MSG_CTRUNC is set # (unless included in ignoreflags), and no ancillary data is # returned. self.serv_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1) self.misc_event.set() args = () if ancbufsize is None else (ancbufsize,) msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG), *args) self.assertEqual(msg, MSG) self.checkRecvmsgAddress(addr, self.cli_addr) self.assertEqual(ancdata, []) self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC, ignore=ignoreflags) @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT") def testCmsgTruncNoBufSize(self): # Check that no ancillary data is received when no ancillary # buffer size is provided. self.checkHopLimitTruncatedHeader(ancbufsize=None, # BSD seems to set # MSG_CTRUNC only if an item # has been partially # received. 
ignoreflags=socket.MSG_CTRUNC) @testCmsgTruncNoBufSize.client_skip def _testCmsgTruncNoBufSize(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT") def testSingleCmsgTrunc0(self): # Check that no ancillary data is received when ancillary # buffer size is zero. self.checkHopLimitTruncatedHeader(ancbufsize=0, ignoreflags=socket.MSG_CTRUNC) @testSingleCmsgTrunc0.client_skip def _testSingleCmsgTrunc0(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) # Check that no ancillary data is returned for various non-zero # (but still too small) buffer sizes. @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT") def testSingleCmsgTrunc1(self): self.checkHopLimitTruncatedHeader(ancbufsize=1) @testSingleCmsgTrunc1.client_skip def _testSingleCmsgTrunc1(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT") def testSingleCmsgTrunc2Int(self): self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT) @testSingleCmsgTrunc2Int.client_skip def _testSingleCmsgTrunc2Int(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT") def testSingleCmsgTruncLen0Minus1(self): self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1) @testSingleCmsgTruncLen0Minus1.client_skip def _testSingleCmsgTruncLen0Minus1(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT") def testSingleCmsgTruncInData(self): # Test truncation of a control message inside its associated # data. The message may be returned with its data truncated, # or not returned at all. self.serv_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1) self.misc_event.set() msg, ancdata, flags, addr = self.doRecvmsg( self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1) self.assertEqual(msg, MSG) self.checkRecvmsgAddress(addr, self.cli_addr) self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC) self.assertLessEqual(len(ancdata), 1) if ancdata: cmsg_level, cmsg_type, cmsg_data = ancdata[0] self.assertEqual(cmsg_level, socket.IPPROTO_IPV6) self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT) self.assertLess(len(cmsg_data), SIZEOF_INT) @testSingleCmsgTruncInData.client_skip def _testSingleCmsgTruncInData(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0): # Receive traffic class and hop limit into ancbufsize bytes of # ancillary data space, which should be large enough to # contain the first item, but too small to contain the header # of the second. Check that data is MSG, MSG_CTRUNC is set # (unless included in ignoreflags), and only one ancillary # data item is returned. 
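# The kernel may deliver the two items in either order, so the check # below only requires the one surviving item to be one of IPV6_TCLASS # or IPV6_HOPLIMIT.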
self.serv_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1) self.serv_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVTCLASS, 1) self.misc_event.set() msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG), ancbufsize) self.assertEqual(msg, MSG) self.checkRecvmsgAddress(addr, self.cli_addr) self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC, ignore=ignoreflags) self.assertEqual(len(ancdata), 1) cmsg_level, cmsg_type, cmsg_data = ancdata[0] self.assertEqual(cmsg_level, socket.IPPROTO_IPV6) self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}) self.assertEqual(len(cmsg_data), SIZEOF_INT) a = array.array("i") a.frombytes(cmsg_data) self.assertGreaterEqual(a[0], 0) self.assertLessEqual(a[0], 255) # Try the above test with various buffer sizes. @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT", "IPV6_RECVTCLASS", "IPV6_TCLASS") def testSecondCmsgTrunc0(self): self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT), ignoreflags=socket.MSG_CTRUNC) @testSecondCmsgTrunc0.client_skip def _testSecondCmsgTrunc0(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT", "IPV6_RECVTCLASS", "IPV6_TCLASS") def testSecondCmsgTrunc1(self): self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1) @testSecondCmsgTrunc1.client_skip def _testSecondCmsgTrunc1(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT", "IPV6_RECVTCLASS", "IPV6_TCLASS") def testSecondCmsgTrunc2Int(self): self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 2 * SIZEOF_INT) @testSecondCmsgTrunc2Int.client_skip def _testSecondCmsgTrunc2Int(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT", "IPV6_RECVTCLASS", "IPV6_TCLASS") def testSecondCmsgTruncLen0Minus1(self): self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(0) - 1) @testSecondCmsgTruncLen0Minus1.client_skip def _testSecondCmsgTruncLen0Minus1(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT", "IPV6_RECVTCLASS", "IPV6_TCLASS") def testSecondCmsgTruncInData(self): # Test truncation of the second of two control messages inside # its associated data. 
self.serv_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1) self.serv_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVTCLASS, 1) self.misc_event.set() msg, ancdata, flags, addr = self.doRecvmsg( self.serv_sock, len(MSG), socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1) self.assertEqual(msg, MSG) self.checkRecvmsgAddress(addr, self.cli_addr) self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC) cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT} cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0) self.assertEqual(cmsg_level, socket.IPPROTO_IPV6) cmsg_types.remove(cmsg_type) self.assertEqual(len(cmsg_data), SIZEOF_INT) a = array.array("i") a.frombytes(cmsg_data) self.assertGreaterEqual(a[0], 0) self.assertLessEqual(a[0], 255) if ancdata: cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0) self.assertEqual(cmsg_level, socket.IPPROTO_IPV6) cmsg_types.remove(cmsg_type) self.assertLess(len(cmsg_data), SIZEOF_INT) self.assertEqual(ancdata, []) @testSecondCmsgTruncInData.client_skip def _testSecondCmsgTruncInData(self): self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout)) self.sendToServer(MSG) # Derive concrete test classes for different socket types. class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase, SendrecvmsgConnectionlessBase, ThreadedSocketTestMixin, UDPTestBase): pass @requireAttrs(socket.socket, "sendmsg") class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase): pass @requireAttrs(socket.socket, "recvmsg") class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase): pass @requireAttrs(socket.socket, "recvmsg_into") class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase): pass class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase, SendrecvmsgConnectionlessBase, ThreadedSocketTestMixin, UDP6TestBase): def checkRecvmsgAddress(self, addr1, addr2): # Called to compare the received address with the address of # the peer, ignoring scope ID self.assertEqual(addr1[:-1], addr2[:-1]) @requireAttrs(socket.socket, "sendmsg") @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.') @requireSocket("AF_INET6", "SOCK_DGRAM") class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase): pass @requireAttrs(socket.socket, "recvmsg") @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.') @requireSocket("AF_INET6", "SOCK_DGRAM") class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase): pass @requireAttrs(socket.socket, "recvmsg_into") @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.') @requireSocket("AF_INET6", "SOCK_DGRAM") class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase): pass @requireAttrs(socket.socket, "recvmsg") @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.') @requireAttrs(socket, "IPPROTO_IPV6") @requireSocket("AF_INET6", "SOCK_DGRAM") class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest, SendrecvmsgUDP6TestBase): pass @requireAttrs(socket.socket, "recvmsg_into") @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.') @requireAttrs(socket, "IPPROTO_IPV6") @requireSocket("AF_INET6", "SOCK_DGRAM") class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin, RFC3542AncillaryTest, SendrecvmsgUDP6TestBase): pass @unittest.skipUnless(HAVE_SOCKET_UDPLITE, 'UDPLITE sockets required for this test.') class SendrecvmsgUDPLITETestBase(SendrecvmsgDgramFlagsBase, SendrecvmsgConnectionlessBase, ThreadedSocketTestMixin, UDPLITETestBase): pass 
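# A condensed, standalone sketch of the RFC 3542 recvmsg() pattern that the
# ancillary-data test classes above exercise. Illustrative only: the name
# _hoplimit_sketch is not used anywhere in this suite, and the code assumes
# a Unix platform with IPv6 loopback and the CMSG_SPACE/IPV6_RECVHOPLIMIT
# attributes (socket, array and SIZEOF_INT are already defined at the top
# of this module).
def _hoplimit_sketch():
    recv = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    send = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    with recv, send:
        recv.bind(("::1", 0))
        # Ask the kernel to attach each datagram's hop limit as a
        # control message.
        recv.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1)
        send.sendto(b"ping", recv.getsockname())
        # CMSG_SPACE() sizes the ancillary buffer for one int-sized item,
        # including the cmsghdr and its alignment padding.
        msg, ancdata, flags, addr = recv.recvmsg(
            4, socket.CMSG_SPACE(SIZEOF_INT))
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            if (cmsg_level == socket.IPPROTO_IPV6
                    and cmsg_type == socket.IPV6_HOPLIMIT):
                a = array.array("i")
                a.frombytes(cmsg_data)
                return a[0]  # e.g. 64, a common default on loopback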
@unittest.skipUnless(HAVE_SOCKET_UDPLITE, 'UDPLITE sockets required for this test.') @requireAttrs(socket.socket, "sendmsg") class SendmsgUDPLITETest(SendmsgConnectionlessTests, SendrecvmsgUDPLITETestBase): pass @unittest.skipUnless(HAVE_SOCKET_UDPLITE, 'UDPLITE sockets required for this test.') @requireAttrs(socket.socket, "recvmsg") class RecvmsgUDPLITETest(RecvmsgTests, SendrecvmsgUDPLITETestBase): pass @unittest.skipUnless(HAVE_SOCKET_UDPLITE, 'UDPLITE sockets required for this test.') @requireAttrs(socket.socket, "recvmsg_into") class RecvmsgIntoUDPLITETest(RecvmsgIntoTests, SendrecvmsgUDPLITETestBase): pass @unittest.skipUnless(HAVE_SOCKET_UDPLITE, 'UDPLITE sockets required for this test.') class SendrecvmsgUDPLITE6TestBase(SendrecvmsgDgramFlagsBase, SendrecvmsgConnectionlessBase, ThreadedSocketTestMixin, UDPLITE6TestBase): def checkRecvmsgAddress(self, addr1, addr2): # Called to compare the received address with the address of # the peer, ignoring scope ID self.assertEqual(addr1[:-1], addr2[:-1]) @requireAttrs(socket.socket, "sendmsg") @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.') @unittest.skipUnless(HAVE_SOCKET_UDPLITE, 'UDPLITE sockets required for this test.') @requireSocket("AF_INET6", "SOCK_DGRAM") class SendmsgUDPLITE6Test(SendmsgConnectionlessTests, SendrecvmsgUDPLITE6TestBase): pass @requireAttrs(socket.socket, "recvmsg") @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.') @unittest.skipUnless(HAVE_SOCKET_UDPLITE, 'UDPLITE sockets required for this test.') @requireSocket("AF_INET6", "SOCK_DGRAM") class RecvmsgUDPLITE6Test(RecvmsgTests, SendrecvmsgUDPLITE6TestBase): pass @requireAttrs(socket.socket, "recvmsg_into") @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.') @unittest.skipUnless(HAVE_SOCKET_UDPLITE, 'UDPLITE sockets required for this test.') @requireSocket("AF_INET6", "SOCK_DGRAM") class RecvmsgIntoUDPLITE6Test(RecvmsgIntoTests, SendrecvmsgUDPLITE6TestBase): pass @requireAttrs(socket.socket, "recvmsg") @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.') @unittest.skipUnless(HAVE_SOCKET_UDPLITE, 'UDPLITE sockets required for this test.') @requireAttrs(socket, "IPPROTO_IPV6") @requireSocket("AF_INET6", "SOCK_DGRAM") class RecvmsgRFC3542AncillaryUDPLITE6Test(RFC3542AncillaryTest, SendrecvmsgUDPLITE6TestBase): pass @requireAttrs(socket.socket, "recvmsg_into") @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.') @unittest.skipUnless(HAVE_SOCKET_UDPLITE, 'UDPLITE sockets required for this test.') @requireAttrs(socket, "IPPROTO_IPV6") @requireSocket("AF_INET6", "SOCK_DGRAM") class RecvmsgIntoRFC3542AncillaryUDPLITE6Test(RecvmsgIntoMixin, RFC3542AncillaryTest, SendrecvmsgUDPLITE6TestBase): pass class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase, ConnectedStreamTestMixin, TCPTestBase): pass @requireAttrs(socket.socket, "sendmsg") class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase): pass @requireAttrs(socket.socket, "recvmsg") class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests, SendrecvmsgTCPTestBase): pass @requireAttrs(socket.socket, "recvmsg_into") class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests, SendrecvmsgTCPTestBase): pass class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase, SendrecvmsgConnectedBase, ConnectedStreamTestMixin, SCTPStreamBase): pass @requireAttrs(socket.socket, "sendmsg") @unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX") 
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP") class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase): pass @requireAttrs(socket.socket, "recvmsg") @unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX") @requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP") class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests, SendrecvmsgSCTPStreamTestBase): def testRecvmsgEOF(self): try: super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF() except OSError as e: if e.errno != errno.ENOTCONN: raise self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876") @requireAttrs(socket.socket, "recvmsg_into") @unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX") @requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP") class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests, SendrecvmsgSCTPStreamTestBase): def testRecvmsgEOF(self): try: super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF() except OSError as e: if e.errno != errno.ENOTCONN: raise self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876") class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase, ConnectedStreamTestMixin, UnixStreamBase): pass @requireAttrs(socket.socket, "sendmsg") @requireAttrs(socket, "AF_UNIX") class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase): pass @requireAttrs(socket.socket, "recvmsg") @requireAttrs(socket, "AF_UNIX") class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests, SendrecvmsgUnixStreamTestBase): pass @requireAttrs(socket.socket, "recvmsg_into") @requireAttrs(socket, "AF_UNIX") class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests, SendrecvmsgUnixStreamTestBase): pass @requireAttrs(socket.socket, "sendmsg", "recvmsg") @requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS") class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase): pass @requireAttrs(socket.socket, "sendmsg", "recvmsg_into") @requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS") class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest, SendrecvmsgUnixStreamTestBase): pass # Test interrupting the interruptible send/receive methods with a # signal when a timeout is set. These tests avoid having multiple # threads alive during the test so that the OS cannot deliver the # signal to the wrong one. class InterruptedTimeoutBase(unittest.TestCase): # Base class for interrupted send/receive tests. Installs an # empty handler for SIGALRM and removes it on teardown, along with # any scheduled alarms. def setUp(self): super().setUp() orig_alrm_handler = signal.signal(signal.SIGALRM, lambda signum, frame: 1 / 0) self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler) # Timeout for socket operations timeout = support.LOOPBACK_TIMEOUT # Provide setAlarm() method to schedule delivery of SIGALRM after # given number of seconds, or cancel it if zero, and an # appropriate time value to use. Use setitimer() if available. if hasattr(signal, "setitimer"): alarm_time = 0.05 def setAlarm(self, seconds): signal.setitimer(signal.ITIMER_REAL, seconds) else: # Old systems may deliver the alarm up to one second early alarm_time = 2 def setAlarm(self, seconds): signal.alarm(seconds) # Require siginterrupt() in order to ensure that system calls are # interrupted by default. 
@requireAttrs(signal, "siginterrupt") @unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"), "Don't have signal.alarm or signal.setitimer") class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase): # Test interrupting the recv*() methods with signals when a # timeout is set. def setUp(self): super().setUp() self.serv.settimeout(self.timeout) def checkInterruptedRecv(self, func, *args, **kwargs): # Check that func(*args, **kwargs) raises # errno of EINTR when interrupted by a signal. try: self.setAlarm(self.alarm_time) with self.assertRaises(ZeroDivisionError) as cm: func(*args, **kwargs) finally: self.setAlarm(0) def testInterruptedRecvTimeout(self): self.checkInterruptedRecv(self.serv.recv, 1024) def testInterruptedRecvIntoTimeout(self): self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024)) def testInterruptedRecvfromTimeout(self): self.checkInterruptedRecv(self.serv.recvfrom, 1024) def testInterruptedRecvfromIntoTimeout(self): self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024)) @requireAttrs(socket.socket, "recvmsg") def testInterruptedRecvmsgTimeout(self): self.checkInterruptedRecv(self.serv.recvmsg, 1024) @requireAttrs(socket.socket, "recvmsg_into") def testInterruptedRecvmsgIntoTimeout(self): self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)]) # Require siginterrupt() in order to ensure that system calls are # interrupted by default. @requireAttrs(signal, "siginterrupt") @unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"), "Don't have signal.alarm or signal.setitimer") class InterruptedSendTimeoutTest(InterruptedTimeoutBase, ThreadSafeCleanupTestCase, SocketListeningTestMixin, TCPTestBase): # Test interrupting the interruptible send*() methods with signals # when a timeout is set. def setUp(self): super().setUp() self.serv_conn = self.newSocket() self.addCleanup(self.serv_conn.close) # Use a thread to complete the connection, but wait for it to # terminate before running the test, so that there is only one # thread to accept the signal. cli_thread = threading.Thread(target=self.doConnect) cli_thread.start() self.cli_conn, addr = self.serv.accept() self.addCleanup(self.cli_conn.close) cli_thread.join() self.serv_conn.settimeout(self.timeout) def doConnect(self): self.serv_conn.connect(self.serv_addr) def checkInterruptedSend(self, func, *args, **kwargs): # Check that func(*args, **kwargs), run in a loop, raises # OSError with an errno of EINTR when interrupted by a # signal. try: with self.assertRaises(ZeroDivisionError) as cm: while True: self.setAlarm(self.alarm_time) func(*args, **kwargs) finally: self.setAlarm(0) # Issue #12958: The following tests have problems on OS X prior to 10.7 @support.requires_mac_ver(10, 7) def testInterruptedSendTimeout(self): self.checkInterruptedSend(self.serv_conn.send, b"a"*512) @support.requires_mac_ver(10, 7) def testInterruptedSendtoTimeout(self): # Passing an actual address here as Python's wrapper for # sendto() doesn't allow passing a zero-length one; POSIX # requires that the address is ignored since the socket is # connection-mode, however. 
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512, self.serv_addr) @support.requires_mac_ver(10, 7) @requireAttrs(socket.socket, "sendmsg") def testInterruptedSendmsgTimeout(self): self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512]) class TCPCloserTest(ThreadedTCPSocketTest): def testClose(self): conn, addr = self.serv.accept() conn.close() sd = self.cli read, write, err = select.select([sd], [], [], 1.0) self.assertEqual(read, [sd]) self.assertEqual(sd.recv(1), b'') # Calling close() many times should be safe. conn.close() conn.close() def _testClose(self): self.cli.connect((HOST, self.port)) time.sleep(1.0) class BasicSocketPairTest(SocketPairTest): def __init__(self, methodName='runTest'): SocketPairTest.__init__(self, methodName=methodName) def _check_defaults(self, sock): self.assertIsInstance(sock, socket.socket) if hasattr(socket, 'AF_UNIX'): self.assertEqual(sock.family, socket.AF_UNIX) else: self.assertEqual(sock.family, socket.AF_INET) self.assertEqual(sock.type, socket.SOCK_STREAM) self.assertEqual(sock.proto, 0) def _testDefaults(self): self._check_defaults(self.cli) def testDefaults(self): self._check_defaults(self.serv) def testRecv(self): msg = self.serv.recv(1024) self.assertEqual(msg, MSG) def _testRecv(self): self.cli.send(MSG) def testSend(self): self.serv.send(MSG) def _testSend(self): msg = self.cli.recv(1024) self.assertEqual(msg, MSG) class NonBlockingTCPTests(ThreadedTCPSocketTest): def __init__(self, methodName='runTest'): self.event = threading.Event() ThreadedTCPSocketTest.__init__(self, methodName=methodName) def assert_sock_timeout(self, sock, timeout): self.assertEqual(self.serv.gettimeout(), timeout) blocking = (timeout != 0.0) self.assertEqual(sock.getblocking(), blocking) if fcntl is not None: # When a Python socket has a non-zero timeout, it's switched # internally to a non-blocking mode. Later, sock.sendall(), # sock.recv(), and other socket operations use a select() call and # handle EWOULDBLOCK/EAGAIN on all socket operations. That's how # timeouts are enforced.
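# Consequently, at the descriptor level O_NONBLOCK should be clear only # when the Python-level timeout is None; the fcntl() check below verifies # exactly that.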
fd_blocking = (timeout is None) flag = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK) self.assertEqual(not bool(flag & os.O_NONBLOCK), fd_blocking) def testSetBlocking(self): # Test setblocking() and settimeout() methods self.serv.setblocking(True) self.assert_sock_timeout(self.serv, None) self.serv.setblocking(False) self.assert_sock_timeout(self.serv, 0.0) self.serv.settimeout(None) self.assert_sock_timeout(self.serv, None) self.serv.settimeout(0) self.assert_sock_timeout(self.serv, 0) self.serv.settimeout(10) self.assert_sock_timeout(self.serv, 10) self.serv.settimeout(0) self.assert_sock_timeout(self.serv, 0) def _testSetBlocking(self): pass @support.cpython_only def testSetBlocking_overflow(self): # Issue 15989 import _testcapi if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX: self.skipTest('needs UINT_MAX < ULONG_MAX') self.serv.setblocking(False) self.assertEqual(self.serv.gettimeout(), 0.0) self.serv.setblocking(_testcapi.UINT_MAX + 1) self.assertIsNone(self.serv.gettimeout()) _testSetBlocking_overflow = support.cpython_only(_testSetBlocking) @unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'), 'test needs socket.SOCK_NONBLOCK') @support.requires_linux_version(2, 6, 28) def testInitNonBlocking(self): # create a socket with SOCK_NONBLOCK self.serv.close() self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM | socket.SOCK_NONBLOCK) self.assert_sock_timeout(self.serv, 0) def _testInitNonBlocking(self): pass def testInheritFlagsBlocking(self): # bpo-7995: accept() on a listening socket with a timeout and the # default timeout is None, the resulting socket must be blocking. with socket_setdefaulttimeout(None): self.serv.settimeout(10) conn, addr = self.serv.accept() self.addCleanup(conn.close) self.assertIsNone(conn.gettimeout()) def _testInheritFlagsBlocking(self): self.cli.connect((HOST, self.port)) def testInheritFlagsTimeout(self): # bpo-7995: accept() on a listening socket with a timeout and the # default timeout is set, the resulting socket must inherit # the default timeout.
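# The listening socket's own 10-second timeout must not leak into the # accepted socket; only the module-wide default installed via # setdefaulttimeout() is inherited.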
default_timeout = 20.0 with socket_setdefaulttimeout(default_timeout): self.serv.settimeout(10) conn, addr = self.serv.accept() self.addCleanup(conn.close) self.assertEqual(conn.gettimeout(), default_timeout) def _testInheritFlagsTimeout(self): self.cli.connect((HOST, self.port)) def testAccept(self): # Testing non-blocking accept self.serv.setblocking(False) # connect() didn't start: non-blocking accept() fails start_time = time.monotonic() with self.assertRaises(BlockingIOError): conn, addr = self.serv.accept() dt = time.monotonic() - start_time self.assertLess(dt, 1.0) self.event.set() read, write, err = select.select([self.serv], [], [], support.LONG_TIMEOUT) if self.serv not in read: self.fail("Error trying to do accept after select.") # connect() completed: non-blocking accept() doesn't block conn, addr = self.serv.accept() self.addCleanup(conn.close) self.assertIsNone(conn.gettimeout()) def _testAccept(self): # don't connect before event is set to check # that non-blocking accept() raises BlockingIOError self.event.wait() self.cli.connect((HOST, self.port)) def testRecv(self): # Testing non-blocking recv conn, addr = self.serv.accept() self.addCleanup(conn.close) conn.setblocking(False) # the client didn't send data yet: non-blocking recv() fails with self.assertRaises(BlockingIOError): msg = conn.recv(len(MSG)) self.event.set() read, write, err = select.select([conn], [], [], support.LONG_TIMEOUT) if conn not in read: self.fail("Error during select call to non-blocking socket.") # the client sent the data: non-blocking recv() doesn't block msg = conn.recv(len(MSG)) self.assertEqual(msg, MSG) def _testRecv(self): self.cli.connect((HOST, self.port)) # don't send anything before event is set to check # that non-blocking recv() raises BlockingIOError self.event.wait() # send data: recv() will no longer block self.cli.sendall(MSG) class FileObjectClassTestCase(SocketConnectedTest): """Unit tests for the object returned by socket.makefile() self.read_file is the io object returned by makefile() on the client connection. You can read from this file to get output from the server. self.write_file is the io object returned by makefile() on the server connection. You can write to this file to send output to the client. """ bufsize = -1 # Use default buffer size encoding = 'utf-8' errors = 'strict' newline = None read_mode = 'rb' read_msg = MSG write_mode = 'wb' write_msg = MSG def __init__(self, methodName='runTest'): SocketConnectedTest.__init__(self, methodName=methodName) def setUp(self): self.evt1, self.evt2, self.serv_finished, self.cli_finished = [ threading.Event() for i in range(4)] SocketConnectedTest.setUp(self) self.read_file = self.cli_conn.makefile( self.read_mode, self.bufsize, encoding = self.encoding, errors = self.errors, newline = self.newline) def tearDown(self): self.serv_finished.set() self.read_file.close() self.assertTrue(self.read_file.closed) self.read_file = None SocketConnectedTest.tearDown(self) def clientSetUp(self): SocketConnectedTest.clientSetUp(self) self.write_file = self.serv_conn.makefile( self.write_mode, self.bufsize, encoding = self.encoding, errors = self.errors, newline = self.newline) def clientTearDown(self): self.cli_finished.set() self.write_file.close() self.assertTrue(self.write_file.closed) self.write_file = None SocketConnectedTest.clientTearDown(self) def testReadAfterTimeout(self): # Issue #7322: A file object must disallow further reads # after a timeout has occurred.
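# Rationale: once a read times out part-way, the buffered file object can # no longer tell how much of the stream was actually consumed, so it # refuses further reads rather than return possibly corrupt data.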
self.cli_conn.settimeout(1) self.read_file.read(3) # First read raises a timeout self.assertRaises(TimeoutError, self.read_file.read, 1) # Second read is disallowed with self.assertRaises(OSError) as ctx: self.read_file.read(1) self.assertIn("cannot read from timed out object", str(ctx.exception)) def _testReadAfterTimeout(self): self.write_file.write(self.write_msg[0:3]) self.write_file.flush() self.serv_finished.wait() def testSmallRead(self): # Performing small file read test first_seg = self.read_file.read(len(self.read_msg)-3) second_seg = self.read_file.read(3) msg = first_seg + second_seg self.assertEqual(msg, self.read_msg) def _testSmallRead(self): self.write_file.write(self.write_msg) self.write_file.flush() def testFullRead(self): # read until EOF msg = self.read_file.read() self.assertEqual(msg, self.read_msg) def _testFullRead(self): self.write_file.write(self.write_msg) self.write_file.close() def testUnbufferedRead(self): # Performing unbuffered file read test buf = type(self.read_msg)() while 1: char = self.read_file.read(1) if not char: break buf += char self.assertEqual(buf, self.read_msg) def _testUnbufferedRead(self): self.write_file.write(self.write_msg) self.write_file.flush() def testReadline(self): # Performing file readline test line = self.read_file.readline() self.assertEqual(line, self.read_msg) def _testReadline(self): self.write_file.write(self.write_msg) self.write_file.flush() def testCloseAfterMakefile(self): # The file returned by makefile should keep the socket open. self.cli_conn.close() # read until EOF msg = self.read_file.read() self.assertEqual(msg, self.read_msg) def _testCloseAfterMakefile(self): self.write_file.write(self.write_msg) self.write_file.flush() def testMakefileAfterMakefileClose(self): self.read_file.close() msg = self.cli_conn.recv(len(MSG)) if isinstance(self.read_msg, str): msg = msg.decode() self.assertEqual(msg, self.read_msg) def _testMakefileAfterMakefileClose(self): self.write_file.write(self.write_msg) self.write_file.flush() def testClosedAttr(self): self.assertTrue(not self.read_file.closed) def _testClosedAttr(self): self.assertTrue(not self.write_file.closed) def testAttributes(self): self.assertEqual(self.read_file.mode, self.read_mode) self.assertEqual(self.read_file.name, self.cli_conn.fileno()) def _testAttributes(self): self.assertEqual(self.write_file.mode, self.write_mode) self.assertEqual(self.write_file.name, self.serv_conn.fileno()) def testRealClose(self): self.read_file.close() self.assertRaises(ValueError, self.read_file.fileno) self.cli_conn.close() self.assertRaises(OSError, self.cli_conn.getsockname) def _testRealClose(self): pass class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase): """Repeat the tests from FileObjectClassTestCase with bufsize==0. In this case (and in this case only), it should be possible to create a file object, read a line from it, create another file object, read another line from it, without loss of data in the first file object's buffer. Note that http.client relies on this when reading multiple requests from the same socket.""" bufsize = 0 # Use unbuffered mode def testUnbufferedReadline(self): # Read a line, create a new file object, read another line with it line = self.read_file.readline() # first line self.assertEqual(line, b"A. " + self.write_msg) # first line self.read_file = self.cli_conn.makefile('rb', 0) line = self.read_file.readline() # second line self.assertEqual(line, b"B. 
" + self.write_msg) # second line def _testUnbufferedReadline(self): self.write_file.write(b"A. " + self.write_msg) self.write_file.write(b"B. " + self.write_msg) self.write_file.flush() def testMakefileClose(self): # The file returned by makefile should keep the socket open... self.cli_conn.close() msg = self.cli_conn.recv(1024) self.assertEqual(msg, self.read_msg) # ...until the file is itself closed self.read_file.close() self.assertRaises(OSError, self.cli_conn.recv, 1024) def _testMakefileClose(self): self.write_file.write(self.write_msg) self.write_file.flush() def testMakefileCloseSocketDestroy(self): refcount_before = sys.getrefcount(self.cli_conn) self.read_file.close() refcount_after = sys.getrefcount(self.cli_conn) self.assertEqual(refcount_before - 1, refcount_after) def _testMakefileCloseSocketDestroy(self): pass # Non-blocking ops # NOTE: to set `read_file` as non-blocking, we must call # `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp). def testSmallReadNonBlocking(self): self.cli_conn.setblocking(False) self.assertEqual(self.read_file.readinto(bytearray(10)), None) self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None) self.evt1.set() self.evt2.wait(1.0) first_seg = self.read_file.read(len(self.read_msg) - 3) if first_seg is None: # Data not arrived (can happen under Windows), wait a bit time.sleep(0.5) first_seg = self.read_file.read(len(self.read_msg) - 3) buf = bytearray(10) n = self.read_file.readinto(buf) self.assertEqual(n, 3) msg = first_seg + buf[:n] self.assertEqual(msg, self.read_msg) self.assertEqual(self.read_file.readinto(bytearray(16)), None) self.assertEqual(self.read_file.read(1), None) def _testSmallReadNonBlocking(self): self.evt1.wait(1.0) self.write_file.write(self.write_msg) self.write_file.flush() self.evt2.set() # Avoid closing the socket before the server test has finished, # otherwise system recv() will return 0 instead of EWOULDBLOCK. self.serv_finished.wait(5.0) def testWriteNonBlocking(self): self.cli_finished.wait(5.0) # The client thread can't skip directly - the SkipTest exception # would appear as a failure. if self.serv_skipped: self.skipTest(self.serv_skipped) def _testWriteNonBlocking(self): self.serv_skipped = None self.serv_conn.setblocking(False) # Try to saturate the socket buffer pipe with repeated large writes. BIG = b"x" * support.SOCK_MAX_SIZE LIMIT = 10 # The first write() succeeds since a chunk of data can be buffered n = self.write_file.write(BIG) self.assertGreater(n, 0) for i in range(LIMIT): n = self.write_file.write(BIG) if n is None: # Succeeded break self.assertGreater(n, 0) else: # Let us know that this test didn't manage to establish # the expected conditions. This is not a failure in itself but, # if it happens repeatedly, the test should be fixed. 
self.serv_skipped = "failed to saturate the socket buffer" class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase): bufsize = 1 # Default-buffered for reading; line-buffered for writing class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase): bufsize = 2 # Exercise the buffering code class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase): """Tests for socket.makefile() in text mode (rather than binary)""" read_mode = 'r' read_msg = MSG.decode('utf-8') write_mode = 'wb' write_msg = MSG newline = '' class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase): """Tests for socket.makefile() in text mode (rather than binary)""" read_mode = 'rb' read_msg = MSG write_mode = 'w' write_msg = MSG.decode('utf-8') newline = '' class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase): """Tests for socket.makefile() in text mode (rather than binary)""" read_mode = 'r' read_msg = MSG.decode('utf-8') write_mode = 'w' write_msg = MSG.decode('utf-8') newline = '' class NetworkConnectionTest(object): """Prove network connection.""" def clientSetUp(self): # We're inherited below by BasicTCPTest2, which also inherits # BasicTCPTest, which defines self.port referenced below. self.cli = socket.create_connection((HOST, self.port)) self.serv_conn = self.cli class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest): """Tests that NetworkConnection does not break existing TCP functionality. """ class NetworkConnectionNoServer(unittest.TestCase): class MockSocket(socket.socket): def connect(self, *args): raise TimeoutError('timed out') @contextlib.contextmanager def mocked_socket_module(self): """Return a socket which times out on connect""" old_socket = socket.socket socket.socket = self.MockSocket try: yield finally: socket.socket = old_socket def test_connect(self): port = socket_helper.find_unused_port() cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.addCleanup(cli.close) with self.assertRaises(OSError) as cm: cli.connect((HOST, port)) self.assertEqual(cm.exception.errno, errno.ECONNREFUSED) def test_create_connection(self): # Issue #9792: errors raised by create_connection() should have # a proper errno attribute. port = socket_helper.find_unused_port() with self.assertRaises(OSError) as cm: socket.create_connection((HOST, port)) # Issue #16257: create_connection() calls getaddrinfo() against # 'localhost'. This may result in an IPV6 addr being returned # as well as an IPV4 one: # >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM) # >>> [(2, 2, 0, '', ('127.0.0.1', 41230)), # (26, 2, 0, '', ('::1', 41230, 0, 0))] # # create_connection() enumerates through all the addresses returned # and if it doesn't successfully bind to any of them, it propagates # the last exception it encountered. # # On Solaris, ENETUNREACH is returned in this circumstance instead # of ECONNREFUSED. So, if that errno exists, add it to our list of # expected errnos. expected_errnos = socket_helper.get_socket_conn_refused_errs() self.assertIn(cm.exception.errno, expected_errnos) def test_create_connection_timeout(self): # Issue #9792: create_connection() should not recast timeout errors # as generic socket errors. 
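# The mocked socket class below makes every connect() raise TimeoutError # immediately, letting the test observe how create_connection() # propagates the error without touching the network.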
with self.mocked_socket_module(): try: socket.create_connection((HOST, 1234)) except TimeoutError: pass except OSError as exc: if socket_helper.IPV6_ENABLED or exc.errno != errno.EAFNOSUPPORT: raise else: self.fail('TimeoutError not raised') class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest): def __init__(self, methodName='runTest'): SocketTCPTest.__init__(self, methodName=methodName) ThreadableTest.__init__(self) def clientSetUp(self): self.source_port = socket_helper.find_unused_port() def clientTearDown(self): self.cli.close() self.cli = None ThreadableTest.clientTearDown(self) def _justAccept(self): conn, addr = self.serv.accept() conn.close() testFamily = _justAccept def _testFamily(self): self.cli = socket.create_connection((HOST, self.port), timeout=support.LOOPBACK_TIMEOUT) self.addCleanup(self.cli.close) self.assertEqual(self.cli.family, 2) testSourceAddress = _justAccept def _testSourceAddress(self): self.cli = socket.create_connection((HOST, self.port), timeout=support.LOOPBACK_TIMEOUT, source_address=('', self.source_port)) self.addCleanup(self.cli.close) self.assertEqual(self.cli.getsockname()[1], self.source_port) # The port number being used is sufficient to show that the bind() # call happened. testTimeoutDefault = _justAccept def _testTimeoutDefault(self): # passing no explicit timeout uses socket's global default self.assertTrue(socket.getdefaulttimeout() is None) socket.setdefaulttimeout(42) try: self.cli = socket.create_connection((HOST, self.port)) self.addCleanup(self.cli.close) finally: socket.setdefaulttimeout(None) self.assertEqual(self.cli.gettimeout(), 42) testTimeoutNone = _justAccept def _testTimeoutNone(self): # None timeout means the same as sock.settimeout(None) self.assertTrue(socket.getdefaulttimeout() is None) socket.setdefaulttimeout(30) try: self.cli = socket.create_connection((HOST, self.port), timeout=None) self.addCleanup(self.cli.close) finally: socket.setdefaulttimeout(None) self.assertEqual(self.cli.gettimeout(), None) testTimeoutValueNamed = _justAccept def _testTimeoutValueNamed(self): self.cli = socket.create_connection((HOST, self.port), timeout=30) self.assertEqual(self.cli.gettimeout(), 30) testTimeoutValueNonamed = _justAccept def _testTimeoutValueNonamed(self): self.cli = socket.create_connection((HOST, self.port), 30) self.addCleanup(self.cli.close) self.assertEqual(self.cli.gettimeout(), 30) class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest): def __init__(self, methodName='runTest'): SocketTCPTest.__init__(self, methodName=methodName) ThreadableTest.__init__(self) def clientSetUp(self): pass def clientTearDown(self): self.cli.close() self.cli = None ThreadableTest.clientTearDown(self) def testInsideTimeout(self): conn, addr = self.serv.accept() self.addCleanup(conn.close) time.sleep(3) conn.send(b"done!") testOutsideTimeout = testInsideTimeout def _testInsideTimeout(self): self.cli = sock = socket.create_connection((HOST, self.port)) data = sock.recv(5) self.assertEqual(data, b"done!") def _testOutsideTimeout(self): self.cli = sock = socket.create_connection((HOST, self.port), timeout=1) self.assertRaises(TimeoutError, lambda: sock.recv(5)) class TCPTimeoutTest(SocketTCPTest): def testTCPTimeout(self): def raise_timeout(*args, **kwargs): self.serv.settimeout(1.0) self.serv.accept() self.assertRaises(TimeoutError, raise_timeout, "Error generating a timeout exception (TCP)") def testTimeoutZero(self): ok = False try: self.serv.settimeout(0.0) foo = self.serv.accept() except TimeoutError: self.fail("caught 
timeout instead of error (TCP)") except OSError: ok = True except: self.fail("caught unexpected exception (TCP)") if not ok: self.fail("accept() returned success when we did not expect it") @unittest.skipUnless(hasattr(signal, 'alarm'), 'test needs signal.alarm()') def testInterruptedTimeout(self): # XXX I don't know how to do this test on MSWindows or any other # platform that doesn't support signal.alarm() or os.kill(), though # the bug should have existed on all platforms. self.serv.settimeout(5.0) # must be longer than alarm class Alarm(Exception): pass def alarm_handler(signal, frame): raise Alarm old_alarm = signal.signal(signal.SIGALRM, alarm_handler) try: try: signal.alarm(2) # POSIX allows alarm to be up to 1 second early foo = self.serv.accept() except TimeoutError: self.fail("caught timeout instead of Alarm") except Alarm: pass except: self.fail("caught other exception instead of Alarm:" " %s(%s):\n%s" % (sys.exc_info()[:2] + (traceback.format_exc(),))) else: self.fail("nothing caught") finally: signal.alarm(0) # shut off alarm except Alarm: self.fail("got Alarm in wrong place") finally: # no alarm can be pending. Safe to restore old handler. signal.signal(signal.SIGALRM, old_alarm) class UDPTimeoutTest(SocketUDPTest): def testUDPTimeout(self): def raise_timeout(*args, **kwargs): self.serv.settimeout(1.0) self.serv.recv(1024) self.assertRaises(TimeoutError, raise_timeout, "Error generating a timeout exception (UDP)") def testTimeoutZero(self): ok = False try: self.serv.settimeout(0.0) foo = self.serv.recv(1024) except TimeoutError: self.fail("caught timeout instead of error (UDP)") except OSError: ok = True except: self.fail("caught unexpected exception (UDP)") if not ok: self.fail("recv() returned success when we did not expect it") @unittest.skipUnless(HAVE_SOCKET_UDPLITE, 'UDPLITE sockets required for this test.') class UDPLITETimeoutTest(SocketUDPLITETest): def testUDPLITETimeout(self): def raise_timeout(*args, **kwargs): self.serv.settimeout(1.0) self.serv.recv(1024) self.assertRaises(TimeoutError, raise_timeout, "Error generating a timeout exception (UDPLITE)") def testTimeoutZero(self): ok = False try: self.serv.settimeout(0.0) foo = self.serv.recv(1024) except TimeoutError: self.fail("caught timeout instead of error (UDPLITE)") except OSError: ok = True except: self.fail("caught unexpected exception (UDPLITE)") if not ok: self.fail("recv() returned success when we did not expect it") class TestExceptions(unittest.TestCase): def testExceptionTree(self): self.assertTrue(issubclass(OSError, Exception)) self.assertTrue(issubclass(socket.herror, OSError)) self.assertTrue(issubclass(socket.gaierror, OSError)) self.assertTrue(issubclass(socket.timeout, OSError)) self.assertIs(socket.error, OSError) self.assertIs(socket.timeout, TimeoutError) def test_setblocking_invalidfd(self): # Regression test for issue #28471 sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) sock = socket.socket( socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno()) sock0.close() self.addCleanup(sock.detach) with self.assertRaises(OSError): sock.setblocking(False) @unittest.skipUnless(sys.platform == 'linux', 'Linux specific test') class TestLinuxAbstractNamespace(unittest.TestCase): UNIX_PATH_MAX = 108 def testLinuxAbstractNamespace(self): address = b"\x00python-test-hello\x00\xff" with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1: s1.bind(address) s1.listen() with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2: s2.connect(s1.getsockname()) with s1.accept()[0] as s3: 
self.assertEqual(s1.getsockname(), address) self.assertEqual(s2.getpeername(), address) def testMaxName(self): address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1) with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s: s.bind(address) self.assertEqual(s.getsockname(), address) def testNameOverflow(self): address = "\x00" + "h" * self.UNIX_PATH_MAX with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s: self.assertRaises(OSError, s.bind, address) def testStrName(self): # Check that an abstract name can be passed as a string. s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: s.bind("\x00python\x00test\x00") self.assertEqual(s.getsockname(), b"\x00python\x00test\x00") finally: s.close() def testBytearrayName(self): # Check that an abstract name can be passed as a bytearray. with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s: s.bind(bytearray(b"\x00python\x00test\x00")) self.assertEqual(s.getsockname(), b"\x00python\x00test\x00") @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX') class TestUnixDomain(unittest.TestCase): def setUp(self): self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) def tearDown(self): self.sock.close() def encoded(self, path): # Return the given path encoded in the file system encoding, # or skip the test if this is not possible. try: return os.fsencode(path) except UnicodeEncodeError: self.skipTest( "Pathname {0!a} cannot be represented in file " "system encoding {1!r}".format( path, sys.getfilesystemencoding())) def bind(self, sock, path): # Bind the socket try: socket_helper.bind_unix_socket(sock, path) except OSError as e: if str(e) == "AF_UNIX path too long": self.skipTest( "Pathname {0!a} is too long to serve as an AF_UNIX path" .format(path)) else: raise def testUnbound(self): # Issue #30205 (note getsockname() can return None on OS X) self.assertIn(self.sock.getsockname(), ('', None)) def testStrAddr(self): # Test binding to and retrieving a normal string pathname. path = os.path.abspath(os_helper.TESTFN) self.bind(self.sock, path) self.addCleanup(os_helper.unlink, path) self.assertEqual(self.sock.getsockname(), path) def testBytesAddr(self): # Test binding to a bytes pathname. path = os.path.abspath(os_helper.TESTFN) self.bind(self.sock, self.encoded(path)) self.addCleanup(os_helper.unlink, path) self.assertEqual(self.sock.getsockname(), path) def testSurrogateescapeBind(self): # Test binding to a valid non-ASCII pathname, with the # non-ASCII bytes supplied using surrogateescape encoding. path = os.path.abspath(os_helper.TESTFN_UNICODE) b = self.encoded(path) self.bind(self.sock, b.decode("ascii", "surrogateescape")) self.addCleanup(os_helper.unlink, path) self.assertEqual(self.sock.getsockname(), path) def testUnencodableAddr(self): # Test binding to a pathname that cannot be encoded in the # file system encoding. if os_helper.TESTFN_UNENCODABLE is None: self.skipTest("No unencodable filename available") path = os.path.abspath(os_helper.TESTFN_UNENCODABLE) self.bind(self.sock, path) self.addCleanup(os_helper.unlink, path) self.assertEqual(self.sock.getsockname(), path) class BufferIOTest(SocketConnectedTest): """ Test the buffer versions of socket.recv() and socket.send(). 
""" def __init__(self, methodName='runTest'): SocketConnectedTest.__init__(self, methodName=methodName) def testRecvIntoArray(self): buf = array.array("B", [0] * len(MSG)) nbytes = self.cli_conn.recv_into(buf) self.assertEqual(nbytes, len(MSG)) buf = buf.tobytes() msg = buf[:len(MSG)] self.assertEqual(msg, MSG) def _testRecvIntoArray(self): buf = bytes(MSG) self.serv_conn.send(buf) def testRecvIntoBytearray(self): buf = bytearray(1024) nbytes = self.cli_conn.recv_into(buf) self.assertEqual(nbytes, len(MSG)) msg = buf[:len(MSG)] self.assertEqual(msg, MSG) _testRecvIntoBytearray = _testRecvIntoArray def testRecvIntoMemoryview(self): buf = bytearray(1024) nbytes = self.cli_conn.recv_into(memoryview(buf)) self.assertEqual(nbytes, len(MSG)) msg = buf[:len(MSG)] self.assertEqual(msg, MSG) _testRecvIntoMemoryview = _testRecvIntoArray def testRecvFromIntoArray(self): buf = array.array("B", [0] * len(MSG)) nbytes, addr = self.cli_conn.recvfrom_into(buf) self.assertEqual(nbytes, len(MSG)) buf = buf.tobytes() msg = buf[:len(MSG)] self.assertEqual(msg, MSG) def _testRecvFromIntoArray(self): buf = bytes(MSG) self.serv_conn.send(buf) def testRecvFromIntoBytearray(self): buf = bytearray(1024) nbytes, addr = self.cli_conn.recvfrom_into(buf) self.assertEqual(nbytes, len(MSG)) msg = buf[:len(MSG)] self.assertEqual(msg, MSG) _testRecvFromIntoBytearray = _testRecvFromIntoArray def testRecvFromIntoMemoryview(self): buf = bytearray(1024) nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf)) self.assertEqual(nbytes, len(MSG)) msg = buf[:len(MSG)] self.assertEqual(msg, MSG) _testRecvFromIntoMemoryview = _testRecvFromIntoArray def testRecvFromIntoSmallBuffer(self): # See issue #20246. buf = bytearray(8) self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024) def _testRecvFromIntoSmallBuffer(self): self.serv_conn.send(MSG) def testRecvFromIntoEmptyBuffer(self): buf = bytearray() self.cli_conn.recvfrom_into(buf) self.cli_conn.recvfrom_into(buf, 0) _testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray TIPC_STYPE = 2000 TIPC_LOWER = 200 TIPC_UPPER = 210 def isTipcAvailable(): """Check if the TIPC module is loaded The TIPC module is not loaded automatically on Ubuntu and probably other Linux distros. """ if not hasattr(socket, "AF_TIPC"): return False try: f = open("/proc/modules", encoding="utf-8") except (FileNotFoundError, IsADirectoryError, PermissionError): # It's ok if the file does not exist, is a directory or if we # have not the permission to read it. 
return False with f: for line in f: if line.startswith("tipc "): return True return False @unittest.skipUnless(isTipcAvailable(), "TIPC module is not loaded, please 'sudo modprobe tipc'") class TIPCTest(unittest.TestCase): def testRDM(self): srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM) cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM) self.addCleanup(srv.close) self.addCleanup(cli.close) srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE, TIPC_LOWER, TIPC_UPPER) srv.bind(srvaddr) sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE, TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0) cli.sendto(MSG, sendaddr) msg, recvaddr = srv.recvfrom(1024) self.assertEqual(cli.getsockname(), recvaddr) self.assertEqual(msg, MSG) @unittest.skipUnless(isTipcAvailable(), "TIPC module is not loaded, please 'sudo modprobe tipc'") class TIPCThreadableTest(unittest.TestCase, ThreadableTest): def __init__(self, methodName = 'runTest'): unittest.TestCase.__init__(self, methodName = methodName) ThreadableTest.__init__(self) def setUp(self): self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM) self.addCleanup(self.srv.close) self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE, TIPC_LOWER, TIPC_UPPER) self.srv.bind(srvaddr) self.srv.listen() self.serverExplicitReady() self.conn, self.connaddr = self.srv.accept() self.addCleanup(self.conn.close) def clientSetUp(self): # There is a hittable race between serverExplicitReady() and the # accept() call; sleep a little while to avoid it, otherwise # we could get an exception time.sleep(0.1) self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM) self.addCleanup(self.cli.close) addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE, TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0) self.cli.connect(addr) self.cliaddr = self.cli.getsockname() def testStream(self): msg = self.conn.recv(1024) self.assertEqual(msg, MSG) self.assertEqual(self.cliaddr, self.connaddr) def _testStream(self): self.cli.send(MSG) self.cli.close() class ContextManagersTest(ThreadedTCPSocketTest): def _testSocketClass(self): # base test with socket.socket() as sock: self.assertFalse(sock._closed) self.assertTrue(sock._closed) # close inside with block with socket.socket() as sock: sock.close() self.assertTrue(sock._closed) # exception inside with block with socket.socket() as sock: self.assertRaises(OSError, sock.sendall, b'foo') self.assertTrue(sock._closed) def testCreateConnectionBase(self): conn, addr = self.serv.accept() self.addCleanup(conn.close) data = conn.recv(1024) conn.sendall(data) def _testCreateConnectionBase(self): address = self.serv.getsockname() with socket.create_connection(address) as sock: self.assertFalse(sock._closed) sock.sendall(b'foo') self.assertEqual(sock.recv(1024), b'foo') self.assertTrue(sock._closed) def testCreateConnectionClose(self): conn, addr = self.serv.accept() self.addCleanup(conn.close) data = conn.recv(1024) conn.sendall(data) def _testCreateConnectionClose(self): address = self.serv.getsockname() with socket.create_connection(address) as sock: sock.close() self.assertTrue(sock._closed) self.assertRaises(OSError, sock.sendall, b'foo') class InheritanceTest(unittest.TestCase): @unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"), "SOCK_CLOEXEC not defined") @support.requires_linux_version(2, 6, 28) def test_SOCK_CLOEXEC(self): with socket.socket(socket.AF_INET, socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s: self.assertEqual(s.type, 
socket.SOCK_STREAM) self.assertFalse(s.get_inheritable()) def test_default_inheritable(self): sock = socket.socket() with sock: self.assertEqual(sock.get_inheritable(), False) def test_dup(self): sock = socket.socket() with sock: newsock = sock.dup() sock.close() with newsock: self.assertEqual(newsock.get_inheritable(), False) def test_set_inheritable(self): sock = socket.socket() with sock: sock.set_inheritable(True) self.assertEqual(sock.get_inheritable(), True) sock.set_inheritable(False) self.assertEqual(sock.get_inheritable(), False) @unittest.skipIf(fcntl is None, "need fcntl") def test_get_inheritable_cloexec(self): sock = socket.socket() with sock: fd = sock.fileno() self.assertEqual(sock.get_inheritable(), False) # clear FD_CLOEXEC flag flags = fcntl.fcntl(fd, fcntl.F_GETFD) flags &= ~fcntl.FD_CLOEXEC fcntl.fcntl(fd, fcntl.F_SETFD, flags) self.assertEqual(sock.get_inheritable(), True) @unittest.skipIf(fcntl is None, "need fcntl") def test_set_inheritable_cloexec(self): sock = socket.socket() with sock: fd = sock.fileno() self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC, fcntl.FD_CLOEXEC) sock.set_inheritable(True) self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC, 0) def test_socketpair(self): s1, s2 = socket.socketpair() self.addCleanup(s1.close) self.addCleanup(s2.close) self.assertEqual(s1.get_inheritable(), False) self.assertEqual(s2.get_inheritable(), False) @unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"), "SOCK_NONBLOCK not defined") class NonblockConstantTest(unittest.TestCase): def checkNonblock(self, s, nonblock=True, timeout=0.0): if nonblock: self.assertEqual(s.type, socket.SOCK_STREAM) self.assertEqual(s.gettimeout(), timeout) self.assertTrue( fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK) if timeout == 0: # timeout == 0: means that getblocking() must be False. self.assertFalse(s.getblocking()) else: # If timeout > 0, the socket will be in a "blocking" mode # from the standpoint of the Python API. For Python socket # object, "blocking" means that operations like 'sock.recv()' # will block. Internally, file descriptors for # "blocking" Python sockets *with timeouts* are in a # *non-blocking* mode, and 'sock.recv()' uses 'select()' # and handles EWOULDBLOCK/EAGAIN to enforce the timeout. 
self.assertTrue(s.getblocking()) else: self.assertEqual(s.type, socket.SOCK_STREAM) self.assertEqual(s.gettimeout(), None) self.assertFalse( fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK) self.assertTrue(s.getblocking()) @support.requires_linux_version(2, 6, 28) def test_SOCK_NONBLOCK(self): # a lot of it seems silly and redundant, but I wanted to test that # changing back and forth worked ok with socket.socket(socket.AF_INET, socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s: self.checkNonblock(s) s.setblocking(True) self.checkNonblock(s, nonblock=False) s.setblocking(False) self.checkNonblock(s) s.settimeout(None) self.checkNonblock(s, nonblock=False) s.settimeout(2.0) self.checkNonblock(s, timeout=2.0) s.setblocking(True) self.checkNonblock(s, nonblock=False) # defaulttimeout t = socket.getdefaulttimeout() socket.setdefaulttimeout(0.0) with socket.socket() as s: self.checkNonblock(s) socket.setdefaulttimeout(None) with socket.socket() as s: self.checkNonblock(s, False) socket.setdefaulttimeout(2.0) with socket.socket() as s: self.checkNonblock(s, timeout=2.0) socket.setdefaulttimeout(None) with socket.socket() as s: self.checkNonblock(s, False) socket.setdefaulttimeout(t) @unittest.skipUnless(os.name == "nt", "Windows specific") @unittest.skipUnless(multiprocessing, "need multiprocessing") class TestSocketSharing(SocketTCPTest): # This must be classmethod and not staticmethod or multiprocessing # won't be able to bootstrap it. @classmethod def remoteProcessServer(cls, q): # Recreate socket from shared data sdata = q.get() message = q.get() s = socket.fromshare(sdata) s2, c = s.accept() # Send the message s2.sendall(message) s2.close() s.close() def testShare(self): # Transfer the listening server socket to another process # and service it from there. # Create process: q = multiprocessing.Queue() p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,)) p.start() # Get the shared socket data data = self.serv.share(p.pid) # Pass the shared socket to the other process addr = self.serv.getsockname() self.serv.close() q.put(data) # The data that the server will send us message = b"slapmahfro" q.put(message) # Connect s = socket.create_connection(addr) # listen for the data m = [] while True: data = s.recv(100) if not data: break m.append(data) s.close() received = b"".join(m) self.assertEqual(received, message) p.join() def testShareLength(self): data = self.serv.share(os.getpid()) self.assertRaises(ValueError, socket.fromshare, data[:-1]) self.assertRaises(ValueError, socket.fromshare, data+b"foo") def compareSockets(self, org, other): # socket sharing is expected to work only for blocking socket # since the internal python timeout value isn't transferred. self.assertEqual(org.gettimeout(), None) self.assertEqual(org.gettimeout(), other.gettimeout()) self.assertEqual(org.family, other.family) self.assertEqual(org.type, other.type) # If the user specified "0" for proto, then # internally windows will have picked the correct value. # Python introspection on the socket however will still return # 0. For the shared socket, the python value is recreated # from the actual value, so it may not compare correctly. 
if org.proto != 0: self.assertEqual(org.proto, other.proto) def testShareLocal(self): data = self.serv.share(os.getpid()) s = socket.fromshare(data) try: self.compareSockets(self.serv, s) finally: s.close() def testTypes(self): families = [socket.AF_INET, socket.AF_INET6] types = [socket.SOCK_STREAM, socket.SOCK_DGRAM] for f in families: for t in types: try: source = socket.socket(f, t) except OSError: continue # This combination is not supported try: data = source.share(os.getpid()) shared = socket.fromshare(data) try: self.compareSockets(source, shared) finally: shared.close() finally: source.close() class SendfileUsingSendTest(ThreadedTCPSocketTest): """ Test the send() implementation of socket.sendfile(). """ FILESIZE = (10 * 1024 * 1024) # 10 MiB BUFSIZE = 8192 FILEDATA = b"" TIMEOUT = support.LOOPBACK_TIMEOUT @classmethod def setUpClass(cls): def chunks(total, step): assert total >= step while total > step: yield step total -= step if total: yield total chunk = b"".join([random.choice(string.ascii_letters).encode() for i in range(cls.BUFSIZE)]) with open(os_helper.TESTFN, 'wb') as f: for csize in chunks(cls.FILESIZE, cls.BUFSIZE): f.write(chunk) with open(os_helper.TESTFN, 'rb') as f: cls.FILEDATA = f.read() assert len(cls.FILEDATA) == cls.FILESIZE @classmethod def tearDownClass(cls): os_helper.unlink(os_helper.TESTFN) def accept_conn(self): self.serv.settimeout(support.LONG_TIMEOUT) conn, addr = self.serv.accept() conn.settimeout(self.TIMEOUT) self.addCleanup(conn.close) return conn def recv_data(self, conn): received = [] while True: chunk = conn.recv(self.BUFSIZE) if not chunk: break received.append(chunk) return b''.join(received) def meth_from_sock(self, sock): # Depending on the mixin class being run return either send() # or sendfile() method implementation. 
return getattr(sock, "_sendfile_use_send") # regular file def _testRegularFile(self): address = self.serv.getsockname() file = open(os_helper.TESTFN, 'rb') with socket.create_connection(address) as sock, file as file: meth = self.meth_from_sock(sock) sent = meth(file) self.assertEqual(sent, self.FILESIZE) self.assertEqual(file.tell(), self.FILESIZE) def testRegularFile(self): conn = self.accept_conn() data = self.recv_data(conn) self.assertEqual(len(data), self.FILESIZE) self.assertEqual(data, self.FILEDATA) # non regular file def _testNonRegularFile(self): address = self.serv.getsockname() file = io.BytesIO(self.FILEDATA) with socket.create_connection(address) as sock, file as file: sent = sock.sendfile(file) self.assertEqual(sent, self.FILESIZE) self.assertEqual(file.tell(), self.FILESIZE) self.assertRaises(socket._GiveupOnSendfile, sock._sendfile_use_sendfile, file) def testNonRegularFile(self): conn = self.accept_conn() data = self.recv_data(conn) self.assertEqual(len(data), self.FILESIZE) self.assertEqual(data, self.FILEDATA) # empty file def _testEmptyFileSend(self): address = self.serv.getsockname() filename = os_helper.TESTFN + "2" with open(filename, 'wb'): self.addCleanup(os_helper.unlink, filename) file = open(filename, 'rb') with socket.create_connection(address) as sock, file as file: meth = self.meth_from_sock(sock) sent = meth(file) self.assertEqual(sent, 0) self.assertEqual(file.tell(), 0) def testEmptyFileSend(self): conn = self.accept_conn() data = self.recv_data(conn) self.assertEqual(data, b"") # offset def _testOffset(self): address = self.serv.getsockname() file = open(os_helper.TESTFN, 'rb') with socket.create_connection(address) as sock, file as file: meth = self.meth_from_sock(sock) sent = meth(file, offset=5000) self.assertEqual(sent, self.FILESIZE - 5000) self.assertEqual(file.tell(), self.FILESIZE) def testOffset(self): conn = self.accept_conn() data = self.recv_data(conn) self.assertEqual(len(data), self.FILESIZE - 5000) self.assertEqual(data, self.FILEDATA[5000:]) # count def _testCount(self): address = self.serv.getsockname() file = open(os_helper.TESTFN, 'rb') sock = socket.create_connection(address, timeout=support.LOOPBACK_TIMEOUT) with sock, file: count = 5000007 meth = self.meth_from_sock(sock) sent = meth(file, count=count) self.assertEqual(sent, count) self.assertEqual(file.tell(), count) def testCount(self): count = 5000007 conn = self.accept_conn() data = self.recv_data(conn) self.assertEqual(len(data), count) self.assertEqual(data, self.FILEDATA[:count]) # count small def _testCountSmall(self): address = self.serv.getsockname() file = open(os_helper.TESTFN, 'rb') sock = socket.create_connection(address, timeout=support.LOOPBACK_TIMEOUT) with sock, file: count = 1 meth = self.meth_from_sock(sock) sent = meth(file, count=count) self.assertEqual(sent, count) self.assertEqual(file.tell(), count) def testCountSmall(self): count = 1 conn = self.accept_conn() data = self.recv_data(conn) self.assertEqual(len(data), count) self.assertEqual(data, self.FILEDATA[:count]) # count + offset def _testCountWithOffset(self): address = self.serv.getsockname() file = open(os_helper.TESTFN, 'rb') with socket.create_connection(address, timeout=2) as sock, file as file: count = 100007 meth = self.meth_from_sock(sock) sent = meth(file, offset=2007, count=count) self.assertEqual(sent, count) self.assertEqual(file.tell(), count + 2007) def testCountWithOffset(self): count = 100007 conn = self.accept_conn() data = self.recv_data(conn) self.assertEqual(len(data), count) 
self.assertEqual(data, self.FILEDATA[2007:count+2007]) # non blocking sockets are not supposed to work def _testNonBlocking(self): address = self.serv.getsockname() file = open(os_helper.TESTFN, 'rb') with socket.create_connection(address) as sock, file as file: sock.setblocking(False) meth = self.meth_from_sock(sock) self.assertRaises(ValueError, meth, file) self.assertRaises(ValueError, sock.sendfile, file) def testNonBlocking(self): conn = self.accept_conn() if conn.recv(8192): self.fail('was not supposed to receive any data') # timeout (non-triggered) def _testWithTimeout(self): address = self.serv.getsockname() file = open(os_helper.TESTFN, 'rb') sock = socket.create_connection(address, timeout=support.LOOPBACK_TIMEOUT) with sock, file: meth = self.meth_from_sock(sock) sent = meth(file) self.assertEqual(sent, self.FILESIZE) def testWithTimeout(self): conn = self.accept_conn() data = self.recv_data(conn) self.assertEqual(len(data), self.FILESIZE) self.assertEqual(data, self.FILEDATA) # timeout (triggered) def _testWithTimeoutTriggeredSend(self): address = self.serv.getsockname() with open(os_helper.TESTFN, 'rb') as file: with socket.create_connection(address) as sock: sock.settimeout(0.01) meth = self.meth_from_sock(sock) self.assertRaises(TimeoutError, meth, file) def testWithTimeoutTriggeredSend(self): conn = self.accept_conn() conn.recv(88192) # errors def _test_errors(self): pass def test_errors(self): with open(os_helper.TESTFN, 'rb') as file: with socket.socket(type=socket.SOCK_DGRAM) as s: meth = self.meth_from_sock(s) self.assertRaisesRegex( ValueError, "SOCK_STREAM", meth, file) with open(os_helper.TESTFN, encoding="utf-8") as file: with socket.socket() as s: meth = self.meth_from_sock(s) self.assertRaisesRegex( ValueError, "binary mode", meth, file) with open(os_helper.TESTFN, 'rb') as file: with socket.socket() as s: meth = self.meth_from_sock(s) self.assertRaisesRegex(TypeError, "positive integer", meth, file, count='2') self.assertRaisesRegex(TypeError, "positive integer", meth, file, count=0.1) self.assertRaisesRegex(ValueError, "positive integer", meth, file, count=0) self.assertRaisesRegex(ValueError, "positive integer", meth, file, count=-1) @unittest.skipUnless(hasattr(os, "sendfile"), 'os.sendfile() required for this test.') class SendfileUsingSendfileTest(SendfileUsingSendTest): """ Test the sendfile() implementation of socket.sendfile(). 
""" def meth_from_sock(self, sock): return getattr(sock, "_sendfile_use_sendfile") @unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required') class LinuxKernelCryptoAPI(unittest.TestCase): # tests for AF_ALG def create_alg(self, typ, name): sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0) try: sock.bind((typ, name)) except FileNotFoundError as e: # type / algorithm is not available sock.close() raise unittest.SkipTest(str(e), typ, name) else: return sock # bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY, # at least on ppc64le architecture @support.requires_linux_version(4, 5) def test_sha256(self): expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396" "177a9cb410ff61f20015ad") with self.create_alg('hash', 'sha256') as algo: op, _ = algo.accept() with op: op.sendall(b"abc") self.assertEqual(op.recv(512), expected) op, _ = algo.accept() with op: op.send(b'a', socket.MSG_MORE) op.send(b'b', socket.MSG_MORE) op.send(b'c', socket.MSG_MORE) op.send(b'') self.assertEqual(op.recv(512), expected) def test_hmac_sha1(self): expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79") with self.create_alg('hash', 'hmac(sha1)') as algo: algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe") op, _ = algo.accept() with op: op.sendall(b"what do ya want for nothing?") self.assertEqual(op.recv(512), expected) # Although it should work with 3.19 and newer the test blocks on # Ubuntu 15.10 with Kernel 4.2.0-19. @support.requires_linux_version(4, 3) def test_aes_cbc(self): key = bytes.fromhex('06a9214036b8a15b512e03d534120006') iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41') msg = b"Single block msg" ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a') msglen = len(msg) with self.create_alg('skcipher', 'cbc(aes)') as algo: algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key) op, _ = algo.accept() with op: op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv, flags=socket.MSG_MORE) op.sendall(msg) self.assertEqual(op.recv(msglen), ciphertext) op, _ = algo.accept() with op: op.sendmsg_afalg([ciphertext], op=socket.ALG_OP_DECRYPT, iv=iv) self.assertEqual(op.recv(msglen), msg) # long message multiplier = 1024 longmsg = [msg] * multiplier op, _ = algo.accept() with op: op.sendmsg_afalg(longmsg, op=socket.ALG_OP_ENCRYPT, iv=iv) enc = op.recv(msglen * multiplier) self.assertEqual(len(enc), msglen * multiplier) self.assertEqual(enc[:msglen], ciphertext) op, _ = algo.accept() with op: op.sendmsg_afalg([enc], op=socket.ALG_OP_DECRYPT, iv=iv) dec = op.recv(msglen * multiplier) self.assertEqual(len(dec), msglen * multiplier) self.assertEqual(dec, msg * multiplier) @support.requires_linux_version(4, 9) # see issue29324 def test_aead_aes_gcm(self): key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c') iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2') plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069') assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f') expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354') expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd') taglen = len(expected_tag) assoclen = len(assoc) with self.create_alg('aead', 'gcm(aes)') as algo: algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key) algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE, None, taglen) # send assoc, plain and tag buffer in separate steps op, _ = algo.accept() with op: op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv, assoclen=assoclen, flags=socket.MSG_MORE) op.sendall(assoc, socket.MSG_MORE) op.sendall(plain) res = 
op.recv(assoclen + len(plain) + taglen) self.assertEqual(expected_ct, res[assoclen:-taglen]) self.assertEqual(expected_tag, res[-taglen:]) # now with msg op, _ = algo.accept() with op: msg = assoc + plain op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv, assoclen=assoclen) res = op.recv(assoclen + len(plain) + taglen) self.assertEqual(expected_ct, res[assoclen:-taglen]) self.assertEqual(expected_tag, res[-taglen:]) # create anc data manually pack_uint32 = struct.Struct('I').pack op, _ = algo.accept() with op: msg = assoc + plain op.sendmsg( [msg], ([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)], [socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv], [socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)], ) ) res = op.recv(len(msg) + taglen) self.assertEqual(expected_ct, res[assoclen:-taglen]) self.assertEqual(expected_tag, res[-taglen:]) # decrypt and verify op, _ = algo.accept() with op: msg = assoc + expected_ct + expected_tag op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv, assoclen=assoclen) res = op.recv(len(msg) - taglen) self.assertEqual(plain, res[assoclen:]) @support.requires_linux_version(4, 3) # see test_aes_cbc def test_drbg_pr_sha256(self): # deterministic random bit generator, prediction resistance, sha256 with self.create_alg('rng', 'drbg_pr_sha256') as algo: extra_seed = os.urandom(32) algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed) op, _ = algo.accept() with op: rn = op.recv(32) self.assertEqual(len(rn), 32) def test_sendmsg_afalg_args(self): sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0) with sock: with self.assertRaises(TypeError): sock.sendmsg_afalg() with self.assertRaises(TypeError): sock.sendmsg_afalg(op=None) with self.assertRaises(TypeError): sock.sendmsg_afalg(1) with self.assertRaises(TypeError): sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None) with self.assertRaises(TypeError): sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1) def test_length_restriction(self): # bpo-35050, off-by-one error in length check sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0) self.addCleanup(sock.close) # salg_type[14] with self.assertRaises(FileNotFoundError): sock.bind(("t" * 13, "name")) with self.assertRaisesRegex(ValueError, "type too long"): sock.bind(("t" * 14, "name")) # salg_name[64] with self.assertRaises(FileNotFoundError): sock.bind(("type", "n" * 63)) with self.assertRaisesRegex(ValueError, "name too long"): sock.bind(("type", "n" * 64)) @unittest.skipUnless(sys.platform.startswith("win"), "requires Windows") class TestMSWindowsTCPFlags(unittest.TestCase): knownTCPFlags = { # available since long time ago 'TCP_MAXSEG', 'TCP_NODELAY', # available starting with Windows 10 1607 'TCP_FASTOPEN', # available starting with Windows 10 1703 'TCP_KEEPCNT', # available starting with Windows 10 1709 'TCP_KEEPIDLE', 'TCP_KEEPINTVL' } def test_new_tcp_flags(self): provided = [s for s in dir(socket) if s.startswith('TCP')] unknown = [s for s in provided if s not in self.knownTCPFlags] self.assertEqual([], unknown, "New TCP flags were discovered. 
See bpo-32394 for more information") class CreateServerTest(unittest.TestCase): def test_address(self): port = socket_helper.find_unused_port() with socket.create_server(("127.0.0.1", port)) as sock: self.assertEqual(sock.getsockname()[0], "127.0.0.1") self.assertEqual(sock.getsockname()[1], port) if socket_helper.IPV6_ENABLED: with socket.create_server(("::1", port), family=socket.AF_INET6) as sock: self.assertEqual(sock.getsockname()[0], "::1") self.assertEqual(sock.getsockname()[1], port) def test_family_and_type(self): with socket.create_server(("127.0.0.1", 0)) as sock: self.assertEqual(sock.family, socket.AF_INET) self.assertEqual(sock.type, socket.SOCK_STREAM) if socket_helper.IPV6_ENABLED: with socket.create_server(("::1", 0), family=socket.AF_INET6) as s: self.assertEqual(s.family, socket.AF_INET6) self.assertEqual(sock.type, socket.SOCK_STREAM) def test_reuse_port(self): if not hasattr(socket, "SO_REUSEPORT"): with self.assertRaises(ValueError): socket.create_server(("localhost", 0), reuse_port=True) else: with socket.create_server(("localhost", 0)) as sock: opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) self.assertEqual(opt, 0) with socket.create_server(("localhost", 0), reuse_port=True) as sock: opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) self.assertNotEqual(opt, 0) @unittest.skipIf(not hasattr(_socket, 'IPPROTO_IPV6') or not hasattr(_socket, 'IPV6_V6ONLY'), "IPV6_V6ONLY option not supported") @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test') def test_ipv6_only_default(self): with socket.create_server(("::1", 0), family=socket.AF_INET6) as sock: assert sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY) @unittest.skipIf(not socket.has_dualstack_ipv6(), "dualstack_ipv6 not supported") @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test') def test_dualstack_ipv6_family(self): with socket.create_server(("::1", 0), family=socket.AF_INET6, dualstack_ipv6=True) as sock: self.assertEqual(sock.family, socket.AF_INET6) class CreateServerFunctionalTest(unittest.TestCase): timeout = support.LOOPBACK_TIMEOUT def setUp(self): self.thread = None def tearDown(self): if self.thread is not None: self.thread.join(self.timeout) def echo_server(self, sock): def run(sock): with sock: conn, _ = sock.accept() with conn: event.wait(self.timeout) msg = conn.recv(1024) if not msg: return conn.sendall(msg) event = threading.Event() sock.settimeout(self.timeout) self.thread = threading.Thread(target=run, args=(sock, )) self.thread.start() event.set() def echo_client(self, addr, family): with socket.socket(family=family) as sock: sock.settimeout(self.timeout) sock.connect(addr) sock.sendall(b'foo') self.assertEqual(sock.recv(1024), b'foo') def test_tcp4(self): port = socket_helper.find_unused_port() with socket.create_server(("", port)) as sock: self.echo_server(sock) self.echo_client(("127.0.0.1", port), socket.AF_INET) @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test') def test_tcp6(self): port = socket_helper.find_unused_port() with socket.create_server(("", port), family=socket.AF_INET6) as sock: self.echo_server(sock) self.echo_client(("::1", port), socket.AF_INET6) # --- dual stack tests @unittest.skipIf(not socket.has_dualstack_ipv6(), "dualstack_ipv6 not supported") @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test') def test_dual_stack_client_v4(self): port = socket_helper.find_unused_port() with socket.create_server(("", port), 
family=socket.AF_INET6, dualstack_ipv6=True) as sock: self.echo_server(sock) self.echo_client(("127.0.0.1", port), socket.AF_INET) @unittest.skipIf(not socket.has_dualstack_ipv6(), "dualstack_ipv6 not supported") @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test') def test_dual_stack_client_v6(self): port = socket_helper.find_unused_port() with socket.create_server(("", port), family=socket.AF_INET6, dualstack_ipv6=True) as sock: self.echo_server(sock) self.echo_client(("::1", port), socket.AF_INET6) @requireAttrs(socket, "send_fds") @requireAttrs(socket, "recv_fds") @requireAttrs(socket, "AF_UNIX") class SendRecvFdsTests(unittest.TestCase): def testSendAndRecvFds(self): def close_pipes(pipes): for fd1, fd2 in pipes: os.close(fd1) os.close(fd2) def close_fds(fds): for fd in fds: os.close(fd) # send 10 file descriptors pipes = [os.pipe() for _ in range(10)] self.addCleanup(close_pipes, pipes) fds = [rfd for rfd, wfd in pipes] # use a UNIX socket pair to exchange file descriptors locally sock1, sock2 = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM) with sock1, sock2: socket.send_fds(sock1, [MSG], fds) # request more data and file descriptors than expected msg, fds2, flags, addr = socket.recv_fds(sock2, len(MSG) * 2, len(fds) * 2) self.addCleanup(close_fds, fds2) self.assertEqual(msg, MSG) self.assertEqual(len(fds2), len(fds)) self.assertEqual(flags, 0) # don't test addr # test that file descriptors are connected for index, fds in enumerate(pipes): rfd, wfd = fds os.write(wfd, str(index).encode()) for index, rfd in enumerate(fds2): data = os.read(rfd, 100) self.assertEqual(data, str(index).encode()) def test_main(): tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest, TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest, CreateServerTest, CreateServerFunctionalTest, SendRecvFdsTests] tests.extend([ NonBlockingTCPTests, FileObjectClassTestCase, UnbufferedFileObjectClassTestCase, LineBufferedFileObjectClassTestCase, SmallBufferedFileObjectClassTestCase, UnicodeReadFileObjectClassTestCase, UnicodeWriteFileObjectClassTestCase, UnicodeReadWriteFileObjectClassTestCase, NetworkConnectionNoServer, NetworkConnectionAttributesTest, NetworkConnectionBehaviourTest, ContextManagersTest, InheritanceTest, NonblockConstantTest ]) tests.append(BasicSocketPairTest) tests.append(TestUnixDomain) tests.append(TestLinuxAbstractNamespace) tests.extend([TIPCTest, TIPCThreadableTest]) tests.extend([BasicCANTest, CANTest]) tests.extend([BasicRDSTest, RDSTest]) tests.append(LinuxKernelCryptoAPI) tests.append(BasicQIPCRTRTest) tests.extend([ BasicVSOCKTest, ThreadedVSOCKSocketStreamTest, ]) tests.append(BasicBluetoothTest) tests.extend([ CmsgMacroTests, SendmsgUDPTest, RecvmsgUDPTest, RecvmsgIntoUDPTest, SendmsgUDP6Test, RecvmsgUDP6Test, RecvmsgRFC3542AncillaryUDP6Test, RecvmsgIntoRFC3542AncillaryUDP6Test, RecvmsgIntoUDP6Test, SendmsgUDPLITETest, RecvmsgUDPLITETest, RecvmsgIntoUDPLITETest, SendmsgUDPLITE6Test, RecvmsgUDPLITE6Test, RecvmsgRFC3542AncillaryUDPLITE6Test, RecvmsgIntoRFC3542AncillaryUDPLITE6Test, RecvmsgIntoUDPLITE6Test, SendmsgTCPTest, RecvmsgTCPTest, RecvmsgIntoTCPTest, SendmsgSCTPStreamTest, RecvmsgSCTPStreamTest, RecvmsgIntoSCTPStreamTest, SendmsgUnixStreamTest, RecvmsgUnixStreamTest, RecvmsgIntoUnixStreamTest, RecvmsgSCMRightsStreamTest, RecvmsgIntoSCMRightsStreamTest, # These are slow when setitimer() is not available InterruptedRecvTimeoutTest, InterruptedSendTimeoutTest, TestSocketSharing, SendfileUsingSendTest, 
SendfileUsingSendfileTest, ]) tests.append(TestMSWindowsTCPFlags) thread_info = threading_helper.threading_setup() support.run_unittest(*tests) threading_helper.threading_cleanup(*thread_info) if __name__ == "__main__": test_main()
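

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): the recv_into() and
# create_server()/create_connection() patterns that the buffer tests exercise,
# reduced to a minimal standalone echo round trip. The helper name
# _demo_recv_into_echo is hypothetical; it is defined here only as an example
# and is never called by the tests.
def _demo_recv_into_echo():
    import socket
    import threading

    with socket.create_server(("127.0.0.1", 0)) as srv:
        addr = srv.getsockname()

        def serve():
            conn, _ = srv.accept()
            with conn:
                conn.sendall(conn.recv(1024))  # echo one message back

        t = threading.Thread(target=serve)
        t.start()
        with socket.create_connection(addr) as cli:
            cli.sendall(b"ping")
            buf = bytearray(1024)  # preallocated buffer, filled in place
            nbytes = cli.recv_into(buf)
            assert buf[:nbytes] == b"ping"
        t.join()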
monobeast.py
# Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging import os import pprint import threading import time import timeit import traceback import typing os.environ["OMP_NUM_THREADS"] = "1" # Necessary for multithreading. import torch from torch import multiprocessing as mp from torch import nn from torch.nn import functional as F from torchbeast import atari_wrappers from torchbeast.core import environment from torchbeast.core import file_writer from torchbeast.core import prof from torchbeast.core import vtrace from torchbeast import attention_net from utils.util_functions import ( compute_baseline_loss, compute_entropy_loss, compute_policy_gradient_loss, create_env, ) # yapf: disable parser = argparse.ArgumentParser(description="PyTorch Scalable Agent") parser.add_argument("--env", type=str, default="Seaquest-v0", help="Gym environment.") parser.add_argument("--mode", default="train", choices=["train", "test", "test_render"], help="Training or test mode.") parser.add_argument("--xpid", default=None, help="Experiment id (default: None).") # Training settings. parser.add_argument("--disable_checkpoint", action="store_true", help="Disable saving checkpoint.") parser.add_argument("--savedir", default="~/logs/torchbeast", help="Root dir where experiment data will be saved.") # victor: parse checkpoint dir for continuing learning parser.add_argument("--checkpoint_file", default=None, help="checkpoint-file for training continuing") parser.add_argument("--num_actors", default=4, type=int, metavar="N", help="Number of actors (default: 4).") parser.add_argument("--total_steps", default=100000, type=int, metavar="T", help="Total environment steps to train for.") parser.add_argument("--batch_size", default=8, type=int, metavar="B", help="Learner batch size.") parser.add_argument("--unroll_length", default=80, type=int, metavar="T", help="The unroll length (time dimension).") parser.add_argument("--num_buffers", default=None, type=int, metavar="N", help="Number of shared-memory buffers.") parser.add_argument("--num_learner_threads", "--num_threads", default=2, type=int, metavar="N", help="Number learner threads.") parser.add_argument("--disable_cuda", action="store_true", help="Disable CUDA.") parser.add_argument("--use_lstm", action="store_true", help="Use LSTM in agent model.") # Loss settings. parser.add_argument("--entropy_cost", default=0.0006, type=float, help="Entropy cost/multiplier.") parser.add_argument("--baseline_cost", default=0.5, type=float, help="Baseline cost/multiplier.") parser.add_argument("--discounting", default=0.99, type=float, help="Discounting factor.") parser.add_argument("--reward_clipping", default="abs_one", choices=["abs_one", "none"], help="Reward clipping.") # Optimizer settings. 
parser.add_argument("--learning_rate", default=0.00048, type=float, metavar="LR", help="Learning rate.") parser.add_argument("--alpha", default=0.99, type=float, help="RMSProp smoothing constant.") parser.add_argument("--momentum", default=0, type=float, help="RMSProp momentum.") parser.add_argument("--epsilon", default=0.01, type=float, help="RMSProp epsilon.") parser.add_argument("--grad_norm_clipping", default=40.0, type=float, help="Global gradient norm clip.") # yapf: enable logging.basicConfig( format=( "[%(levelname)s:%(process)d %(module)s:%(lineno)d %(asctime)s] " "%(message)s" ), level=0, ) Buffers = typing.Dict[str, typing.List[torch.Tensor]] def act( flags, actor_index: int, free_queue: mp.SimpleQueue, full_queue: mp.SimpleQueue, model: torch.nn.Module, buffers: Buffers, initial_agent_state_buffers, ): try: logging.info("Actor %i started.", actor_index) timings = prof.Timings() # Keep track of how fast things are. gym_env = create_env(flags) seed = actor_index ^ int.from_bytes(os.urandom(4), byteorder="little") gym_env.seed(seed) env = environment.Environment(gym_env) env_output = env.initial() agent_state = model.initial_state(batch_size=1) agent_output, unused_state = model(env_output, agent_state) while True: index = free_queue.get() if index is None: break # Write old rollout end. for key in env_output: buffers[key][index][0, ...] = env_output[key] for key in agent_output: buffers[key][index][0, ...] = agent_output[key] for i, tensor in enumerate(agent_state): initial_agent_state_buffers[index][i][...] = tensor # Do new rollout. for t in range(flags.unroll_length): timings.reset() with torch.no_grad(): agent_output, agent_state = model(env_output, agent_state) timings.time("model") env_output = env.step(agent_output["action"]) timings.time("step") for key in env_output: buffers[key][index][t + 1, ...] = env_output[key] for key in agent_output: buffers[key][index][t + 1, ...] = agent_output[key] timings.time("write") full_queue.put(index) if actor_index == 0: logging.info("Actor %i: %s", actor_index, timings.summary()) except KeyboardInterrupt: pass # Return silently. except Exception as e: logging.error("Exception in worker process %i", actor_index) traceback.print_exc() print() raise e def get_batch( flags, free_queue: mp.SimpleQueue, full_queue: mp.SimpleQueue, buffers: Buffers, initial_agent_state_buffers, timings, lock=threading.Lock(), ): with lock: timings.time("lock") indices = [full_queue.get() for _ in range(flags.batch_size)] timings.time("dequeue") batch = { key: torch.stack([buffers[key][m] for m in indices], dim=1) for key in buffers } # NOTE: AttentionNet is batch first. initial_agent_state = tuple( torch.cat(ts, dim=0) for ts in zip(*[initial_agent_state_buffers[m] for m in indices]) ) timings.time("batch") for m in indices: free_queue.put(m) timings.time("enqueue") batch = {k: t.to(device=flags.device, non_blocking=True) for k, t in batch.items()} initial_agent_state = tuple( t.to(device=flags.device, non_blocking=True) for t in initial_agent_state ) timings.time("device") return batch, initial_agent_state def learn( flags, actor_model, model, batch, initial_agent_state, optimizer, scheduler, lock=threading.Lock(), # noqa: B008 ): """Performs a learning (optimization) step.""" with lock: learner_outputs, unused_state = model(batch, initial_agent_state) # logging.info(f"learn() batch[frame] shape: {batch['frame'].shape}") # Take final value function slice for bootstrapping. 
bootstrap_value = learner_outputs["baseline"][-1] # Move from obs[t] -> action[t] to action[t] -> obs[t]. batch = {key: tensor[1:] for key, tensor in batch.items()} learner_outputs = {key: tensor[:-1] for key, tensor in learner_outputs.items()} rewards = batch["reward"] if flags.reward_clipping == "abs_one": clipped_rewards = torch.clamp(rewards, -1, 1) elif flags.reward_clipping == "none": clipped_rewards = rewards discounts = (~batch["done"]).float() * flags.discounting vtrace_returns = vtrace.from_logits( behavior_policy_logits=batch["policy_logits"], target_policy_logits=learner_outputs["policy_logits"], actions=batch["action"], discounts=discounts, rewards=clipped_rewards, values=learner_outputs["baseline"], bootstrap_value=bootstrap_value, ) pg_loss = compute_policy_gradient_loss( learner_outputs["policy_logits"], batch["action"], vtrace_returns.pg_advantages, ) baseline_loss = flags.baseline_cost * compute_baseline_loss( vtrace_returns.vs - learner_outputs["baseline"] ) entropy_loss = flags.entropy_cost * compute_entropy_loss( learner_outputs["policy_logits"] ) total_loss = pg_loss + baseline_loss + entropy_loss episode_returns = batch["episode_return"][batch["done"]] # victor: for plotting episode_step episode_steps = batch["episode_step"][batch["done"]] stats = { "episode_returns": tuple(episode_returns.cpu().numpy()), "mean_episode_return": torch.mean(episode_returns).item(), # victor: "mean_episode_step": torch.mean(episode_steps).item(), "total_loss": total_loss.item(), "pg_loss": pg_loss.item(), "baseline_loss": baseline_loss.item(), "entropy_loss": entropy_loss.item(), } optimizer.zero_grad() total_loss.backward() nn.utils.clip_grad_norm_(model.parameters(), flags.grad_norm_clipping) optimizer.step() scheduler.step() actor_model.load_state_dict(model.state_dict()) return stats def create_buffers(flags, obs_shape, num_actions) -> Buffers: T = flags.unroll_length specs = dict( frame=dict(size=(T + 1, *obs_shape), dtype=torch.uint8), reward=dict(size=(T + 1,), dtype=torch.float32), done=dict(size=(T + 1,), dtype=torch.bool), episode_return=dict(size=(T + 1,), dtype=torch.float32), # victor episode_step=dict(size=(T + 1,), dtype=torch.float32), policy_logits=dict(size=(T + 1, num_actions), dtype=torch.float32), baseline=dict(size=(T + 1,), dtype=torch.float32), last_action=dict(size=(T + 1,), dtype=torch.int64), action=dict(size=(T + 1,), dtype=torch.int64), ) buffers: Buffers = {key: [] for key in specs} for _ in range(flags.num_buffers): for key in buffers: buffers[key].append(torch.empty(**specs[key]).share_memory_()) return buffers def train(flags): # pylint: disable=too-many-branches, too-many-statements if flags.xpid is None: flags.xpid = "torchbeast-%s" % time.strftime("%Y%m%d-%H%M%S") plogger = file_writer.FileWriter( xpid=flags.xpid, xp_args=flags.__dict__, rootdir=flags.savedir ) checkpointpath = os.path.expandvars( os.path.expanduser("%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar")) ) if flags.num_buffers is None: # Set sensible default for num_buffers. 
flags.num_buffers = max(2 * flags.num_actors, flags.batch_size) if flags.num_actors >= flags.num_buffers: raise ValueError("num_buffers should be larger than num_actors") if flags.num_buffers < flags.batch_size: raise ValueError("num_buffers should be larger than batch_size") T = flags.unroll_length B = flags.batch_size flags.device = None if not flags.disable_cuda and torch.cuda.is_available(): logging.info("Using CUDA.") flags.device = torch.device("cuda") else: logging.info("Not using CUDA.") flags.device = torch.device("cpu") env = create_env(flags) model = Net(num_actions=env.action_space.n) buffers = create_buffers(flags, env.observation_space.shape, model.num_actions) model.share_memory() # Add initial RNN state. initial_agent_state_buffers = [] for _ in range(flags.num_buffers): state = model.initial_state(batch_size=1) for t in state: t.share_memory_() initial_agent_state_buffers.append(state) actor_processes = [] ctx = mp.get_context("fork") free_queue = ctx.SimpleQueue() full_queue = ctx.SimpleQueue() for i in range(flags.num_actors): actor = ctx.Process( target=act, args=( flags, i, free_queue, full_queue, model, buffers, initial_agent_state_buffers, ), ) actor.start() actor_processes.append(actor) learner_model = Net(num_actions=env.action_space.n).to(device=flags.device) optimizer = torch.optim.RMSprop( learner_model.parameters(), lr=flags.learning_rate, momentum=flags.momentum, eps=flags.epsilon, alpha=flags.alpha, ) def lr_lambda(epoch): return 1 - min(epoch * T * B, flags.total_steps) / flags.total_steps scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda) # victor: load state_dict from checkpoint_file if flags.checkpoint_file is not None: logging.info( f"Loading model, optimizer, scheduler from checkpoint: {flags.checkpoint_file}" ) checkpoint_loaded = torch.load(flags.checkpoint_file, map_location=flags.device) learner_model.load_state_dict(checkpoint_loaded["model_state_dict"]) optimizer.load_state_dict(checkpoint_loaded["optimizer_state_dict"]) scheduler.load_state_dict(checkpoint_loaded["scheduler_state_dict"]) learner_model.train() logger = logging.getLogger("logfile") stat_keys = [ "total_loss", "mean_episode_return", # victor "mean_episode_step", "pg_loss", "baseline_loss", "entropy_loss", ] logger.info("# Step\t%s", "\t".join(stat_keys)) step, stats = 0, {} def batch_and_learn(i, lock=threading.Lock()): """Thread target for the learning process.""" nonlocal step, stats timings = prof.Timings() while step < flags.total_steps: timings.reset() batch, agent_state = get_batch( flags, free_queue, full_queue, buffers, initial_agent_state_buffers, timings, ) stats = learn( flags, model, learner_model, batch, agent_state, optimizer, scheduler ) timings.time("learn") with lock: to_log = dict(step=step) to_log.update({k: stats[k] for k in stat_keys}) plogger.log(to_log) step += T * B if i == 0: logging.info("Batch and learn: %s", timings.summary()) for m in range(flags.num_buffers): free_queue.put(m) threads = [] for i in range(flags.num_learner_threads): thread = threading.Thread( target=batch_and_learn, name="batch-and-learn-%d" % i, args=(i,) ) thread.start() threads.append(thread) def checkpoint(): if flags.disable_checkpoint: return logging.info("Saving checkpoint to %s", checkpointpath) torch.save( { "model_state_dict": model.state_dict(), "optimizer_state_dict": optimizer.state_dict(), "scheduler_state_dict": scheduler.state_dict(), "flags": vars(flags), }, checkpointpath, ) def checkpoint_step(step): if flags.disable_checkpoint: return 
        checkpointpath_step = os.path.expandvars(
            os.path.expanduser(f"{flags.savedir}/{flags.xpid}/model_{step}.tar")
        )
        logging.info("Saving step checkpoint to %s", checkpointpath_step)
        torch.save(
            {
                "model_state_dict": model.state_dict(),
                "optimizer_state_dict": optimizer.state_dict(),
                "scheduler_state_dict": scheduler.state_dict(),
                "stats": stats,
                "flags": vars(flags),
            },
            checkpointpath_step,
        )

    timer = timeit.default_timer
    try:
        last_checkpoint_time = timer()
        while step < flags.total_steps:
            start_step = step
            start_time = timer()
            time.sleep(30)

            if timer() - last_checkpoint_time > 10 * 60:  # Save every 10 min.
                checkpoint()
                last_checkpoint_time = timer()

            if (start_step % (flags.total_steps // 10)) == 0:
                # Save step model every 10% of the way during training.
                checkpoint_step(start_step)

            sps = (step - start_step) / (timer() - start_time)
            if stats.get("episode_returns", None):
                mean_return = (
                    "Return per episode: %.1f. " % stats["mean_episode_return"]
                )
            else:
                mean_return = ""
            total_loss = stats.get("total_loss", float("inf"))
            logging.info(
                "Steps %i @ %.1f SPS. Loss %f. %sStats:\n%s",
                step,
                sps,
                total_loss,
                mean_return,
                pprint.pformat(stats),
            )
    except KeyboardInterrupt:
        return  # Try joining actors then quit.
    else:
        for thread in threads:
            thread.join()
        logging.info("Learning finished after %d steps.", step)
    finally:
        for _ in range(flags.num_actors):
            free_queue.put(None)
        for actor in actor_processes:
            actor.join(timeout=1)

    checkpoint()
    plogger.close()


def test(flags, num_episodes: int = 10):
    if flags.xpid is None:
        checkpointpath = "./latest/model.tar"
    else:
        checkpointpath = os.path.expandvars(
            os.path.expanduser("%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar"))
        )
    gym_env = create_env(flags)
    env = environment.Environment(gym_env)
    # Use the underlying gym env for the action space; the Environment
    # wrapper does not expose action_space itself.
    model = Net(num_actions=gym_env.action_space.n)
    model.eval()
    checkpoint = torch.load(checkpointpath, map_location="cpu")
    model.load_state_dict(checkpoint["model_state_dict"])

    observation = env.initial()
    returns = []
    video_frames = []
    attention_frames = []
    hidden_state = model.initial_state(batch_size=1)

    while len(returns) < num_episodes:
        if flags.mode == "test_render":
            env.gym_env.render()
        # model() returns (output dict, new agent state), as in act() above.
        agent_outputs, hidden_state = model(observation, hidden_state)
        observation = env.step(agent_outputs["action"])
        if observation["done"].item():
            returns.append(observation["episode_return"].item())
            logging.info(
                "Episode ended after %d steps. Return: %.1f",
                observation["episode_step"].item(),
                observation["episode_return"].item(),
            )
            hidden_state = model.initial_state(batch_size=1)

    if flags.mode == "write_videos":
        # Save numpy arrays, so we can make videos somewhere else.
        import numpy as np  # numpy is only needed for this branch

        video_frames = np.asarray(video_frames)
        with open(videopath, "wb") as f:
            np.save(f, video_frames)
        attention_frames = np.asarray(attention_frames)
        with open(attentionpath, "wb") as f:
            np.save(f, attention_frames)

    env.close()
    logging.info(
        "Average returns over %i episodes: %.1f", num_episodes, sum(returns) / len(returns)
    )


Net = attention_net.AttentionNet


def main(flags):
    if flags.mode == "train":
        train(flags)
    else:
        test(flags)


if __name__ == "__main__":
    flags = parser.parse_args()
    main(flags)
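

# ---------------------------------------------------------------------------
# Illustrative sketch (not used by the script): how the LambdaLR schedule in
# train() behaves. Each scheduler.step() corresponds to one learner batch of
# T * B = unroll_length * batch_size environment steps, so the multiplier
# decays linearly from 1.0 to 0.0 over flags.total_steps. The function name
# and the sample batch indices below are hypothetical; the defaults mirror
# the argparse defaults above.
def _demo_lr_schedule(total_steps=100000, unroll_length=80, batch_size=8):
    T, B = unroll_length, batch_size
    for batch_index in (0, 50, 100, 156, 157):
        multiplier = 1 - min(batch_index * T * B, total_steps) / total_steps
        print(f"batch {batch_index}: lr multiplier {multiplier:.4f}")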
fallHandler.py
import double
import paho.mqtt.client as mqtt
import time
import json
import datetime
import re
import threading
from notify_run import Notify
from navigate import navigate
from webpage.changeConfig import ChangeConfig

with open('config.json') as f:
    CONFIG = json.load(f)

# Get constants from config file
AVERAGE_TIME_WINDOW_SIZE = int(CONFIG['constants']['AVERAGE_TIME_WINDOW_SIZE'])
ALERT_HEIGT = float(CONFIG['constants']['ALERT_HEIGT'])
ALERT_WINDOW_SIZE = int(CONFIG['constants']['ALERT_WINDOW_SIZE'])
ALERT_TIME_GAP = datetime.timedelta(minutes=float(CONFIG['constants']['ALERT_TIME_GAP']))
ALERT_TIME_GAP_START = datetime.timedelta(minutes=float(CONFIG['constants']['ALERT_TIME_GAP_START']))


class FallHandler:

    def __init__(self, fallSystemLock, globalVariables):
        self.globalVariables = globalVariables
        self.fallSystemLock = fallSystemLock
        # Load spot ids
        self.spotD3 = CONFIG['widefind']['spotD3']
        self.spotUser = CONFIG['widefind']['spotUser']
        self.zUser = {}  # Dictionary: key = time the mqtt msg was received, value = Z value in mqtt msg
        self.zUserAverage = 0
        self.xD3 = None
        self.yD3 = None
        self.zD3 = None
        # Calculates initial cooldown
        self.alertTimeCooldown = datetime.datetime.now() - ALERT_TIME_GAP + ALERT_TIME_GAP_START
        self.notify = Notify()
        # print('self.alertTimeCooldown: ', self.alertTimeCooldown)

    def startSystem(self):
        self.updateConstants()
        broker_address = CONFIG['widefind']['broker_address']
        self.client = mqtt.Client()
        self.client.on_message = self.on_message
        self.client.connect(broker_address)
        self.client.subscribe(CONFIG['widefind']['subscribe_address'])
        print('System Started')
        self.client.loop_forever()

    def updateMovingZAverage(self, currentTime, currentZCord):
        # Remove old z cords until the difference is less than AVERAGE_TIME_WINDOW_SIZE
        for x in list(self.zUser):
            delta = currentTime - x
            if delta.total_seconds() > AVERAGE_TIME_WINDOW_SIZE:
                # We continually recalculate the average Z value for every removed element
                newSum = self.zUserAverage * len(self.zUser) - self.zUser[x]
                del self.zUser[x]
                if len(self.zUser) == 0:  # Edge case to avoid dividing by zero
                    self.zUserAverage = 0
                else:  # Normal execution
                    self.zUserAverage = newSum / len(self.zUser)
            else:
                break
        # Update new average with a new time and cords
        newSum = self.zUserAverage * len(self.zUser) + currentZCord
        self.zUser.update({currentTime: currentZCord})
        self.zUserAverage = newSum / len(self.zUser)

    def isSpotUser(self, jsonMsg):
        return self.spotUser in jsonMsg

    def isSpotD3(self, jsonMsg):
        return self.spotD3 in jsonMsg

    def checkSystem(self):
        if not self.globalVariables.systemOn:
            try:
                self.client.disconnect()
                # self.client.unsubscribe(adress)
                now = datetime.datetime.now()
                current_time = now.strftime("%H:%M:%S")
                print('System wait, time:', current_time)
                self.fallSystemLock.acquire()
                self.fallSystemLock.release()
                now = datetime.datetime.now()
                current_time = now.strftime("%H:%M:%S")
                print('System start, time:', current_time)
                self.startSystem()
            except Exception:
                print("uh oh")
        if self.globalVariables.changedSettings:
            self.updateConstants()
            print('Updated constants')

    # def on_unsubscribe(self, client, userdata, mid):  # Only for test
    #     print('UNSUB')

    # def on_subscribe(self, client, userdata, mid, granted_qos):  # Only for test
    #     print('SUB')

    def on_message(self, client, userdata, message):
        self.checkSystem()
        mqttMsgString = message.payload.decode()
        mqttMsgJson = json.loads(mqttMsgString)
        jsonMsg = json.dumps(mqttMsgJson)
        print(jsonMsg)
        if self.isSpotUser(jsonMsg):
            self.handleSpotUser(jsonMsg)
        if self.isSpotD3(jsonMsg):
            self.handleSpotD3(jsonMsg)

    def handleSpotUser(self, jsonMsg):
        cordinates = self.getCordinates(jsonMsg)  # cordinates = [x, y, z]
        currentTime = self.getTime(jsonMsg)  # currentTime = A datetime variable
        currentZCord = cordinates[2]
        self.updateMovingZAverage(currentTime, currentZCord)
        print('User', cordinates,
              ' self.zUserAverage: ', self.zUserAverage,
              ' self.checkZHeight(currentZCord): ', self.checkZHeight(currentZCord),
              ' len(self.zUser) >= ALERT_WINDOW_SIZE. ', len(self.zUser) >= ALERT_WINDOW_SIZE,
              ' self.isAlertTimeOffCooldown(currentTime): ', self.isAlertTimeOffCooldown(currentTime))
        # Fall detection: alert if the Z height is low, enough measurements
        # exist and the alert is not on cooldown
        checkIfFall = (self.checkZHeight(currentZCord)
                       and len(self.zUser) >= ALERT_WINDOW_SIZE
                       and self.isAlertTimeOffCooldown(currentTime))
        if checkIfFall:
            self.updateAlertTimeCooldown(currentTime)
            self.alert(cordinates[0], cordinates[1])

    def handleSpotD3(self, jsonMsg):
        cordinates = self.getCordinates(jsonMsg)  # cordinates = [x, y, z]
        self.xD3 = cordinates[0]
        self.yD3 = cordinates[1]
        self.zD3 = cordinates[2]
        print('D3', cordinates)

    def isAlertTimeOffCooldown(self, currentTime):
        delta = currentTime - self.alertTimeCooldown
        # print('delta: ', delta)
        return delta > ALERT_TIME_GAP

    def updateAlertTimeCooldown(self, currentTime):
        self.alertTimeCooldown = currentTime

    # Average and current Z height are below the configured alert height
    def checkZHeight(self, currentZCord):
        return self.zUserAverage < ALERT_HEIGT and currentZCord < ALERT_HEIGT

    def alert(self, targetX, targetY):
        # Start alarm loop until someone connects to the robot via the drive website
        self.globalVariables.doNotifyLoop = True
        notifyLoopThread = threading.Thread(target=self.notifyLoop, args=())
        notifyLoopThread.start()
        if self.xD3 is not None and self.yD3 is not None:
            print('Started drive')
            nav = navigate()
            widefindStart = [self.xD3, self.yD3]
            widefindDest = [targetX, targetY]
            print(widefindStart, ' --> ', widefindDest)
            d3Dest = nav.calcWFtoD3(widefindStart, widefindDest)
            nav.driveWhenFall(d3Dest[0], d3Dest[1])

    def notifyLoop(self):
        with open('config.json', 'r') as jsonFile:
            data = json.load(jsonFile)
        username = data['user']['Name']
        address = data['user']['Address']
        d3name = data['user']['D3Name']
        waitTime = 60
        minutes = 0
        while self.globalVariables.doNotifyLoop:
            sendstr = ("FALL DETECTED (" + str(round(minutes, 2)) + " min ago): "
                       + username + ', ' + address
                       + ', has fallen, connect immediately. D3Robot: ' + d3name)
            print(sendstr)
            self.notify.send(sendstr)
            minutes += waitTime / 60
            time.sleep(waitTime)

    # Returns coordinates of Widefind mqtt data in (meter)
    def getCordinates(self, jsonMqttMsg):
        parse = json.loads(jsonMqttMsg)
        # print(parse)
        messageData = parse['message']
        splitMessageData = messageData.split(",")
        x = float(splitMessageData[2]) / 1000
        y = float(splitMessageData[3]) / 1000
        z = float(splitMessageData[4]) / 1000
        return [x, y, z]

    # Returns time of Widefind mqtt data
    def getTime(self, jsonMqttMsg):
        parse = json.loads(jsonMqttMsg)
        timeData = parse['time']
        splitTimeData = re.split(r"[-T:.Z]\s*", timeData)
        currentTime = datetime.datetime(
            int(splitTimeData[0]), int(splitTimeData[1]), int(splitTimeData[2]),
            int(splitTimeData[3]), int(splitTimeData[4]), int(splitTimeData[5]),
            int(splitTimeData[6][:6]))
        return currentTime

    # Read data from config.json and update all constants
    def updateConstants(self):
        # Assign to the module-level constants, not to new locals
        global AVERAGE_TIME_WINDOW_SIZE, ALERT_HEIGT, ALERT_WINDOW_SIZE
        global ALERT_TIME_GAP, ALERT_TIME_GAP_START
        with open('config.json') as f:
            CONFIG = json.load(f)
        AVERAGE_TIME_WINDOW_SIZE = int(CONFIG['constants']['AVERAGE_TIME_WINDOW_SIZE'])
        ALERT_HEIGT = float(CONFIG['constants']['ALERT_HEIGT'])
        ALERT_WINDOW_SIZE = int(CONFIG['constants']['ALERT_WINDOW_SIZE'])
        ALERT_TIME_GAP = datetime.timedelta(minutes=float(CONFIG['constants']['ALERT_TIME_GAP']))
        ALERT_TIME_GAP_START = datetime.timedelta(minutes=float(CONFIG['constants']['ALERT_TIME_GAP_START']))
        self.globalVariables.changedSettings = False


if __name__ == '__main__':
    s = FallHandler()
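

# ---------------------------------------------------------------------------
# Illustrative sketch (standalone, hypothetical names): the incremental
# moving-average bookkeeping used by updateMovingZAverage() above. Rather than
# re-summing the whole window on every MQTT message, the running mean is
# adjusted as samples enter and leave the time window:
#   entering: mean = (mean * n + z) / (n + 1)
#   leaving:  mean = (mean * n - z) / (n - 1)
def _demo_incremental_mean():
    samples = [1.2, 1.1, 0.3, 0.2]
    mean, n = 0.0, 0
    for z in samples:  # all four samples enter the window
        mean = (mean * n + z) / (n + 1)
        n += 1
    mean = (mean * n - samples[0]) / (n - 1)  # oldest sample drops out
    n -= 1
    print(mean)  # 0.533... == (1.1 + 0.3 + 0.2) / 3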
cb-command_r.py
#!/usr/bin/env python

# CB-Command_R
# Carbon Black Response - Mass Command Line Data Extractor
# gfoss[at]carbonblack.com
# March, 2019

import sys, time, argparse, requests, json, threading
from config import active

global_lock = threading.Lock()
file_contents = []

def parse_all_things():
    parser = argparse.ArgumentParser(description = 'Multithreaded large-scale Carbon Black Response Command Line Data Extraction')
    parser.add_argument('-q', '--query',
                        help = 'Carbon Black Response Query - Default: (process_name:cmd.exe)',
                        default='process_name:cmd.exe',
                        dest='query')
    parser.add_argument('-t', '--threads',
                        help = 'Number of simultaneous threads - Default: 25',
                        default='25',
                        dest='threads')
    parser.add_argument('-r', '--rows',
                        help = 'Rows per thread (USE MULTIPLES OF 10!) - Default: 1000',
                        default='1000',
                        dest='rows')
    parser.add_argument('-s', '--start',
                        help = 'Select the starting row - Default: 0',
                        default='0',
                        dest='start')
    parser.add_argument('-f', '--filename',
                        help = 'Output results - Default: commands.txt',
                        default='commands.txt',
                        dest='filename')
    #
    # usage: cb-command_r.py [-h] [-q QUERY] [-t THREADS] [-r ROWS] [-s START] [-f FILENAME]
    #
    # Multithreaded large-scale Carbon Black Response Commandline Data Extraction
    #
    # optional arguments:
    #   -h, --help            show this help message and exit
    #   -q QUERY, --query QUERY
    #                         Carbon Black Response Query
    #                         Default: (process_name:cmd.exe)
    #   -t THREADS, --threads THREADS
    #                         Number of simultaneous threads
    #                         Default: 25
    #   -r ROWS, --rows ROWS  Rows per thread (USE MULTIPLES OF 10!)
    #                         Default: 1000
    #   -s START, --start START
    #                         Select the starting row
    #                         Default: 0
    #   -f FILENAME, --filename FILENAME
    #                         Output results
    #                         Default: commands.txt
    #
    return parser

def extractor(parser, args, start_count):
    url = active['url']
    api_key = active['key']
    query = args.query
    querystring = {"q":args.query,"rows":args.rows,"start":start_count}
    payload = ""
    headers = {
        'X-Auth-Token': api_key
    }

    # If you receive SSL certificate errors, add ", verify=False" to the below request
    response = requests.request("GET", url, data=payload, headers=headers, params=querystring)
    data = json.loads(response.content)

    if int(args.threads) > 1:
        # Crude spinlock so only one thread appends to file_contents at a time
        while global_lock.locked():
            continue
        global_lock.acquire()
        rows = int(args.rows)
        for num in range(rows):
            datas = (data['results'][num]['cmdline']).encode('utf8')
            file_contents.append(datas)
        global_lock.release()
    else:
        orig_stdout = sys.stdout
        f = open(args.filename, 'a')
        sys.stdout = f
        rows = int(args.rows)
        for num in range(rows):
            print (data['results'][num]['cmdline']).encode('utf8')
        sys.stdout = orig_stdout
        f.close()

def main():
    print '''
   ________  _____                                          __   ___
  / ___/ _ )____/ ___/__  __ _  __ _  ___ ____  ___/ /  / _ \\
 / /__/ _  /___/ /__/ _ \\/ \' \\/ \' \\/ _ `/ _ \\/ _  /  / , _/
 \\___/____/    \\___/\\___/_/_/_/_/_/_/\\_,_/_//_/\\_,_/__/_/|_|
                                                       /___/
'''
    parser = parse_all_things()
    args = parser.parse_args()
    thread_count = args.threads
    start_count = args.start
    rows = args.rows

    if int(thread_count) > 1:
        # Note: concatenating thread_count with the zeros of `rows` assumes
        # rows is a power of ten, so it reads as thread_count * rows
        print 'Extracting the last ' + thread_count + str(rows)[1:] + ' commands related to: ' + args.query
        print 'Running with ' + thread_count + ' threads!'
        print ''

        thread_count = int(thread_count)
        threads = []
        for num in range(thread_count):
            # String trick: appending the zeros of `rows` to `num` yields
            # num * rows, i.e. the starting row for this thread
            iteration = str(rows)[1:]
            start_count = str(num) + iteration
            print 'Pulling command line data starting at row ' + start_count
            t = threading.Thread(target=extractor, args=(parser, args, start_count,))
            threads.append(t)
            t.start()

        for t in threads:
            t.join()

        with open(args.filename, 'a+') as outfile:
            outfile.write('\n'.join(file_contents))

        print ''
        print 'Writing output to ' + args.filename
        print ''

    else:
        print "Making a single API request for " + rows + " records..."
        extractor(parser, args, start_count)
        print ''
        print 'Writing output to ' + args.filename
        print ''

if __name__ == "__main__":
    main()
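

# ---------------------------------------------------------------------------
# Illustrative sketch (not used above): the start_count = str(num) + iteration
# string trick in main() only works because the default row count is a power
# of ten; arithmetically it is just start = num * rows. A clearer equivalent,
# with hypothetical names:
def _demo_pagination(threads=25, rows=1000):
    for num in range(threads):
        start = num * rows  # first row fetched by thread number `num`
        print('thread %d pulls rows %d-%d' % (num, start, start + rows - 1))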
restore_coordinator.py
# Copyright (c) 2019 Aiven, Helsinki, Finland. https://aiven.io/
import contextlib
import enum
import json
import logging
import multiprocessing
import os
import queue
import threading
import time
from contextlib import suppress

import pymysql

from pghoard.rohmu import errors as rohmu_errors
from pghoard.rohmu import get_transfer

from .append_only_state_manager import AppendOnlyStateManager
from .backup_stream import BINLOG_BUCKET_SIZE
from .basebackup_restore_operation import BasebackupRestoreOperation
from .binlog_downloader import download_binlog
from .errors import BadRequest
from .state_manager import StateManager
from .util import (
    add_gtid_ranges_to_executed_set, build_gtid_ranges, change_master_to, make_gtid_range_string, mysql_cursor,
    parse_fs_metadata, parse_gtid_range_string, read_gtids_from_log, relay_log_name, rsa_decrypt_bytes,
    sort_and_filter_binlogs, track_rate
)

# "Could not initialize master info structure; more error messages can be found in the MySQL error log"
# Happens when using multithreaded SQL apply and the provided relay logs do not contain sufficient data to
# initialize the threads.
ER_MASTER_INFO = 1201


class RestoreCoordinator(threading.Thread):
    """Restores an existing backup. Starts by restoring the basebackup and then applies
    necessary binlogs on top of that.

    Restoration is performed on a separate thread, which may run further threads, e.g. for
    managing input streams. For downloading, decrypting and decompressing binlogs a process
    pool is used to make sure multiple CPU cores can be utilized for parallel processing."""

    # Don't try reading binlogs more often than this if the previous call to read binlogs
    # read in all binlogs that were available at that time
    BINLOG_POLL_INTERVAL = 30

    ITERATION_SLEEP_SHORT = 0.2
    ITERATION_SLEEP_LONG = 10

    # If restoring basebackup fails four times for whatever reason, mark this restoration as
    # failed (Phase.failed_basebackup). This is only intended to be triggered in cases where
    # basebackup is corrupt and cannot be restored. Controller will try restoring older basebackup
    # if available (plus binlogs from the backup we were trying to restore) when this happens.
    MAX_BASEBACKUP_ERRORS = 4

    @enum.unique
    class Phase(str, enum.Enum):
        getting_backup_info = "getting_backup_info"
        initiating_binlog_downloads = "initiating_binlog_downloads"
        restoring_basebackup = "restoring_basebackup"
        refreshing_binlogs = "refreshing_binlogs"
        applying_binlogs = "applying_binlogs"
        waiting_for_apply_to_finish = "waiting_for_apply_to_finish"
        finalizing = "finalizing"
        completed = "completed"
        failed = "failed"
        # Terminal state for a RestoreCoordinator instance but restoring an earlier backup may be an option
        failed_basebackup = "failed_basebackup"

    POLL_PHASES = {Phase.waiting_for_apply_to_finish}

    def __init__(
        self,
        *,
        binlog_streams,
        file_storage_config,
        max_binlog_bytes=None,
        mysql_client_params,
        mysql_config_file_name,
        mysql_data_directory,
        mysql_relay_log_index_file,
        mysql_relay_log_prefix,
        pending_binlogs_state_file,
        restart_mysqld_callback,
        rsa_private_key_pem,
        site,
        state_file,
        stats,
        stream_id,
        target_time=None,
        target_time_approximate_ok=None,
        temp_dir,
    ):
        super().__init__()
        self.basebackup_bytes_downloaded = 0
        self.basebackup_restore_operation = None
        self.binlog_poll_interval = self.BINLOG_POLL_INTERVAL
        # Binary logs may be fetched from multiple consecutive backup streams. This is utilized if restoring
        # a basebackup fails for any reason but earlier backups are available and basebackup from one of those
        # can be successfully restored.
self.binlog_streams = binlog_streams self.current_file = None self.file_storage = None self.file_storage_config = file_storage_config self.is_running = True self.iteration_sleep_long = self.ITERATION_SLEEP_LONG self.iteration_sleep_short = self.ITERATION_SLEEP_SHORT self.lock = threading.RLock() self.log = logging.getLogger(f"{self.__class__.__name__}/{stream_id}") self.max_binlog_count = None # Maximum bytes worth of binlogs to store on disk simultaneously. Note that this is # not an actual upper limit as the constraint is checked after adding new binlog # (or else it might be possible no binlogs can be downloaded) self.max_binlog_bytes = max_binlog_bytes self.mp_context = multiprocessing.get_context("spawn") self.mysql_client_params = mysql_client_params self.mysql_config_file_name = mysql_config_file_name self.mysql_data_directory = mysql_data_directory self.mysql_relay_log_index_file = mysql_relay_log_index_file self.mysql_relay_log_prefix = mysql_relay_log_prefix self.ongoing_prefetch_operations = {} # Number of pending binlogs can be potentially very large. Store those to separate file to avoid # the frequently updated main state growing so large that saving it causes noticeable overhead pending_binlogs = [] self.pending_binlog_manager = AppendOnlyStateManager( entries=pending_binlogs, lock=self.lock, state_file=pending_binlogs_state_file ) self.pending_binlogs = pending_binlogs self.queue_in = self.mp_context.Queue() self.queue_out = self.mp_context.Queue() self.restart_mysqld_callback = restart_mysqld_callback if not isinstance(rsa_private_key_pem, bytes): rsa_private_key_pem = rsa_private_key_pem.encode("ascii") self.rsa_private_key_pem = rsa_private_key_pem self.site = site # State contains variables that should be persisted over process restart # so that the operation resumes from where it was left (whenever possible) self.state = { "applying_binlogs": [], "binlogs_picked_for_apply": 0, "basebackup_info": {}, "basebackup_restore_duration": None, "basebackup_restore_errors": 0, "binlog_name_offset": 0, # Corrected binlog position to use instead of the position stored in basebackup info. 
"binlog_position": None, "binlog_stream_offset": 0, "binlogs_restored": 0, "completed_info": None, "current_binlog_bucket": 0, "current_binlog_stream_index": 0, "current_executed_gtid_target": {}, "current_relay_log_target": None, # This is required so that we can correctly update pending_binlogs state file if updating that # fails after the main state has already been updated "expected_first_pending_binlog_remote_index": None, "gtid_executed": None, "gtids_patched": False, "file_fail_counters": {}, "force_complete": False, "last_flushed_index": 0, "last_poll": None, "last_processed_index": None, "last_renamed_index": 0, "mysql_params": None, "phase": self.Phase.getting_backup_info, "prefetched_binlogs": {}, "promotions": [], "remote_read_errors": 0, "restore_errors": 0, "server_uuid": None, "target_time_reached": False, "write_relay_log_manually": False, } self.state_manager = StateManager(lock=self.lock, state=self.state, state_file=state_file) self.stats = stats self.stream_id = stream_id self.target_time = target_time self.target_time_approximate_ok = target_time_approximate_ok self.temp_dir = temp_dir self.worker_processes = [] def add_new_binlog_streams(self, new_binlog_streams): if not self.can_add_binlog_streams(): return False self.binlog_streams = self.binlog_streams + new_binlog_streams return True @property def basebackup_bytes_total(self): return self.state["basebackup_info"].get("compressed_size") or 0 @property def binlogs_being_restored(self): return len(self.state["applying_binlogs"] or []) @property def binlogs_pending(self): with self.lock: return len(self.pending_binlogs) @property def binlogs_restored(self): return self.state["binlogs_restored"] def can_add_binlog_streams(self): # If we're restoring to a specific backup then we don't want to look for possible new backup # streams that we should restore. Also, if we've already decided to stop looking for binlogs # cannot add new ones or if we're already past the point of applying binlogs altogether we # obviously cannot do anything with new binlog streams. final_phases = {self.Phase.finalizing, self.Phase.completed, self.Phase.failed, self.Phase.failed_basebackup} return not self.target_time and not self.state["target_time_reached"] and self.phase not in final_phases def force_completion(self): if self.phase == self.Phase.waiting_for_apply_to_finish: self.update_state(force_complete=True) else: raise BadRequest("Completion can only be forced while waiting for binlog apply to finish") def is_complete(self): return self.phase == self.Phase.completed @property def phase(self): return self.state["phase"] def run(self): self.log.info("Restore coordinator running") self._start_process_pool() # If we're in a state where binary logs should be downloaded ensure we have appropriate # download operations scheduled. 
        # If restore coordinator is destroyed and new one re-created
        # in the middle of applying binary logs we could end up not having any ongoing download
        # operations for the newly created restore coordinator, causing restoration to stall
        self._queue_prefetch_operations()

        while self.is_running:
            try:
                if not self.file_storage:
                    self.log.info("Creating file storage accessor")
                    self.file_storage = get_transfer(self.file_storage_config)
                if self.phase == self.Phase.getting_backup_info:
                    self.get_backup_info()
                if self.phase == self.Phase.initiating_binlog_downloads:
                    self.initiate_binlog_downloads()
                if self.phase == self.Phase.restoring_basebackup:
                    self.restore_basebackup()
                if self.phase == self.Phase.refreshing_binlogs:
                    self.refresh_binlogs()
                if self.phase == self.Phase.applying_binlogs:
                    self.apply_binlogs()
                if self.phase == self.Phase.waiting_for_apply_to_finish:
                    if self.wait_for_apply_to_finish():
                        continue
                if self.phase == self.Phase.finalizing:
                    self.finalize_restoration()
                if self.phase in {self.Phase.completed, self.Phase.failed, self.Phase.failed_basebackup}:
                    break
                # Blocks for up to self._get_iteration_sleep() seconds if there are no events in queue
                self.read_queue()
            except Exception as ex:  # pylint: disable=broad-except
                self.log.exception("Unexpected exception while restoring backup")
                self.stats.unexpected_exception(ex=ex, where="RestoreCoordinator.run")
                self.state_manager.increment_counter(name="restore_errors")
                self.stats.increase("myhoard.restore_errors")
                time.sleep(min(self._get_iteration_sleep(), 2))
        self.is_running = False

    @property
    def server_uuid(self):
        return self.state["server_uuid"]

    def stop(self):
        self.log.info("Stopping restore coordinator")
        self.is_running = False
        self.queue_in.put(None)
        for _ in range(len(self.worker_processes)):
            self.queue_out.put(None)
        # Thread might not have been started or could've already been joined, we don't care about that
        with suppress(Exception):
            self.join()
        for worker in self.worker_processes:
            worker.join()
        self.worker_processes = []
        self.log.info("Restore coordinator stopped")

    def get_backup_info(self):
        if not self.state["completed_info"]:
            completed_info = self._load_file_data("completed.json")
            if not completed_info:
                self.log.error("Backup is not complete, cannot restore")
                self.state_manager.increment_counter(name="restore_errors")
                return
            self.update_state(completed_info=completed_info)
        basebackup_info = self._load_file_data("basebackup.json")
        if not basebackup_info:
            return
        self.update_state(
            basebackup_info=basebackup_info,
            phase=self.Phase.initiating_binlog_downloads,
        )

    def initiate_binlog_downloads(self):
        self._fetch_more_binlog_infos()
        self.update_state(phase=self.Phase.restoring_basebackup)

    def restore_basebackup(self):
        start_time = time.monotonic()
        encryption_key = rsa_decrypt_bytes(
            self.rsa_private_key_pem, bytes.fromhex(self.state["basebackup_info"]["encryption_key"])
        )
        self.basebackup_restore_operation = BasebackupRestoreOperation(
            encryption_algorithm="AES256",
            encryption_key=encryption_key,
            mysql_config_file_name=self.mysql_config_file_name,
            mysql_data_directory=self.mysql_data_directory,
            stats=self.stats,
            stream_handler=self._basebackup_data_provider,
            temp_dir=self.temp_dir,
        )
        try:
            self.basebackup_restore_operation.restore_backup()
            duration = time.monotonic() - start_time
            self.log.info("Basebackup restored in %.2f seconds", duration)
            self.update_state(
                phase=self.Phase.refreshing_binlogs,
                basebackup_restore_duration=duration,
            )
        except Exception as ex:  # pylint: disable=broad-except
            self.log.exception("Failed to restore basebackup: %r", ex)
            self.stats.unexpected_exception(ex=ex, where="RestoreCoordinator.restore_basebackup")
            self.state_manager.increment_counter(name="basebackup_restore_errors")
            self.state_manager.increment_counter(name="restore_errors")
            self.stats.increase("myhoard.restore_errors")
            if self.state["basebackup_restore_errors"] >= self.MAX_BASEBACKUP_ERRORS:
                self.log.error(
                    "Restoring basebackup failed %s times, assuming the backup is broken", self.MAX_BASEBACKUP_ERRORS
                )
                self.update_state(phase=self.Phase.failed_basebackup)
                self.stats.increase("myhoard.basebackup_broken")
        finally:
            self.basebackup_restore_operation = None

    def refresh_binlogs(self):
        self._fetch_more_binlog_infos(force=True)
        if not self.pending_binlogs:
            self.log.info("No binary logs available, marking restore completed immediately")
            self.update_state(phase=self.Phase.finalizing)
        else:
            self.update_state(phase=self.Phase.applying_binlogs)

    def apply_binlogs(self):
        binlogs = self._get_binlogs_to_apply()
        if not binlogs:
            return

        names = [
            self._relay_log_name(index=binlog["adjusted_remote_index"] + self.state["binlog_name_offset"], full_path=False)
            for binlog in binlogs
        ]

        last_range = None
        for binlog in binlogs:
            if binlog["gtid_ranges"]:
                last_range = binlog["gtid_ranges"][-1]

        last_remote_index = binlogs[-1]["adjusted_remote_index"]
        relay_log_target = last_remote_index + self.state["binlog_name_offset"] + 1
        mysql_params = {"with_binlog": False, "with_gtids": True}
        mysql_started = self.state["mysql_params"] == mysql_params
        initial_round = binlogs[0]["adjusted_remote_index"] == 1
        final_round = binlogs[-1]["adjusted_remote_index"] == self.pending_binlogs[-1]["adjusted_remote_index"]
        if initial_round and not mysql_started:
            self._rename_prefetched_binlogs(binlogs)
            with open(self.mysql_relay_log_index_file, "wb") as index_file:
                self.log.info("Writing relay log names from %r to %r", names[0], names[-1])
                # File must end with linefeed or else last line will not be processed correctly
                index_file.write(("\n".join(names) + "\n").encode("utf-8"))
            self._patch_gtid_executed(binlogs[0])

        all_gtids_applied = False
        until_after_gtids = None
        # If we're restoring to a specific target time get the GTID until which we should be restoring, unless
        # self.target_time_approximate_ok is True in which case it's OK to restore until the end of the relay
        # log containing the target time (which avoids MySQL switching to single threaded processing).
        if final_round and self.target_time and not self.target_time_approximate_ok and binlogs[-1]["gtid_ranges"]:
            renamed = last_remote_index <= self.state["last_renamed_index"]
            if renamed:
                file_name = self._relay_log_name(index=last_remote_index + self.state["binlog_name_offset"])
            else:
                file_name = self._relay_log_prefetch_name(index=last_remote_index)
            ranges = list(build_gtid_ranges(read_gtids_from_log(file_name, read_until_time=self.target_time)))
            if ranges:
                last_range = ranges[-1]
                until_after_gtids = "{}:{}".format(last_range["server_uuid"], last_range["end"])
                # Don't expect any specific file because if the GTID we're including is the very last entry
                # in the file the SQL thread might switch to next file and if it is earlier then it won't
                # so we'd need to be watching for two file names.
                # Because execution is always single threaded
                # checking just the commit should be sufficient anyway
                relay_log_target = None
                self.log.info("Restoring up to and including target GTID %r", until_after_gtids)
            else:
                self.log.info("No GTID ranges found in last file with given target timestamp, finalizing restore")
                all_gtids_applied = True

        if initial_round and not mysql_started:
            self._purge_old_slave_data()
            self._ensure_mysql_server_is_started(**mysql_params)

        if not all_gtids_applied:
            with self._mysql_cursor() as cursor:
                if not initial_round or mysql_started:
                    self._generate_updated_relay_log_index(binlogs, names, cursor)
                # Start from where basebackup ended for first binlog and for later iterations after file magic bytes
                initial_position = self.state["binlog_position"] or self.state["basebackup_info"]["binlog_position"] or 4
                relay_log_pos = initial_position if initial_round else 4
                self.log.info("Changing master position to %s in file %s", relay_log_pos, names[0])
                try:
                    change_master_to(
                        cursor=cursor,
                        options={
                            "MASTER_AUTO_POSITION": 0,
                            "MASTER_HOST": "dummy",
                            "RELAY_LOG_FILE": names[0],
                            "RELAY_LOG_POS": relay_log_pos,
                        },
                    )
                except (pymysql.err.InternalError, pymysql.err.OperationalError) as ex:
                    if ex.args[0] != ER_MASTER_INFO:
                        raise ex
                    # In some situations the MySQL SQL threads go into a bad state and always fail when doing
                    # CHANGE MASTER TO. It's not clear what's the exact case when that happens but seems to be
                    # related to applying relay log that contains some transactions that have already been
                    # previously applied. Making more relay logs available does not help. Only RESET SLAVE
                    # seems to fix it.
                    # Unfortunately RESET SLAVE is not always safe. Namely if there are any temporary tables those
                    # get dropped and restoration will not be successful so we cannot use this approach when any
                    # temp tables exist. In such cases there's no easy solution. Starting from scratch in
                    # single threaded mode would work.
                    self.log.warning("Failed to initialize new restore position: %r", ex)
                    self.stats.increase("myhoard.restore.change_master_to_failed")
                    cursor.execute("SELECT COUNT(*) AS count FROM INFORMATION_SCHEMA.INNODB_TEMP_TABLE_INFO")
                    temp_tables = cursor.fetchone()["count"]
                    if temp_tables:
                        # TODO: Should automatically redo the entire restoration from scratch with single thread
                        self.log.error("%s temporary tables exist, cannot safely perform RESET SLAVE", temp_tables)
                        self.stats.increase("myhoard.restore.cannot_reset")
                        raise ex
                    # Next attempt might work better if we have more binary logs available so try fetching some
                    self._fetch_more_binlogs()
                    # Reset the binlogs picked for apply value so that new binlogs are added to the list as soon
                    # as those have been downloaded
                    self.update_state(binlogs_picked_for_apply=0)
                    # Undo rename for files in current batch (RESET SLAVE would delete all the files)
                    self._rename_prefetched_binlogs_back(binlogs)
                    cursor.execute("RESET SLAVE")
                    # Store new index adjustment; first binlog in current list should be number one.
                    # Also FLUSH RELAY LOGS has no effect right after RESET SLAVE so instruct later code
                    # to manually regenerate new relay index file.
                    self.update_state(
                        binlog_name_offset=1 - binlogs[0]["adjusted_remote_index"], write_relay_log_manually=True
                    )
                    return

                sql = "START SLAVE SQL_THREAD"
                if until_after_gtids:
                    sql += f" UNTIL SQL_AFTER_GTIDS = '{until_after_gtids}'"
                cursor.execute(sql)

        prefetched_binlogs = self.state["prefetched_binlogs"]
        for binlog in binlogs:
            del prefetched_binlogs[binlog["remote_key"]]
        pending_binlogs = self.pending_binlogs[len(binlogs):]

        # Mark target_time_reached as True if we started applying the last binlog whose info we had previously
        # fetched to avoid more binlogs being retrieved in case we're syncing against active master
        target_time_reached = self.state["target_time_reached"]
        if not pending_binlogs:
            # TODO: Some time based threshold might be better. Like if more than 10 minutes elapsed while processing
            # the last batch then try fetching still more entries, otherwise consider sync to be complete.
            # If the last batch takes a long time to apply it could be the master that will be connected to has
            # already purged the binary logs that are needed by this server.
            target_time_reached = True

        applying_binlogs = []
        for binlog in binlogs:
            applying_binlogs.append({
                "adjusted_index": binlog["adjusted_remote_index"] + self.state["binlog_name_offset"],
                "file_size": binlog["file_size"],
                "gtid_ranges": binlog["gtid_ranges"],
            })
        if all_gtids_applied:
            applying_binlogs = []

        with self.lock:
            if pending_binlogs:
                expected_first_pending_binlog_remote_index = pending_binlogs[0]["adjusted_remote_index"]
            else:
                expected_first_pending_binlog_remote_index = None
            self.update_state(
                applying_binlogs=applying_binlogs,
                binlogs_picked_for_apply=0,
                current_executed_gtid_target=last_range,
                current_relay_log_target=relay_log_target,
                expected_first_pending_binlog_remote_index=expected_first_pending_binlog_remote_index,
                phase=self.Phase.finalizing if all_gtids_applied else self.Phase.waiting_for_apply_to_finish,
                prefetched_binlogs=prefetched_binlogs,
                target_time_reached=target_time_reached,
            )
            self.pending_binlog_manager.remove_many_from_head(len(binlogs))

    def wait_for_apply_to_finish(self):
        if self.state["force_complete"]:
            self.log.warning("Force completion requested. Treating binlog restoration as complete")
            self.update_state(
                applying_binlogs=[],
                phase=self.Phase.finalizing,
                prefetched_binlogs=[],
            )
            return True

        expected_first_index = self.state["expected_first_pending_binlog_remote_index"]
        if expected_first_index:
            count_to_drop = 0
            for binlog in self.pending_binlogs:
                if binlog["adjusted_remote_index"] < expected_first_index:
                    count_to_drop += 1
                else:
                    break
            if count_to_drop > 0:
                self.pending_binlog_manager.remove_many_from_head(count_to_drop)
            self.state_manager.update_state(expected_first_pending_binlog_remote_index=None)

        self._fetch_more_binlog_infos()
        apply_finished, current_index = self._check_sql_slave_status()

        applying_binlogs = self.state["applying_binlogs"]
        applied_binlog_count = 0
        gtid_executed = self.state["gtid_executed"] or self.state["basebackup_info"]["gtid_executed"]
        for binlog in applying_binlogs:
            if binlog["adjusted_index"] >= current_index:
                break
            applied_binlog_count += 1
            gtid_executed = add_gtid_ranges_to_executed_set(gtid_executed, binlog["gtid_ranges"])

        if applied_binlog_count > 0:
            applying_binlogs = applying_binlogs[applied_binlog_count:]
            self.update_state(
                applying_binlogs=applying_binlogs,
                binlogs_restored=self.binlogs_restored + applied_binlog_count,
                gtid_executed=gtid_executed,
            )
            self.stats.increase("myhoard.restore.binlogs_restored", applied_binlog_count)
            self._queue_prefetch_operations()

        if apply_finished:
            if not self.target_time or self.pending_binlogs:
                # Should not happen, here to catch programming errors
                assert not applying_binlogs, f"Some binlogs remained in {applying_binlogs!r} after completion"
            if self.pending_binlogs:
                phase = self.Phase.applying_binlogs
            else:
                self.log.info("Applied all pending binlogs, changing phase to 'finalizing'")
                phase = self.Phase.finalizing
            # Sometimes unexpected extra relay log files are generated. Take that into account when generating new
            # names so that we keep on creating the files with correct names
            offset = self.state["binlog_name_offset"]
            target_index = self.state["current_relay_log_target"]
            if target_index is not None and current_index > target_index:
                self.log.warning("Expected to reach binlog index %r but reached %r instead", target_index, current_index)
                self.stats.increase("myhoard.restore.unexpected_extra_relay_log")
                offset += (current_index - target_index)
            self.update_state(binlog_name_offset=offset, phase=phase)
        return apply_finished

    def finalize_restoration(self):
        # If there were no binary logs to restore MySQL server has not been started yet and trying
        # to connect to it would fail. If it hasn't been started (no mysql_params specified) it also
        # doesn't have slave configured or running so we can just skip the calls below.
        if self.state["mysql_params"]:
            with self._mysql_cursor() as cursor:
                cursor.execute("STOP SLAVE")
                # Do RESET SLAVE to ensure next CHANGE MASTER TO will work normally and also to get rid
                # of any possible leftover relay logs (if we did PITR there could be relay log with some
                # transactions that haven't been applied)
                cursor.execute("RESET SLAVE")
        self._ensure_mysql_server_is_started(with_binlog=True, with_gtids=True)
        self.update_state(phase=self.Phase.completed)
        self.log.info("Backup restoration completed")

    def read_queue(self):
        try:
            result = self.queue_in.get(timeout=self._get_iteration_sleep())
            # Empty results may be posted to wake up the thread
            if not result:
                return
            self._process_work_queue_result(result)
            while True:
                try:
                    result = self.queue_in.get(block=False)
                    if result:
                        self._process_work_queue_result(result)
                except queue.Empty:
                    break
        except queue.Empty:
            pass

    def update_state(self, **kwargs):
        self.state_manager.update_state(**kwargs)

    def _are_all_gtids_executed(self, gtid_ranges):
        """Returns True if all GTIDs in the given list of GTID ranges have already been applied"""
        gtid_executed = self.state["gtid_executed"] or self.state["basebackup_info"]["gtid_executed"]
        # Run the original set of executed GTIDs through the same function to ensure format is exactly
        # the same so that direct equality comparison works as expected
        set1 = add_gtid_ranges_to_executed_set(gtid_executed)
        set2 = add_gtid_ranges_to_executed_set(gtid_executed, gtid_ranges)
        return set1 == set2

    def _process_work_queue_result(self, result):
        key = result["remote_key"]
        binlog = self.ongoing_prefetch_operations.pop(key)
        fail_counters = self.state["file_fail_counters"]
        if result["result"] == "success":
            fail_counters.pop(key, None)
            self.log.info("Successfully prefetched %r (adjusted remote index %r)", key, binlog["adjusted_remote_index"])
            prefetched_binlogs = self.state["prefetched_binlogs"]
            # TODO: Add some tracking for how long has elapsed since we got any results from
            # downloaders and if enough time has passed tear down processes, create new queues,
            # clear `ongoing_prefetch_operations`, restart processes and put download items back to queue
            self.update_state(
                file_fail_counters=fail_counters,
                prefetched_binlogs={
                    **prefetched_binlogs,
                    key: binlog["file_size"]
                },
            )
        else:
            fail_counters[key] = fail_counters.get(key, 0) + 1
            retry = fail_counters[key] < 3
            if retry:
                self.log.error("Failed to fetch %r: %r. Retrying", key, result["message"])
                result.pop("result")
                result.pop("message")
                self.queue_out.put(result)
                self.ongoing_prefetch_operations[key] = binlog
                self.update_state(file_fail_counters=fail_counters)
            else:
                self.log.error(
                    "Failed to fetch %r: %r. Too many (%s) failures, marking restoration as failed",
                    key, result["message"], fail_counters[key]
                )
                self.update_state(file_fail_counters=fail_counters, phase=self.Phase.failed)

    def _build_binlog_full_name(self, name):
        binlog_stream = self.binlog_streams[self.state["current_binlog_stream_index"]]
        site = binlog_stream["site"]
        stream_id = binlog_stream["stream_id"]
        return f"{site}/{stream_id}/{name}"

    def _build_full_name(self, name):
        return f"{self.site}/{self.stream_id}/{name}"

    def _load_file_data(self, name, missing_ok=False):
        try:
            info_str, _ = self.file_storage.get_contents_to_string(self._build_full_name(name))
            return json.loads(info_str)
        except rohmu_errors.FileNotFoundFromStorageError as ex:
            if not missing_ok:
                self.log.error("File %r not found from storage", name)
                self.stats.unexpected_exception(ex=ex, where="RestoreCoordinator._load_file_data")
                self.state_manager.increment_counter(name="remote_read_errors")
                self.stats.increase("myhoard.restore_errors")
        except Exception as ex:  # pylint: disable=broad-except
            self.log.exception("Downloading file %r failed", name)
            self.stats.unexpected_exception(ex=ex, where="RestoreCoordinator._load_file_data")
            self.state_manager.increment_counter(name="remote_read_errors")
            self.stats.increase("myhoard.remote_read_errors")
        return None

    def _basebackup_data_provider(self, target_stream):
        name = self._build_full_name("basebackup.xbstream")
        compressed_size = self.state["basebackup_info"].get("compressed_size")
        file_storage = get_transfer(self.file_storage_config)

        last_time = [time.monotonic()]
        last_value = [0]
        self.basebackup_bytes_downloaded = 0

        def download_progress(progress, max_progress):
            if progress and max_progress and compressed_size:
                # progress may be the actual number of bytes or it may be percentages
                self.basebackup_bytes_downloaded = int(compressed_size * progress / max_progress)
                # Track both absolute number and explicitly calculated rate. The rate can be useful as
                # a separate measurement because downloads are not ongoing all the time and calculating
                # rate based on raw byte counter requires knowing when the operation started and ended
                self.stats.gauge_int("myhoard.restore.basebackup_bytes_downloaded", self.basebackup_bytes_downloaded)
                last_value[0], last_time[0] = track_rate(
                    current=self.basebackup_bytes_downloaded,
                    last_recorded=last_value[0],
                    last_recorded_time=last_time[0],
                    metric_name="myhoard.restore.basebackup_download_rate",
                    stats=self.stats,
                )

        file_storage.get_contents_to_fileobj(name, target_stream, progress_callback=download_progress)

    def _get_iteration_sleep(self):
        if self.phase in self.POLL_PHASES:
            return self.iteration_sleep_short
        else:
            return self.iteration_sleep_long

    @staticmethod
    def _get_sorted_file_infos(infos):
        def build_sort_key(info):
            # name is path/index_server, e.g. 2019-01-12T07:43:20Z_7fba6afa-83f8-43e5-a565-0c6ab43386af/binlogs/0/100_2,
            # get the index part (100) as integer
            name = info["name"].rsplit("/", 1)[-1]
            index = name.split("_", 1)[0]
            return int(index)

        return sorted(infos, key=build_sort_key)

    def _list_binlogs_in_bucket(self, bucket):
        last_processed_index = self.state["last_processed_index"]
        new_binlogs = []
        highest_index = 0
        start_time = time.monotonic()
        target_time_reached_by_server = set()

        self.log.debug("Listing binlogs in bucket %s", bucket)
        try:
            list_iter = self.file_storage.list_iter(self._build_binlog_full_name(f"binlogs/{bucket}"))
            for info in self._get_sorted_file_infos(list_iter):
                binlog = parse_fs_metadata(info["metadata"])
                # We may be handling binlogs from multiple streams.
                # To make the other logic work, calculate
                # monotonically increasing index across all streams. (Individual streams have their indexes
                # always start from 1.)
                binlog["adjusted_remote_index"] = self.state["binlog_stream_offset"] + binlog["remote_index"]
                binlog["remote_key"] = info["name"]
                binlog["remote_size"] = info["size"]
                highest_index = max(highest_index, binlog["remote_index"])
                if last_processed_index is not None and binlog["adjusted_remote_index"] <= last_processed_index:
                    continue
                # We're handling binlogs in order. If we've reached target time for any earlier binlog then this
                # binlog must be out of range as well. This check is needed because we might have binlogs without
                # GTIDs that cannot be excluded based on start/end checks
                if binlog["server_id"] in target_time_reached_by_server:
                    continue
                if self.target_time and binlog["gtid_ranges"]:
                    if binlog["gtid_ranges"][0]["start_ts"] >= self.target_time:
                        # We exclude entries whose time matches recovery target time so any file whose start_ts
                        # is equal or higher than target time is certain not to contain data we're going to apply
                        self.log.info(
                            "Start time %s of binlog %s from server %s is after our target time %s, skipping",
                            binlog["gtid_ranges"][0]["start_ts"], binlog["remote_index"], binlog["server_id"],
                            self.target_time
                        )
                        target_time_reached_by_server.add(binlog["server_id"])
                        continue
                    if binlog["gtid_ranges"][0]["end_ts"] >= self.target_time:
                        # Log and mark target time reached but include binlog and continue processing results. We may
                        # get binlogs from multiple servers in some race conditions and we don't yet know if this binlog
                        # was from a server that was actually valid at that point in time and some other server may have
                        # binlogs that are still relevant.
                        self.log.info(
                            "End time %s of binlog %s from server %s is at or after our target time %s, "
                            "target time reached",
                            binlog["gtid_ranges"][0]["end_ts"], binlog["remote_index"], binlog["server_id"],
                            self.target_time
                        )
                        target_time_reached_by_server.add(binlog["server_id"])
                new_binlogs.append(binlog)
        except rohmu_errors.FileNotFoundFromStorageError:
            pass
        except Exception as ex:  # pylint: disable=broad-except
            self.log.error("Failed to list remote binlogs: %r", ex)
            self.stats.unexpected_exception(ex=ex, where="RestoreCoordinator._load_file_data")
            self.state_manager.increment_counter(name="remote_read_errors")
            self.stats.increase("myhoard.remote_read_errors")
            return None, None, None
        duration = time.monotonic() - start_time
        self.log.info("Found %s binlogs from bucket %s in %.2f seconds", len(new_binlogs), bucket, duration)
        return new_binlogs, highest_index, bool(target_time_reached_by_server)

    def _fetch_more_binlog_infos(self, force=False):
        if self.state["target_time_reached"]:
            return
        if not force and self.state["last_poll"] and time.time() - self.state["last_poll"] < self.binlog_poll_interval:
            return
        self._fetch_more_binlogs_infos_for_current_stream()
        while not self.state["target_time_reached"] and self._switch_to_next_binlog_stream():
            self._fetch_more_binlogs_infos_for_current_stream()

    def _fetch_more_binlogs_infos_for_current_stream(self):
        bucket = self.state["current_binlog_bucket"]
        new_binlogs = []
        while True:
            previous_bucket = bucket
            binlogs, highest_index, target_time_reached = self._list_binlogs_in_bucket(bucket)
            if binlogs is None:
                break
            # Move to next bucket of BINLOG_BUCKET_SIZE binlogs if the listing contained last binlog that
            # is expected to be found from current bucket
            if (highest_index + 1) % BINLOG_BUCKET_SIZE == 0:
                bucket += 1
            new_binlogs.extend(binlogs)
            # If we reached target time or didn't have a full bucket there cannot be more binlogs
            # of interest available at this time
            if target_time_reached or previous_bucket == bucket:
                break

        if not new_binlogs:
            self.update_state(
                current_binlog_bucket=bucket,
                last_poll=time.time(),
                target_time_reached=target_time_reached,
            )
            return

        # Also refresh promotions list so that we know which of the remote
        # binlogs are actually valid
        promotions = {}
        try:
            for info in self.file_storage.list_iter(self._build_binlog_full_name("promotions")):
                # There could theoretically be multiple promotions with the same
                # index value if new master got promoted but then failed before
                # managing to upload any binlogs. To cope with that only keep one
                # promotion info per server id (the one with most recent timestamp)
                info = parse_fs_metadata(info["metadata"])
                existing = promotions.get(info["start_index"])
                if existing and info["promoted_at"] < existing["promoted_at"]:
                    continue
                promotions[info["start_index"]] = info
                self.log.info(
                    "server_id %s valid starting from %s (at %s)", info["server_id"], info["start_index"],
                    info["promoted_at"],
                )
        except Exception as ex:  # pylint: disable=broad-except
            # There should always be one promotion file so file not found is real error too
            self.log.error("Failed to list promotions: %r", ex)
            self.stats.unexpected_exception(ex=ex, where="RestoreCoordinator._fetch_more_binlog_infos")
            self.state_manager.increment_counter(name="remote_read_errors")
            self.stats.increase("myhoard.remote_read_errors")
            return

        if 1 not in promotions:
            self.state_manager.increment_counter(name="restore_errors")
            self.log.error("Missing initial promotion info: %r", promotions)
            return

        promotions = {start_index: info["server_id"] for start_index, info in promotions.items()}

        if self.pending_binlogs:
            last_index = self.pending_binlogs[-1]["adjusted_remote_index"]
        else:
            last_index = self.state["last_processed_index"] or 0
        try:
            new_binlogs = sort_and_filter_binlogs(
                binlogs=new_binlogs, log=self.log, last_index=last_index, promotions=promotions
            )
        except Exception as ex:  # pylint: disable=broad-except
            self.log.error("Sorting and filtering binlogs failed: %r", ex)
            self.stats.unexpected_exception(ex=ex, where="RestoreCoordinator._fetch_more_binlog_infos")
            self.state_manager.increment_counter(name="restore_errors")
            self.stats.increase("myhoard.restore_errors")
            return

        if target_time_reached:
            # If list of binlogs ends with binlogs that don't have any GTIDs exclude those from the actual
            # list we're going to process. Later logic assumes that only the last binlog we're processing
            # may be at the target time boundary and having binlogs with no GTIDs at the end of the list
            # makes that logic fail. Binlogs with no GTIDs should be empty anyway so excluding them should
            # have no ill effect.
            while new_binlogs and not new_binlogs[-1]["gtid_ranges"]:
                self.log.info(
                    "Dropping last new binlog %r because target time is reached and binlog is empty",
                    new_binlogs[-1]["remote_index"]
                )
                new_binlogs.pop()

        if not new_binlogs:
            self.update_state(
                current_binlog_bucket=bucket,
                last_poll=time.time(),
                target_time_reached=target_time_reached,
            )
            return

        # If persisting new binlogs succeeded but persisting other state failed we might get same binlogs anew
        actual_new_binlogs = []
        last_existing_index = self.pending_binlogs[-1]["adjusted_remote_index"] if self.pending_binlogs else None
        for binlog in new_binlogs:
            if not last_existing_index or binlog["adjusted_remote_index"] > last_existing_index:
                actual_new_binlogs.append(binlog)

        with self.lock:
            self.pending_binlog_manager.append_many(actual_new_binlogs)
        if new_binlogs:
            last_processed_index = new_binlogs[-1]["adjusted_remote_index"]
        else:
            last_processed_index = self.state["last_processed_index"]
        self.update_state(
            current_binlog_bucket=bucket,
            last_poll=time.time(),
            last_processed_index=last_processed_index,
            target_time_reached=target_time_reached,
        )
        self._queue_prefetch_operations()
        self.stats.gauge_int("myhoard.restore.pending_binlogs", len(self.pending_binlogs))

    def _fetch_more_binlogs(self, *, force=False):
        self._fetch_more_binlog_infos(force=force)
        self._queue_prefetch_operations(force=True)

    def _queue_prefetch_operations(self, *, force=False):
        on_disk_binlog_count = (
            len(self.ongoing_prefetch_operations) + len(self.state["prefetched_binlogs"]) +
            len(self.state["applying_binlogs"])
        )
        ongoing_bytes = sum(binlog["file_size"] for binlog in self.ongoing_prefetch_operations.values())
        prefetched_bytes = sum(self.state["prefetched_binlogs"].values())
        applying_bytes = sum(binlog["file_size"] for binlog in self.state["applying_binlogs"])
        on_disk_binlog_bytes = ongoing_bytes + prefetched_bytes + applying_bytes
        queued_non_empty = 0
        for binlog in self.pending_binlogs:
            if not force or queued_non_empty:
                if self.max_binlog_count and on_disk_binlog_count >= self.max_binlog_count:
                    break
                if self.max_binlog_bytes and on_disk_binlog_bytes >= self.max_binlog_bytes:
                    break
            key = binlog["remote_key"]
            if key in self.ongoing_prefetch_operations or key in self.state["prefetched_binlogs"]:
                continue
            self.ongoing_prefetch_operations[key] = binlog
            props = {
                "compression_algorithm": binlog["compression_algorithm"],
                "remote_file_size": binlog["remote_size"],
                "local_file_name": self._relay_log_prefetch_name(index=binlog["adjusted_remote_index"]),
                "remote_key": key,
            }
            self.log.info("Queuing prefetch operation for %r", key)
            self.queue_out.put(props)
            if binlog["gtid_ranges"]:
                queued_non_empty += 1
            on_disk_binlog_count += 1
            on_disk_binlog_bytes += binlog["file_size"]

    @contextlib.contextmanager
    def _mysql_cursor(self):
        with mysql_cursor(
            host=self.mysql_client_params["host"],
            password=self.mysql_client_params["password"],
            port=self.mysql_client_params["port"],
            user=self.mysql_client_params["user"],
        ) as cursor:
            yield cursor

    def _parse_gtid_executed_ranges(self, binlog):
        binlog_position = self.state["basebackup_info"]["binlog_position"]
        if not binlog["gtid_ranges"] or not binlog_position:
            return []

        # Scan all GTIDs before our start location and update server gtid_executed based on that.
        # xtrabackup does not enforce log rotation when it takes the backup and the actual executed
        # GTIDs may only be cached in memory and will not appear on this node because memory cache is
        # only guaranteed to be flushed on log rotation.
        # MySQL server normally recovers by parsing
        # binlogs to see what has actually been executed but since we don't have a binlog in correct
        # state and generating one is cumbersome as well, manually update the table via SQL later.
        # While we're parsing the file also look for the actual location of the gtid value specified in
        # basebackup info and if the transaction following that is located before the reported start position
        # then adjust the actual start position accordingly. This is needed because the file_name and
        # file_position attributes in binlog_info don't seem to necessarily match the position that actually
        # contains the transaction following the basebackup. This may be related to locking in MySQL code,
        # which could result in transaction getting written to binary log but GTID info not having been updated
        # when table_log_status locks both binlog and gtid status, getting mismatching binlog position and gtid
        # info.
        last_gnos = {}
        if self.state["basebackup_info"]["gtid"]:
            # "gtid" contains last value for each past server so need to parse it accordingly
            for uuid_str, ranges in parse_gtid_range_string(self.state["basebackup_info"]["gtid"]).items():
                for rng in ranges:
                    last_gnos[uuid_str] = rng[1]
        local_name = self._relay_log_name(index=binlog["adjusted_remote_index"] + self.state["binlog_name_offset"])
        gtid_infos = []
        found_last_entry = False
        for gtid_info in read_gtids_from_log(local_name, read_until_position=binlog_position):
            _timestamp, _server_id, uuid_str, gno, start_position = gtid_info
            last_gno = last_gnos.get(uuid_str)
            if last_gno == gno:
                found_last_entry = True
            elif found_last_entry or (last_gno is not None and gno > last_gno):
                if start_position != binlog_position:
                    self.log.warning(
                        "Basebackup binlog position %r differs from position %r of GTID %s:%r",
                        binlog_position, start_position, uuid_str, gno
                    )
                    self.update_state(binlog_position=start_position)
                break
            gtid_infos.append(gtid_info)
        return list(build_gtid_ranges(gtid_infos))

    def _get_binlogs_to_apply(self):
        binlogs = []
        binlogs_picked_for_apply = self.state["binlogs_picked_for_apply"]
        for idx, binlog in enumerate(self.pending_binlogs):
            if binlogs_picked_for_apply > 0:
                if idx < binlogs_picked_for_apply:
                    binlogs.append(binlog)
                else:
                    break
            elif binlog["remote_key"] in self.state["prefetched_binlogs"]:
                binlogs.append(binlog)
            else:
                break
        # Nothing available yet
        if not binlogs:
            return None
        # It seems that having only transactions that have already been executed or not
        # having any transactions at all leaves SQL threads in somehow bad state when using
        # multithreading and applying next batch wouldn't work. To avoid problems don't
        # apply a batch of binlogs unless there are some new transactions.
        gtid_ranges = binlogs[-1]["gtid_ranges"]
        if not gtid_ranges or self._are_all_gtids_executed(gtid_ranges):
            if self.ongoing_prefetch_operations:
                # We have some downloads still ongoing, more binlogs will become available automatically in a bit
                self.log.info("Last binlog is either empty or has no new transactions. Waiting for more to become available")
                return None
            elif len(binlogs) < len(self.pending_binlogs):
                # No ongoing downloads but more binlogs are available. Schedule some to be downloaded
                self.log.info("Last binlog is either empty or has no new transactions. Scheduling more downloads")
                self._fetch_more_binlogs()
                return None
            else:
                # We have all binlogs that are available in file storage at this time.
                self.log.info("Last binlog is either empty or has no new transactions. Treating this as last batch")
                self.update_state(target_time_reached=True)
        self.update_state(binlogs_picked_for_apply=len(binlogs))
        return binlogs

    def _generate_updated_relay_log_index(self, binlogs, names, cursor):
        # Should already be stopped but just to make sure
        cursor.execute("STOP SLAVE")
        cursor.execute("SHOW SLAVE STATUS")
        initial_relay_log_file = cursor.fetchone()["Relay_Log_File"]
        # Technically we'd want one fewer relay log file here but the server seems to have some
        # caching logic related to the current relay log and we need to make sure currently active
        # log is after the last log we want to replay to ensure all logs get applied
        last_flushed_index = self.state["last_flushed_index"]
        flush_count = 0
        for binlog in binlogs:
            if binlog["adjusted_remote_index"] <= last_flushed_index:
                continue
            if not self.state["write_relay_log_manually"]:
                cursor.execute("FLUSH RELAY LOGS")
            flush_count += 1
            last_flushed_index = binlog["adjusted_remote_index"]
        if flush_count > 0:
            if self.state["write_relay_log_manually"]:
                with open(self.mysql_relay_log_index_file, "wb") as index_file:
                    self.log.info("Writing relay log names from %r to %r", names[0], names[-1])
                    # File must end with linefeed or else last line will not be processed correctly
                    index_file.write(("\n".join(names) + "\n").encode("utf-8"))
            self.update_state(last_flushed_index=last_flushed_index, write_relay_log_manually=False)
        cursor.execute("SHOW SLAVE STATUS")
        final_relay_log_file = cursor.fetchone()["Relay_Log_File"]
        self.log.info(
            "Flushed relay logs %d times, initial file was %r and current is %r", flush_count,
            initial_relay_log_file, final_relay_log_file
        )
        self._rename_prefetched_binlogs(binlogs)

    def _rename_prefetched_binlogs(self, binlogs):
        last_renamed_index = self.state["last_renamed_index"]
        for binlog in binlogs:
            remote_index = binlog["adjusted_remote_index"]
            if remote_index <= last_renamed_index:
                continue
            local_prefetch_name = self._relay_log_prefetch_name(index=remote_index)
            if os.path.exists(local_prefetch_name):
                local_name = self._relay_log_name(index=remote_index + self.state["binlog_name_offset"])
                os.rename(local_prefetch_name, local_name)
                self.log.info("Renamed %s to %s", local_prefetch_name, local_name)
                last_renamed_index = remote_index
        self.update_state(last_renamed_index=last_renamed_index)

    def _purge_old_slave_data(self):
        # Remove potentially conflicting slave data from backup (unfortunately it's not possible to exclude these tables
        # from the backup using xtrabackup at the moment). In some cases, e.g. when rotate event appears in the relay
        # log (coming from master binlog) and mysql has some information about non-existing replication in these tables,
        # it tries to read previous relay logs in order to find last rotate event, but those relay logs do not exist
        # anymore.
        self._ensure_mysql_server_is_started(with_binlog=False, with_gtids=False)
        with self._mysql_cursor() as cursor:
            cursor.execute("DELETE FROM mysql.slave_master_info")
            cursor.execute("DELETE FROM mysql.slave_relay_log_info")
            cursor.execute("DELETE FROM mysql.slave_worker_info")
            cursor.execute("COMMIT")

    def _rename_prefetched_binlogs_back(self, binlogs):
        last_renamed_index = self.state["last_renamed_index"]
        for binlog in reversed(binlogs):
            remote_index = binlog["adjusted_remote_index"]
            if last_renamed_index < remote_index:
                continue
            local_name = self._relay_log_name(index=remote_index + self.state["binlog_name_offset"])
            local_prefetch_name = self._relay_log_prefetch_name(index=remote_index)
            if os.path.exists(local_name):
                os.rename(local_name, local_prefetch_name)
                self.log.info("Renamed %s back to %s", local_name, local_prefetch_name)
                last_renamed_index = remote_index - 1
        self.update_state(last_flushed_index=last_renamed_index, last_renamed_index=last_renamed_index)

    def _check_sql_slave_status(self):
        expected_range = self.state["current_executed_gtid_target"]
        expected_index = self.state["current_relay_log_target"]
        with self._mysql_cursor() as cursor:
            cursor.execute("SHOW SLAVE STATUS")
            slave_status = cursor.fetchone()
            current_file = slave_status["Relay_Log_File"]
            sql_running_state = slave_status["Slave_SQL_Running_State"]
            current_index = int(current_file.rsplit(".", 1)[-1])
            if expected_index is not None and current_index < expected_index:
                self.log.debug("Expected relay log name not reached (%r < %r)", current_index, expected_index)
                if sql_running_state == "Slave has read all relay log; waiting for more updates":
                    # Sometimes if the next file is empty MySQL SQL thread does not update the relay log
                    # file to match the last one. Because the thread has finished doing anything we need
                    # to react to the situation or else restoration will stall indefinitely.
                    if expected_range:
                        self.log.info(
                            "SQL thread has finished executing even though target file has not been reached (%r < %r), "
                            "target GTID range has been set. Continuing with GTID check", current_index, expected_index
                        )
                    else:
                        # We don't quite know if proceeding is safe but there's no other sensible action than
                        # returning `True, expected_index` from this branch as we know there aren't any transactions
                        # that should be applied anyway so there should be no data loss.
                        self.log.warning(
                            "SQL thread has finished executing even though target file has not been reached (%r < %r), "
                            "no GTID range set. Considering complete", current_index, expected_index
                        )
                        return True, expected_index
                else:
                    return False, current_index
            # The batch we're applying might not have contained any GTIDs
            if not expected_range:
                self.log.info(
                    "No expected GTID range available, assuming complete because Relay_Log_File (%r) matches", current_file
                )
                found = True
            else:
                range_str = make_gtid_range_string([expected_range])
                cursor.execute(
                    "SELECT GTID_SUBSET(%s, @@GLOBAL.gtid_executed) AS executed, @@GLOBAL.gtid_executed AS gtid_executed",
                    [range_str]
                )
                result = cursor.fetchone()
                found = result["executed"]
                if found:
                    self.log.info(
                        "Expected log file %r reached and GTID range %r has been applied: %s", current_file,
                        expected_range, result["gtid_executed"]
                    )
                    # In some cases SQL thread doesn't change Relay_Log_File value appropriately. Update
                    # the index we return from here to match expected index if all transactions have been
                    # applied so that all applying binlogs are marked as completed even if the SQL thread
                    # did not say so.
                    if expected_index is not None and current_index < expected_index:
                        current_index = expected_index
            if found:
                cursor.execute("STOP SLAVE")
                # Current file could've been updated since we checked it the first time before slave was stopped.
                # Get the latest value here so that we're sure to start from correct index
                cursor.execute("SHOW SLAVE STATUS")
                current_file = cursor.fetchone()["Relay_Log_File"]
                last_index = int(current_file.rsplit(".", 1)[-1])
                if last_index > current_index:
                    self.log.info("Relay index incremented from %s to %s after STOP SLAVE", current_index, last_index)
                    current_index = last_index
            return found, current_index

    def _ensure_mysql_server_is_started(self, *, with_binlog, with_gtids):
        if self.state["mysql_params"] == {"with_binlog": with_binlog, "with_gtids": with_gtids}:
            return
        self.restart_mysqld_callback(with_binlog=with_binlog, with_gtids=with_gtids)
        server_uuid = self.state["server_uuid"]
        if not server_uuid:
            with self._mysql_cursor() as cursor:
                cursor.execute("SELECT @@GLOBAL.server_uuid AS server_uuid")
                server_uuid = cursor.fetchone()["server_uuid"]
        self.update_state(mysql_params={"with_binlog": with_binlog, "with_gtids": with_gtids}, server_uuid=server_uuid)

    def _patch_gtid_executed(self, binlog):
        if self.state["gtids_patched"]:
            return
        expected_gtid_executed_ranges = self._parse_gtid_executed_ranges(binlog)
        if not expected_gtid_executed_ranges:
            return
        self._ensure_mysql_server_is_started(with_binlog=False, with_gtids=False)
        with self._mysql_cursor() as cursor:
            for gtid_range in expected_gtid_executed_ranges:
                cursor.execute(
                    (
                        "SELECT interval_start, interval_end FROM mysql.gtid_executed "
                        " WHERE (source_uuid, interval_start) IN ("
                        " SELECT source_uuid, MAX(interval_start) FROM mysql.gtid_executed "
                        " WHERE source_uuid = %s GROUP BY source_uuid"
                        " )"
                    ),
                    [gtid_range["server_uuid"]],
                )
                existing_range = cursor.fetchone()
                if not existing_range:
                    # Range doesn't exist if there were no entries with GTID by the time basebackup creation
                    # completed
                    if gtid_range["start"] == 1:
                        cursor.execute((
                            "INSERT INTO mysql.gtid_executed (source_uuid, interval_start, interval_end) "
                            " VALUES (%s, %s, %s)"
                        ), (gtid_range["server_uuid"], gtid_range["start"], gtid_range["end"]))
                        cursor.execute("COMMIT")
                    else:
                        # This is not expected to happen. We cannot ensure gtid_executed is in sane state if it
                        # happens but applying old binlog is not dependent on this so allow continuing regardless
                        self.log.error("Could not find existing gtid_executed info for range %r", gtid_range)
                    continue
                if existing_range["interval_end"] == gtid_range["end"]:
                    self.log.info("Existing gtid_executed info already up-to-date, no need to apply %r", gtid_range)
                    continue
                if existing_range["interval_end"] != gtid_range["start"] - 1:
                    # This usually shouldn't happen because gtid_executed is updated whenever binlog is
                    # rotated so all missing values should've been found from the binlog we parsed. There
                    # seem to be some corner cases where the backup still ends up containing older GTID
                    # executed value so that there's a gap in the sequence.
                    self.log.info(
                        "Existing gtid_executed %r does not end just before new range %r", existing_range, gtid_range
                    )
                cursor.execute(
                    (
                        "UPDATE mysql.gtid_executed SET interval_end = %s "
                        " WHERE source_uuid = %s AND interval_start = %s"
                    ),
                    (gtid_range["end"], gtid_range["server_uuid"], existing_range["interval_start"]),
                )
                cursor.execute("COMMIT")
        self.update_state(gtids_patched=True)

    def _relay_log_name(self, *, index, full_path=True):
        return relay_log_name(prefix=self.mysql_relay_log_prefix, index=index, full_path=full_path)

    def _relay_log_prefetch_name(self, *, index):
        local_name = self._relay_log_name(index=index)
        return f"{local_name}.prefetch"

    def _start_process_pool(self):
        process_count = max(multiprocessing.cpu_count() - 1, 1)
        config = {
            "object_storage": self.file_storage_config,
            "rsa_private_key_pem": self.rsa_private_key_pem.decode("ascii"),
        }
        self.worker_processes = [
            self.mp_context.Process(target=download_binlog, args=(config, self.queue_out, self.queue_in))
            for _ in range(process_count)
        ]
        for worker in self.worker_processes:
            worker.start()

    def _switch_to_next_binlog_stream(self):
        current_index = self.state["current_binlog_stream_index"]
        if current_index + 1 >= len(self.binlog_streams):
            return False
        # _switch_to_next_binlog_stream is only ever called when we have consumed all available binlogs from
        # the previous stream. The adjusted remote index for that is the number we'll want to add to the indexes
        # for next stream as that has its indexes start from one.
        binlog_stream_offset = self.state["last_processed_index"] or 0
        self.update_state(binlog_stream_offset=binlog_stream_offset, current_binlog_stream_index=current_index + 1)
        self.log.info(
            "Switched to binlog stream index %s, index adjustment set to %s", current_index + 1, binlog_stream_offset
        )
        return True
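
Taken together, the phases above form a state machine that a caller simply starts and then polls. The following is a minimal usage sketch, not taken from the original project: every concrete value (paths, site and stream identifiers, connection parameters) is a made-up placeholder, NullStats is only a stand-in for whatever stats client the constructor expects, and the restart callback does nothing.

# --- Illustrative sketch only; all values below are hypothetical placeholders ---
import time


class NullStats:
    # Stand-in implementing just the methods RestoreCoordinator calls.
    def increase(self, metric, inc_value=1):
        pass

    def gauge_int(self, metric, value):
        pass

    def unexpected_exception(self, *, ex, where):
        pass


def restart_mysqld(*, with_binlog, with_gtids):
    # Placeholder: a real callback must restart mysqld with the given options.
    pass


coordinator = RestoreCoordinator(
    binlog_streams=[{"site": "default", "stream_id": "2019-01-12T07:43:20Z_example"}],
    file_storage_config={"storage_type": "local", "directory": "/tmp/backup-storage"},
    mysql_client_params={"host": "127.0.0.1", "port": 3306, "user": "root", "password": "secret"},
    mysql_config_file_name="/etc/mysql/my.cnf",
    mysql_data_directory="/var/lib/mysql",
    mysql_relay_log_index_file="/var/lib/mysql-relay/relay-log.index",
    mysql_relay_log_prefix="/var/lib/mysql-relay/relay-log",
    pending_binlogs_state_file="/var/lib/myhoard/pending_binlogs.json",
    restart_mysqld_callback=restart_mysqld,
    rsa_private_key_pem=b"-----BEGIN RSA PRIVATE KEY-----\n...",
    site="default",
    state_file="/var/lib/myhoard/restore_coordinator.json",
    stats=NullStats(),
    stream_id="2019-01-12T07:43:20Z_example",
    temp_dir="/tmp/restore-temp",
)
coordinator.start()
# Poll until the coordinator reaches a terminal phase.
while not coordinator.is_complete() and coordinator.phase not in {
    RestoreCoordinator.Phase.failed, RestoreCoordinator.Phase.failed_basebackup
}:
    time.sleep(1)
coordinator.stop()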
test_daemon.py
import datetime
import multiprocessing
import os
import os.path
import time

import iso8601
import pytest

from edera import Condition
from edera import Parameter
from edera import Parameterizable
from edera import Task
from edera import Timer
from edera.daemon import Daemon
from edera.daemon import DaemonAutoTester
from edera.daemon import DaemonModule
from edera.daemon import DaemonSchedule
from edera.daemon import StaticDaemonModule
from edera.helpers import SimpleBox
from edera.lockers import DirectoryLocker
from edera.monitoring import MonitorWatcher
from edera.storages import SQLiteStorage
from edera.testing import TestableTask


class FileSystem(object):

    def __init__(self, root):
        self.root = root

    def check(self, path):
        return os.path.exists(os.path.join(self.root, path))

    def create(self, path):
        if not os.path.exists(self.root):
            os.makedirs(self.root)
        with open(os.path.join(self.root, path), "w"):
            pass


def test_daemon_can_idle():

    class Idle(Task):
        pass

    class MainModule(StaticDaemonModule):
        root = Idle()

    class MyDaemon(Daemon):
        main = MainModule()

    daemon = MyDaemon()
    with pytest.raises(Timer.Timeout):
        daemon.run[Timer(datetime.timedelta(seconds=5))]()


def test_daemon_functions_correctly_in_production_mode(tmpdir):

    class FileExists(Parameterizable, Condition):

        path = Parameter()

        def check(self):
            return fs.check(self.path)

    class CreateFile(Parameterizable, Task):

        path = Parameter()

        def execute(self):
            fs.create(self.path)

        @property
        def target(self):
            return FileExists(path=self.path)

    class SupportModule(StaticDaemonModule):
        root = CreateFile(path="support")

    class PreludeModule(StaticDaemonModule):
        root = CreateFile(path="prelude")

    class MainModule(DaemonModule):

        scheduling = {
            None: DaemonSchedule(building_delay="PT1S", execution_delay="PT1S", executor_count=2),
        }

        def seed(self, now):
            path = "main." + now.astimezone(iso8601.UTC).strftime("%Y-%m-%dT%H:%M:%S")
            return CreateFile(path=path)

    class MyDaemon(Daemon):
        cache = SQLiteStorage(str(tmpdir.join("cache.db")))
        locker = DirectoryLocker(str(tmpdir.join("locks")))
        monitor = SQLiteStorage(str(tmpdir.join("monitor.db")))
        support = SupportModule()
        prelude = PreludeModule()
        main = MainModule()

    fs = FileSystem(str(tmpdir))
    daemon = MyDaemon()
    process = multiprocessing.Process(target=daemon.run)
    process.start()
    time.sleep(30)
    process.terminate()
    process.join(15)
    files = set(path.basename for path in tmpdir.listdir())
    assert "cache.db" in files
    assert "locks" in files
    assert "monitor.db" in files
    assert "support" in files
    assert "prelude" in files
    assert len([name for name in files if name.startswith("main.")]) >= 3
    watcher = MonitorWatcher(MyDaemon.monitor)
    assert len(watcher.load_snapshot_core().states) >= 5


def test_daemon_functions_correctly_in_autotesting_mode(tmpdir):

    class FileExists(Parameterizable, Condition):

        path = Parameter()

        def check(self):
            return fs().check(self.path)

    class CreateFile(Parameterizable, TestableTask):

        path = Parameter()

        def execute(self):
            fs().create(self.path)

        @property
        def target(self):
            return FileExists(path=self.path)

    class PreludeModule(StaticDaemonModule):
        root = CreateFile(path="prelude")

    class MainModule(DaemonModule):

        scheduling = {
            None: DaemonSchedule(building_delay="PT1S", execution_delay="PT1S", executor_count=2),
        }

        def seed(self, now):
            path = "main." + now.astimezone(iso8601.UTC).strftime("%Y-%m-%dT%H:%M:%S")
            return CreateFile(path=path)

    class MyDaemon(Daemon):

        cache = SQLiteStorage(str(tmpdir.join("cache.db")))
        locker = DirectoryLocker(str(tmpdir.join("locks")))
        monitor = SQLiteStorage(str(tmpdir.join("monitor.db")))
        prelude = PreludeModule()
        main = MainModule()
        colorbox = SimpleBox()

        @property
        def autotester(self):
            return MyDaemonAutoTester()

    class MyDaemonAutoTester(DaemonAutoTester):

        box = MyDaemon.colorbox
        registry = MyDaemon.cache

        def finish(self):
            fs().create("TESTED")

    def fs():
        color = MyDaemon.colorbox.get()
        root = str(tmpdir) if color is None else str(tmpdir.join(color))
        return FileSystem(root)

    daemon = MyDaemon()
    process = multiprocessing.Process(target=daemon.run)
    process.start()
    time.sleep(30)
    process.terminate()
    process.join(15)
    files = set(path.basename for path in tmpdir.listdir())
    assert "cache.db" in files
    assert "locks" in files
    assert "monitor.db" in files
    assert "prelude" in files
    assert "4707242e" in files
    assert tmpdir.join("4707242e").listdir()[0].basename == "main.1991-07-26T09:00:00"
    assert "TESTED" in files
    watcher = MonitorWatcher(MyDaemon.monitor)
    assert len(watcher.load_snapshot_core().states) == 2
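
The tests above exercise the full Task/Condition pattern: a task runs until its target condition holds. For quick reference, here is a stripped-down sketch built only from constructs that appear in this file; the Touch task and its /tmp path are invented for illustration.

# Illustrative sketch; reuses only the edera API shown in the tests above.
class Touch(Parameterizable, Task):

    path = Parameter()

    def execute(self):
        with open(self.path, "w"):
            pass


class TouchModule(StaticDaemonModule):
    root = Touch(path="/tmp/edera-touch-example")


class TinyDaemon(Daemon):
    main = TouchModule()


# Bound the run with a timer, as test_daemon_can_idle does above.
try:
    TinyDaemon().run[Timer(datetime.timedelta(seconds=5))]()
except Timer.Timeout:
    pass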
node.py
from uuid import uuid4
import requests
import json
from random import sample
from os.path import exists
from gzip import GzipFile
import time
import threading
import logging

logger = logging.getLogger(__name__)

# number of nodes that each node forwards a message to in the P2P network
ADJENCENT_NODES = 3

from block import Blockchain, Transaction, SignedTransaction, transaction_from_dict, block_from_dict


class SignatureError(Exception):
    """exception raised in case of a bad signature"""
    pass


class OutOfToken(Exception):
    """exception raised when the sender lacks the tokens to cover a transaction"""
    pass


class Node(object):
    """Implement a blockchain node"""

    def __init__(self):
        self.node_identifier = str(uuid4()).replace('-', '')
        self.nodeUrl = None
        self.nodeList = set()
        self.blockchain = Blockchain()

    def set_node_url(self, nodeUrl):
        self.nodeUrl = nodeUrl
        self.nodeList.add(nodeUrl)

    def init_blockchain(self):
        self.blockchain_filename = "blockchain_" + self.nodeUrl.replace(":", "_") + ".gz"
        if exists(self.blockchain_filename):
            logger.info("Loading blockchain from file:" + self.blockchain_filename)
            self.blockchain.chain = []
            for l in GzipFile(self.blockchain_filename, "r"):
                block_dict = json.loads(l.decode().strip())
                self.blockchain.chain.append(block_from_dict(block_dict))
            logger.info("Blockchain loaded")

    def save_block(self, block):
        with GzipFile(self.blockchain_filename, "a") as f:
            f.write(json.dumps(block.to_dict()).encode() + b"\n")

    def save_chain(self, chain):
        with GzipFile(self.blockchain_filename, "w") as f:
            for block in chain:
                f.write(json.dumps(block.to_dict()).encode() + b"\n")
        logger.info("Blockchain saved")

    def register_node(self, registerNodeUrl):
        logger.info(f'Registering node {self.nodeUrl} into {registerNodeUrl}')
        response = requests.post(f"http://{registerNodeUrl}/nodes/add",
                                 headers={"Content-Type": "application/json"},
                                 data=json.dumps({"node": self.nodeUrl}))
        values = response.json()
        self.nodeList = set(values['nodes'])
        logger.info("Connected to nodes:" + ",".join(self.nodeList))
        # simple way to initialise the blockchain
        self.blockchain.chain = []
        self.resolve_conflicts()

    def add_node(self, newNodeUrl):
        logger.info(f'Adding node {newNodeUrl}')
        if newNodeUrl not in self.nodeList:
            self.nodeList.add(newNodeUrl)
            self.broadcast_event({"type": "new_node", "nodeUrl": newNodeUrl}, set([newNodeUrl]))
        return self.nodeList

    def add_nodes(self, addNodeList):
        self.nodeList = self.nodeList.union(addNodeList)

    def parse_transaction_values(self, values):
        trvalues = values['transaction']
        transaction = transaction_from_dict(trvalues)
        signedTransaction = SignedTransaction(transaction, values['signature'], values['public_key'])
        return signedTransaction

    def new_transaction(self, values):
        signedTransaction = self.parse_transaction_values(values)
        if signedTransaction.is_valid():
            balance = self.blockchain.get_user_balance(signedTransaction.transaction.sender)
            if signedTransaction.transaction.amount > balance:
                # the user does not have enough tokens to cover this transaction
                raise OutOfToken
            index = self.blockchain.new_transaction(signedTransaction.transaction)
            logger.info("New transaction added, coming from a client")
            self.broadcast_event({"type": "new_transaction", "transaction": values}, set())
            return index
        else:
            raise SignatureError

    def broadcast_event(self, event, visitedNodes):
        visitedNodes.add(self.nodeUrl)
        targetedNodes = self.nodeList.difference(visitedNodes)
        if len(targetedNodes) > ADJENCENT_NODES:
            # if there are too many nodes, we sample a sublist and let the other nodes do the work
            # (random.sample needs a sequence, so convert the set first)
            targetedNodes = sample(list(targetedNodes), ADJENCENT_NODES)
        newVisitedNodes = visitedNodes.union(targetedNodes)
        if len(targetedNodes) > 0:
            message = json.dumps({"event": event, "nodefrom": self.nodeUrl, "visited_nodes": list(newVisitedNodes)})
            logger.debug("broadcasting message" + message + " to:" + ",".join(targetedNodes))
            # we target only a subset of nodes, expecting the others to broadcast
            # the message to their neighbours
            for node in targetedNodes:
                requests.post(f"http://{node}/broadcast/event",
                              headers={"Content-Type": "application/json"},
                              data=message)

    def received_event(self, event, nodefrom, visitedNodes):
        logger.debug('event received ' + json.dumps(event))
        if event["type"] == "new_node":
            newUrl = event["nodeUrl"]
            self.nodeList.add(newUrl)
            logger.info(f"node {newUrl} added")
            logger.debug("all nodes " + ",".join(self.nodeList))
        elif event["type"] == "new_transaction":
            # we should validate that the transaction is valid to avoid forged transactions
            values = event["transaction"]
            signedTransaction = self.parse_transaction_values(values)
            if signedTransaction.is_valid():
                index = self.blockchain.new_transaction(signedTransaction.transaction)
                logger.info("New transaction added coming from node:%s to block %d" % (nodefrom, index))
            else:
                logger.error("Received an invalid transaction")
        elif event["type"] == "new_block":
            # we should validate that the block is valid to avoid forged blocks
            block = block_from_dict(event["block"])
            if self.blockchain.add_block(block):
                self.save_block(block)
        visitedNodes.add(self.nodeUrl)
        self.broadcast_event(event, visitedNodes)

    def mine(self):
        # We run the proof of work algorithm to get the next proof...
        proof = self.blockchain.proof_of_work()

        # We must receive a reward for finding the proof.
        # The sender is "0" to signify that this node has mined a new coin.
        # self.blockchain.new_transaction(
        #     Transaction(sender="0", recipient=self.node_identifier, amount=1)
        # )

        # Forge the new Block by adding it to the chain
        previous_hash = self.blockchain.last_block.hash()
        block = self.blockchain.new_block(proof, previous_hash)
        logger.info(f"mined block {block.index}")
        self.broadcast_event({"type": "new_block", "block": block.to_dict()}, set())
        return block

    def resolve_conflicts(self):
        """
        This is our consensus algorithm, it resolves conflicts
        by replacing our chain with the longest one in the network.

        :return: True if our chain was replaced, False if not
        """
        logger.info("Resolving conflicts")
        new_chain = None

        # We're only looking for chains longer than ours
        max_length = len(self.blockchain.chain)

        # Grab and verify the chains from all the nodes in our network
        for node in self.nodeList:
            if node != self.nodeUrl:
                response = requests.get(f'http://{node}/chain')
                if response.status_code == 200:
                    length = response.json()['length']
                    chain_dict = response.json()['chain']
                    chain = [block_from_dict(json_block) for json_block in chain_dict]
                    # Check if the length is longer and the chain is valid
                    if not self.blockchain.valid_chain(chain):
                        logger.debug("invalid chain received")
                        continue
                    if length > max_length:
                        max_length = length
                        new_chain = chain

        # Replace our chain if we discovered a new, valid chain longer than ours
        if new_chain:
            logger.info("New chain loaded")
            self.blockchain.chain = new_chain
            self.save_chain(new_chain)
            return True

        return False


class BackgroundMiner(object):
    def __init__(self, node, interval=30):
        self.interval = interval
        thread = threading.Thread(target=self.run, args=(node,))
        thread.daemon = True
        thread.start()

    def run(self, node):
        while True:
            # Mine only when there are pending transactions
            if len(node.blockchain.current_transactions) > 0:
                logger.info("Mining a new block")
                node.mine()
            time.sleep(self.interval)
translator.py
# -*- coding: utf-8 -*- import re import threading import socket import sys import time import os import random import copy import json import argparse import codecs if sys.version_info.major < 3: is_py3 = False reload(sys) sys.setdefaultencoding("utf-8") sys.stdout = codecs.getwriter("utf-8")(sys.stdout) sys.stderr = codecs.getwriter("utf-8")(sys.stderr) from urlparse import urlparse from urllib import urlencode from urllib import quote_plus as url_quote from urllib2 import urlopen from urllib2 import Request from urllib2 import URLError from urllib2 import HTTPError else: is_py3 = True sys.stdout = codecs.getwriter("utf-8")(sys.stdout.buffer) sys.stderr = codecs.getwriter("utf-8")(sys.stderr.buffer) from urllib.parse import urlencode from urllib.parse import quote_plus as url_quote from urllib.parse import urlparse from urllib.request import urlopen from urllib.request import Request from urllib.error import URLError from urllib.error import HTTPError class BaseTranslator(object): def __init__(self, name): self._name = name self._proxy_url = None self._agent = ( "Mozilla/5.0 (X11; Linux x86_64; rv:50.0) Gecko/20100101 Firefox/50.0" ) def request(self, url, data=None, post=False, header=None): if header: header = copy.deepcopy(header) else: header = {} header[ "User-Agent" ] = "Mozilla/5.0 (X11; Linux x86_64) \ AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36" if post: if data: data = urlencode(data).encode("utf-8") else: if data: query_string = urlencode(data) url = url + "?" + query_string data = None req = Request(url, data, header) try: r = urlopen(req, timeout=5) except (URLError, HTTPError, socket.timeout): sys.stderr.write( "Engine %s timed out, please check your network\n" % self._name ) return None if is_py3: charset = r.headers.get_param("charset") or "utf-8" else: charset = r.headers.getparam("charset") or "utf-8" r = r.read().decode(charset) return r def http_get(self, url, data=None, header=None): return self.request(url, data, False, header) def http_post(self, url, data=None, header=None): return self.request(url, data, True, header) def set_proxy(self, proxy_url=None): try: import socks except ImportError: sys.stderr.write("pySocks module should be installed\n") return None try: import ssl ssl._create_default_https_context = ssl._create_unverified_context except Exception: pass self._proxy_url = proxy_url proxy_types = { "http": socks.PROXY_TYPE_HTTP, "socks": socks.PROXY_TYPE_SOCKS4, "socks4": socks.PROXY_TYPE_SOCKS4, "socks5": socks.PROXY_TYPE_SOCKS5, } url_component = urlparse(proxy_url) proxy_args = { "proxy_type": proxy_types[url_component.scheme], "addr": url_component.hostname, "port": url_component.port, "username": url_component.username, "password": url_component.password, } socks.set_default_proxy(**proxy_args) socket.socket = socks.socksocket def test_request(self, test_url): print("test url: %s" % test_url) print(self.request(test_url)) def create_translation(self, sl="auto", tl="auto", text=""): res = {} res["engine"] = self._name res["sl"] = sl # 来源语言 res["tl"] = tl # 目标语言 res["text"] = text # 需要翻译的文本 res["phonetic"] = "" # 音标 res["paraphrase"] = "" # 简单释义 res["explains"] = [] # 分行解释 return res # 翻译结果:需要填充如下字段 def translate(self, sl, tl, text): return self.create_translation(sl, tl, text) def md5sum(self, text): import hashlib m = hashlib.md5() if sys.version_info[0] < 3: if isinstance(text, unicode): # noqa: F821 text = text.encode("utf-8") else: if isinstance(text, str): text = text.encode("utf-8") m.update(text) return 
m.hexdigest() def html_unescape(self, text): # https://stackoverflow.com/questions/2087370/decode-html-entities-in-python-string # Python 3.4+ if sys.version_info[0] >= 3 and sys.version_info[1] >= 4: import html return html.unescape(text) else: try: # Python 2.6-2.7 from HTMLParser import HTMLParser except ImportError: # Python 3 from html.parser import HTMLParser h = HTMLParser() return h.unescape(text) # NOTE: expired class BaicizhanTranslator(BaseTranslator): def __init__(self): super(BaicizhanTranslator, self).__init__("baicizhan") def translate(self, sl, tl, text, options=None): url = "http://mall.baicizhan.com/ws/search" req = {} req["w"] = url_quote(text) resp = self.http_get(url, req, None) if not resp: return None try: obj = json.loads(resp) except: return None res = self.create_translation(sl, tl, text) res["phonetic"] = self.get_phonetic(obj) res["explains"] = self.get_explains(obj) return res def get_phonetic(self, obj): return obj["accent"] if "accent" in obj else "" def get_explains(self, obj): return ["; ".join(obj["mean_cn"].split("\n"))] if "mean_cn" in obj else [] class BingDict(BaseTranslator): def __init__(self): super(BingDict, self).__init__("bing") self._url = "http://bing.com/dict/SerpHoverTrans" self._cnurl = "http://cn.bing.com/dict/SerpHoverTrans" def translate(self, sl, tl, text, options=None): url = self._cnurl if "zh" in tl else self._url url = url + "?q=" + url_quote(text) headers = { "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "Accept-Language": "en-US,en;q=0.5", } resp = self.http_get(url, None, headers) if not resp: return None res = self.create_translation(sl, tl, text) res["phonetic"] = self.get_phonetic(resp) res["explains"] = self.get_explains(resp) return res def get_phonetic(self, html): if not html: return "" m = re.findall(r'<span class="ht_attr" lang=".*?">\[(.*?)\] </span>', html) if not m: return "" return self.html_unescape(m[0].strip()) def get_explains(self, html): if not html: return [] m = re.findall( r'<span class="ht_pos">(.*?)</span><span class="ht_trs">(.*?)</span>', html ) expls = [] for item in m: expls.append("%s %s" % item) return expls class GoogleTranslator(BaseTranslator): def __init__(self): super(GoogleTranslator, self).__init__("google") self._host = "translate.googleapis.com" self._cnhost = "translate.google.cn" def get_url(self, sl, tl, qry): http_host = self._cnhost if "zh" in tl else self._host qry = url_quote(qry) url = ( "https://{}/translate_a/single?client=gtx&sl={}&tl={}&dt=at&dt=bd&dt=ex&" "dt=ld&dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&q={}".format( http_host, sl, tl, qry ) ) return url def translate(self, sl, tl, text, options=None): url = self.get_url(sl, tl, text) resp = self.http_get(url) if not resp: return None try: obj = json.loads(resp) except: return None res = self.create_translation(sl, tl, text) res["paraphrase"] = self.get_paraphrase(obj) res["explains"] = self.get_explains(obj) res["phonetic"] = self.get_phonetic(obj) res["detail"] = self.get_detail(obj) res["alternative"] = self.get_alternative(obj) return res def get_phonetic(self, obj): for x in obj[0]: if len(x) == 4: return x[3] return "" def get_paraphrase(self, obj): paraphrase = "" for x in obj[0]: if x[0]: paraphrase += x[0] return paraphrase def get_explains(self, obj): explains = [] if obj[1]: for x in obj[1]: expl = "[{}] ".format(x[0][0]) for i in x[2]: expl += i[0] + ";" explains.append(expl) return explains def get_detail(self, resp): if len(resp) < 13 or resp[12] is None: return [] result = [] for x in 
resp[12]: result.append("[{}]".format(x[0])) for y in x[1]: result.append("- {}".format(y[0])) if len(y) >= 3: result.append(" * {}".format(y[2])) return result def get_alternative(self, resp): if len(resp) < 6 or resp[5] is None: return [] definition = self.get_paraphrase(resp) result = [] for x in resp[5]: # result.append('- {}'.format(x[0])) for i in x[2]: if i[0] != definition: result.append(" * {}".format(i[0])) return result class HaiciDict(BaseTranslator): def __init__(self): super(HaiciDict, self).__init__("haici") def translate(self, sl, tl, text, options=None): url = "http://dict.cn/mini.php" req = {} req["q"] = url_quote(text) resp = self.http_get(url, req) if not resp: return res = self.create_translation(sl, tl, text) res["phonetic"] = self.get_phonetic(resp) res["explains"] = self.get_explains(resp) return res def get_phonetic(self, html): m = re.findall(r"<span class='p'> \[(.*?)\]</span>", html) return m[0] if m else "" def get_explains(self, html): m = re.findall(r'<div id="e">(.*?)</div>', html) explains = [] for item in m: for e in item.split("<br>"): explains.append(e) return explains # NOTE: deprecated class ICibaTranslator(BaseTranslator): def __init__(self): super(ICibaTranslator, self).__init__("iciba") def translate(self, sl, tl, text, options=None): url = "http://www.iciba.com/index.php" req = {} req["a"] = "getWordMean" req["c"] = "search" req["word"] = url_quote(text) resp = self.http_get(url, req, None) if not resp: return None try: obj = json.loads(resp) obj = obj["baesInfo"]["symbols"][0] except: return None res = self.create_translation(sl, tl, text) res["paraphrase"] = self.get_paraphrase(obj) res["phonetic"] = self.get_phonetic(obj) res["explains"] = self.get_explains(obj) return res def get_paraphrase(self, obj): try: return obj["parts"][0]["means"][0] except: return "" def get_phonetic(self, obj): return obj["ph_en"] if "ph_en" in obj else "" def get_explains(self, obj): parts = obj["parts"] if "parts" in obj else [] explains = [] for part in parts: explains.append(part["part"] + ", ".join(part["means"])) return explains class YoudaoTranslator(BaseTranslator): def __init__(self): super(YoudaoTranslator, self).__init__("youdao") self.url = "https://fanyi.youdao.com/translate_o" self.D = "97_3(jkMYg@T[KZQmqjTK" # 备用 self.D = "n%A-rKaT5fb[Gy?;N5@Tj" def sign(self, text, salt): s = "fanyideskweb" + text + salt + self.D return self.md5sum(s) def translate(self, sl, tl, text, options=None): salt = str(int(time.time() * 1000) + random.randint(0, 10)) sign = self.sign(text, salt) header = { "Cookie": "OUTFOX_SEARCH_USER_ID=-2022895048@10.168.8.76;", "Referer": "http://fanyi.youdao.com/", "User-Agent": "Mozilla/5.0 (Windows NT 6.2; rv:51.0) Gecko/20100101 Firefox/51.0", } data = { "i": url_quote(text), "from": sl, "to": tl, "smartresult": "dict", "client": "fanyideskweb", "salt": salt, "sign": sign, "doctype": "json", "version": "2.1", "keyfrom": "fanyi.web", "action": "FY_BY_CL1CKBUTTON", "typoResult": "true", } resp = self.http_post(self.url, data, header) if not resp: return try: obj = json.loads(resp) except: return None res = self.create_translation(sl, tl, text) res["paraphrase"] = self.get_paraphrase(obj) res["explains"] = self.get_explains(obj) return res def get_paraphrase(self, obj): translation = "" t = obj.get("translateResult") if t: for n in t: part = [] for m in n: x = m.get("tgt") if x: part.append(x) if part: translation += ", ".join(part) return translation def get_explains(self, obj): explains = [] if "smartResult" in obj: smarts = 
obj["smartResult"]["entries"] for entry in smarts: if entry: entry = entry.replace("\r", "") entry = entry.replace("\n", "") explains.append(entry) return explains class TranslateShell(BaseTranslator): def __init__(self): super(TranslateShell, self).__init__("trans") def translate(self, sl, tl, text, options=None): if not options: options = [] if self._proxy_url: options.append("-proxy {}".format(self._proxy_url)) default_opts = [ "-no-ansi", "-no-theme", "-show-languages n", "-show-prompt-message n", "-show-translation-phonetics n", "-hl {}".format(tl), ] options = default_opts + options source_lang = "" if sl == "auto" else sl cmd = "trans {} {}:{} '{}'".format(" ".join(options), source_lang, tl, text) run = os.popen(cmd) lines = [] for line in run.readlines(): line = re.sub(r"[\t\n]", "", line) line = re.sub(r"\v.*", "", line) line = re.sub(r"^\s*", "", line) lines.append(line) res = self.create_translation(sl, tl, text) res["explains"] = lines run.close() return res class SdcvShell(BaseTranslator): def __init__(self): super(SdcvShell, self).__init__("sdcv") def get_dictionary(self, sl, tl, text): """get dictionary of sdcv :sl: source_lang :tl: target_lang :returns: dictionary """ dictionary = "" if sl == "": try: import langdetect except ImportError: sys.stderr.write("langdetect module should be installed\n") return None sl = langdetect.detect(text) if (sl == "en") & (tl == "zh"): dictionary = "朗道英汉字典5.0" elif (sl == "zh_cn") & (tl == "en"): dictionary = "朗道汉英字典5.0" elif (sl == "en") & (tl == "ja"): dictionary = "jmdict-en-ja" elif (sl == "ja") & (tl == "en"): dictionary = "jmdict-ja-en" return dictionary def translate(self, sl, tl, text, options=None): if not options: options = [] if self._proxy_url: options.append("-proxy {}".format(self._proxy_url)) source_lang = "" if sl == "auto" else sl dictionary = self.get_dictionary(source_lang, tl, text) if dictionary == "": default_opts = [] else: default_opts = [" ".join(["-u", dictionary])] options = default_opts + options cmd = "sdcv {} '{}'".format(" ".join(options), text) run = os.popen(cmd) lines = [] for line in run.readlines(): line = re.sub(r"^Found.*", "", line) line = re.sub(r"^-->.*", "", line) line = re.sub(r"^\s*", "", line) line = re.sub(r"^\*", "", line) lines.append(line) res = self.create_translation(sl, tl, text) res["explains"] = lines run.close() return res ENGINES = { "iciba": ICibaTranslator, "bing": BingDict, "baicizhan": BaicizhanTranslator, "haici": HaiciDict, "google": GoogleTranslator, "sdcv": SdcvShell, "trans": TranslateShell, "youdao": YoudaoTranslator, } def main(): parser = argparse.ArgumentParser() parser.add_argument("--engines", nargs="+", required=False, default=["google"]) parser.add_argument("--target_lang", required=False, default="zh") parser.add_argument("--source_lang", required=False, default="en") parser.add_argument("--proxy", required=False) parser.add_argument("--options", type=str, default=None, required=False) parser.add_argument("text", nargs="+", type=str) args = parser.parse_args() text = " ".join(args.text).strip("'").strip('"').strip() text = re.sub(r"([a-z])([A-Z][a-z])", r"\1 \2", text) text = re.sub(r"([a-zA-Z])_([a-zA-Z])", r"\1 \2", text).lower() engines = args.engines to_lang = args.target_lang from_lang = args.source_lang if args.options: options = args.options.split(",") else: options = [] translation = {} translation["text"] = text translation["status"] = 1 translation["results"] = [] def runner(translator): res = translator.translate(from_lang, to_lang, text, options) if res: 
translation["results"].append(copy.deepcopy(res)) else: translation["status"] = 0 threads = [] for e in engines: cls = ENGINES.get(e) if not cls: sys.stderr.write("Invalid engine name %s\n" % e) continue translator = cls() if args.proxy: translator.set_proxy(args.proxy) t = threading.Thread(target=runner, args=(translator,)) threads.append(t) list(map(lambda x: x.start(), threads)) list(map(lambda x: x.join(), threads)) sys.stdout.write(json.dumps(translation)) if __name__ == "__main__": def test0(): t = BaseTranslator("test_proxy") t.set_proxy("http://localhost:8087") t.test_request("https://www.google.com") def test1(): t = BaicizhanTranslator() r = t.translate("", "zh", "naive") print(r) def test2(): t = BingDict() r = t.translate("", "", "naive") print(r) def test3(): gt = GoogleTranslator() r = gt.translate("auto", "zh", "filencodings") print(r) def test4(): t = HaiciDict() r = t.translate("", "zh", "naive") print(r) def test5(): t = ICibaTranslator() r = t.translate("", "", "naive") print(r) def test6(): t = TranslateShell() r = t.translate("auto", "zh", "naive") print(r) def test7(): t = YoudaoTranslator() r = t.translate("auto", "zh", "naive") print(r) # test3() main()
main.py
import sounddevice as sd
import soundfile as sf
from scipy.io.wavfile import write
import re, queue, sys, tempfile, numpy, threading
from os import system, path, remove, listdir

TRANSCRIPT = "transcript.txt"
AUDIO_PATH = "wavs"
TEXT_FILE = path.join("text", "natgeo.txt")
LOG_FILE = "log.txt"

q = queue.Queue()

def callback(indata, frames, time, status):
    if status:
        print(status, file = sys.stderr)
    q.put(indata.copy())

# AI Model must be mono, with a frequency of 22 khz
def recordAudio(i, running):
    with sf.SoundFile(path.join(AUDIO_PATH, str(i) + ".wav"), mode = 'x', samplerate = 22050, channels = 1) as file:
        with sd.InputStream(samplerate = 22050, device = 1, channels = 1, callback = callback):
            while running.is_set():
                file.write(q.get())

def purgeShort():
    wavs = listdir(AUDIO_PATH)
    for i in range(len(wavs)):
        if wavs[i].endswith(".wav"):
            wf = sf.SoundFile(path.join(AUDIO_PATH, wavs[i]))
            tf = open(TRANSCRIPT, 'r+')
            lines = tf.readlines()
            if (wf.frames / wf.samplerate) < 3:
                remove(path.join(AUDIO_PATH, wavs[i]))
                del lines[i]
                print("[!] Removed recording [" + str(i) + "], too short.")
                tf.seek(0)
                tf.truncate()
                tf.writelines(lines)
            wf.close()
            tf.close()

sentences = []
with open(TEXT_FILE, 'r', encoding = "utf8") as f:
    sentences = re.split('(?<=[.!?]) +', ''.join(f.readlines()).replace("\n", " "))
    sentences = [sentences[i] + sentences[i + 1] + " " for i in range(0, len(sentences), 2)]

print("[*] This bot aims to help you more efficiently train your machine learning audio model.")
input("[!] Press enter to start recording...\n")

startIndex = 0
if path.isfile(LOG_FILE):
    with open(LOG_FILE, 'r') as f:
        firstLine = f.readline()
        if firstLine != "":
            answer = input("[!] You have already recorded audio. Do you want to continue from that point or restart? (y/n) ")
            if answer == "y":
                startIndex = int(firstLine)
            elif answer == "n":
                pass
            else:
                print("\nAn unknown answer was given. Exiting...")
                sys.exit()

for i in range(startIndex, len(sentences)):
    try:
        transcript = open(TRANSCRIPT, 'a', encoding = "utf8")
        log = open(LOG_FILE, 'w', encoding = "utf8")
        system("cls")
        print(("#" * 20) + " SENTENCE " + str(i) + " " + ("#" * 20))
        print("\n" + sentences[i] + "\n")
        print("#" * 50)
        print("")
        if path.isfile(path.join(AUDIO_PATH, str(i) + ".wav")):
            remove(path.join(AUDIO_PATH, str(i) + ".wav"))
        running = threading.Event()
        running.set()
        t = threading.Thread(target = recordAudio, args = (i, running,))
        t.start()
        print("[-->] Press enter to continue to next sentence...")
        print("[--X] Or enter 'p' to pause...\n")
        next = input()
        running.clear()
        t.join()
        if next == "p":
            print("\n[!] Paused recording.")
            print("Press enter to continue...")
            input()
        transcript.write(str(path.join(AUDIO_PATH, str(i) + ".wav")) + "|" + sentences[i] + "\n")
        log.write(str(i) + "\n")
    except KeyboardInterrupt:
        print("\n\n[!] Exiting...\n")
        running.clear()
        t.join()
        purgeShort()
        break

transcript.close()
log.close()
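The comment above recordAudio() notes that the target model expects mono audio at 22.05 kHz. A small hedged helper for sanity-checking a finished dataset against that requirement; the function name and report format are illustrative:

import soundfile as sf
from os import path, listdir

def check_dataset(audio_dir="wavs", samplerate=22050, channels=1, min_seconds=3.0):
    """Print every WAV that violates the model's format or length requirements."""
    for name in sorted(listdir(audio_dir)):
        if not name.endswith(".wav"):
            continue
        with sf.SoundFile(path.join(audio_dir, name)) as wf:
            duration = wf.frames / wf.samplerate
            if wf.samplerate != samplerate:
                print(f"[!] {name}: samplerate {wf.samplerate}, expected {samplerate}")
            if wf.channels != channels:
                print(f"[!] {name}: {wf.channels} channels, expected {channels}")
            if duration < min_seconds:
                print(f"[!] {name}: only {duration:.2f}s long")

check_dataset()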
TServer.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

import Queue
import logging
import os
import sys
import threading
import traceback

from thrift.Thrift import TProcessor
from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport


class TServer:
    """Base interface for a server, which must have a serve() method.

    Three constructors for all servers:
    1) (processor, serverTransport)
    2) (processor, serverTransport, transportFactory, protocolFactory)
    3) (processor, serverTransport,
        inputTransportFactory, outputTransportFactory,
        inputProtocolFactory, outputProtocolFactory)
    """

    def __init__(self, *args):
        if (len(args) == 2):
            self.__initArgs__(args[0], args[1],
                              TTransport.TTransportFactoryBase(),
                              TTransport.TTransportFactoryBase(),
                              TBinaryProtocol.TBinaryProtocolFactory(),
                              TBinaryProtocol.TBinaryProtocolFactory())
        elif (len(args) == 4):
            self.__initArgs__(args[0], args[1], args[2], args[2], args[3], args[3])
        elif (len(args) == 6):
            self.__initArgs__(args[0], args[1], args[2], args[3], args[4], args[5])

    def __initArgs__(self, processor, serverTransport,
                     inputTransportFactory, outputTransportFactory,
                     inputProtocolFactory, outputProtocolFactory):
        self.processor = processor
        self.serverTransport = serverTransport
        self.inputTransportFactory = inputTransportFactory
        self.outputTransportFactory = outputTransportFactory
        self.inputProtocolFactory = inputProtocolFactory
        self.outputProtocolFactory = outputProtocolFactory

    def serve(self):
        pass


class TSimpleServer(TServer):
    """Simple single-threaded server that just pumps around one transport."""

    def __init__(self, *args):
        TServer.__init__(self, *args)

    def serve(self):
        self.serverTransport.listen()
        while True:
            client = self.serverTransport.accept()
            itrans = self.inputTransportFactory.getTransport(client)
            otrans = self.outputTransportFactory.getTransport(client)
            iprot = self.inputProtocolFactory.getProtocol(itrans)
            oprot = self.outputProtocolFactory.getProtocol(otrans)
            try:
                while True:
                    self.processor.process(iprot, oprot)
            except TTransport.TTransportException as tx:
                pass
            except Exception as x:
                logging.exception(x)
            itrans.close()
            otrans.close()


class TThreadedServer(TServer):
    """Threaded server that spawns a new thread per each connection."""

    def __init__(self, *args, **kwargs):
        TServer.__init__(self, *args)
        self.daemon = kwargs.get("daemon", False)

    def serve(self):
        self.serverTransport.listen()
        while True:
            try:
                client = self.serverTransport.accept()
                t = threading.Thread(target=self.handle, args=(client,))
                t.setDaemon(self.daemon)
                t.start()
            except KeyboardInterrupt:
                raise
            except Exception as x:
                logging.exception(x)

    def handle(self, client):
        itrans = self.inputTransportFactory.getTransport(client)
        otrans = self.outputTransportFactory.getTransport(client)
        iprot = self.inputProtocolFactory.getProtocol(itrans)
        oprot = self.outputProtocolFactory.getProtocol(otrans)
        try:
            while True:
                self.processor.process(iprot, oprot)
        except TTransport.TTransportException as tx:
            pass
        except Exception as x:
            logging.exception(x)
        itrans.close()
        otrans.close()


class TThreadPoolServer(TServer):
    """Server with a fixed size pool of threads which service requests."""

    def __init__(self, *args, **kwargs):
        TServer.__init__(self, *args)
        self.clients = Queue.Queue()
        self.threads = 10
        self.daemon = kwargs.get("daemon", False)

    def setNumThreads(self, num):
        """Set the number of worker threads that should be created"""
        self.threads = num

    def serveThread(self):
        """Loop around getting clients from the shared queue and process them."""
        while True:
            try:
                client = self.clients.get()
                self.serveClient(client)
            except Exception as x:
                logging.exception(x)

    def serveClient(self, client):
        """Process input/output from a client for as long as possible"""
        itrans = self.inputTransportFactory.getTransport(client)
        otrans = self.outputTransportFactory.getTransport(client)
        iprot = self.inputProtocolFactory.getProtocol(itrans)
        oprot = self.outputProtocolFactory.getProtocol(otrans)
        try:
            while True:
                self.processor.process(iprot, oprot)
        except TTransport.TTransportException as tx:
            pass
        except Exception as x:
            logging.exception(x)
        itrans.close()
        otrans.close()

    def serve(self):
        """Start a fixed number of worker threads and put client into a queue"""
        for i in range(self.threads):
            try:
                t = threading.Thread(target=self.serveThread)
                t.setDaemon(self.daemon)
                t.start()
            except Exception as x:
                logging.exception(x)

        # Pump the socket for clients
        self.serverTransport.listen()
        while True:
            try:
                client = self.serverTransport.accept()
                self.clients.put(client)
            except Exception as x:
                logging.exception(x)


class TForkingServer(TServer):
    """A Thrift server that forks a new process for each request

    This is more scalable than the threaded server as it does not cause
    GIL contention.

    Note that this has different semantics from the threading server.
    Specifically, updates to shared variables will no longer be shared.
    It will also not work on windows.

    This code is heavily inspired by SocketServer.ForkingMixIn in the
    Python stdlib.
    """

    def __init__(self, *args):
        TServer.__init__(self, *args)
        self.children = []

    def serve(self):
        def try_close(file):
            try:
                file.close()
            except IOError as e:
                logging.warning(e, exc_info=True)

        self.serverTransport.listen()
        while True:
            client = self.serverTransport.accept()
            try:
                pid = os.fork()
                if pid:  # parent
                    # add before collect, otherwise you race w/ waitpid
                    self.children.append(pid)
                    self.collect_children()
                    # Parent must close socket or the connection may not get
                    # closed promptly
                    itrans = self.inputTransportFactory.getTransport(client)
                    otrans = self.outputTransportFactory.getTransport(client)
                    try_close(itrans)
                    try_close(otrans)
                else:
                    itrans = self.inputTransportFactory.getTransport(client)
                    otrans = self.outputTransportFactory.getTransport(client)
                    iprot = self.inputProtocolFactory.getProtocol(itrans)
                    oprot = self.outputProtocolFactory.getProtocol(otrans)
                    ecode = 0
                    try:
                        try:
                            while True:
                                self.processor.process(iprot, oprot)
                        except TTransport.TTransportException as tx:
                            pass
                        except Exception as e:
                            logging.exception(e)
                            ecode = 1
                    finally:
                        try_close(itrans)
                        try_close(otrans)
                    os._exit(ecode)
            except TTransport.TTransportException as tx:
                pass
            except Exception as x:
                logging.exception(x)

    def collect_children(self):
        while self.children:
            try:
                pid, status = os.waitpid(0, os.WNOHANG)
            except os.error:
                pid = None
            if pid:
                self.children.remove(pid)
            else:
                break
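A typical wiring of the server classes above, assuming MyService is a thrift-generated module and MyHandler implements MyService.Iface (both names are hypothetical; this file only supplies the servers):

from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol

from TServer import TThreadPoolServer   # the module above
from myservice import MyService         # assumed: output of `thrift --gen py`
from handler import MyHandler           # assumed: application handler class

processor = MyService.Processor(MyHandler())
transport = TSocket.TServerSocket(port=9090)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()

# The 4-argument constructor reuses one transport factory and one protocol
# factory for both the input and output sides.
server = TThreadPoolServer(processor, transport, tfactory, pfactory)
server.setNumThreads(16)
server.serve()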
parallel_util.py
#!/usr/bin/python # -*- coding: utf-8 -*- from multiprocessing.pool import Pool from typing import Dict, Tuple, List, Set, Union, Optional, Callable, Generic, TypeVar from multiprocessing import Process, Queue, get_start_method, set_start_method import time import os from nose.tools import eq_ from pyutils.progress_utils import Timer from semantic_modeling.config import get_logger """Provide an easy and quick way to test if parallel is worth to do (overhead cost of serialize/deserialize arguments)""" logger = get_logger("default") def get_args_size(*args) -> int: total_element = 0 for arg in args: if isinstance(arg, (list, dict, tuple)): total_element += len(arg) else: total_element += 1 return total_element def minimal_computing_func(queue, *args): """A function that doesn't do anything but use to test overhead cost of multiprocessing""" queue.put(get_args_size(*args)) def zero_computing_func(*args): return len(args) def get_batchs(n_elements: int, n_batch: int) -> List[Tuple[int, int]]: batch_size = int(n_elements / n_batch) batchs = [(i * batch_size, (i + 1) * batch_size) for i in range(n_batch)] batchs[-1] = (batchs[-1][0], n_elements) return batchs def benchmark_overhead_time(get_args: Callable[[], Tuple]): """Note: use a function to create arguments instead of passing through function arguments, because when we using fork, new processes also inherit arguments through copy not pickling. """ queue = Queue() timer = Timer("ms").start() p = Process(target=minimal_computing_func, args=(queue, 'peter', "john")) p.start() eq_(queue.get(), 2) p.join() logger.info("Default overhead: %s", timer.lap().get_total_time()) default_time = timer.total_time args = get_args() arg_size = get_args_size(*args) assert queue.empty() timer.reset() p = Process(target=minimal_computing_func, args=tuple([queue] + list(args))) p.start() eq_(queue.get(), arg_size) p.join() logger.info("Default overhead + serialize input takes: %s", timer.lap().get_total_time()) logger.info("=> Overhead of sending input: %s ms", round((timer.total_time - default_time) * 1000, 4)) def benchmark_sending_time(get_args: Callable[[], Tuple]): def test(inqueue, outqueue): outqueue.put(len(inqueue.get())) inqueue = Queue() outqueue = Queue() args = get_args() n_args = len(args) timer = Timer("ms").start() inqueue.put(list(range(5))) p = Process(target=test, args=(inqueue, outqueue)) p.start() res = outqueue.get() p.join() logger.info("Default time: %s", timer.lap().get_total_time()) assert res == 5 assert inqueue.empty() and outqueue.empty() default_time = timer.total_time timer.reset() inqueue.put(args) p = Process(target=test, args=(inqueue, outqueue)) p.start() res = outqueue.get() p.join() logger.info("Default time + sending input: %s", timer.lap().get_total_time()) assert res == n_args logger.info("=> Sending input: %s ms", round((timer.total_time - default_time) * 1000, 4)) def benchmark_overhead_pool_map_time(get_args: Callable[[], List[Tuple]], n_args: int): """Benchmark overhead when using pool map func (only sending not receiving)""" pool_args = [(i, ) for i in range(n_args)] timer = Timer("ms").start() with Pool() as p: p.map(zero_computing_func, pool_args) logger.info("Default overhead: %s", timer.lap().get_total_time()) default_time = timer.total_time args = get_args() timer.reset() with Pool() as p: result = p.map(zero_computing_func, args) logger.info("Default overhead + serialize input takes: %s", timer.lap().get_total_time()) logger.info("=> Overhead of sending input: %s ms", round((timer.total_time - default_time) * 
1000, 4)) def benchmark_overhead_multiprocess_map_time(get_args: Callable[[], List[Tuple]], n_args: int): """Benchmark overhead when using multiprocessing to simulate pool map (fork)""" foo_args = [(i, ) for i in range(n_args)] n_cpu = os.cpu_count() timer = Timer("ms").start() ps = [Process(target=zero_computing_func, args=(foo_args[start:end], )) for start, end in get_batchs(n_args, n_cpu)] for p in ps: p.start() for p in ps: p.join() logger.info("Default overhead: %s", timer.lap().get_total_time()) default_time = timer.total_time args = get_args() timer.reset() ps = [Process(target=zero_computing_func, args=(args[start:end], )) for start, end in get_batchs(n_args, n_cpu)] for p in ps: p.start() for p in ps: p.join() logger.info("Default overhead + serialize input takes: %s", timer.lap().get_total_time()) logger.info("=> Overhead of sending input: %s ms", round((timer.total_time - default_time) * 1000, 4)) def sequential_map(func, args, n_process=None, unpack=False): if unpack: return [func(*arg) for arg in args] else: return [func(arg) for arg in args] def parallel_map_unpack_func(func, queue: Queue, idx: int, batch_args: List[Tuple]) -> None: queue.put((idx, [func(*args) for args in batch_args])) def profiled_parallel_map_unpack_func(func, queue: Queue, idx: int, batch_args: List[Tuple]) -> None: begin = time.time() result = [func(*args) for args in batch_args] exec_time = time.time() - begin queue.put((idx, exec_time, result)) def parallel_map(func, args, n_process=None, unpack=False): if n_process is None: n_process = os.cpu_count() queue = Queue() batch_args = [args[start:end] for start, end in get_batchs(len(args), n_process)] if unpack: processes = [ Process(target=parallel_map_unpack_func, args=( func, queue, idx, batch_arg, )) for idx, batch_arg in enumerate(batch_args) ] else: assert False for p in processes: p.start() results = {} for i in range(n_process): idx, res_array = queue.get() results[idx] = res_array for p in processes: p.join() return [el for idx, res_array in sorted(results.items(), key=lambda x: x[0]) for el in res_array] def parallel_pool_map(func, args, n_process=None): if n_process is None: n_process = os.cpu_count() with Pool(n_process) as p: return p.map(func, args) def profiled_parallel_map(func, args, n_process=None, time_unit: str = "ms", unpack=False): if n_process is None: n_process = os.cpu_count() queue = Queue() batch_args = [args[start:end] for start, end in get_batchs(len(args), n_process)] timer = Timer(time_unit).start() if unpack: processes = [ Process(target=profiled_parallel_map_unpack_func, args=( func, queue, idx, batch_arg, )) for idx, batch_arg in enumerate(batch_args) ] else: assert False for p in processes: p.start() logger.info("Start process takes: %s", timer.lap().get_total_time()) print(len(processes)) total_exec_time = 0 results = {} for i in range(n_process): idx, exec_time, res_array = queue.get() results[idx] = res_array total_exec_time += exec_time for p in processes: p.join() result = [el for idx, res_array in sorted(results.items(), key=lambda x: x[0]) for el in res_array] logger.info("Total mutli-process time: %s", timer.lap().get_total_time()) total_exec_time = round(total_exec_time * timer.time_unit / n_process, 4) logger.info("Total computing-time: %s %s", total_exec_time, time_unit) logger.info("=> Overhead: %s %s", timer.total_time * timer.time_unit - total_exec_time, time_unit) return result def profiled_parallel_pool_map(func, args, n_process=None, time_unit: str = "ms"): if n_process is None: n_process = os.cpu_count() 
timer = Timer(time_unit).start() with Pool(n_process) as p: results = p.map(func, args) logger.info("Total time: %s", timer.lap().get_total_time()) return results T = TypeVar('T') class AsyncResult(Generic[T]): def __init__(self, val: T): self.val: T = val def get(self) -> T: return self.val class FakePool(object): def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): pass def apply_async(self, func, args): return AsyncResult(func(*args)) def get_pool(n_process: int): if n_process == 1: return FakePool() return Pool(n_process) def func(x, arrays): result = 0 for i in arrays: result += x + i return result def func2(args): x, arrays = args result = 0 for i in arrays: result += x + i return result if __name__ == '__main__': set_start_method("fork") logger.info("Start method: %s", get_start_method()) arrays = [i for i in range(5000)] # create_args = lambda: ([{"number": i} for i in range(5000000)],) # create_args = lambda: [(i, arrays) for i in arrays] # create_args = lambda: [(i, list(range(5000))) for i in range(5000)] create_args = lambda: [i for i in range(10000000)] # benchmark_overhead_time(create_args) benchmark_sending_time(create_args) # benchmark_overhead_multiprocess_map_time(create_args, n_args=len(arrays)) # benchmark_overhead_pool_map_time(create_args, n_args=len(arrays)) # test execution # timer = Timer("ms").start() # n_iter = 5 # for i in range(n_iter): # parallel_map(func, create_args()) # timer.lap() # logger.info("Multi-process map takes: %s", timer.get_average_time()) # timer.reset() # for i in range(n_iter): # profiled_parallel_map(func, create_args()) # timer.lap() # logger.info("Multi-process map takes: %s", timer.get_average_time()) # timer.reset() # parallel_map(func, create_args()) # logger.info("Multi-process map takes: %s", timer.lap().get_total_time()) # timer.reset() # for i in range(n_iter): # parallel_pool_map(func2, create_args()) # timer.lap() # logger.info("Pool map takes: %s", timer.get_average_time()) # # timer.reset() # for i in range(n_iter): # list(map(func2, create_args())) # timer.lap() # logger.info("Sequential map takes: %s", timer.get_average_time())
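Note that parallel_map() and profiled_parallel_map() above only implement the unpack=True path (the else branch is a deliberate assert False placeholder), so each argument must be a tuple. A minimal usage sketch under that assumption, relying on the fork start method that the module's own __main__ block selects:

from parallel_util import parallel_map, sequential_map

def add(x, ys):
    return sum(y + x for y in ys)

if __name__ == "__main__":
    args = [(i, list(range(100))) for i in range(1000)]

    # Both helpers preserve the order of the input arguments.
    seq = sequential_map(add, args, unpack=True)
    par = parallel_map(add, args, unpack=True)  # one batch per CPU core by default
    assert seq == par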
test_ncpapi.py
#!/usr/bin/python3 # -*- coding: utf-8 -*- # ------------------------------------------------------------------- # Purpose: Naver Cloud Platform APIs Test # Author: Ho-Jung Kim (godmode2k@hotmail.com) # Filename: test_ncpapi.py # Date: Since October 25, 2021 # # # Reference: # - https://api.ncloud-docs.com/beta/docs/common-ncpapi# # - https://api.ncloud-docs.com/beta/docs/management-monitoring-getmetricstatisticlist # - https://github.com/NaverCloudPlatform/ncloud-sdk-python # # # Note: # - USE THIS AT YOUR OWN RISK # # # License: # #* #* Copyright (C) 2021 Ho-Jung Kim (godmode2k@hotmail.com) #* #* Licensed under the Apache License, Version 2.0 (the "License"); #* you may not use this file except in compliance with the License. #* You may obtain a copy of the License at #* #* http://www.apache.org/licenses/LICENSE-2.0 #* #* Unless required by applicable law or agreed to in writing, software #* distributed under the License is distributed on an "AS IS" BASIS, #* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #* See the License for the specific language governing permissions and #* limitations under the License. #* # ------------------------------------------------------------------- import sys import os import hashlib import hmac import base64 import requests import time import json from datetime import datetime, timezone, timedelta import threading from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer ''' $ cat $HOME/.ncloud/configure NCLOUD_ACCESS_KEY_ID={access-key} NCLOUD_SECRET_KEY={secret-key} ncloud_access_key_id={access-key} ncloud_secret_access_key={secret-key} https://ncloud.apigw.ntruss.com/monitoring/v2/getMetricStatisticList?responseFormatType=json POST /monitoring/v2/getMetricStatisticList?responseFormatType=json HTTP/1.1 Host: 127.0.0.1:9999 Accept-Encoding: identity Content-Length: 142 Content-Type: application/x-www-form-urlencoded User-Agent: Swagger-Codegen/1.1.6/python x-ncp-apigw-timestamp: {timestamp} x-ncp-iam-access-key: {access-key} x-ncp-apigw-signature-v1: {sign-key} instanceNoList.1=<instance number>&metricName=DiskWriteBytes&startTime=2021-10-28T00%3A00%3A00%2B0900&endTime=2021-10-28T02%3A00%3A00%2B0900&period=1800 ''' # ------------------------------------------------------ # NCP API # ------------------------------------------------------ class CNCP_API_Monitor(): def __init__(self): pass # ------------------------------------ # ------------------------------------ def make_signature(self, timestamp, access_key, secret_key): method = "POST" uri = "/monitoring/v2/getMetricStatisticList?responseFormatType=json" #uri = "/monitoring/v2/getListMetrics?responseFormatType=json" message = method + " " + uri + "\n" + timestamp + "\n" + access_key print( "message = " + message ) message = bytes(message, 'UTF-8') signingKey = base64.b64encode(hmac.new(secret_key, message, digestmod=hashlib.sha256).digest()) return signingKey # ------------------------------------ # ------------------------------------ def ncp_api_v2_getMetricStatisticList(self, ACCESS_KEY, SECRET_KEY, INSTANCE_NO, METRIC_NAME, TIME_START, TIME_END): TIMESTAMP = int(time.time() * 1000) TIMESTAMP = str( TIMESTAMP ) # test ''' timestamp = int(time.time() * 1000) timestamp = time.time() utc = datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S') print( "UTC = " + str(utc) ) timestamp = str( timestamp ) #timestamp = datetime.now() + timedelta(hours=9) #timestamp = timestamp.timestamp() #dt_9 = datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S') #print( 
"UTC+9 dt_9 = " + str(dt_9) ) ''' # test ''' #timestamp = time.time() #tz = timezone(timedelta(hours=9)) #dt_9 = datetime.fromtimestamp(timestamp, tz) #dt_9 = datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S.%s') #timestamp = datetime.timestamp( datetime.strptime(dt_9, '%Y-%m-%d %H:%M:%S') ) #print( "dt_9 = " + str(timestamp) ) #timestamp = datetime.now(timezone.utc).timestamp() ''' # DateTime format # startTime: "2021-10-27T00:00:00+0900" # endTime: "2021-10-28T00:00:00+0900" SIGNKEY = self.make_signature( TIMESTAMP, ACCESS_KEY, SECRET_KEY ) print( SIGNKEY ) SIGNKEY = str( SIGNKEY.decode("utf-8") ) print( "sign_key = " + SIGNKEY ) print( "timestamp = " + TIMESTAMP ) print( "access_key = " + ACCESS_KEY ) URL = "https://ncloud.apigw.ntruss.com/monitoring/v2/getMetricStatisticList?responseFormatType=json" #URL = "https://ncloud.apigw.ntruss.com/monitoring/v2/getListMetrics?responseFormatType=json" #headers = { 'Content-Type':'application/json; charset=utf-8; application/x-www-form-urlencoded', #headers = { 'Content-Type':'application/json; charset=utf-8', headers = { 'Content-Type':'application/x-www-form-urlencoded', "x-ncp-apigw-timestamp": TIMESTAMP, #"x-ncp-apigw-api-key": "", "x-ncp-iam-access-key": ACCESS_KEY, #"x-ncp-apigw-signature-v2": SIGNKEY "x-ncp-apigw-signature-v1": SIGNKEY } data = { "instanceNoList.1": INSTANCE_NO, "metricName": METRIC_NAME, #"startTime": "2021-10-27T00:00:00+0900", #"endTime": "2021-10-28T00:00:00+0900", "startTime": TIME_START, "endTime": TIME_END, "period": 1800 } #data = { "instanceNo": INSTANCE_NO } #print( headers ) res = requests.post( URL, data = data, headers = headers ) #print( "res code = " + str(res.status_code) ) #print( "response = \n" + res.text ) return res # DO NOT USE THIS: # example ''' curl -i -X GET \ -H "x-ncp-apigw-timestamp:1505290625682" \ -H "x-ncp-iam-access-key:D78BB444D6D3C84CA38D" \ -H "x-ncp-apigw-signature-v2:WTPItrmMIfLUk/UyUIyoQbA/z5hq9o3G8eQMolUzTEa=" \ 'https://example.apigw.ntruss.com/photos/puppy.jpg?query1=&query2' ''' # ------------------------------------ # Test # ------------------------------------ def test(self): #INSTANCE_NO = "<instance number>" INSTANCE_NO = "1234" #METRIC_NAME = "DiskWriteBytes" METRIC_NAME = "CPUUtilization" # DateTime format #start: "2021-10-27T00:00:00+0900" #end: "2021-10-28T00:00:00+0900" TIME_START = "2021-10-27T00:00:00+0900" TIME_END = "2021-10-28T00:00:00+0900" #timestamp = time.time() #TIME_START = datetime.fromtimestamp(timestamp).strftime('%Y-%m-%dT%H:%M:%S+0900') #TIME_END = datetime.fromtimestamp(timestamp).strftime('%Y-%m-%dT%H:%M:%S+0900') # Keys ACCESS_KEY = "<ACCESS_KEY>" SECRET_KEY = "<SECRET_KEY>" SECRET_KEY = bytes(SECRET_KEY, 'UTF-8') self.ncp_api_v2_getMetricStatisticList( ACCESS_KEY, SECRET_KEY, INSTANCE_NO, METRIC_NAME, TIME_START, TIME_END ) # ------------------------------------------------------ # JSON-RPC Server # # $ pip install jsonrpclib # # test # $ curl -X POST --data '{"jsonrpc":"2.0","method":"ncp_monitor","params":[],"id":0}' -H "Content-Type: application/json" http://127.0.0.1:8888/ # ------------------------------------------------------ class CJSONRPCServer(): def __init__(self): pass def rpc_call_test(self): result = "test_call" #return json.dumps(result).encode("utf-8") return '{"result": "' + result + '"}' def rpc_call_ncp_monitor(self): #INSTANCE_NO = "<instance number>" INSTANCE_NO = "1234" #METRIC_NAME = "DiskWriteBytes" METRIC_NAME = "CPUUtilization" # DateTime format #start: "2021-10-27T00:00:00+0900" #end: "2021-10-28T00:00:00+0900" 
TIME_START = "2021-10-27T00:00:00+0900" TIME_END = "2021-10-28T00:00:00+0900" #timestamp = time.time() #TIME_START = datetime.fromtimestamp(timestamp).strftime('%Y-%m-%dT%H:%M:%S+0900') #TIME_END = datetime.fromtimestamp(timestamp).strftime('%Y-%m-%dT%H:%M:%S+0900') #print( utc ) # Keys ACCESS_KEY = "<ACCESS_KEY>" SECRET_KEY = "<SECRET_KEY>" SECRET_KEY = bytes(SECRET_KEY, 'UTF-8') monitor = CNCP_API_Monitor() result = monitor.ncp_api_v2_getMetricStatisticList( ACCESS_KEY, SECRET_KEY, INSTANCE_NO, METRIC_NAME, TIME_START, TIME_END ) #print( "res code = " + str(res.status_code) ) #print( "response = \n" + res.text ) res = {} res["code"] = str( result.status_code ) res["response"] = json.loads( str(result.text ) ) #result = {} #result["result"] = res #json.dumps(result).encode("utf8") #return result return res def run_jsonrpc_server(self): print( "JSON-RPC Server starting..." ) server = SimpleJSONRPCServer( ('0.0.0.0', 8888) ) server.register_function( self.rpc_call_test, "test" ) server.register_function( self.rpc_call_ncp_monitor, "ncp_monitor" ) #server.register_function( lambda x, y: x + y, 'add' ) print( "running..." ) server.serve_forever() # ------------------------------------------------------ # ------------------------------------------------------ if __name__ == "__main__": # JSON-RPC rpc_server = CJSONRPCServer() th_jsonrpc = threading.Thread( target = rpc_server.run_jsonrpc_server ) th_jsonrpc.start() th_jsonrpc.join()
manager.py
#!/usr/bin/env python3 import os import time import sys import fcntl import errno import signal import shutil import subprocess import datetime import textwrap from typing import Dict, List from selfdrive.swaglog import cloudlog, add_logentries_handler from common.op_params import opParams from common.basedir import BASEDIR from common.hardware import HARDWARE, ANDROID, PC WEBCAM = os.getenv("WEBCAM") is not None sys.path.append(os.path.join(BASEDIR, "pyextra")) os.environ['BASEDIR'] = BASEDIR TOTAL_SCONS_NODES = 1040 prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt')) kill_updated = opParams().get('update_behavior').lower().strip() == 'off' or os.path.exists('/data/no_ota_updates') # Create folders needed for msgq try: os.mkdir("/dev/shm") except FileExistsError: pass except PermissionError: print("WARNING: failed to make /dev/shm") if ANDROID: os.chmod("/dev/shm", 0o777) def unblock_stdout(): # get a non-blocking stdout child_pid, child_pty = os.forkpty() if child_pid != 0: # parent # child is in its own process group, manually pass kill signals signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT)) signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM)) fcntl.fcntl(sys.stdout, fcntl.F_SETFL, fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) while True: try: dat = os.read(child_pty, 4096) except OSError as e: if e.errno == errno.EIO: break continue if not dat: break try: sys.stdout.write(dat.decode('utf8')) except (OSError, IOError, UnicodeDecodeError): pass # os.wait() returns a tuple with the pid and a 16 bit value # whose low byte is the signal number and whose high byte is the exit satus exit_status = os.wait()[1] >> 8 os._exit(exit_status) if __name__ == "__main__": unblock_stdout() from common.spinner import Spinner from common.text_window import TextWindow import importlib import traceback from multiprocessing import Process # Run scons spinner = Spinner() spinner.update("0") if __name__ != "__main__": spinner.close() if not prebuilt: for retry in [True, False]: # run scons env = os.environ.copy() env['SCONS_PROGRESS'] = "1" env['SCONS_CACHE'] = "1" nproc = os.cpu_count() j_flag = "" if nproc is None else "-j%d" % (nproc - 1) scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE) compile_output = [] # Read progress from stderr and update spinner while scons.poll() is None: try: line = scons.stderr.readline() # type: ignore if line is None: continue line = line.rstrip() prefix = b'progress: ' if line.startswith(prefix): i = int(line[len(prefix):]) if spinner is not None: spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES))) elif len(line): compile_output.append(line) print(line.decode('utf8', 'replace')) except Exception: pass if scons.returncode != 0: # Read remaining output r = scons.stderr.read().split(b'\n') # type: ignore compile_output += r if retry: if not os.getenv("CI"): print("scons build failed, cleaning in") for i in range(3, -1, -1): print("....%d" % i) time.sleep(1) subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env) shutil.rmtree("/tmp/scons_cache", ignore_errors=True) shutil.rmtree("/data/scons_cache", ignore_errors=True) else: print("scons build failed after retry") sys.exit(1) else: # Build failed log errors errors = [line.decode('utf8', 'replace') for line in compile_output if any([err in line for err in [b'error: ', b'not found, needed by target']])] error_s = "\n".join(errors) add_logentries_handler(cloudlog) cloudlog.error("scons build 
failed\n" + error_s) # Show TextWindow error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors]) with TextWindow("openpilot failed to build\n \n" + error_s) as t: t.wait_for_exit() exit(1) else: break import cereal import cereal.messaging as messaging from common.params import Params import selfdrive.crash as crash from selfdrive.registration import register from selfdrive.version import version, dirty from selfdrive.loggerd.config import ROOT from selfdrive.launcher import launcher from common.apk import update_apks, pm_apply_packages, start_offroad ThermalStatus = cereal.log.ThermalData.ThermalStatus # comment out anything you don't want to run managed_processes = { "thermald": "selfdrive.thermald.thermald", "uploader": "selfdrive.loggerd.uploader", "deleter": "selfdrive.loggerd.deleter", "controlsd": "selfdrive.controls.controlsd", "plannerd": "selfdrive.controls.plannerd", "radard": "selfdrive.controls.radard", "dmonitoringd": "selfdrive.monitoring.dmonitoringd", "ubloxd": ("selfdrive/locationd", ["./ubloxd"]), "loggerd": ("selfdrive/loggerd", ["./loggerd"]), "logmessaged": "selfdrive.logmessaged", "locationd": "selfdrive.locationd.locationd", "tombstoned": "selfdrive.tombstoned", "logcatd": ("selfdrive/logcatd", ["./logcatd"]), "proclogd": ("selfdrive/proclogd", ["./proclogd"]), "boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly "pandad": "selfdrive.pandad", "ui": ("selfdrive/ui", ["./ui"]), "calibrationd": "selfdrive.locationd.calibrationd", "paramsd": "selfdrive.locationd.paramsd", "camerad": ("selfdrive/camerad", ["./camerad"]), "sensord": ("selfdrive/sensord", ["./sensord"]), "clocksd": ("selfdrive/clocksd", ["./clocksd"]), "gpsd": ("selfdrive/sensord", ["./gpsd"]), "updated": "selfdrive.updated", "dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]), "modeld": ("selfdrive/modeld", ["./modeld"]), "rtshield": "selfdrive.rtshield", "lanespeedd": "selfdrive.controls.lib.lane_speed", } daemon_processes = { "manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"), } running: Dict[str, Process] = {} def get_running(): return running # due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption unkillable_processes = ['camerad'] # processes to end with SIGINT instead of SIGTERM interrupt_processes: List[str] = [] # processes to end with SIGKILL instead of SIGTERM kill_processes = ['sensord'] persistent_processes = [ 'thermald', 'logmessaged', 'ui', 'uploader', 'deleter', ] if not PC: persistent_processes += [ # 'updated', 'logcatd', 'tombstoned', 'sensord', ] if not kill_updated: persistent_processes.append('updated') car_started_processes = [ 'controlsd', 'plannerd', 'loggerd', 'radard', 'calibrationd', 'paramsd', 'camerad', 'proclogd', 'locationd', 'clocksd', 'lanespeedd', ] driver_view_processes = [ 'camerad', 'dmonitoringd', 'dmonitoringmodeld' ] if WEBCAM: car_started_processes += [ 'dmonitoringd', 'dmonitoringmodeld', ] if not PC: car_started_processes += [ 'ubloxd', 'dmonitoringd', 'dmonitoringmodeld', ] if ANDROID: car_started_processes += [ 'gpsd', 'rtshield', ] # starting dmonitoringmodeld when modeld is initializing can sometimes \ # result in a weird snpe state where dmon constantly uses more cpu than normal. 
car_started_processes += ['modeld'] def register_managed_process(name, desc, car_started=False): global managed_processes, car_started_processes, persistent_processes print("registering %s" % name) managed_processes[name] = desc if car_started: car_started_processes.append(name) else: persistent_processes.append(name) # ****************** process management functions ****************** def nativelauncher(pargs, cwd): # exec the process os.chdir(cwd) # because when extracted from pex zips permissions get lost -_- os.chmod(pargs[0], 0o700) os.execvp(pargs[0], pargs) def start_managed_process(name): if name in running or name not in managed_processes: return proc = managed_processes[name] if isinstance(proc, str): cloudlog.info("starting python %s" % proc) running[name] = Process(name=name, target=launcher, args=(proc,)) else: pdir, pargs = proc cwd = os.path.join(BASEDIR, pdir) cloudlog.info("starting process %s" % name) running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd)) running[name].start() def start_daemon_process(name): params = Params() proc, pid_param = daemon_processes[name] pid = params.get(pid_param, encoding='utf-8') if pid is not None: try: os.kill(int(pid), 0) with open(f'/proc/{pid}/cmdline') as f: if proc in f.read(): # daemon is running return except (OSError, FileNotFoundError): # process is dead pass cloudlog.info("starting daemon %s" % name) proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn stdin=open('/dev/null', 'r'), stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'), preexec_fn=os.setpgrp) params.put(pid_param, str(proc.pid)) def prepare_managed_process(p): proc = managed_processes[p] if isinstance(proc, str): # import this python cloudlog.info("preimporting %s" % proc) importlib.import_module(proc) elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")): # build this process cloudlog.info("building %s" % (proc,)) try: subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0])) except subprocess.CalledProcessError: # make clean if the build failed cloudlog.warning("building %s failed, make clean" % (proc, )) subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0])) subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0])) def join_process(process, timeout): # Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382 # We have to poll the exitcode instead t = time.time() while time.time() - t < timeout and process.exitcode is None: time.sleep(0.001) def kill_managed_process(name): if name not in running or name not in managed_processes: return cloudlog.info("killing %s" % name) if running[name].exitcode is None: if name in interrupt_processes: os.kill(running[name].pid, signal.SIGINT) elif name in kill_processes: os.kill(running[name].pid, signal.SIGKILL) else: running[name].terminate() join_process(running[name], 5) if running[name].exitcode is None: if name in unkillable_processes: cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name) join_process(running[name], 15) if running[name].exitcode is None: cloudlog.critical("unkillable process %s failed to die!" 
% name) os.system("date >> /sdcard/unkillable_reboot") HARDWARE.reboot() raise RuntimeError else: cloudlog.info("killing %s with SIGKILL" % name) os.kill(running[name].pid, signal.SIGKILL) running[name].join() cloudlog.info("%s is dead with %d" % (name, running[name].exitcode)) del running[name] def cleanup_all_processes(signal, frame): cloudlog.info("caught ctrl-c %s %s" % (signal, frame)) if ANDROID: pm_apply_packages('disable') for name in list(running.keys()): kill_managed_process(name) cloudlog.info("everything is dead") def send_managed_process_signal(name, sig): if name not in running or name not in managed_processes or \ running[name].exitcode is not None: return cloudlog.info(f"sending signal {sig} to {name}") os.kill(running[name].pid, sig) # ****************** run loop ****************** def manager_init(should_register=True): if should_register: reg_res = register() if reg_res: dongle_id = reg_res else: raise Exception("server registration failed") else: dongle_id = "c"*16 # set dongle id cloudlog.info("dongle id is " + dongle_id) os.environ['DONGLE_ID'] = dongle_id cloudlog.info("dirty is %d" % dirty) if not dirty: os.environ['CLEAN'] = '1' cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True) crash.bind_user(id=dongle_id) crash.bind_extra(version=version, dirty=dirty, is_eon=True) os.umask(0) try: os.mkdir(ROOT, 0o777) except OSError: pass # ensure shared libraries are readable by apks if ANDROID: os.chmod(BASEDIR, 0o755) os.chmod(os.path.join(BASEDIR, "cereal"), 0o755) os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755) def manager_thread(): # now loop thermal_sock = messaging.sub_sock('thermal') cloudlog.info("manager start") cloudlog.info({"environ": os.environ}) # save boot log subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd")) params = Params() # start daemon processes for p in daemon_processes: start_daemon_process(p) # start persistent processes for p in persistent_processes: start_managed_process(p) # start offroad if ANDROID: pm_apply_packages('enable') start_offroad() if os.getenv("NOBOARD") is None: start_managed_process("pandad") if os.getenv("BLOCK") is not None: for k in os.getenv("BLOCK").split(","): del managed_processes[k] started_prev = False logger_dead = False while 1: msg = messaging.recv_sock(thermal_sock, wait=True) if msg.thermal.freeSpace < 0.05: logger_dead = True run_all = False if msg.thermal.started or run_all: for p in car_started_processes: if p == "loggerd" and logger_dead: kill_managed_process(p) else: start_managed_process(p) else: logger_dead = False driver_view = params.get("IsDriverViewEnabled") == b"1" # TODO: refactor how manager manages processes for p in reversed(car_started_processes): if p not in driver_view_processes or not driver_view: kill_managed_process(p) for p in driver_view_processes: if driver_view: start_managed_process(p) else: kill_managed_process(p) # trigger an update after going offroad if started_prev: send_managed_process_signal("updated", signal.SIGHUP) started_prev = msg.thermal.started # check the status of all processes, did any of them die? 
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running] cloudlog.debug(' '.join(running_list)) # Exit main loop when uninstall is needed if params.get("DoUninstall", encoding='utf8') == "1": break def manager_prepare(spinner=None): # build all processes os.chdir(os.path.dirname(os.path.abspath(__file__))) # Spinner has to start from 70 here total = 100.0 if prebuilt else 30.0 for i, p in enumerate(managed_processes): if spinner is not None: spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),)) prepare_managed_process(p) def uninstall(): cloudlog.warning("uninstalling") with open('/cache/recovery/command', 'w') as f: f.write('--wipe_data\n') # IPowerManager.reboot(confirm=false, reason="recovery", wait=true) HARDWARE.reboot(reason="recovery") def main(): if ANDROID: # the flippening! os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1') # disable bluetooth os.system('service call bluetooth_manager 8') params = Params() params.manager_start() default_params = [ ("CommunityFeaturesToggle", "0"), ("CompletedTrainingVersion", "0"), ("IsRHD", "0"), ("IsMetric", "0"), ("RecordFront", "0"), ("HasAcceptedTerms", "0"), ("HasCompletedSetup", "0"), ("IsUploadRawEnabled", "1"), ("IsLdwEnabled", "1"), ("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')), ("OpenpilotEnabledToggle", "1"), ("LaneChangeEnabled", "1"), ("IsDriverViewEnabled", "0"), ] # set unset params for k, v in default_params: if params.get(k) is None: params.put(k, v) # is this chffrplus? if os.getenv("PASSIVE") is not None: params.put("Passive", str(int(os.getenv("PASSIVE")))) if params.get("Passive") is None: raise Exception("Passive must be set to continue") if ANDROID: update_apks() manager_init() manager_prepare(spinner) spinner.close() if os.getenv("PREPAREONLY") is not None: return # SystemExit on sigterm signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1)) try: manager_thread() except Exception: traceback.print_exc() crash.capture_exception() finally: cleanup_all_processes(None, None) if params.get("DoUninstall", encoding='utf8') == "1": uninstall() if __name__ == "__main__": try: main() except Exception: add_logentries_handler(cloudlog) cloudlog.exception("Manager failed to start") # Show last 3 lines of traceback error = traceback.format_exc(-3) error = "Manager failed to start\n \n" + error with TextWindow(error) as t: t.wait_for_exit() raise # manual exit because we are forked sys.exit(0)
ircthread.py
#!/usr/bin/env python # Copyright(C) 2011-2016 Thomas Voegtlin # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import re import time import socket import ssl import threading import Queue import irc.client from utils import logger from utils import Hash from version import VERSION out_msg = [] class IrcThread(threading.Thread): def __init__(self, processor, config): threading.Thread.__init__(self) self.processor = processor self.daemon = True options = dict(config.items('server')) self.stratum_tcp_port = options.get('stratum_tcp_port') self.stratum_tcp_ssl_port = options.get('stratum_tcp_ssl_port') self.report_stratum_tcp_port = options.get('report_stratum_tcp_port') self.report_stratum_tcp_ssl_port = options.get('report_stratum_tcp_ssl_port') self.irc_bind_ip = options.get('irc_bind_ip') self.host = options.get('host') self.report_host = options.get('report_host') self.nick = options.get('irc_nick') self.irc_prefix = options.get('irc_prefix') if self.report_stratum_tcp_port: self.stratum_tcp_port = self.report_stratum_tcp_port if self.report_stratum_tcp_ssl_port: self.stratum_tcp_ssl_port = self.report_stratum_tcp_ssl_port if self.report_host: self.host = self.report_host if not self.nick: self.nick = Hash(self.host)[:5].encode("hex") if not self.irc_prefix: self.irc_prefix = 'D_' self.pruning = True self.pruning_limit = config.get('leveldb', 'pruning_limit') self.nick = self.irc_prefix + self.nick self.password = None self.who_queue = Queue.Queue() def getname(self): s = 'v' + VERSION + ' ' if self.pruning: s += 'p' + self.pruning_limit + ' ' def add_port(letter, number): DEFAULT_PORTS = {'t':'50001', 's':'50002'} if not number: return '' if DEFAULT_PORTS[letter] == number: return letter + ' ' else: return letter + number + ' ' s += add_port('t',self.stratum_tcp_port) s += add_port('s',self.stratum_tcp_ssl_port) return s def start(self, queue): self.queue = queue threading.Thread.start(self) def on_connect(self, connection, event): connection.join("#electrum-azart") def on_join(self, connection, event): m = re.match("("+self.irc_prefix+".*)!", event.source) if m: self.who_queue.put((connection, m.group(1))) def on_quit(self, connection, event): m = re.match("("+self.irc_prefix+"..*)!", event.source) if m: self.queue.put(('quit', [m.group(1)])) def on_kick(self, connection, event): m = re.match("("+self.irc_prefix+"..*)", event.arguments[0]) if m: self.queue.put(('quit', [m.group(1)])) def on_disconnect(self, connection, event): logger.error("irc: disconnected") raise 
BaseException("disconnected") def on_who(self, connection, event): line = str(event.arguments[6]).split() try: ip = socket.gethostbyname(line[1]) except: # no IPv4 address could be resolved. Could be .onion or IPv6. ip = line[1] nick = event.arguments[4] host = line[1] ports = line[2:] self.queue.put(('join', [nick, ip, host, ports])) def on_name(self, connection, event): for s in event.arguments[2].split(): if s.startswith(self.irc_prefix): self.who_queue.put((connection, s)) def who_thread(self): while not self.processor.shared.stopped(): try: connection, s = self.who_queue.get(timeout=1) except Queue.Empty: continue #logger.info("who: "+ s) connection.who(s) time.sleep(1) def run(self): while self.processor.shared.paused(): time.sleep(1) self.ircname = self.host + ' ' + self.getname() # avoid UnicodeDecodeError using LenientDecodingLineBuffer irc.client.ServerConnection.buffer_class = irc.buffer.LenientDecodingLineBuffer logger.info("joining IRC") t = threading.Thread(target=self.who_thread) t.start() while not self.processor.shared.stopped(): client = irc.client.Reactor() try: #bind_address = (self.irc_bind_ip, 0) if self.irc_bind_ip else None #ssl_factory = irc.connection.Factory(wrapper=ssl.wrap_socket, bind_address=bind_address) #c = client.server().connect('irc.freenode.net', 6697, self.nick, self.password, ircname=self.ircname, connect_factory=ssl_factory) c = client.server().connect('irc.freenode.net', 6667, self.nick, self.password, ircname=self.ircname) except irc.client.ServerConnectionError: logger.error('irc', exc_info=True) time.sleep(10) continue c.add_global_handler("welcome", self.on_connect) c.add_global_handler("join", self.on_join) c.add_global_handler("quit", self.on_quit) c.add_global_handler("kick", self.on_kick) c.add_global_handler("whoreply", self.on_who) c.add_global_handler("namreply", self.on_name) c.add_global_handler("disconnect", self.on_disconnect) c.set_keepalive(60) self.connection = c try: client.process_forever() except BaseException as e: logger.error('irc', exc_info=True) time.sleep(10) continue logger.info("quitting IRC")
presence_adapter.py
"""Presence Detection adapter for WebThings Gateway.""" import os import re import sys sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib')) import json import time import socket from datetime import datetime, timedelta import threading import subprocess from gateway_addon import Adapter, Database from .presence_device import PresenceDevice from .util import * _TIMEOUT = 3 _CONFIG_PATHS = [ os.path.join(os.path.expanduser('~'), '.webthings', 'config'), ] if 'WEBTHINGS_HOME' in os.environ: _CONFIG_PATHS.insert(0, os.path.join(os.environ['WEBTHINGS_HOME'], 'config')) class PresenceAdapter(Adapter): """Adapter for network presence detection""" def __init__(self, verbose=False): """ Initialize the object. verbose -- whether or not to enable verbose logging """ #print("Initialising adapter from class") self.addon_name = 'network-presence-detection-adapter' self.name = self.__class__.__name__ Adapter.__init__(self, self.addon_name, self.addon_name, verbose=verbose) #print("Adapter ID = " + self.get_id()) self.DEBUG = False #print("self.user_profile['baseDir'] = " + self.user_profile['baseDir']) #self.memory_in_weeks = 10 # How many weeks a device will be remembered as a possible device. self.time_window = 10 # How many minutes should a device be away before we consider it away? self.own_ip = None # We scan only scan if the device itself has an IP address. self.prefered_interface = "eth0" self.selected_interface = "eth0" self.should_brute_force_scan = True self.busy_doing_brute_force_scan = False self.last_brute_force_scan_time = 0 # Allows the add-on to start a brute force scan right away. self.seconds_between_brute_force_scans = 1800 #1800 # 30 minutes self.running = True self.saved_devices = [] self.addon_path = os.path.join(self.user_profile['addonsDir'], self.addon_name) self.persistence_file_path = os.path.join(self.user_profile['dataDir'], self.addon_name,'persistence.json') if self.DEBUG: print("self.persistence_file_path = " + str(self.persistence_file_path)) self.should_save = False try: with open(self.persistence_file_path) as file_object: #print("Loading json..") try: self.previously_found = json.load(file_object) except: #print("Empty json file") self.previously_found = {} #print("Previously found items: = " + str(self.previously_found)) except (IOError, ValueError): self.previously_found = {} print("Failed to load JSON file, generating new one.") try: with open(self.persistence_file_path, 'w') as f: f.write('{}') except Exception as ex: print("failed to create empty persistence file: " + str(ex)) self.previous_found_devices_length = len(self.previously_found) # Reset all the lastseen data from the persistence file, since it could be out of date. for key in self.previously_found: try: if 'lastseen' in self.previously_found[key]: self.previously_found[key]['lastseen'] = None except Exception as ex: print("Error setting lastseen of previously_found devices from persistence to None: " + str(ex)) self.add_from_config() # Here we get data from the settings in the Gateway interface. time.sleep(5) # give it a few more seconds to make sure the network is up self.select_interface() # checks if the preference is possible. if self.DEBUG: print("selected interface = " + str(self.selected_interface)) #self.DEBUG = False try: if self.own_ip == None: self.own_ip = get_ip() except: print("Could not get actual own IP address") # First scan time.sleep(2) # wait a bit before doing the quick scan. The gateway will pre-populate based on the 'handle-device-saved' method. 
self.arpa_scan() # get initial list of devices from arp -a if self.DEBUG: print("Starting the continous scan clock") try: t = threading.Thread(target=self.clock) t.daemon = True t.start() except: print("Error starting the continous light scan thread") #done = self.brute_force_scan() if self.DEBUG: print("Starting the periodic brute force scan thread") try: b = threading.Thread(target=self.brute_force_scan) b.daemon = True b.start() except: print("Error starting the brute force scan thread") #while self.running: # time.sleep(1) def arpa_scan(self): if self.DEBUG: print("Initiating light scan using arp -a") try: arpa_list = self.arpa() if self.DEBUG: print("Arpa light scan results: " + str(arpa_list)) print("arpa list length: " + str(len(arpa_list))) for key in arpa_list: if self.DEBUG: print("Analyzing ARPA item: " + str(arpa_list[key])) try: if key not in self.previously_found: if self.DEBUG: print("-Adding to previously found list") self.previously_found[key] = {} # adding empty device to the previously found dictionary self.previously_found[key]['name'] = arpa_list[key]['name'] self.previously_found[key]['arpa_time'] = time.time() #arpa_list[key]['arpa_time'] #timestamp of initiation self.previously_found[key]['lastseen'] = None #arpa_list[key]['arpa_time'] #timestamp of initiation self.previously_found[key]['ip'] = arpa_list[key]['ip'] self.previously_found[key]['mac_address'] = arpa_list[key]['mac_address'] self.previously_found[key]['data-collection'] = True self.should_save = True # We will be adding this new device to the list, and then save that updated list. else: # Maybe we found a better name this time. if arpa_list[key]['name'] not in ("","?","unknown"): # superfluous? if self.DEBUG: print("ARPA scan may have found a better hostname, adding it to the previously_found devices dictionary") self.previously_found[key]['name'] = arpa_list[key]['name'] try: self.previously_found[key]['ip'] = arpa_list[key]['ip'] except: print("Error, could not update IP from arpa scan") except Exception as ex: print("Error while analysing ARPA scan result item: " + str(ex)) except Exception as ex: print("Error doing light arpa scan: " + str(ex)) if self.DEBUG: print("light scan using arp -a is done") def brute_force_scan(self): """ Goes over every possible IP adddress in the local network (1-254) to check if it responds to a ping or arping request """ #while self.running: if self.busy_doing_brute_force_scan == False and self.should_brute_force_scan == True: self.busy_doing_brute_force_scan = True self.should_brute_force_scan = False self.last_brute_force_scan_time = time.time() if self.DEBUG: print("Initiating a brute force scan of the entire local network") try: if self.DEBUG: print("OWN IP = " + str(self.own_ip)) if valid_ip(self.own_ip): #while True: #def split_processing(items, num_splits=4): old_previous_found_count = len(self.previously_found) thread_count = 3 #2 #5 split_size = 85 #127 #51 threads = [] for i in range(thread_count): # determine the indices of the list this thread will handle start = i * split_size if self.DEBUG: print("thread start = " + str(start)) # special case on the last chunk to account for uneven splits end = 255 if i+1 == thread_count else (i+1) * split_size if self.DEBUG: print("thread end = " + str(end)) # Create the thread threads.append( threading.Thread(target=self.scan, args=(start, end))) threads[-1].daemon = True threads[-1].start() # start the thread we just created # Wait for all threads to finish for t in threads: t.join() if self.DEBUG: print("Deep scan: all 
threads are done") # If new devices were found, save the JSON file. if len(self.previously_found) != old_previous_found_count: self.should_save = True if self.should_save: # This is the only time the json file is stored. self.save_to_json() # Remove devices that haven't been spotted in a long time. #list(fdist1.keys()) current_keys = [None] * len(list(self.previously_found.keys())); #Copying all elements of one array into another for a in range(0, len(list(self.previously_found.keys()))): current_keys[a] = list(self.previously_found.keys())[a]; #current_keys = self.previously_found.keys() for key in current_keys: try: if time.time() - self.previously_found[key]['arpa_time'] > 86400 and key not in self.saved_devices: if self.DEBUG: print("Removing devices from found devices list because it hasn't been spotted in a day, and it's not a device the user has imported.") del self.previously_found[key] except Exception as ex: if self.DEBUG: print("Could not remove old device: " + str(ex)) except Exception as ex: print("Error doing brute force scan: " + str(ex)) self.busy_doing_brute_force_scan = False self.should_brute_force_scan = False self.last_brute_force_scan_time = time.time() def clock(self): """ Runs continuously and scans IP addresses that the user has accepted as things """ if self.DEBUG: print("clock thread init") last_run = 0 succesfully_found = 0 # If all devices the user cares about are actually present, then no deep scan is necessary. while self.running: last_run = time.time() try: if time.time() - self.last_brute_force_scan_time > self.seconds_between_brute_force_scans: if self.DEBUG: print("30 minutes have passed since the last brute force scan.") self.last_brute_force_scan_time = time.time() if succesfully_found != len(self.saved_devices): # Avoid doing a deep scan if all devices are present if self.busy_doing_brute_force_scan == False: if self.DEBUG: print("Should brute force scan is now set to true.") self.should_brute_force_scan = True else: if self.DEBUG: print("Should brute force scan, but already doing brute force scan") else: if self.DEBUG: print("all devices present and accounted for. Will skip brute force scan.") except Exception as ex: print("Clock: error running periodic deep scan: " + str(ex)) succesfully_found = 0 try: #if self.DEBUG: #print("") #print("CLOCK TICK") for key in self.previously_found: if self.DEBUG: print("clock -> " + str(key)) # Update device's last seen properties try: # Make sure all devices and properties exist. Should be superfluous really. #print("clock - str(key) = " + str(key) + " has " + str(self.previously_found[key])) if str(key) not in self.devices: #print(str(self.previously_found[str(key)]) + " was not turned into an internal devices object yet.") detail = "..." try: detail = self.previously_found[key]['ip'] except: if self.DEBUG: print("No IP address in previously found list (yet)") continue new_name = "Unknown" try: new_name = self.previously_found[key]['name'] except: if self.DEBUG: print("No name present in previously found list") if new_name == "Unknown" or new_name == "?" or new_name == "": if self.DEBUG: print("No good name found yet, skipping device generation and update") continue self._add_device(key, new_name, detail) # The device did not exist yet, so we're creating it. 
try: if self.previously_found[key]['lastseen'] != 0 and self.previously_found[key]['lastseen'] != None: minutes_ago = int((time.time() - self.previously_found[key]['lastseen']) / 60) else: minutes_ago = None except Exception as ex: minutes_ago = None if self.DEBUG: print("Minutes ago issue: " + str(ex)) try: if 'minutes_ago' not in self.devices[key].properties: if self.DEBUG: print("+ Adding minutes ago property to presence device") self.devices[key].add_integer_child('minutes_ago', "Minutes ago last seen", minutes_ago) else: # minutes_ago != None: if self.DEBUG: print("SETTING MINUTES AGO TO NONE") self.devices[key].properties['minutes_ago'].update(minutes_ago) except Exception as ex: print("Could not add minutes_ago property" + str(ex)) try: recently = None if minutes_ago != None: if minutes_ago > self.time_window: recently = False else: recently = True if 'recently1' not in self.devices[key].properties: if self.DEBUG: print("+ Adding recently spotted property to presence device") self.devices[key].add_boolean_child('recently1', "Recently spotted", recently) else: self.devices[key].properties['recently1'].update(recently) except Exception as ex: print("Could not add recently spotted property" + str(ex)) if 'data-collection' not in self.devices[key].properties: if self.DEBUG: print("+ Adding recently spotted property to presence device") data_collection_state = True if 'data-collection' in self.previously_found[key]: data_collection_state = self.previously_found[key]['data-collection'] self.devices[key].add_boolean_child('data-collection', "Data collection", data_collection_state, False, False) # name, title, value, readOnly, add @type except Exception as ex: print("Could not create or update property. Error: " + str(ex)) # Scan the devices the user cares about for key in self.saved_devices: if str(key) not in self.previously_found: if self.DEBUG: print("Saved thing was not found through scanning yet (not yet added to previously_found), skipping update attempt") continue if self.DEBUG: print("") print("CLOCK: key from saved devices:" + str(key)) if self.DEBUG: print("Saved device ID " + str(key) + " was also in previously found list. Trying scan.") # Try doing a Ping and then optionally an Arping request if there is a valid IP Address try: if self.DEBUG: print("IP from previously found list: " + str(self.previously_found[key]['ip'])) if 'data-collection' in self.previously_found[key]: if self.previously_found[key]['data-collection'] == True: if self.DEBUG: print("- data-collection is allowed.") if 'ip' in self.previously_found[key]: if self.ping(self.previously_found[key]['ip'],1): if self.DEBUG: print(">> Ping could not find device at " + str(self.previously_found[key]['ip']) + ". 
Maybe Arping can.") try: if self.arping(self.previously_found[key]['ip'], 1) == 0: self.previously_found[key]['lastseen'] = int(time.time()) if self.DEBUG: print(">> Arping found it.") succesfully_found += 1 else: if self.DEBUG: print(">> Arping also could not find the device.") except Exception as ex: print("Error trying Arping: " + str(ex)) else: if self.DEBUG: print(">> Ping found device") self.previously_found[key]['lastseen'] = int(time.time()) succesfully_found += 1 else: if self.DEBUG: print("-data-collection is not allowed for this thing, skipping ping.") else: if self.DEBUG: print("clock: data-collection property did not exist yet in this thing, adding it now.") self.previously_found[key]['data-collection'] = True except Exception as ex: if self.DEBUG: print("Was not able to scan device from saved_devices list: " + str(ex)) except Exception as ex: print("Clock thread error: " + str(ex)) saved_devices_count = len(self.saved_devices) scan_time_delta = time.time() - last_run if self.DEBUG: print("pinging all devices took this many seconds: " + str(scan_time_delta)) print("saved_devices_count = " + str(saved_devices_count)) print("Waiting 5 seconds before scanning all devices again") if scan_time_delta < 55: if self.DEBUG: print("scan took less than a minute. Will wait before starting the next round: " + str(scan_time_delta) ) delay = 55 - scan_time_delta time.sleep(delay) time.sleep(5) def handle_device_saved(self, device_id, device): """User saved a thing. Also called when the add-on starts.""" try: if device_id.startswith('presence'): if self.DEBUG: print("handle_device_saved. device_id = " + str(device_id) + ", device = " + str(device)) if device_id not in self.saved_devices: #print("Adding to saved_devices list: " + str(device_id.split("-")[1])) if self.DEBUG: print("Added " + str(device['title']) + " to saved devices list") original_title = "Unknown" try: if str(device['title']) != "": original_title = str(device['title']) except: print("Error getting original_title from data provided by the Gateway") #self.saved_devices.append({device_id:{'name':original_title}}) self.saved_devices.append(device_id) data_collection = True try: if 'data-collection' in device['properties']: data_collection = bool(device['properties']['data-collection']['value']) except Exception as ex: print("Error getting data collection preference from saved device update info: " + str(ex)) #print("Data_collection value is now: " + str(data_collection)) try: #pass if device_id not in self.previously_found: if self.DEBUG: print("Populating previously_found from handle_device_saved") self.previously_found[device_id] = {} self.previously_found[device_id]['name'] = str(device['title']) self.previously_found[device_id]['lastseen'] = None self.previously_found[device_id]['arpa_time'] = int(time.time()) self.previously_found[device_id]['data-collection'] = bool(data_collection) except Exception as ex: print("Error adding to found devices list: " + str(ex)) except Exception as ex: print("Error dealing with existing saved devices: " + str(ex)) def unload(self): """Add-on is shutting down.""" if self.DEBUG: print("Network presence detector is being unloaded") self.save_to_json() self.running = False def remove_thing(self, device_id): """User removed a thing from the interface.""" if self.DEBUG: print("Removing presence detection device") try: #print("THING TO REMOVE:" + str(self.devices[device_id])) del self.previously_found[device_id] #print("2") obj = self.get_device(device_id) #print("3") 
self.handle_device_removed(obj) if self.DEBUG: print("Succesfully removed presence detection device") except: print("Removing presence detection thing failed") #del self.devices[device_id] self.should_save = True # saving changes to the json persistence file def scan(self, start, end): """Part of the brute force scanning function, which splits out the scanning over multiple threads.""" #self.should_save = False # We only save found devices to a file if new devices have been found during this scan. # skip broadcast addresses if start == 0: start = 1 if end == 255: end = 254 for ip_byte4 in range(start, end): ip_address = str(self.own_ip[:self.own_ip.rfind(".")]) + "." + str(ip_byte4) if self.DEBUG: print(ip_address) # Skip our own IP address. if ip_address == self.own_ip: continue ping_count = 1 alive = False # holds whether we got any response. if self.ping(ip_address, ping_count) == 0: # 0 means everything went ok, so a device was found. alive = True else: try: if self.arping(ip_address, ping_count) == 0: # 0 means everything went ok, so a device was found. alive = True except Exception as ex: print("Error trying Arping: " + str(ex)) # If either ping or arping found a device: try: if alive: output = self.arp(ip_address) if self.DEBUG: print(str(ip_address) + " IS ALIVE: " + str(output)) mac_addresses = re.findall(r'(([0-9a-fA-F]{1,2}:){5}[0-9a-fA-F]{1,2})', output) now = int(time.time()) if len(mac_addresses) > 0: mac_address = mac_addresses[0][0] mac_address = ':'.join([ # Left pad the MAC address parts with '0' in case of # invalid output (as on macOS) '0' * (2 - len(x)) + x for x in mac_address.split(':') ]) if not valid_mac(mac_address): if self.DEBUG: print("Deep scan: MAC address was not valid") continue mac_short = mac_address.replace(":", "") _id = 'presence-{}'.format(mac_short) if self.DEBUG: print("early mac = " + mac_address) # Get the basic variables found_device_name = output.split(' ')[0] if self.DEBUG: print("early found device name = " + found_device_name) try: possible_name = self.get_optimal_name(ip_address, found_device_name, mac_address) except: if self.DEBUG: print("Reverting to found_device_name instead of optimal name") possible_name = "Presence - " + str(found_device_name) if self.DEBUG: print("optimal possible name = " + possible_name) if _id not in self.previously_found: self.previously_found[str(_id)] = {} # adding it to the internal object if self.DEBUG: print("--mac: " + mac_address) print("--name: " + possible_name) print("--_id: " + _id) self.previously_found[_id]['arpa_time'] = now # creation time self.previously_found[_id]['mac_address'] = mac_address self.previously_found[_id]['lastseen'] = now self.previously_found[_id]['name'] = str(possible_name) # The name may be better, or it may have changed. 
self.previously_found[_id]['ip'] = ip_address except Exception as ex: print("Brute force scan: error updating items in the previously_found dictionary: " + str(ex)) time.sleep(5) def get_optimal_name(self,ip_address,found_device_name="",mac_address=""): # Try to get hostname nmb_result = "" #try: #nmb_result = socket.gethostbyaddr(ip_address) #nmb_result = hostname_lookup(ip_address) # nmb_result,alias,addresslist = hostname_lookup(ip_address) # print("socket.gethostbyaddr(ip_address) gave: " + str(nmb_result)) #except Exception as ex: # print("socket.gethostbyaddr(ip_address) error: " + str(ex)) # This only works if Samba is installed, and it isn't installed by default #try: # nmb_result = nmblookup(ip_address) # if self.DEBUG: # print("nmblookup result = " + str(nmb_result)) #except Exception as ex: # if self.DEBUG: # print("Error doing nmblookup: " + str(ex)) if nmb_result == "": #if self.DEBUG: # print("NMB lookup result was an empty string") # Round 2: analyse MAC address if found_device_name == '?' or found_device_name == '' or valid_ip(found_device_name): if self.DEBUG: print("Will try to figure out a vendor name based on the mac address") vendor = 'unnamed' try: # Get the vendor name, and shorten it. It removes # everything after the comma. Thus "Apple, inc" # becomes "Apple" vendor = get_vendor(mac_address) if vendor is not None: vendor = vendor.split(' ', 1)[0] vendor = vendor.split(',', 1)[0] else: vendor = 'unnamed' except ValueError: pass found_device_name = vendor else: found_device_name = nmb_result # At this point we definitely have something. found_device_name = "Presence - " + found_device_name possible_name = found_device_name if self.DEBUG: print("--possible name (may be duplicate): " + str(found_device_name)) # Create or update items in the previously_found dictionary try: mac_short = mac_address.replace(":", "") _id = 'presence-{}'.format(mac_short) #for item in self.previously_found: #if self.DEBUG: # print("ADDING NEW FOUND DEVICE TO FOUND DEVICES LIST") #self.should_save = True # We will be adding this new device to the list, and then save that updated list. i = 2 # We skip "1" as a number. So we will get names like "Apple" and then "Apple 2", "Apple 3", and so on. #possible_name = found_device_name could_be_same_same = True while could_be_same_same is True: # We check if this name already exists in the list of previously found devices. could_be_same_same = False try: for key in self.previously_found: if self.DEBUG: print("-checking possible name '" + str(possible_name) + "' against: " + str(self.previously_found[key]['name'])) print("--prev found device key = " + str(key)) # We skip checking for name duplication if the potential new device is the exact same device, so it would be logical if they had the same name. if str(key) == str(_id): if self.DEBUG: print("key == _id") if possible_name == str(self.previously_found[key]['name']): if self.DEBUG: print("the new name is the same as the old for this mac-address") #continue break if possible_name == str(self.previously_found[key]['name']): # The name already existed somewhere in the list, so we change it a little bit and compare again. could_be_same_same = True if self.DEBUG: print("-names collided: " + str(possible_name)) possible_name = found_device_name + " " + str(i) if self.DEBUG: print("-now testing new name: " + str(possible_name)) i += 1 # up the count for a potential next round if i > 200: if self.DEBUG: print("Reached 200 limit in while loop") # if the user has 200 of the same device, that's incredible. 
break except Exception as ex: print("Error doing name check in while loop: " + str(ex)) except Exception as ex: print("Error in name duplicate check: " + str(ex)) return possible_name def _add_device(self, mac, name, details): """ Add the given device, if necessary. """ try: #print("adapter._add_device: " + str(name)) device = PresenceDevice(self, mac, name, details) self.handle_device_added(device) #print("-Adapter has finished adding new device for mac " + str(mac)) except Exception as ex: print("Error adding new device: " + str(ex)) return def add_from_config(self): """Attempt to add all configured devices.""" try: database = Database(self.addon_name) if not database.open(): return config = database.load_config() database.close() if not config or 'Time window' not in config: print("Error: required variables not found in config database. Check the addon's settings.") return try: self.DEBUG = bool(config['Debugging']) # The variable is clamped: it is forced to be between 1 and 50. except: print("No debugging preference was found in the settings") # Target IP # Can be used to override normal behaviour (which is to scan the controller's neighbours), and target a very different group of IP addresses. if 'Target IP' in config: try: potential_ip = str(config['Target IP']) if valid_ip(potential_ip): self.own_ip = potential_ip print("A target IP was in settings") else: print("This addon does not understand '" + str(potential_ip) + "' as a valid IP address. Go to the add-on settings page to fix this. For now, the addon will try to detect and use the system's IP address as a base instead.") except: print("Error handling Target IP setting") else: if self.DEBUG: print("No target IP address was available in the settings data") # Network interface preference if 'Network interface' in config: if str(config['Network interface']) != "": if str(config['Network interface']) == "prefer wired": self.prefered_interface = "eth0" if str(config['Network interface']) == "prefer wireless": self.prefered_interface = "wlan0" if 'Time window' in config: try: self.time_window = clamp(int(config['Time window']), 1, 10800) # In minutes. 'Grace period' could also be a good name. if self.DEBUG: print("Time window value from settings page: " + str(self.time_window)) except: print("No time window preference was found in the settings. Will use default.") except: print("Error getting config data from database. Check the add-on's settings page for any issues.") def save_to_json(self): """Save found devices to json file.""" try: if self.DEBUG: print("Saving updated list of found devices to json file") #if self.previously_found: #with open(self.persistence_file_path, 'w') as fp: #json.dump(self.previously_found, fp) j = json.dumps(self.previously_found, indent=4) # Pretty printing to the file f = open(self.persistence_file_path, 'w') print(j, file=f) f.close() except Exception as ex: print("Saving to json file failed: " + str(ex)) self.should_save = False def start_pairing(self, timeout): """Starting the pairing process.""" self.arpa_scan() if self.busy_doing_brute_force_scan == False: self.should_brute_force_scan = True def cancel_pairing(self): """Cancel the pairing process.""" self.save_to_json() # # This gives a quick first initial impression of the network. 
# def arpa(self): command = "arp -a" device_list = {} try: result = subprocess.run(command, shell=True, universal_newlines=True, stdout=subprocess.PIPE) #.decode()) for line in result.stdout.split('\n'): if not "<incomplete>" in line and len(line) > 10: if self.DEBUG: print("checking arp -a line: " + str(line)) name = "?" mac_short = "" try: mac_address_list = re.findall(r'(([0-9a-fA-F]{1,2}:){5}[0-9a-fA-F]{1,2})', str(line))[0] #print(str(mac_address_list)) mac_address = str(mac_address_list[0]) #print(str(mac_address)) mac_short = str(mac_address.replace(":", "")) _id = 'presence-{}'.format(mac_short) except Exception as ex: print("getting mac from arp -a line failed: " + str(ex)) try: ip_address_list = re.findall(r'(?:\d{1,3}\.)+(?:\d{1,3})', str(line)) #print("ip_address_list = " + str(ip_address_list)) ip_address = str(ip_address_list[0]) if not valid_ip(ip_address): continue except Exception as ex: print("no IP address in line: " + str(ex)) try: found_device_name = str(line.split(' (')[0]) if _id not in self.previously_found: possible_name = self.get_optimal_name(ip_address, found_device_name, mac_address) else: possible_name = self.previously_found[_id]['name'] except Exception as ex: print("Error: could not get name from arp -a line: " + str(ex)) if mac_short != "": #print("util: arp: mac in line: " + line) #item = {'ip':ip_address,'mac':mac_address,'name':name, 'mac_short':mac_address.replace(":", "")} #return str(line) device_list[_id] = {'ip':ip_address,'mac_address':mac_address,'name':possible_name,'arpa_time':int(time.time()),'lastseen':None} #print("device_list = " + str(device_list)) #return str(result.stdout) except Exception as ex: print("Arp -a error: " + str(ex)) #result = 'error' return device_list #return str(subprocess.check_output(command, shell=True).decode()) def select_interface(self): eth0_output = subprocess.check_output(['ifconfig', 'eth0']).decode('utf-8') #print("eth0_output = " + str(eth0_output)) wlan0_output = subprocess.check_output(['ifconfig', 'wlan0']).decode('utf-8') #print("wlan0_output = " + str(wlan0_output)) if "inet " in eth0_output and self.prefered_interface == "eth0": self.selected_interface = "eth0" if not "inet " in eth0_output and self.prefered_interface == "eth0": self.selected_interface = "wlan0" if "inet " in wlan0_output and self.prefered_interface == "wlan0": self.selected_interface = "wlan0" def ping(self, ip_address, count): param = '-n' if platform.system().lower() == 'windows' else '-c' #command = ["ping", param, count, "-i", 1, str(ip_address)] command = "ping -I " + str(self.selected_interface) + " " + str(param) + " " + str(count) + " -i 0.5 " + str(ip_address) #print("command: " + str(command)) #return str(subprocess.check_output(command, shell=True).decode()) try: result = subprocess.run(command, shell=True, universal_newlines=True, stdout=subprocess.DEVNULL) #.decode()) #print("ping done") return result.returncode except Exception as ex: print("error pinging! Error: " + str(ex)) return 1 def arping(self, ip_address, count): param = '-n' if platform.system().lower() == 'windows' else '-c' command = "sudo arping -i " + str(self.selected_interface) + " " + str(param) + " " + str(count) + " " + str(ip_address) #print("command: " + str(command)) try: result = subprocess.run(command, shell=True, universal_newlines=True, stdout=subprocess.DEVNULL) #.decode()) return result.returncode except Exception as ex: print("error arpinging! 
Error: " + str(ex)) return 1 def arp(self, ip_address): if valid_ip(ip_address): command = "arp -i " + str(self.selected_interface) + " " + str(ip_address) try: result = subprocess.run(command, shell=True, universal_newlines=True, stdout=subprocess.PIPE) #.decode()) for line in result.stdout.split('\n'): mac_addresses = re.findall(r'(([0-9a-fA-F]{1,2}:){5}[0-9a-fA-F]{1,2})', str(line)) if len(mac_addresses): #print("util: arp: mac in line: " + line) return str(line) return str(result.stdout) except Exception as ex: print("Arp error: " + str(ex)) result = 'error' return result #return str(subprocess.check_output(command, shell=True).decode())
pyPadWormbk1.py
# Scraper for Puzzle & Dragons monster data: downloads monster images and
# info pages from pad.skyozora.com.
import os
from queue import Queue
import re
import time
import threading
import urllib.request
import urllib.error
import bs4
import lxml  # imported so BeautifulSoup's "lxml" parser backend is available

# replace url to start your own download
url = "http://pad.skyozora.com/pets/"
typestr = ["龍", "神", "惡魔", "機械",
           "平衡", "攻擊", "體力", "回復",
           "進化用", "能力覚醒用", "強化合成用", "売却用"]

MaxThread = 40
pagecount = 0
countstep = 100
dirname = os.getcwd()
print_lock = threading.Lock()  # used by the threaded variant kept below
data_q = Queue()
textlist = []


class Monster:
    """One scraped monster: id, Japanese/Chinese names, types and skills."""

    def __init__(self, id, jpname, cnname, type, skill, lskill):
        self.id = id
        self.jpname = jpname
        self.cnname = cnname
        self.type = type
        self.skill = skill
        self.lskill = lskill


def mkdir(path):
    # Create (if necessary) and return an output directory under the cwd.
    # os.path.join keeps this portable; the original hard-coded "\\".
    tmppath = os.path.join(os.getcwd(), path)
    try:
        os.makedirs(tmppath)
    except OSError:
        print("DIR exists!")
    return tmppath

# def saveImg(imgUrl):
#     time.sleep(0.05)
#     with print_lock:
#         # =========data set
#         monid = imgUrl.split('/')[-1]
#         monimg = ""
#         monjp = ""
#         moncn = ""
#         montype = []
#         monskill = ""
#         monlskill = ""
#         global pagecount
#         # =================
#         try:
#             raw = urllib.request.urlopen(imgUrl)
#             rawurl = raw.read()
#             raw.close()
#             # get info
#             soup = bs4.BeautifulSoup(rawurl, "lxml")
#             tmpimgaddr = soup.find("body").find_all("table")
#             for iaddr in tmpimgaddr:
#                 tmptable = iaddr.find_all("table")
#                 if (tmptable != []):
#                     for itable in tmptable:
#                         tmptag = itable.find("h3")
#                         if (tmptag != None):
#                             monimg = itable.find("img")['src']
#                             monjp = itable.find("h3").string
#                             moncn = itable.find("h2").string
#                             skill = itable.find("td", colspan="5")
#                             if (skill != None):
#                                 monskill = skill
#                             lskill = itable.find("td", colspan="2")
#                             if (lskill != None):
#                                 tlskil = lskill.find("span")
#                                 if (tlskil == None):
#                                     monlskill = lskill
#             for istr in range(0, len(typestr)):
#                 type1 = soup.find_all("a", title=typestr[istr])
#                 if (type1 != []):
#                     montype.append(istr)
#             # print(str(monid)+"done")
#             # print(monimg)
#             # print(monjp)
#             # print(moncn)
#             # print(monskill)
#             # print(monlskill)
#             # print("------------------")
#             # save img
#             with urllib.request.urlopen(monimg) as imghtml:
#                 rawimg = imghtml.read()
#                 with open(str(monid).zfill(4)+".png",'wb') as file:
#                     file.write(rawimg)
#             # update list
#             print(pagecount, " less than", monid)
#             if (monid > pagecount ):
#                 print(pagecount," less than",monid)
#                 pagecount += countstep
#                 simglist = []
#                 for i in range(pagecount, pagecount + countstep):
#                     tmpurl = url + str(i)
#                     data_q.put(tmpurl)
#                 for iimg in simglist:
#                     data_q.put(iimg)
#                 simglist.clear()
#                 print("update list")
#
#         except:
#             print("error:", monid, " not exist")
#             pass
#
#
# def worker():
#     while True:
#         tmpUrl = data_q.get()
#         saveImg(tmpUrl)
#         data_q.task_done()


def main():
    # for x in range(MaxThread):
    #     t = threading.Thread(target=worker)
    #     t.daemon = True
    #     t.start()
    start = time.time()
    os.chdir(mkdir("monster"))
    pagecount = 0   # shadows the module-level value; local on purpose
    countstep = 100
    # ----------------------------------
    while True:
        welldone_count = 0
        monsters = []  # renamed from `list`, which shadowed the builtin
        for i in range(pagecount, pagecount + countstep):
            # =========data set
            monid = i
            monimg = ""
            monjp = ""
            moncn = ""
            montype = []
            monskill = ""
            monlskill = ""
            # =================
            imgUrl = url + str(i)
            try:
                raw = urllib.request.urlopen(imgUrl)
                rawurl = raw.read()
                raw.close()
                # get info
                soup = bs4.BeautifulSoup(rawurl, "lxml")
                tmpimgaddr = soup.find("body").find_all("table")
                for iaddr in tmpimgaddr:
                    tmptable = iaddr.find_all("table")
                    if tmptable != []:
                        for itable in tmptable:
                            tmptag = itable.find("h3")
                            if tmptag is not None:
                                monimg = itable.find("img")['src']
                                monjp = itable.find("h3").string
                                moncn = itable.find("h2").string
                                skill = itable.find("td", colspan="5")
                                if skill is not None:
                                    monskill = skill
                                lskill = itable.find("td", colspan="2")
                                if lskill is not None:
                                    tlskil = lskill.find("span")
                                    if tlskil is None:
                                        monlskill = lskill
                for istr in range(0, len(typestr)):
                    type1 = soup.find_all("a", title=typestr[istr])
                    if type1 != []:
                        montype.append(istr)
                if monimg != "":
                    print(str(monid) + "\tdone")
                else:
                    continue
                monsters.append(Monster(monid, monjp, moncn, montype, monskill, monlskill))
                # save img
                with urllib.request.urlopen(monimg) as imghtml:
                    rawimg = imghtml.read()
                    with open(str(monid).zfill(4) + ".png", 'wb') as file:
                        file.write(rawimg)
                # update list
                welldone_count += 1
            except Exception:
                # missing id, network error or parse failure: skip this page
                pass
        if welldone_count > 0:
            pagecount += countstep
            # dump this batch as tab-separated text, one monster per block
            with open(str(pagecount) + ".txt", "w", encoding='utf-8') as htmlfile:
                for ihtml in monsters:
                    istr = str(ihtml.id) + "\t" + ihtml.jpname + "\t" + ihtml.cnname + "\n"
                    for i in ihtml.type:
                        istr = istr + typestr[i] + "\t"
                    istr += "\n" + str(ihtml.skill) + "\n" + str(ihtml.lskill) + "\n"
                    htmlfile.write(istr)
            monsters.clear()
        else:
            # an entire window of ids failed: assume we are past the last monster
            break
    print("entire job took:", time.time() - start)


if __name__ == '__main__':
    main()